Code example #1
File: http.go Project: Terry-Mao/bfs
func NewServer(s *Store, c *conf.Config) (svr *Server, err error) {
	svr = &Server{
		store: s,
		conf:  c,
		rl:    rate.NewLimiter(rate.Limit(c.Limit.Read.Rate), c.Limit.Read.Brust),
		wl:    rate.NewLimiter(rate.Limit(c.Limit.Write.Rate), c.Limit.Write.Brust),
		dl:    rate.NewLimiter(rate.Limit(c.Limit.Delete.Rate), c.Limit.Delete.Brust),
	}
	if svr.statSvr, err = net.Listen("tcp", c.StatListen); err != nil {
		log.Errorf("net.Listen(%s) error(%v)", c.StatListen, err)
		return
	}
	if svr.apiSvr, err = net.Listen("tcp", c.ApiListen); err != nil {
		log.Errorf("net.Listen(%s) error(%v)", c.ApiListen, err)
		return
	}
	if svr.adminSvr, err = net.Listen("tcp", c.AdminListen); err != nil {
		log.Errorf("net.Listen(%s) error(%v)", c.AdminListen, err)
		return
	}
	go svr.startStat()
	go svr.startApi()
	go svr.startAdmin()
	if c.Pprof {
		go StartPprof(c.PprofListen)
	}
	return
}
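Every example on this page constructs a limiter with rate.NewLimiter(rate.Limit(r), b) from golang.org/x/time/rate. As a reference point, here is a minimal, self-contained sketch (not taken from any of the projects listed here; the numbers are illustrative) of what the two arguments mean: r is the steady refill rate in events per second, b is the burst, i.e. how many events may be consumed back to back after an idle period. Allow is the non-blocking check, Wait the blocking one.

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// 5 events/s steady rate, bursts of up to 10 tokens.
	lim := rate.NewLimiter(rate.Limit(5), 10)

	// Allow reports immediately whether a token is available right now;
	// roughly the first 10 calls succeed, the rest are rejected.
	for i := 0; i < 12; i++ {
		fmt.Printf("request %2d allowed=%v\n", i, lim.Allow())
	}

	// Wait blocks until a token becomes available or the context is done,
	// which is the pattern the benchmark-style examples below rely on.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	if err := lim.Wait(ctx); err != nil {
		fmt.Println("wait:", err)
	}
}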
Code example #2
File: sampling.go Project: trythings/trythings
// NewLimitedSampler returns a sampling policy that randomly samples a given
// fraction of requests.  It also enforces a limit on the number of traces per
// second.  It tries to trace every request with a trace header, but will not
// exceed the qps limit to do it.
func NewLimitedSampler(fraction, maxqps float64) (SamplingPolicy, error) {
	if !(fraction >= 0) {
		return nil, fmt.Errorf("invalid fraction %f", fraction)
	}
	if !(maxqps >= 0) {
		return nil, fmt.Errorf("invalid maxqps %f", maxqps)
	}
	// Set a limit on the number of accumulated "tokens", to limit bursts of
	// traced requests.  Use one more than a second's worth of tokens, or 100,
	// whichever is smaller.
	// See https://godoc.org/golang.org/x/time/rate#NewLimiter.
	maxTokens := 100
	if maxqps < 99.0 {
		maxTokens = 1 + int(maxqps)
	}
	var seed int64
	if err := binary.Read(crand.Reader, binary.LittleEndian, &seed); err != nil {
		seed = time.Now().UnixNano()
	}
	s := sampler{
		fraction: fraction,
		Limiter:  rate.NewLimiter(rate.Limit(maxqps), maxTokens),
		Rand:     rand.New(rand.NewSource(seed)),
	}
	return &s, nil
}
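The comment inside NewLimitedSampler explains how the burst is capped at one second's worth of tokens, or 100, whichever is smaller. The excerpt does not show how the sampler spends those tokens, so the following is a hypothetical, self-contained sketch of that kind of decision (fractionSampler and its Sample method are inventions for illustration, not the real SamplingPolicy implementation): pick a random fraction of requests, then let the limiter veto anything beyond the qps budget.

package main

import (
	"fmt"
	"math/rand"

	"golang.org/x/time/rate"
)

// fractionSampler is a hypothetical stand-in for the sampler type above.
type fractionSampler struct {
	fraction float64
	limiter  *rate.Limiter
	rnd      *rand.Rand
}

// Sample decides whether to trace one request: first the random fraction,
// then the qps cap enforced by the token bucket.
func (s *fractionSampler) Sample() bool {
	if s.rnd.Float64() >= s.fraction {
		return false
	}
	return s.limiter.Allow()
}

func main() {
	s := &fractionSampler{
		fraction: 0.5,
		limiter:  rate.NewLimiter(rate.Limit(10), 11), // maxqps=10 -> 1+int(maxqps) tokens
		rnd:      rand.New(rand.NewSource(1)),
	}
	traced := 0
	for i := 0; i < 1000; i++ {
		if s.Sample() {
			traced++
		}
	}
	// The loop runs almost instantly, so only about a burst's worth of
	// requests are traced; the limiter rejects the rest.
	fmt.Println("traced:", traced)
}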
Code example #3
File: watch.go Project: hongchaodeng/etcd
func NewWatchProxy(c *clientv3.Client) pb.WatchServer {
	wp := &watchProxy{
		cw:           c.Watcher,
		ctx:          clientv3.WithRequireLeader(c.Ctx()),
		retryLimiter: rate.NewLimiter(rate.Limit(retryPerSecond), retryPerSecond),
		leaderc:      make(chan struct{}),
	}
	wp.ranges = newWatchRanges(wp)
	go func() {
		// a new stream without opening any watchers won't catch
		// a lost leader event, so have a special watch to monitor it
		rev := int64((uint64(1) << 63) - 2)
		for wp.ctx.Err() == nil {
			wch := wp.cw.Watch(wp.ctx, lostLeaderKey, clientv3.WithRev(rev))
			for range wch {
			}
			wp.mu.Lock()
			close(wp.leaderc)
			wp.leaderc = make(chan struct{})
			wp.mu.Unlock()
			wp.retryLimiter.Wait(wp.ctx)
		}
		wp.mu.Lock()
		<-wp.ctx.Done()
		wp.mu.Unlock()
		wp.wg.Wait()
		wp.ranges.stop()
	}()
	return wp
}
Code example #4
File: limiter.go Project: syncthing/syncthing
func (lim *limiter) CommitConfiguration(from, to config.Configuration) bool {
	if from.Options.MaxRecvKbps == to.Options.MaxRecvKbps &&
		from.Options.MaxSendKbps == to.Options.MaxSendKbps &&
		from.Options.LimitBandwidthInLan == to.Options.LimitBandwidthInLan {
		return true
	}

	// The rate variables are in KiB/s in the config (despite the camel casing
	// of the name). We multiply by 1024 to get bytes/s.

	if to.Options.MaxRecvKbps <= 0 {
		lim.read.SetLimit(rate.Inf)
	} else {
		lim.read.SetLimit(1024 * rate.Limit(to.Options.MaxRecvKbps))
	}

	if to.Options.MaxSendKbps <= 0 {
		lim.write.SetLimit(rate.Inf)
	} else {
		lim.write.SetLimit(1024 * rate.Limit(to.Options.MaxSendKbps))
	}

	lim.limitsLAN.set(to.Options.LimitBandwidthInLan)

	sendLimitStr := "is unlimited"
	recvLimitStr := "is unlimited"
	if to.Options.MaxSendKbps > 0 {
		sendLimitStr = fmt.Sprintf("limit is %d KiB/s", to.Options.MaxSendKbps)
	}
	if to.Options.MaxRecvKbps > 0 {
		recvLimitStr = fmt.Sprintf("limit is %d KiB/s", to.Options.MaxRecvKbps)
	}
	l.Infof("Send rate %s, receive rate %s", sendLimitStr, recvLimitStr)

	if to.Options.LimitBandwidthInLan {
		l.Infoln("Rate limits apply to LAN connections")
	} else {
		l.Infoln("Rate limits do not apply to LAN connections")
	}

	return true
}
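The limiters adjusted above are denominated in bytes per second (KiB/s from the config times 1024). A common way to apply such a limiter to real traffic is to charge WaitN with the number of bytes actually moved. The following is a minimal, self-contained sketch of that idea, assuming a hypothetical reader wrapper (throttledReader); it is not the syncthing implementation.

package main

import (
	"context"
	"fmt"
	"io"
	"strings"

	"golang.org/x/time/rate"
)

// throttledReader charges the limiter for every byte it reads.
type throttledReader struct {
	r   io.Reader
	lim *rate.Limiter
}

func (t *throttledReader) Read(p []byte) (int, error) {
	n, err := t.r.Read(p)
	if n > 0 && t.lim.Limit() != rate.Inf {
		// WaitN blocks until n tokens are available. n must not exceed the
		// limiter's burst, so the burst should cover the read buffer size.
		if werr := t.lim.WaitN(context.Background(), n); werr != nil {
			return n, werr
		}
	}
	return n, err
}

func main() {
	// ~64 KiB/s with a 64 KiB burst; io.Copy reads 32 KiB chunks, safely
	// below the burst.
	lim := rate.NewLimiter(1024*rate.Limit(64), 64*1024)
	tr := &throttledReader{r: strings.NewReader(strings.Repeat("x", 128*1024)), lim: lim}
	n, err := io.Copy(io.Discard, tr)
	fmt.Println("copied", n, "bytes, err:", err)
}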
Code example #5
File: logbridge_test.go Project: petertseng/p2
func TestRateLimiting(t *testing.T) {
	inputSize := 100
	input := make([]byte, 0, inputSize*2)
	for i := 0; i < inputSize; i++ {
		inputLine := []byte{byte((i % 26) + 65), newLine}
		input = append(input, inputLine...)
	}
	fmt.Printf("input: %d", len(input))

	bridgeCapacity := 6
	reader := bytes.NewReader(input)

	lineLimit := 3
	metReg := metrics.NewRegistry()
	lb := NewLogBridge(reader,
		ioutil.Discard,
		ioutil.Discard,
		logging.DefaultLogger,
		lineLimit,
		1024,
		metReg,
		"log_lines",
		"log_bytes",
		"dropped_lines",
		"time_spent_throttled_ms")
	// We're testing these, so we finely control their parameters
	lb.logLineRateLimit = rate.NewLimiter(rate.Limit(inputSize), inputSize)
	lb.logByteRateLimit = rate.NewLimiter(rate.Limit(1024), 1024)
	lb.LossyCopy(reader, bridgeCapacity)

	loggedLines := lb.logLinesCount.Count()
	droppedLines := lb.droppedLineCount.Count()
	if loggedLines == 0 {
		t.Errorf("Expected some logs to get through.")
	}
	if loggedLines == int64(inputSize) {
		t.Errorf("Expected some lines to get dropped")
	}
	if droppedLines == 0 {
		t.Errorf("Expected dropped lines to be non-zero")
	}
}
Code example #6
File: vault_test.go Project: achanda/nomad
func TestVaultClient_LookupToken_RateLimit(t *testing.T) {
	v := testutil.NewTestVault(t).Start()
	defer v.Stop()

	logger := log.New(os.Stderr, "", log.LstdFlags)
	client, err := NewVaultClient(v.Config, logger, nil)
	if err != nil {
		t.Fatalf("failed to build vault client: %v", err)
	}
	client.SetActive(true)
	defer client.Stop()
	client.setLimit(rate.Limit(1.0))

	waitForConnection(client, t)

	// Spin up many requests. These should block
	ctx, cancel := context.WithCancel(context.Background())

	cancels := 0
	numRequests := 10
	unblock := make(chan struct{})
	for i := 0; i < numRequests; i++ {
		go func() {
			// Ensure all the goroutines are made
			time.Sleep(10 * time.Millisecond)

			// Lookup ourselves
			_, err := client.LookupToken(ctx, v.Config.Token)
			if err != nil {
				if err == context.Canceled {
					cancels += 1
					return
				}
				t.Fatalf("self lookup failed: %v", err)
				return
			}

			// Cancel the context
			cancel()
			time.AfterFunc(1*time.Second, func() { close(unblock) })
		}()
	}

	select {
	case <-time.After(5 * time.Second):
		t.Fatalf("timeout")
	case <-unblock:
	}

	desired := numRequests - 1
	if cancels != desired {
		t.Fatalf("Incorrect number of cancels; got %d; want %d", cancels, desired)
	}
}
Code example #7
File: querysrv.go Project: syncthing/syncthing
func (s *querysrv) limit(remote net.IP) bool {
	key := remote.String()

	bkt, ok := s.limiter.Get(key)
	if ok {
		bkt := bkt.(*rate.Limiter)
		if !bkt.Allow() {
			// Rate limit exceeded; ignore packet
			return true
		}
	} else {
		// limitAvg is in packets per ten seconds.
		s.limiter.Add(key, rate.NewLimiter(rate.Limit(limitAvg)/10, limitBurst))
	}

	return false
}
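The example above keeps one limiter per source address in an LRU cache and returns true when a packet should be dropped. Below is a minimal, self-contained sketch of a similar per-source pattern using a plain map and mutex instead of the cache (unlike the original it also charges the very first packet against the freshly created limiter; limitAvg and limitBurst are illustrative values, not syncthing's).

package main

import (
	"fmt"
	"net"
	"sync"

	"golang.org/x/time/rate"
)

const (
	limitAvg   = 5  // packets per ten seconds
	limitBurst = 20 // short bursts tolerated per source
)

type perSourceLimiter struct {
	mu   sync.Mutex
	byIP map[string]*rate.Limiter
}

// limit reports whether a packet from remote should be dropped.
func (p *perSourceLimiter) limit(remote net.IP) bool {
	p.mu.Lock()
	defer p.mu.Unlock()
	key := remote.String()
	lim, ok := p.byIP[key]
	if !ok {
		lim = rate.NewLimiter(rate.Limit(limitAvg)/10, limitBurst)
		p.byIP[key] = lim
	}
	return !lim.Allow()
}

func main() {
	p := &perSourceLimiter{byIP: make(map[string]*rate.Limiter)}
	ip := net.ParseIP("192.0.2.1")
	dropped := 0
	for i := 0; i < 100; i++ {
		if p.limit(ip) {
			dropped++
		}
	}
	fmt.Println("dropped:", dropped) // everything beyond the burst
}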
Code example #8
File: main.go Project: RossRothenstine/stress
func Main() int {
	flag.Parse()
	if msgsPerSecond <= 0 {
		fmt.Printf("Messages per second cannot be <= 0")
		return 1
	}

	conn, err := amqp.Dial(amqpURLString)
	if err != nil {
		fmt.Printf("dial: %v\n", err)
		return 1
	}
	closed := make(chan *amqp.Error)
	conn.NotifyClose(closed)

	ch, err := conn.Channel()
	if err != nil {
		fmt.Printf("channel: %v\n", err)
		return 1
	}

	stresser := &stress.Stresser{
		Limit: rate.NewLimiter(rate.Limit(msgsPerSecond), msgsPerSecond),
		New: func() stress.Worker {
			return &AMQPPublisherWorker{
				Body:     body,
				Channel:  ch,
				Exchange: exchange,
			}
		},
	}
	go stresser.Start()
	defer stresser.Stop()

	interrupt := make(chan os.Signal, 1)
	signal.Notify(interrupt, os.Kill, os.Interrupt)

	select {
	case <-interrupt:
		return 0
	case err := <-closed:
		fmt.Printf("amqp: ", err)
		return 1
	}

}
Code example #9
File: 44_rate_limit.go Project: gyuho/learn
func (s *stresser) start() {
	ctx, cancel := context.WithCancel(context.Background())

	wg := &sync.WaitGroup{}
	wg.Add(s.N)

	s.mu.Lock()
	s.wg = wg
	s.rateLimiter = rate.NewLimiter(rate.Limit(s.qps), s.qps)
	s.cancel = cancel
	s.mu.Unlock()

	for i := 0; i < s.N; i++ {
		go s.run(ctx)
	}

	<-ctx.Done()
}
Code example #10
File: stresser.go Project: menglingwei/etcd
func newStressBuilder(s string, sc *stressConfig) stressBuilder {
	switch s {
	case "nop":
		return func(*member) Stresser {
			return &nopStresser{
				start: time.Now(),
				qps:   sc.qps,
			}
		}
	case "default":
		// TODO: Too intensive stressers can panic etcd member with
		// 'out of memory' error. Put rate limits on the server side.
		stressN := 100
		l := rate.NewLimiter(rate.Limit(sc.qps), sc.qps)

		return func(m *member) Stresser {
			if sc.v2 {
				return &stresserV2{
					Endpoint:       m.ClientURL,
					keySize:        sc.keySize,
					keySuffixRange: sc.keySuffixRange,
					N:              stressN,
				}
			} else {
				return &stresser{
					Endpoint:       m.grpcAddr(),
					keyLargeSize:   sc.keyLargeSize,
					keySize:        sc.keySize,
					keySuffixRange: sc.keySuffixRange,
					N:              stressN,
					rateLimiter:    l,
				}
			}
		}
	default:
		plog.Panicf("unknown stresser type: %s\n", s)
	}

	return nil // never reach here
}
Code example #11
File: watch_latency.go Project: pulcy/vault-monkey
func watchLatencyFunc(cmd *cobra.Command, args []string) {
	key := string(mustRandBytes(watchLKeySize))
	value := string(mustRandBytes(watchLValueSize))

	client := mustCreateConn()
	stream := v3.NewWatcher(client)
	wch := stream.Watch(context.TODO(), key)

	bar = pb.New(watchLTotal)
	bar.Format("Bom !")
	bar.Start()

	limiter := rate.NewLimiter(rate.Limit(watchLPutRate), watchLPutRate)
	r := newReport()
	rc := r.Run()

	for i := 0; i < watchLTotal; i++ {
		// limit key put as per reqRate
		if err := limiter.Wait(context.TODO()); err != nil {
			break
		}
		_, err := client.Put(context.TODO(), string(key), value)

		if err != nil {
			fmt.Fprintf(os.Stderr, "Failed to Put for watch latency benchmark: %v\n", err)
			os.Exit(1)
		}
		st := time.Now()
		<-wch
		r.Results() <- report.Result{Err: err, Start: st, End: time.Now()}
		bar.Increment()
	}

	close(r.Results())
	bar.Finish()
	fmt.Printf("%s", <-rc)
}
Code example #12
File: cluster.go Project: jonboulle/etcd
func (c *cluster) bootstrap(agentEndpoints []string) error {
	size := len(agentEndpoints)

	members := make([]*member, size)
	memberNameURLs := make([]string, size)
	for i, u := range agentEndpoints {
		agent, err := client.NewAgent(u)
		if err != nil {
			return err
		}
		host, _, err := net.SplitHostPort(u)
		if err != nil {
			return err
		}
		members[i] = &member{
			Agent:        agent,
			Endpoint:     u,
			Name:         fmt.Sprintf("etcd-%d", i),
			ClientURL:    fmt.Sprintf("http://%s:2379", host),
			PeerURL:      fmt.Sprintf("http://%s:%d", host, peerURLPort),
			FailpointURL: fmt.Sprintf("http://%s:%d", host, failpointPort),
		}
		memberNameURLs[i] = members[i].ClusterEntry()
	}
	clusterStr := strings.Join(memberNameURLs, ",")
	token := fmt.Sprint(rand.Int())

	for i, m := range members {
		flags := append(
			m.Flags(),
			"--data-dir", c.datadir,
			"--initial-cluster-token", token,
			"--initial-cluster", clusterStr)

		if _, err := m.Agent.Start(flags...); err != nil {
			// cleanup
			for _, m := range members[:i] {
				m.Agent.Terminate()
			}
			return err
		}
	}

	// TODO: Too intensive stressers can panic etcd member with
	// 'out of memory' error. Put rate limits on the server side.
	stressN := 100
	c.Stressers = make([]Stresser, len(members))
	limiter := rate.NewLimiter(rate.Limit(c.stressQPS), c.stressQPS)
	for i, m := range members {
		if c.v2Only {
			c.Stressers[i] = &stresserV2{
				Endpoint:       m.ClientURL,
				keySize:        c.stressKeySize,
				keySuffixRange: c.stressKeySuffixRange,
				N:              stressN,
			}
		} else {
			c.Stressers[i] = &stresser{
				Endpoint:       m.grpcAddr(),
				keyLargeSize:   c.stressKeyLargeSize,
				keySize:        c.stressKeySize,
				keySuffixRange: c.stressKeySuffixRange,
				N:              stressN,
				rateLimiter:    limiter,
			}
		}
		go c.Stressers[i].Stress()
	}

	c.Size = size
	c.Members = members
	return nil
}
Code example #13
File: main.go Project: syncthing/syncthing
func main() {
	log.SetFlags(log.Lshortfile | log.LstdFlags)

	var dir, extAddress, proto string

	flag.StringVar(&listen, "listen", ":22067", "Protocol listen address")
	flag.StringVar(&dir, "keys", ".", "Directory where cert.pem and key.pem is stored")
	flag.DurationVar(&networkTimeout, "network-timeout", networkTimeout, "Timeout for network operations between the client and the relay.\n\tIf no data is received between the client and the relay in this period of time, the connection is terminated.\n\tFurthermore, if no data is sent between either clients being relayed within this period of time, the session is also terminated.")
	flag.DurationVar(&pingInterval, "ping-interval", pingInterval, "How often pings are sent")
	flag.DurationVar(&messageTimeout, "message-timeout", messageTimeout, "Maximum amount of time we wait for relevant messages to arrive")
	flag.IntVar(&sessionLimitBps, "per-session-rate", sessionLimitBps, "Per session rate limit, in bytes/s")
	flag.IntVar(&globalLimitBps, "global-rate", globalLimitBps, "Global rate limit, in bytes/s")
	flag.BoolVar(&debug, "debug", debug, "Enable debug output")
	flag.StringVar(&statusAddr, "status-srv", ":22070", "Listen address for status service (blank to disable)")
	flag.StringVar(&poolAddrs, "pools", defaultPoolAddrs, "Comma separated list of relay pool addresses to join")
	flag.StringVar(&providedBy, "provided-by", "", "An optional description about who provides the relay")
	flag.StringVar(&extAddress, "ext-address", "", "An optional address to advertise as being available on.\n\tAllows listening on an unprivileged port with port forwarding from e.g. 443, and be connected to on port 443.")
	flag.StringVar(&proto, "protocol", "tcp", "Protocol used for listening. 'tcp' for IPv4 and IPv6, 'tcp4' for IPv4, 'tcp6' for IPv6")
	flag.BoolVar(&natEnabled, "nat", false, "Use UPnP/NAT-PMP to acquire external port mapping")
	flag.IntVar(&natLease, "nat-lease", 60, "NAT lease length in minutes")
	flag.IntVar(&natRenewal, "nat-renewal", 30, "NAT renewal frequency in minutes")
	flag.IntVar(&natTimeout, "nat-timeout", 10, "NAT discovery timeout in seconds")
	flag.Parse()

	if extAddress == "" {
		extAddress = listen
	}

	if len(providedBy) > 30 {
		log.Fatal("Provided-by cannot be longer than 30 characters")
	}

	addr, err := net.ResolveTCPAddr(proto, extAddress)
	if err != nil {
		log.Fatal(err)
	}

	laddr, err := net.ResolveTCPAddr(proto, listen)
	if err != nil {
		log.Fatal(err)
	}
	if laddr.IP != nil && !laddr.IP.IsUnspecified() {
		laddr.Port = 0
		transport, ok := http.DefaultTransport.(*http.Transport)
		if ok {
			transport.Dial = (&net.Dialer{
				Timeout:   30 * time.Second,
				LocalAddr: laddr,
			}).Dial
		}
	}

	log.Println(LongVersion)

	maxDescriptors, err := osutil.MaximizeOpenFileLimit()
	if maxDescriptors > 0 {
		// Assume that 20% of FD's are leaked/unaccounted for.
		descriptorLimit = int64(maxDescriptors*80) / 100
		log.Println("Connection limit", descriptorLimit)

		go monitorLimits()
	} else if err != nil && runtime.GOOS != "windows" {
		log.Println("Assuming no connection limit, due to error retrieving rlimits:", err)
	}

	sessionAddress = addr.IP[:]
	sessionPort = uint16(addr.Port)

	certFile, keyFile := filepath.Join(dir, "cert.pem"), filepath.Join(dir, "key.pem")
	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		log.Println("Failed to load keypair. Generating one, this might take a while...")
		cert, err = tlsutil.NewCertificate(certFile, keyFile, "strelaysrv", 3072)
		if err != nil {
			log.Fatalln("Failed to generate X509 key pair:", err)
		}
	}

	tlsCfg := &tls.Config{
		Certificates:           []tls.Certificate{cert},
		NextProtos:             []string{protocol.ProtocolName},
		ClientAuth:             tls.RequestClientCert,
		SessionTicketsDisabled: true,
		InsecureSkipVerify:     true,
		MinVersion:             tls.VersionTLS12,
		CipherSuites: []uint16{
			tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
			tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
			tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
		},
	}

	id := syncthingprotocol.NewDeviceID(cert.Certificate[0])
	if debug {
		log.Println("ID:", id)
	}

	wrapper := config.Wrap("config", config.New(id))
	wrapper.SetOptions(config.OptionsConfiguration{
		NATLeaseM:   natLease,
		NATRenewalM: natRenewal,
		NATTimeoutS: natTimeout,
	})
	natSvc := nat.NewService(id, wrapper)
	mapping := mapping{natSvc.NewMapping(nat.TCP, addr.IP, addr.Port)}

	if natEnabled {
		go natSvc.Serve()
		found := make(chan struct{})
		mapping.OnChanged(func(_ *nat.Mapping, _, _ []nat.Address) {
			select {
			case found <- struct{}{}:
			default:
			}
		})

		// Need to wait a few extra seconds, since NAT library waits exactly natTimeout seconds on all interfaces.
		timeout := time.Duration(natTimeout+2) * time.Second
		log.Printf("Waiting %s to acquire NAT mapping", timeout)

		select {
		case <-found:
			log.Printf("Found NAT mapping: %s", mapping.ExternalAddresses())
		case <-time.After(timeout):
			log.Println("Timeout out waiting for NAT mapping.")
		}
	}

	if sessionLimitBps > 0 {
		sessionLimiter = rate.NewLimiter(rate.Limit(sessionLimitBps), 2*sessionLimitBps)
	}
	if globalLimitBps > 0 {
		globalLimiter = rate.NewLimiter(rate.Limit(globalLimitBps), 2*globalLimitBps)
	}

	if statusAddr != "" {
		go statusService(statusAddr)
	}

	uri, err := url.Parse(fmt.Sprintf("relay://%s/?id=%s&pingInterval=%s&networkTimeout=%s&sessionLimitBps=%d&globalLimitBps=%d&statusAddr=%s&providedBy=%s", mapping.Address(), id, pingInterval, networkTimeout, sessionLimitBps, globalLimitBps, statusAddr, providedBy))
	if err != nil {
		log.Fatalln("Failed to construct URI", err)
	}

	log.Println("URI:", uri.String())

	if poolAddrs == defaultPoolAddrs {
		log.Println("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
		log.Println("!!  Joining default relay pools, this relay will be available for public use. !!")
		log.Println(`!!      Use the -pools="" command line option to make the relay private.      !!`)
		log.Println("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
	}

	pools = strings.Split(poolAddrs, ",")
	for _, pool := range pools {
		pool = strings.TrimSpace(pool)
		if len(pool) > 0 {
			go poolHandler(pool, uri, mapping)
		}
	}

	go listener(proto, listen, tlsCfg)

	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
	<-sigs

	// Gracefully close all connections, hoping that clients will be faster
	// to realize that the relay is now gone.

	sessionMut.RLock()
	for _, session := range activeSessions {
		session.CloseConns()
	}

	for _, session := range pendingSessions {
		session.CloseConns()
	}
	sessionMut.RUnlock()

	outboxesMut.RLock()
	for _, outbox := range outboxes {
		close(outbox)
	}
	outboxesMut.RUnlock()

	time.Sleep(500 * time.Millisecond)
}
Code example #14
File: put.go Project: hongchaodeng/etcd
func putFunc(cmd *cobra.Command, args []string) {
	if keySpaceSize <= 0 {
		fmt.Fprintf(os.Stderr, "expected positive --key-space-size, got (%v)", keySpaceSize)
		os.Exit(1)
	}

	requests := make(chan v3.Op, totalClients)
	if putRate == 0 {
		putRate = math.MaxInt32
	}
	limit := rate.NewLimiter(rate.Limit(putRate), 1)
	clients := mustCreateClients(totalClients, totalConns)
	k, v := make([]byte, keySize), string(mustRandBytes(valSize))

	bar = pb.New(putTotal)
	bar.Format("Bom !")
	bar.Start()

	r := newReport()
	for i := range clients {
		wg.Add(1)
		go func(c *v3.Client) {
			defer wg.Done()
			for op := range requests {
				limit.Wait(context.Background())

				st := time.Now()
				_, err := c.Do(context.Background(), op)
				r.Results() <- report.Result{Err: err, Start: st, End: time.Now()}
				bar.Increment()
			}
		}(clients[i])
	}

	go func() {
		for i := 0; i < putTotal; i++ {
			if seqKeys {
				binary.PutVarint(k, int64(i%keySpaceSize))
			} else {
				binary.PutVarint(k, int64(rand.Intn(keySpaceSize)))
			}
			requests <- v3.OpPut(string(k), v)
		}
		close(requests)
	}()

	if compactInterval > 0 {
		go func() {
			for {
				time.Sleep(compactInterval)
				compactKV(clients)
			}
		}()
	}

	rc := r.Run()
	wg.Wait()
	close(r.Results())
	bar.Finish()
	fmt.Println(<-rc)
}
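The benchmark above disables limiting by substituting math.MaxInt32 when putRate is zero. A hypothetical alternative (not taken from the etcd benchmark) is the package's own sentinel rate.Inf, under which Allow and Wait succeed unconditionally even with a zero burst:

package main

import (
	"context"
	"fmt"

	"golang.org/x/time/rate"
)

// newPutLimiter is a hypothetical helper: zero or negative means unlimited.
func newPutLimiter(putRate int) *rate.Limiter {
	if putRate <= 0 {
		return rate.NewLimiter(rate.Inf, 0) // rate.Inf allows all events, even with zero burst
	}
	return rate.NewLimiter(rate.Limit(putRate), 1)
}

func main() {
	lim := newPutLimiter(0)
	err := lim.Wait(context.Background()) // returns immediately under rate.Inf
	fmt.Println("unlimited, err:", err)
}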
Code example #15
File: watcher.go Project: pulcy/vault-monkey
func performWatchOnPrefixes(ctx context.Context, getClient getClientFunc, round int) {
	runningTime := 60 * time.Second // time for which operation should be performed
	noOfPrefixes := 36              // total number of prefixes which will be watched upon
	watchPerPrefix := 10            // number of watchers per prefix
	reqRate := 30                   // put request per second
	keyPrePrefix := 30              // max number of keyPrePrefixs for put operation

	prefixes := stringutil.UniqueStrings(5, noOfPrefixes)
	keys := stringutil.RandomStrings(10, keyPrePrefix)

	roundPrefix := fmt.Sprintf("%16x", round)

	var (
		revision int64
		wg       sync.WaitGroup
		gr       *clientv3.GetResponse
		err      error
	)

	client := getClient()
	defer client.Close()

	gr, err = getKey(ctx, client, "non-existent")
	if err != nil {
		log.Fatalf("failed to get the initial revision: %v", err)
	}
	revision = gr.Header.Revision

	ctxt, cancel := context.WithDeadline(ctx, time.Now().Add(runningTime))
	defer cancel()

	// generate and put keys in cluster
	limiter := rate.NewLimiter(rate.Limit(reqRate), reqRate)

	go func() {
		for _, key := range keys {
			for _, prefix := range prefixes {
				if err = limiter.Wait(ctxt); err != nil {
					return
				}
				if err = putKeyAtMostOnce(ctxt, client, roundPrefix+"-"+prefix+"-"+key); err != nil {
					log.Fatalf("failed to put key: %v", err)
					return
				}
			}
		}
	}()

	ctxc, cancelc := context.WithCancel(ctx)

	wcs := make([]clientv3.WatchChan, 0)
	rcs := make([]*clientv3.Client, 0)

	for _, prefix := range prefixes {
		for j := 0; j < watchPerPrefix; j++ {
			rc := getClient()
			rcs = append(rcs, rc)

			watchPrefix := roundPrefix + "-" + prefix

			wc := rc.Watch(ctxc, watchPrefix, clientv3.WithPrefix(), clientv3.WithRev(revision))
			wcs = append(wcs, wc)

			wg.Add(1)
			go func() {
				defer wg.Done()
				checkWatchResponse(wc, watchPrefix, keys)
			}()
		}
	}
	wg.Wait()

	cancelc()

	// verify all watch channels are closed
	for e, wc := range wcs {
		if _, ok := <-wc; ok {
			log.Fatalf("expected wc to be closed, but received %v", e)
		}
	}

	for _, rc := range rcs {
		rc.Close()
	}

	if err = deletePrefix(ctx, client, roundPrefix); err != nil {
		log.Fatalf("failed to clean up keys after test: %v", err)
	}
}
Code example #16
File: main.go Project: hongchaodeng/etcd
func main() {
	endpointStr := flag.String("agent-endpoints", "localhost:9027", "HTTP RPC endpoints of agents. Do not specify the schema.")
	clientPorts := flag.String("client-ports", "", "etcd client port for each agent endpoint")
	peerPorts := flag.String("peer-ports", "", "etcd peer port for each agent endpoint")
	failpointPorts := flag.String("failpoint-ports", "", "etcd failpoint port for each agent endpoint")

	datadir := flag.String("data-dir", "agent.etcd", "etcd data directory location on agent machine.")
	stressKeyLargeSize := flag.Uint("stress-key-large-size", 32*1024+1, "the size of each large key written into etcd.")
	stressKeySize := flag.Uint("stress-key-size", 100, "the size of each small key written into etcd.")
	stressKeySuffixRange := flag.Uint("stress-key-count", 250000, "the count of key range written into etcd.")
	limit := flag.Int("limit", -1, "the limit of rounds to run failure set (-1 to run without limits).")
	stressQPS := flag.Int("stress-qps", 10000, "maximum number of stresser requests per second.")
	schedCases := flag.String("schedule-cases", "", "test case schedule")
	consistencyCheck := flag.Bool("consistency-check", true, "true to check consistency (revision, hash)")
	stresserType := flag.String("stresser", "keys,lease", "comma separated list of stressers (keys, lease, v2keys, nop).")
	failureTypes := flag.String("failures", "default,failpoints", "specify failures (concat of \"default\" and \"failpoints\").")
	externalFailures := flag.String("external-failures", "", "specify a path of script for enabling/disabling an external fault injector")
	enablePprof := flag.Bool("enable-pprof", false, "true to enable pprof")
	flag.Parse()

	eps := strings.Split(*endpointStr, ",")
	cports := portsFromArg(*clientPorts, len(eps), defaultClientPort)
	pports := portsFromArg(*peerPorts, len(eps), defaultPeerPort)
	fports := portsFromArg(*failpointPorts, len(eps), defaultFailpointPort)
	agents := make([]agentConfig, len(eps))

	for i := range eps {
		agents[i].endpoint = eps[i]
		agents[i].clientPort = cports[i]
		agents[i].peerPort = pports[i]
		agents[i].failpointPort = fports[i]
		agents[i].datadir = *datadir
	}

	c := &cluster{agents: agents}
	if err := c.bootstrap(); err != nil {
		plog.Fatal(err)
	}
	defer c.Terminate()

	// ensure cluster is fully booted to know failpoints are available
	c.WaitHealth()

	var failures []failure

	if failureTypes != nil && *failureTypes != "" {
		failures = makeFailures(*failureTypes, c)
	}

	if externalFailures != nil && *externalFailures != "" {
		if len(failures) != 0 {
			plog.Errorf("specify only one of -failures or -external-failures")
			os.Exit(1)
		}
		failures = append(failures, newFailureExternal(*externalFailures))
	}

	if len(failures) == 0 {
		plog.Infof("no failures\n")
		failures = append(failures, newFailureNop())
	}

	schedule := failures
	if schedCases != nil && *schedCases != "" {
		cases := strings.Split(*schedCases, " ")
		schedule = make([]failure, len(cases))
		for i := range cases {
			caseNum := 0
			n, err := fmt.Sscanf(cases[i], "%d", &caseNum)
			if n == 0 || err != nil {
				plog.Fatalf(`couldn't parse case "%s" (%v)`, cases[i], err)
			}
			schedule[i] = failures[caseNum]
		}
	}

	scfg := stressConfig{
		rateLimiter:    rate.NewLimiter(rate.Limit(*stressQPS), *stressQPS),
		keyLargeSize:   int(*stressKeyLargeSize),
		keySize:        int(*stressKeySize),
		keySuffixRange: int(*stressKeySuffixRange),
		numLeases:      10,
		keysPerLease:   10,
	}

	t := &tester{
		failures: schedule,
		cluster:  c,
		limit:    *limit,

		scfg:         scfg,
		stresserType: *stresserType,
		doChecks:     *consistencyCheck,
	}

	sh := statusHandler{status: &t.status}
	http.Handle("/status", sh)
	http.Handle("/metrics", prometheus.Handler())

	if *enablePprof {
		http.Handle(pprofPrefix+"/", http.HandlerFunc(pprof.Index))
		http.Handle(pprofPrefix+"/profile", http.HandlerFunc(pprof.Profile))
		http.Handle(pprofPrefix+"/symbol", http.HandlerFunc(pprof.Symbol))
		http.Handle(pprofPrefix+"/cmdline", http.HandlerFunc(pprof.Cmdline))
		http.Handle(pprofPrefix+"/trace", http.HandlerFunc(pprof.Trace))
		http.Handle(pprofPrefix+"/heap", pprof.Handler("heap"))
		http.Handle(pprofPrefix+"/goroutine", pprof.Handler("goroutine"))
		http.Handle(pprofPrefix+"/threadcreate", pprof.Handler("threadcreate"))
		http.Handle(pprofPrefix+"/block", pprof.Handler("block"))
	}

	go func() { plog.Fatal(http.ListenAndServe(":9028", nil)) }()

	t.runLoop()
}
Code example #17
File: receiver.go Project: tgres/tgres
func (r *Receiver) SetMaxFlushRate(mfr int) {
	r.flushLimiter = rate.NewLimiter(rate.Limit(mfr), mfr)
}
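A flushes-per-second knob maps directly onto rate.Limit, as above. When a setting is phrased the other way around, as an interval between events, rate.Every performs the conversion; a small, self-contained illustration (not taken from tgres):

package main

import (
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// One flush every 200ms is the same steady rate as 5 flushes per second.
	byInterval := rate.NewLimiter(rate.Every(200*time.Millisecond), 1)
	byRate := rate.NewLimiter(rate.Limit(5), 5)
	fmt.Println(byInterval.Limit(), byRate.Limit()) // both print 5
}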