Example #1
func newFarm(
	redisInstances string,
	writeQuorumStr string,
	connectTimeout, readTimeout, writeTimeout time.Duration,
	redisMCPI int,
	hash func(string) uint32,
	readStrategy farm.ReadStrategy,
	repairStrategy farm.RepairStrategy,
	maxSize int,
	statsdSampleRate float64,
	bucketPrefix string,
) (*farm.Farm, error) {
	// Build instrumentation. The stats statter is not a parameter here; it is
	// assumed to be a package-level g2s statter, dialed as in Examples #3 and #4.
	instr := statsd.New(stats, float32(statsdSampleRate), bucketPrefix)

	// Parse out and build clusters.
	clusters := []cluster.Cluster{}
	for i, clusterInstances := range strings.Split(redisInstances, ";") {
		addresses := stripBlank(strings.Split(clusterInstances, ","))
		if len(addresses) == 0 {
			continue
		}
		clusters = append(clusters, cluster.New(
			pool.New(
				addresses,
				connectTimeout, readTimeout, writeTimeout,
				redisMCPI,
				hash,
			),
			maxSize,
			instr,
		))
		log.Printf("Redis cluster %d: %d instance(s)", i+1, len(addresses))
	}
	if len(clusters) == 0 {
		return nil, fmt.Errorf("no cluster(s)")
	}

	// Evaluate writeQuorum.
	writeQuorum, err := evaluateScalarPercentage(writeQuorumStr, len(clusters))
	if err != nil {
		return nil, err
	}

	// Build and return Farm.
	return farm.New(
		clusters,
		writeQuorum,
		readStrategy,
		repairStrategy,
		instr,
	), nil
}
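The quorum string is resolved by an evaluateScalarPercentage helper that is not shown above (the variant in Example #2 uses it too). Below is a minimal sketch of such a helper, assuming it accepts either an absolute count ("2") or a percentage of the cluster count ("51%"); those rules are an assumption for illustration, not the original implementation.

// Sketch only: requires "fmt", "math", "strconv", "strings".
// evaluateScalarPercentage turns a string like "2" or "51%" into an
// absolute quorum, validated against the number of clusters n.
func evaluateScalarPercentage(s string, n int) (int, error) {
	s = strings.TrimSpace(s)
	var quorum int
	if strings.HasSuffix(s, "%") {
		pct, err := strconv.ParseFloat(strings.TrimSuffix(s, "%"), 64)
		if err != nil {
			return 0, fmt.Errorf("invalid percentage %q: %v", s, err)
		}
		quorum = int(math.Ceil(float64(n) * pct / 100.0))
	} else {
		abs, err := strconv.Atoi(s)
		if err != nil {
			return 0, fmt.Errorf("invalid quorum %q: %v", s, err)
		}
		quorum = abs
	}
	if quorum < 1 || quorum > n {
		return 0, fmt.Errorf("quorum %d out of range [1, %d]", quorum, n)
	}
	return quorum, nil
}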
Example #2
func newFarm(
	redisInstances string,
	writeQuorumStr string,
	connectTimeout, readTimeout, writeTimeout time.Duration,
	redisMCPI int,
	hash func(string) uint32,
	readStrategy farm.ReadStrategy,
	repairStrategy farm.RepairStrategy,
	maxSize int,
	selectGap time.Duration,
	instr instrumentation.Instrumentation,
) (*farm.Farm, error) {
	clusters, err := farm.ParseFarmString(
		redisInstances,
		connectTimeout,
		readTimeout,
		writeTimeout,
		redisMCPI,
		hash,
		maxSize,
		selectGap,
		instr,
	)
	if err != nil {
		return nil, err
	}
	log.Printf("%d cluster(s)", len(clusters))

	writeQuorum, err := evaluateScalarPercentage(
		writeQuorumStr,
		len(clusters),
	)
	if err != nil {
		return nil, err
	}

	return farm.New(
		clusters,
		writeQuorum,
		readStrategy,
		repairStrategy,
		instr,
	), nil
}
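A hedged usage sketch of this second variant, assuming it lives in a main package alongside the function above. The roshi and g2s import paths, the farm string, and the use of a no-op statsd statter as instrumentation are assumptions for illustration only; the read and repair strategies are the ones shown in Examples #3 and #4.

package main

import (
	"log"
	"time"

	"github.com/peterbourgon/g2s"                         // assumed import path
	"github.com/soundcloud/roshi/farm"                    // assumed import paths
	"github.com/soundcloud/roshi/instrumentation/statsd"
	"github.com/soundcloud/roshi/pool"
)

func main() {
	// No-op statter, as in Examples #3 and #4 when statsd.address is blank.
	instr := statsd.New(g2s.Noop(), 1.0, "example.")

	// Two clusters: the first with two instances, the second with one.
	f, err := newFarm(
		"redis-1:6379,redis-2:6379;redis-3:6379", // hypothetical farm string
		"51%", // write quorum: a majority of clusters
		3*time.Second, 3*time.Second, 3*time.Second,
		2, // max connections per Redis instance
		pool.Murmur3,
		farm.SendAllReadAll,
		farm.AllRepairs,
		10000,
		0, // selectGap
		instr,
	)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("farm constructed: %T", f)
}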
Example #3
func main() {
	var (
		redisInstances          = flag.String("redis.instances", "", "Semicolon-separated list of comma-separated lists of Redis instances")
		redisConnectTimeout     = flag.Duration("redis.connect.timeout", 3*time.Second, "Redis connect timeout")
		redisReadTimeout        = flag.Duration("redis.read.timeout", 3*time.Second, "Redis read timeout")
		redisWriteTimeout       = flag.Duration("redis.write.timeout", 3*time.Second, "Redis write timeout")
		redisMCPI               = flag.Int("redis.mcpi", 2, "Max connections per Redis instance")
		redisHash               = flag.String("redis.hash", "murmur3", "Redis hash function: murmur3, fnv, fnva")
		selectGap               = flag.Duration("select.gap", 0*time.Millisecond, "delay between pipeline read invocations when Selecting over multiple keys")
		maxSize                 = flag.Int("max.size", 10000, "Maximum number of events per key")
		batchSize               = flag.Int("batch.size", 100, "keys to select per request")
		maxKeysPerSecond        = flag.Int64("max.keys.per.second", 1000, "max keys per second to walk")
		scanLogInterval         = flag.Duration("scan.log.interval", 5*time.Second, "how often to report scan rates in log")
		once                    = flag.Bool("once", false, "walk entire keyspace once and exit (default false, walk forever)")
		statsdAddress           = flag.String("statsd.address", "", "Statsd address (blank to disable)")
		statsdSampleRate        = flag.Float64("statsd.sample.rate", 0.1, "Statsd sample rate for normal metrics")
		statsdBucketPrefix      = flag.String("statsd.bucket.prefix", "myservice.", "Statsd bucket key prefix, including trailing period")
		prometheusNamespace     = flag.String("prometheus.namespace", "roshiwalker", "Prometheus key namespace, excluding trailing punctuation")
		prometheusMaxSummaryAge = flag.Duration("prometheus.max.summary.age", 10*time.Second, "Prometheus max age for instantaneous histogram data")
		httpAddress             = flag.String("http.address", ":6060", "HTTP listen address (profiling/metrics endpoints only)")
	)
	flag.Parse()
	log.SetOutput(os.Stdout)
	log.SetFlags(log.Lmicroseconds)

	// Validate integer arguments.
	if *maxKeysPerSecond < int64(*batchSize) {
		log.Fatal("max keys per second should be bigger than batch size")
	}

	// Set up instrumentation.
	statter := g2s.Noop()
	if *statsdAddress != "" {
		var err error
		statter, err = g2s.Dial("udp", *statsdAddress)
		if err != nil {
			log.Fatal(err)
		}
	}
	prometheusInstr := prometheus.New(*prometheusNamespace, *prometheusMaxSummaryAge)
	prometheusInstr.Install("/metrics", http.DefaultServeMux)
	instr := instrumentation.NewMultiInstrumentation(
		statsd.New(statter, float32(*statsdSampleRate), *statsdBucketPrefix),
		prometheusInstr,
	)

	// Parse hash function.
	var hashFunc func(string) uint32
	switch strings.ToLower(*redisHash) {
	case "murmur3":
		hashFunc = pool.Murmur3
	case "fnv":
		hashFunc = pool.FNV
	case "fnva":
		hashFunc = pool.FNVa
	default:
		log.Fatalf("unknown hash %q", *redisHash)
	}

	// Set up the clusters.
	clusters, err := farm.ParseFarmString(
		*redisInstances,
		*redisConnectTimeout, *redisReadTimeout, *redisWriteTimeout,
		*redisMCPI,
		hashFunc,
		*maxSize,
		*selectGap,
		instr,
	)
	if err != nil {
		log.Fatal(err)
	}

	// HTTP server for profiling.
	go func() { log.Print(http.ListenAndServe(*httpAddress, nil)) }()

	// Set up our rate limiter. Remember: it's per-key, not per-request.
	var (
		freq   = time.Second / time.Duration(*maxKeysPerSecond) // divide the Duration, not the ints, to avoid truncating to zero
		bucket = tb.NewBucket(*maxKeysPerSecond, freq)
	)

	// Build the farm.
	var (
		readStrategy   = farm.SendAllReadAll
		repairStrategy = farm.AllRepairs // blocking
		writeQuorum    = len(clusters)   // 100%
		dst            = farm.New(clusters, writeQuorum, readStrategy, repairStrategy, instr)
	)

	// Perform the walk.
	defer func(t time.Time) { log.Printf("total walk complete in %s", time.Since(t)) }(time.Now())
	for {
		src := scan(clusters, *batchSize, *scanLogInterval) // new key set
		walkOnce(dst, bucket, src, *maxSize, instr)
		if *once {
			break
		}
	}
}
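The hash-name switch in this main (and the identical one in Example #4) could be factored into a small shared helper. A sketch follows; the parseHash name is hypothetical, while pool.Murmur3, pool.FNV, and pool.FNVa are the functions used above.

// Sketch only: requires "fmt" and "strings".
// parseHash maps a flag value such as "murmur3", "fnv", or "fnva" to the
// corresponding pool hash function, returning an error instead of log.Fatalf-ing inline.
func parseHash(name string) (func(string) uint32, error) {
	switch strings.ToLower(name) {
	case "murmur3":
		return pool.Murmur3, nil
	case "fnv":
		return pool.FNV, nil
	case "fnva":
		return pool.FNVa, nil
	default:
		return nil, fmt.Errorf("unknown hash %q", name)
	}
}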
Example #4
func main() {
	var (
		redisInstances      = flag.String("redis.instances", "", "Semicolon-separated list of comma-separated lists of Redis instances")
		redisConnectTimeout = flag.Duration("redis.connect.timeout", 3*time.Second, "Redis connect timeout")
		redisReadTimeout    = flag.Duration("redis.read.timeout", 3*time.Second, "Redis read timeout")
		redisWriteTimeout   = flag.Duration("redis.write.timeout", 3*time.Second, "Redis write timeout")
		redisMCPI           = flag.Int("redis.mcpi", 2, "Max connections per Redis instance")
		redisHash           = flag.String("redis.hash", "murmur3", "Redis hash function: murmur3, fnv, fnva")
		maxSize             = flag.Int("max.size", 10000, "Maximum number of events per key")
		batchSize           = flag.Int("batch.size", 100, "keys to select per request")
		maxKeysPerSecond    = flag.Int64("max.keys.per.second", 1000, "max keys per second to walk")
		scanLogInterval     = flag.Duration("scan.log.interval", 5*time.Second, "how often to report scan rates in log")
		once                = flag.Bool("once", false, "walk entire keyspace once and exit (default false, walk forever)")
		statsdAddress       = flag.String("statsd.address", "", "Statsd address (blank to disable)")
		statsdSampleRate    = flag.Float64("statsd.sample.rate", 0.1, "Statsd sample rate for normal metrics")
		statsdBucketPrefix  = flag.String("statsd.bucket.prefix", "myservice.", "Statsd bucket key prefix, including trailing period")
		httpAddress         = flag.String("http.address", ":6060", "HTTP listen address (profiling endpoints only)")
	)
	flag.Parse()
	log.SetFlags(log.Lmicroseconds)

	// Validate integer arguments.
	if *maxKeysPerSecond < int64(*batchSize) {
		log.Fatal("max keys per second should be bigger than batch size")
	}

	// Set up statsd instrumentation, if it's specified.
	stats := g2s.Noop()
	if *statsdAddress != "" {
		var err error
		stats, err = g2s.Dial("udp", *statsdAddress)
		if err != nil {
			log.Fatal(err)
		}
	}
	instr := statsd.New(stats, float32(*statsdSampleRate), *statsdBucketPrefix)

	// Parse hash function.
	var hashFunc func(string) uint32
	switch strings.ToLower(*redisHash) {
	case "murmur3":
		hashFunc = pool.Murmur3
	case "fnv":
		hashFunc = pool.FNV
	case "fnva":
		hashFunc = pool.FNVa
	default:
		log.Fatalf("unknown hash '%s'", *redisHash)
	}

	// Set up the clusters.
	clusters, err := makeClusters(
		*redisInstances,
		*redisConnectTimeout, *redisReadTimeout, *redisWriteTimeout,
		*redisMCPI,
		hashFunc,
		*maxSize,
		instr,
	)
	if err != nil {
		log.Fatal(err)
	}

	// HTTP server for profiling
	go func() { log.Print(http.ListenAndServe(*httpAddress, nil)) }()

	// Set up our rate limiter. Remember: it's per-key, not per-request.
	freq := time.Second / time.Duration(*maxKeysPerSecond) // divide the Duration, not the ints, to avoid truncating to zero
	bucket := tb.NewBucket(*maxKeysPerSecond, freq)

	// Build the farm
	readStrategy := farm.SendAllReadAll
	repairStrategy := farm.AllRepairs // blocking
	dst := farm.New(clusters, len(clusters), readStrategy, repairStrategy, instr)

	// Perform the walk
	begin := time.Now()
	for {
		src := scan(clusters, *batchSize, *scanLogInterval) // new key set
		walkOnce(dst, bucket, src, *maxSize, instr)
		if *once {
			break
		}
	}
	log.Printf("walk complete in %s", time.Since(begin))
}
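This older main calls a makeClusters helper instead of farm.ParseFarmString, and the helper is not shown. Below is a sketch of what it might look like, reconstructed from the cluster-building loop in Example #1; only the signature is taken from the call site, the body is an assumption.

// Hypothetical reconstruction: split the farm string on ";" into clusters and
// on "," into instance addresses, as Example #1 does. Requires "fmt", "log",
// "strings", "time" plus the cluster, pool, and instrumentation packages, and
// the stripBlank helper used in Example #1.
func makeClusters(
	redisInstances string,
	connectTimeout, readTimeout, writeTimeout time.Duration,
	redisMCPI int,
	hash func(string) uint32,
	maxSize int,
	instr instrumentation.Instrumentation,
) ([]cluster.Cluster, error) {
	clusters := []cluster.Cluster{}
	for i, clusterInstances := range strings.Split(redisInstances, ";") {
		addresses := stripBlank(strings.Split(clusterInstances, ","))
		if len(addresses) == 0 {
			continue
		}
		clusters = append(clusters, cluster.New(
			pool.New(addresses, connectTimeout, readTimeout, writeTimeout, redisMCPI, hash),
			maxSize,
			instr,
		))
		log.Printf("Redis cluster %d: %d instance(s)", i+1, len(addresses))
	}
	if len(clusters) == 0 {
		return nil, fmt.Errorf("no cluster(s)")
	}
	return clusters, nil
}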