Example #1
0
File: main.go — Project: jbouwman/roshi
// makeClusters parses redisInstances — a semicolon-separated list of
// clusters, each a comma-separated list of Redis addresses — and builds
// one cluster.Cluster per non-empty cluster string.
//
// connectTimeout, readTimeout and writeTimeout are applied to every
// connection pool; redisMCPI is the maximum number of connections per
// Redis instance; hashFunc maps a key to an instance in the pool;
// maxSize and instr are forwarded to cluster.New.
//
// An error is returned when the input yields no clusters at all.
func makeClusters(
	redisInstances string,
	connectTimeout, readTimeout, writeTimeout time.Duration,
	redisMCPI int,
	hashFunc func(string) uint32,
	maxSize int,
	instr instrumentation.Instrumentation,
) ([]cluster.Cluster, error) {
	var clusters []cluster.Cluster // nil slice; append works on nil
	for i, clusterInstances := range strings.Split(redisInstances, ";") {
		addresses := stripBlank(strings.Split(clusterInstances, ","))
		if len(addresses) == 0 {
			continue // tolerate empty cluster strings, e.g. trailing ";"
		}
		clusters = append(clusters, cluster.New(
			pool.New(
				addresses,
				connectTimeout, readTimeout, writeTimeout,
				redisMCPI,
				hashFunc,
			),
			maxSize,
			instr,
		))
		log.Printf("Redis cluster %d: %d instance(s)", i+1, len(addresses))
	}
	if len(clusters) == 0 {
		return nil, fmt.Errorf("no cluster(s)")
	}
	return clusters, nil
}
Example #2
0
// ParseFarmString parses a farm declaration string into a slice of clusters.
// A farm string is a semicolon-separated list of cluster strings. A cluster
// string is a comma-separated list of Redis instances. All whitespace is
// ignored.
//
// An example farm string is:
//
//  "foo1:6379, foo2:6379; bar1:6379, bar2:6379, bar3:6379, bar4:6379"
//
// Every instance must be a host:port pair with a valid 16-bit port, no
// cluster may be empty, and no instance may appear more than once in the
// whole farm; any violation yields an error.
func ParseFarmString(
	farmString string,
	connectTimeout, readTimeout, writeTimeout time.Duration,
	redisMCPI int,
	hash func(string) uint32,
	maxSize int,
	selectGap time.Duration,
	instr instrumentation.Instrumentation,
) ([]cluster.Cluster, error) {
	var (
		seen       = map[string]int{} // host-port -> occurrence count
		duplicates []string           // host-ports seen more than once, in input order
		clusters   []cluster.Cluster
	)
	for i, clusterString := range strings.Split(stripWhitespace(farmString), ";") {
		var hostPorts []string
		for _, hostPort := range strings.Split(clusterString, ",") {
			if hostPort == "" {
				continue // tolerate stray commas
			}
			toks := strings.Split(hostPort, ":")
			if len(toks) != 2 {
				return nil, fmt.Errorf("invalid host-port %q", hostPort)
			}
			// Port must fit in 16 bits.
			if _, err := strconv.ParseUint(toks[1], 10, 16); err != nil {
				return nil, fmt.Errorf("invalid port %q in host-port %q (%s)", toks[1], hostPort, err)
			}
			seen[hostPort]++
			if seen[hostPort] == 2 {
				// Record each duplicate exactly once, in input order, so
				// the error message below is deterministic (ranging over
				// the map would yield a random order).
				duplicates = append(duplicates, hostPort)
			}
			hostPorts = append(hostPorts, hostPort)
		}
		if len(hostPorts) == 0 {
			return nil, fmt.Errorf("empty cluster %d (%q)", i+1, clusterString)
		}
		clusters = append(clusters, cluster.New(
			pool.New(hostPorts, connectTimeout, readTimeout, writeTimeout, redisMCPI, hash),
			maxSize,
			selectGap,
			instr,
		))
		log.Printf("cluster %d: %d instance(s)", i+1, len(hostPorts))
	}

	if len(clusters) == 0 {
		return nil, fmt.Errorf("no clusters specified")
	}

	if len(duplicates) > 0 {
		return nil, fmt.Errorf("duplicate instances found: %s", strings.Join(duplicates, ", "))
	}

	return clusters, nil
}
Example #3
0
File: main.go — Project: neurodrone/roshi
// newFarm builds a farm.Farm from a redisInstances declaration string
// (semicolon-separated clusters of comma-separated addresses), wiring in
// statsd instrumentation, the given read/repair strategies, and a write
// quorum evaluated from writeQuorumStr against the cluster count.
//
// It returns an error when no clusters can be built or when the write
// quorum expression is invalid.
func newFarm(
	redisInstances string,
	writeQuorumStr string,
	connectTimeout, readTimeout, writeTimeout time.Duration,
	redisMCPI int,
	hash func(string) uint32,
	readStrategy farm.ReadStrategy,
	repairStrategy farm.RepairStrategy,
	maxSize int,
	statsdSampleRate float64,
	bucketPrefix string,
) (*farm.Farm, error) {
	// Build instrumentation. NOTE(review): `stats` is a package-level
	// dependency — presumably a statsd client; confirm at the call site.
	instr := statsd.New(stats, float32(statsdSampleRate), bucketPrefix)

	// Parse out and build clusters.
	var clusters []cluster.Cluster
	for i, clusterInstances := range strings.Split(redisInstances, ";") {
		addresses := stripBlank(strings.Split(clusterInstances, ","))
		if len(addresses) == 0 {
			continue // tolerate empty cluster strings, e.g. trailing ";"
		}
		clusters = append(clusters, cluster.New(
			pool.New(
				addresses,
				connectTimeout, readTimeout, writeTimeout,
				redisMCPI,
				hash,
			),
			maxSize,
			instr,
		))
		log.Printf("Redis cluster %d: %d instance(s)", i+1, len(addresses))
	}
	if len(clusters) == 0 {
		return nil, fmt.Errorf("no cluster(s)")
	}

	// Evaluate writeQuorum, e.g. "51%" of len(clusters).
	writeQuorum, err := evaluateScalarPercentage(writeQuorumStr, len(clusters))
	if err != nil {
		return nil, err
	}

	// Build and return Farm.
	return farm.New(
		clusters,
		writeQuorum,
		readStrategy,
		repairStrategy,
		instr,
	), nil
}
Example #4
0
// integrationCluster builds a single-cluster test fixture over the given
// comma-separated Redis addresses, flushing the DB on every instance in
// the pool so each test starts from a clean slate. It fails the test if
// any flush cannot be performed.
func integrationCluster(t *testing.T, addresses string, maxSize int) cluster.Cluster {
	t.Helper()

	p := pool.New(
		strings.Split(addresses, ","),
		1*time.Second, // connect timeout
		1*time.Second, // read timeout
		1*time.Second, // write timeout
		10,            // max connections per instance
		pool.Murmur3,  // hash
	)

	for i := 0; i < p.Size(); i++ {
		// Check the error returned by WithIndex itself: the original code
		// ignored it, so a failure to obtain a connection (as opposed to a
		// failed FLUSHDB) was silently swallowed.
		if err := p.WithIndex(i, func(conn redis.Conn) error {
			_, err := conn.Do("FLUSHDB")
			return err
		}); err != nil {
			t.Fatal(err)
		}
	}

	return cluster.New(p, maxSize, nil)
}
Example #5
0
// TestRecovery exercises the connection pool across a Redis outage: it
// starts a real redis-server, verifies a PING, kills the server, checks
// that requests fail promptly while the backend is down (no deadlock),
// then restarts the server and checks the pool recovers. It requires
// redis-server on PATH and assumes port 10001 is free.
func TestRecovery(t *testing.T) {
	binary := "redis-server"
	absBinary, err := exec.LookPath(binary)
	if err != nil {
		t.Fatalf("%s: %s", binary, err)
	}

	// Build a cluster.
	var (
		port                      = "10001"
		maxConnectionsPerInstance = 2
		redisTimeout              = 3 * time.Second
		redisGracePeriod          = 5 * time.Second // time allowed for redis-server to come up
	)
	p := pool.New(
		[]string{"localhost:" + port},
		redisTimeout, redisTimeout, redisTimeout,
		maxConnectionsPerInstance,
		pool.Murmur3,
	)

	// Phase 1: run redis-server inside a closure so the deferred Kill
	// fires as soon as the initial PING has been verified, taking the
	// backend down for the next phase.
	func() {
		// Start Redis
		cmd := exec.Command(absBinary, "--port", port)
		if err := cmd.Start(); err != nil {
			t.Fatalf("Starting %s: %s", binary, err)
		}
		defer cmd.Process.Kill()

		time.Sleep(redisGracePeriod)

		// Try initial PING
		if err := p.With("irrelevant", func(conn redis.Conn) error {
			_, err := conn.Do("PING")
			return err
		}); err != nil {
			t.Fatalf("Initial PING failed: %s", err)
		}
		t.Logf("Initial PING OK")
	}()

	// Phase 2: with the backend down, issue more requests than the pool
	// has connections per instance, to probe for leaked/stuck connections.
	terminal := make(chan struct{})
	requests := maxConnectionsPerInstance * 2 // > maxConnectionsPerInstance
	go func() {
		// Redis is down. Make a bunch of requests. All should fail quickly.
		for i := 0; i < requests; i++ {
			if err := p.With("irrelevant", func(conn redis.Conn) error {
				_, err := conn.Do("PING")
				return err
			}); err == nil {
				t.Errorf("Terminal PING succeeded, but we expected failure.")
			} else {
				t.Logf("Terminal PING failed (%s), but that was expected", err)
			}
		}
		close(terminal)
	}()
	// Allow up to twice the worst-case total timeout before declaring a
	// deadlock in the pool.
	select {
	case <-terminal:
		t.Logf("Terminal PINGs completed in time.")
	case <-time.After(2 * time.Duration(requests) * redisTimeout):
		t.Fatalf("Terminal PINGs timed out. Deadlock in connection pool?")
	}

	// Phase 3: bring the backend back and verify the pool recovers.
	func() {
		// Restart Redis
		cmd := exec.Command(absBinary, "--port", port)
		if err := cmd.Start(); err != nil {
			t.Fatalf("Starting %s: %s", binary, err)
		}
		defer cmd.Process.Kill()

		time.Sleep(redisGracePeriod)

		// Try second PING x1 — the first attempt after recovery may still
		// fail (stale connection), so its error is only logged.
		err := p.With("irrelevant", func(conn redis.Conn) error {
			_, err := conn.Do("PING")
			return err
		})
		t.Logf("Second PING x1 gave error %v (just FYI)", err)
		time.Sleep(1 * time.Second) // attempt to scoot by a problem with Travis

		// Try second PING x2 — this one must succeed.
		if err := p.With("irrelevant", func(conn redis.Conn) error {
			_, err := conn.Do("PING")
			return err
		}); err != nil {
			t.Errorf("Second PING x2 failed: %s", err)
		} else {
			t.Logf("Second PING x2 OK")
		}
	}()
}