// Connect locates and connects to an appropriate redis instance for the key.
// Shard-to-shard failover may occur when enabled and needed.
// If the located shard is not ready yet, i.e. the health checker has not yet
// decided its status, wait in 0.1 second intervals (up to one second) for it
// to settle.
func (c *Cluster) Connect(key []byte) (*redis.Client, func(), int64, error) {
    shard, since, err := c.getShard(key)
    if err == ErrNotReady {
        for i := 0; err == ErrNotReady && i < 10; i++ {
            time.Sleep(100 * time.Millisecond)
            shard, since, err = c.getShard(key)
        }
    }
    if err != nil {
        return nil, nil, 0, err
    }
    cp := c.pool[shard]
    if cp == nil {
        if np, err := pool.New("tcp", shard.Addr, c.poolsize); err != nil {
            return nil, nil, 0, err
        } else {
            c.pool[shard] = np
            cp = np
        }
    }
    if client, err := cp.Get(); err == nil {
        return client, func() { cp.Put(client) }, since, nil
    } else {
        return nil, nil, 0, err
    }
}
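// Usage sketch (a hypothetical caller, not from the source): Connect returns
// the client together with a release function that must be called to return
// the connection to the pool. The cluster value and the command below are
// assumptions for illustration.
//
//    client, release, since, err := cluster.Connect([]byte("user:42"))
//    if err != nil {
//        log.Fatal(err)
//    }
//    defer release()
//    fmt.Println(client.Cmd("GET", "user:42"), since)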
func Example() {
    client, err := redis.New("tcp", "127.0.0.1:6379", 10)
    hErr(err)
    wrapper := radix.Wrap(client)
    set := NewLocationSet(key, wrapper)

    err = set.Add("Toronto", 43.6667, -79.4167)
    hErr(err)
    err = set.Add("New York", 40.7143, -74.0060)
    hErr(err)

    results, err := set.Near(43.6687, -79.4167, 500)
    hErr(err)
    tmp, err := set.Near(43.6687, -79.4167, 500000)
    hErr(err)
    results = append(results, tmp...)

    fmt.Println(results)
    client.Cmd("DEL", key)

    // Output: [Toronto Toronto New York]
}
func ConnectRedis(endpoint string) (Redis, error) {
    client, err := pool.New("tcp", endpoint, poolSize)
    if err != nil {
        return nil, err
    }
    return &redis{client}, nil
}
func main() { log.Println("Prepare migrate Redis file to SeaweedFS...") startTime := time.Now() var wg sync.WaitGroup p, err := pool.New("tcp", "localhost:6379", 10) if err != nil { log.Fatal(err) } defer p.Empty() conn, err := p.Get() if err != nil { log.Fatal(err) } length, err := conn.Cmd("LLEN", "image:all").Int64() if err != nil { log.Fatal(err) } p.Put(conn) end := length / 1000 if length%1000 != 0 { end++ } for i := int64(0); i < end; i++ { wg.Add(1) go migrateRange(&wg, p, i*1000, (i+1)*1000-1) } wg.Wait() log.Printf("Migate Redis file success in %v.\n", time.Now().Sub(startTime)) }
func newNormalDB() (DBer, error) {
    log.L.Printf("connecting to redis at %s", config.RedisAddr)
    p, err := pool.New("tcp", config.RedisAddr, config.RedisPoolSize)
    if err != nil {
        log.L.Fatal(err)
    }
    return &normalDB{p}, err
}
// NewSingle generates a connector for the given single redis instance.
func NewSingle(addr string, poolsize int) (*Single, error) {
    c := &Single{}
    if pool, err := pool.New("tcp", addr, poolsize); err != nil {
        return nil, err
    } else {
        c.pool = pool
        return c, nil
    }
}
func New(realmId int, realmName string) Realm {
    P, err := pool.New("tcp", "localhost:6379", 10)
    if err != nil {
        log.Printf("Can't connect to redis")
    }
    R := Realm{realmId, realmName, make(map[int]*Level), make(map[int]*Player), make(map[int]*Npc), make(map[int]*Mob), P}
    go R.thread() // Run the realm thread responsible for saving the redis database
    return R
}
/*
 * NewRedisPool creates a new pool of redis connections.
 */
func NewRedisPool(config RedisConfig) *pool.Pool {
    initConfig(&config)
    log.Printf("Creating Redis pool with config: %v", config)
    p, err := pool.New(protocol, fmt.Sprintf("%s:%s", config.Host, config.Port), config.PoolSize)
    if err != nil {
        // fatal error
        rabbitbeans.FailOnError(err, "Cannot create redis pool")
    }
    return p
}
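// Usage sketch (an assumption, not from the source): only the Host, Port, and
// PoolSize fields of RedisConfig appear above; initConfig is expected to fill
// in any defaults.
//
//    p := NewRedisPool(RedisConfig{Host: "localhost", Port: "6379", PoolSize: 10})
//    conn, err := p.Get()
//    if err != nil {
//        log.Fatal(err)
//    }
//    defer p.Put(conn)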
func NewCluster(config *Config) (*Cluster, error) {
    master, err := pool.New("tcp", fmt.Sprintf("%s:6379", config.Master), PoolSize)
    if err != nil {
        return nil, err
    }
    c := Cluster{
        master: master,
        slaves: make([]*pool.Pool, len(config.Slaves)),
    }
    for i, slave := range config.Slaves {
        conn, err := pool.New("tcp", fmt.Sprintf("%s:6379", slave), PoolSize)
        if err != nil {
            return nil, err
        }
        c.slaves[i] = conn
    }
    log.Infof("Connected 1 master and %d slaves", len(c.slaves))
    return &c, nil
}
// Connect connects to a single redis server and creates a pool of connections.
// server is a "localhost:port" string; maxNumConnections is the size of the
// pool you want to open. The pool is stored in the global POOL variable:
// access it by importing this file and "getting" a connection from the pool.
func Connect(server string, maxNumConnections int) {
    connPool, err := pool.New("tcp", server, maxNumConnections)
    if err != nil {
        log.Println("redisutils.go-Connect error")
        log.Panicln(err)
        return
    }
    log.Println("redisutils.go-Connect okay")
    POOL = connPool
}
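// Usage sketch (a hypothetical caller, not from the source), following the
// "get a connection from the global pool" pattern described above:
//
//    Connect("localhost:6379", 10)
//    conn, err := POOL.Get()
//    if err != nil {
//        log.Fatal(err)
//    }
//    defer POOL.Put(conn)
//    fmt.Println(conn.Cmd("PING"))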
// NewClient creates a sentinel client. It connects to the given sentinel
// instance, pulls the information for the masters of the given names, and
// creates an initial pool of connections for each master. The client will
// automatically replace the pool for any master should sentinel decide to
// fail the master over. The returned error is a *ClientError.
func NewClient(
    network, address string, poolSize int, names ...string,
) (
    *Client, error,
) {
    // We use this to fetch initial details about masters before we upgrade it
    // to a pubsub client
    client, err := redis.Dial(network, address)
    if err != nil {
        return nil, &ClientError{err: err}
    }

    masterPools := map[string]*pool.Pool{}
    for _, name := range names {
        r := client.Cmd("SENTINEL", "MASTER", name)
        l, err := r.List()
        if err != nil {
            return nil, &ClientError{err: err, SentinelErr: true}
        }
        addr := l[3] + ":" + l[5]
        pool, err := pool.New("tcp", addr, poolSize)
        if err != nil {
            return nil, &ClientError{err: err}
        }
        masterPools[name] = pool
    }

    subClient := pubsub.NewSubClient(client)
    r := subClient.Subscribe("+switch-master")
    if r.Err != nil {
        return nil, &ClientError{err: r.Err, SentinelErr: true}
    }

    c := &Client{
        poolSize:       poolSize,
        masterPools:    masterPools,
        subClient:      subClient,
        getCh:          make(chan *getReq),
        putCh:          make(chan *putReq),
        closeCh:        make(chan struct{}),
        alwaysErrCh:    make(chan *ClientError),
        switchMasterCh: make(chan *switchMaster),
    }
    go c.subSpin()
    go c.spin()
    return c, nil
}
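// Usage sketch (an assumption, not shown in the source): this follows the
// radix.v2 sentinel pattern, where GetMaster/PutMaster check a connection out
// of and back into the pool for a named master. The address and master name
// are placeholders.
//
//    client, err := NewClient("tcp", "localhost:26379", 10, "mymaster")
//    if err != nil {
//        log.Fatal(err)
//    }
//    conn, err := client.GetMaster("mymaster")
//    if err != nil {
//        log.Fatal(err)
//    }
//    defer client.PutMaster("mymaster", conn)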
// Start initialises several variables and creates the necessary go-routines
func Start(c config.Config) {
    conf = c

    poolSize := 1
    for _, ct := range conf.Tasks {
        if ct.FailedTasksTTL > 0 {
            poolSize++
            break
        }
    }

    var err error
    redisPool, err = pool.New(conf.RedisNetwork, conf.RedisAddress, poolSize)
    if err != nil {
        output.NotifyError("redis pool.New():", err)
    }

    stats.InitTasks(conf.Tasks)

    workerChan = make(map[string]chan QueueTask)
    failedChan = make(chan failedTask)
    shutdownChan = make(chan bool, 1)

    for _, ct := range conf.Tasks {
        var eb *backoff.Backoff
        if ct.BackoffEnabled {
            eb = &backoff.Backoff{
                Min:    time.Duration(ct.BackoffMin) * time.Millisecond,
                Max:    time.Duration(ct.BackoffMax) * time.Millisecond,
                Factor: ct.BackoffFactor,
                Jitter: true,
            }
        }

        workerChan[ct.Type] = make(chan QueueTask, backlog)
        for i := 0; i < ct.Workers; i++ {
            waitGroup.Add(1)
            go taskWorker(ct, eb)
        }
    }

    waitGroupFailed.Add(1)
    go failedTaskWorker()

    waitGroup.Add(1)
    go queueWorker()
}
func main() {
    var err error
    p, err = pool.New("tcp", "localhost:6379", 10)
    if err != nil {
        log.Fatalln("Could not connect to redis, exiting...")
    }

    r := mux.NewRouter()
    r.HandleFunc("/match", MatchHandler).Methods("POST")
    r.HandleFunc("/new/{player}", PlayerHandler).Methods("POST")
    r.HandleFunc("/elo", EloHandler).Methods("GET")
    r.HandleFunc("/elo/{player}", EloHandler).Methods("GET")

    fmt.Println("starting.")
    log.Fatal(http.ListenAndServe(":8080", r))
}
// InitRedis establishes a new connection pool to a Redis instance.
func InitRedis(host, port string) *RedisState {
    pool, err := pool.New("tcp", host+":"+port, DefaultConnectionCount)
    if err != nil {
        log.Println("Error while creating connection pool: " + err.Error())
        return nil
    }
    initialLines, err := pool.Cmd("LLEN", DefaultArrayKey).Int()
    if err != nil {
        log.Println("Error initializing connection pool: " + err.Error())
        return nil
    }
    log.Printf("Initialized connection pool with count: %v", initialLines)
    return &RedisState{pool, initialLines}
}
func (c *Client) spin() {
    for {
        select {
        case req := <-c.getCh:
            if c.alwaysErr != nil {
                req.retCh <- &getReqRet{nil, c.alwaysErr}
                continue
            }
            pool, ok := c.masterPools[req.name]
            if !ok {
                err := errors.New("unknown name: " + req.name)
                req.retCh <- &getReqRet{nil, &ClientError{err: err}}
                continue
            }
            conn, err := pool.Get()
            if err != nil {
                req.retCh <- &getReqRet{nil, &ClientError{err: err}}
                continue
            }
            req.retCh <- &getReqRet{conn, nil}

        case req := <-c.putCh:
            if pool, ok := c.masterPools[req.name]; ok {
                pool.Put(req.conn)
            }

        case err := <-c.alwaysErrCh:
            c.alwaysErr = err

        case sm := <-c.switchMasterCh:
            if p, ok := c.masterPools[sm.name]; ok {
                p.Empty()
                p, _ = pool.New("tcp", sm.addr, c.poolSize)
                c.masterPools[sm.name] = p
            }

        case <-c.closeCh:
            for name := range c.masterPools {
                c.masterPools[name].Empty()
            }
            c.subClient.Client.Close()
            close(c.getCh)
            close(c.putCh)
            return
        }
    }
}
func main() {
    var redis *radix.Pool
    var indices []string
    var titles map[string]string
    var bitmaps map[string][]uint64

    articles := flag.String("articles", "", "Directory to load *.txt files from")
    limit := flag.Uint("limit", 0, "Max number of articles to read")
    cutoff := flag.Uint("cutoff", 10, "Number of articles a word must appear in to be included in the index")
    memlim := flag.Uint("memory", 0, "Target index final memory consumption in MB")
    flag.Parse()

    var bindAddr string
    if *articles != "" {
        indices, titles, bitmaps = loadArticles(*articles, *limit, *cutoff, *memlim)
        bindAddr = ":4088"
    } else {
        var err error
        redis, err = radix.New("tcp", "localhost:6379", 10)
        if err != nil {
            log.Fatal(err)
        }
        bindAddr = ":4080"
    }

    http.HandleFunc("/content", func(w http.ResponseWriter, r *http.Request) {
        q := r.FormValue("q")
        if q == "" {
            fmt.Fprint(w, "Usage: /content?q=ask+me")
            return
        }
        words := strings.Split(q, " ")
        if redis != nil {
            askRedis(w, words, redis)
        } else {
            searchMemory(w, words, indices, titles, bitmaps)
        }
    })

    s := &http.Server{
        Addr:         bindAddr,
        ReadTimeout:  10 * time.Second,
        WriteTimeout: 10 * time.Second,
    }
    log.Print("ready\n")
    log.Fatal(s.ListenAndServe())
}
// checkNode checks a shard: it sends a redis "PING" command and checks the
// response.
func (c *LocalChecker) checkNode(cxt *checkContext) {
    cxt.incheck = true
    defer func() { cxt.incheck = false }()

    if cxt.mpool == nil {
        if pool, err := pool.New("tcp", cxt.status.Addr, 1); err == nil {
            cxt.mpool = pool
        }
    }
    if cxt.mpool != nil {
        if client, err := cxt.mpool.Get(); err == nil {
            defer cxt.mpool.Put(client)
            resp := client.Cmd("PING")
            if result, err := resp.Str(); err == nil && result == "PONG" {
                c.processAlive(cxt)
                return
            }
        }
    }
    c.processDead(cxt)
}
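// Minimal standalone sketch of the same PING health check against a plain
// radix.v2 connection (the address is an assumption for illustration):
//
//    client, err := redis.Dial("tcp", "127.0.0.1:6379")
//    if err != nil {
//        log.Fatal(err)
//    }
//    defer client.Close()
//    if s, err := client.Cmd("PING").Str(); err == nil && s == "PONG" {
//        fmt.Println("alive")
//    } else {
//        fmt.Println("dead")
//    }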
func InitScheduler(db *sqlx.DB, config map[string]string) *Scheduler {
    taskDao := dao.InitTaskDao(db)

    seconds, _ := strconv.Atoi(config["fetch_rules_period"])
    fetchRulesPeriod := time.Duration(seconds) * time.Second
    seconds, _ = strconv.Atoi(config["fetch_tasks_period"])
    fetchTasksPeriod := time.Duration(seconds) * time.Second

    listenAddr := config["listen_addr"]
    fetchers := strings.Split(strings.Replace(config["fetchers"], " ", "", -1), ",")
    fetcherApi := map[string]string{}
    json.Unmarshal([]byte(config["fetcher_api"]), &fetcherApi)

    minHostVisitInterval, _ := strconv.Atoi(config["min_host_visit_interval"])
    redisAddr := config["redis_addr"]
    redisPoolSize, _ := strconv.Atoi(config["redis_pool_size"])
    redisHeartbeat, _ := strconv.Atoi(config["redis_heartbeat"])

    pool, err := pool.New("tcp", redisAddr, redisPoolSize)
    if err != nil {
        log.Errorln("init redis pool error: ", err)
        return nil
    }
    politeVisitor := InitPoliteVisitor(pool, int64(minHostVisitInterval))
    quitChan := make(chan bool, 1)

    return &Scheduler{
        fetchRulesPeriod: fetchRulesPeriod,
        fetchTasksPeriod: fetchTasksPeriod,
        listenAddr:       listenAddr,
        db:               db,
        taskDao:          taskDao,
        fetchers:         fetchers,
        fetcherApi:       fetcherApi,
        politeVisitor:    politeVisitor,
        redisPool:        pool,
        redisPoolSize:    redisPoolSize,
        redisHeartbeat:   redisHeartbeat,
        quitChan:         quitChan,
    }
}
// Connect establishes the transport's connection to Redis, either through
// sentinel or directly to a standalone instance.
func (t *Transport) Connect() error {
    // TODO add retries in case of failures
    if t.isSentinel() {
        client, err := sentinel.NewClient("tcp", t.Address, t.MaxConnections, t.SentinelMaster)
        if err != nil {
            msg := fmt.Sprintf("Cannot connect to Redis host '%s': %s", t.Address, err)
            log.Errorln(msg)
            return err
        }
        t.sentinelClient = client
    } else {
        // Redis standalone: create a redis pool
        pool, err := pool.New("tcp", t.Address, t.MaxConnections)
        if err != nil {
            msg := fmt.Sprintf("Cannot connect to Redis host '%s': %s", t.Address, err)
            log.Errorln(msg)
            return err
        }
        t.pool = pool
    }

    // ping to ensure we are really connected
    conn, err := t.getConnection()
    if err != nil {
        log.Fatalf("Cannot ping Redis host: %s", err.Error())
        return err
    }
    defer conn.Close()
    t.pingRedis(conn)
    return nil
}
func TestReset(t *T) {
    // Simply initializing a cluster proves Reset works to some degree, since
    // NewCluster calls Reset
    cluster := getCluster(t)
    old7000Pool := cluster.pools["127.0.0.1:7000"]
    old7001Pool := cluster.pools["127.0.0.1:7001"]

    // We make a bogus client and add it to the cluster to prove that it gets
    // removed, since it's not needed
    p, err := pool.New("tcp", "127.0.0.1:6379", 10)
    assert.Nil(t, err)
    cluster.pools["127.0.0.1:6379"] = p

    // We use resetInnerUsingPool so that we can specify exactly which pool is
    // used, so we don't accidentally use the 6379 one (which doesn't have
    // CLUSTER commands)
    respCh := make(chan bool)
    cluster.callCh <- func(c *Cluster) {
        err := cluster.resetInnerUsingPool(old7000Pool)
        assert.Nil(t, err)
        respCh <- true
    }
    <-respCh

    // Prove that the bogus client is closed
    _, ok := cluster.pools["127.0.0.1:6379"]
    assert.Equal(t, false, ok)

    // Prove that the remaining two addresses are still in clients, were not
    // reconnected, and still work
    assert.Equal(t, 2, len(cluster.pools))
    assert.Equal(t, old7000Pool, cluster.pools["127.0.0.1:7000"])
    assert.Equal(t, old7001Pool, cluster.pools["127.0.0.1:7001"])
    assert.Nil(t, cluster.Cmd("GET", "foo").Err)
    assert.Nil(t, cluster.Cmd("GET", "bar").Err)
}
func main() {
    var err error

    // create redis connection pool
    if db, err = pool.New("tcp", "localhost:6379", 10); err != nil {
        panic(err)
    }
    defer db.Empty()

    dbc, err := db.Get()
    if err != nil {
        fmt.Println(err)
    }

    // Pipeline the config writes and reads, then drain the HSETNX responses
    // before reading back the HGET values.
    for k, v := range defaultConfig {
        dbc.PipeAppend("HSETNX", "fahrrad/config", k, v)
    }
    for k := range defaultConfig {
        dbc.PipeAppend("HGET", "fahrrad/config", k)
    }
    for range defaultConfig {
        dbc.PipeResp()
    }
    var v int
    v, err = dbc.PipeResp().Int()
    if err == nil {
        AssignedPrefixLength = uint8(v)
    }
    v, err = dbc.PipeResp().Int()
    if err == nil {
        OnLinkPrefixLength = uint8(v)
    }
    v, err = dbc.PipeResp().Int()
    if err == nil {
        DefaultValidLifetime = uint32(v)
    }
    v, err = dbc.PipeResp().Int()
    if err == nil {
        DefaultPreferredLifetime = uint32(v)
    }
    v, err = dbc.PipeResp().Int()
    if err == nil {
        TickerDelay = time.Duration(v) * time.Second
    }
    defer db.Put(dbc)

    // open listening connection
    conn, err := net.ListenIP("ip6:ipv6-icmp", &net.IPAddr{IP: net.IPv6unspecified})
    if err != nil {
        panic(err)
    }
    defer conn.Close()
    pc = ipv6.NewPacketConn(conn)

    // RFC 4861 requires the hop limit set to 255, but the default value in golang is 64
    pc.SetHopLimit(255)

    // only accept neighbor discovery messages
    filter := new(ipv6.ICMPFilter)
    filter.SetAll(true)
    filter.Accept(ipv6.ICMPTypeRouterSolicitation)
    filter.Accept(ipv6.ICMPTypeRouterAdvertisement)
    filter.Accept(ipv6.ICMPTypeNeighborSolicitation)
    filter.Accept(ipv6.ICMPTypeNeighborAdvertisement)
    if err = pc.SetICMPFilter(filter); err != nil {
        panic(err)
    }

    rschan = make(chan routerSolicitation)
    go hostManager()

    // read from socket
    buf := make([]byte, 512)
    for {
        n, _, srcAddr, err := pc.ReadFrom(buf)
        if err != nil {
            panic(err)
        }
        go handleND(srcAddr, buf[:n])
    }
}
func dial(t *testing.T) *redis.Pool {
    client, err := redis.New("tcp", "127.0.0.1:6379", 10)
    assert.Nil(t, err)
    return client
}
func main() {
    log.SetFlags(log.Flags() | log.Lmicroseconds)
    log.Println("CashPoints server build: " + BuildDate)

    args := os.Args[1:]

    configFilePath := SERVER_DEFAULT_CONFIG
    if len(args) > 0 {
        configFilePath = args[0]
        log.Printf("Loading config file: %s\n", configFilePath)
    } else {
        log.Printf("Loading default config file: %s\n", configFilePath)
    }

    if _, err := os.Stat(configFilePath); os.IsNotExist(err) {
        log.Fatalf("No such config file: %s\n", configFilePath)
    }

    configFile, _ := os.Open(configFilePath)
    decoder := json.NewDecoder(configFile)
    serverConfig := ServerConfig{}
    err := decoder.Decode(&serverConfig)
    if err != nil {
        log.Fatalf("Failed to decode config file: %s\nError: %v\n", configFilePath, err)
    }

    certPath := ""
    pkeyPath := ""
    if serverConfig.UseTLS {
        certPath = path.Join(serverConfig.CertificateDir, "cert.pem")
        pkeyPath = path.Join(serverConfig.CertificateDir, "key.pem")
        if _, err := os.Stat(certPath); os.IsNotExist(err) {
            log.Fatalf("No such cert file for tls: %s\n", certPath)
        }
        if _, err := os.Stat(pkeyPath); os.IsNotExist(err) {
            log.Fatalf("No such private key file for tls: %s\n", pkeyPath)
        }
    }

    redis_cli_pool, err = pool.New("tcp", serverConfig.RedisHost, 16)
    if err != nil {
        log.Fatal(err)
    }
    redis_cli, err := redis_cli_pool.Get()
    if err != nil {
        log.Fatal(err)
    }
    defer redis_cli_pool.Put(redis_cli)

    if serverConfig.UUID_TTL < UUID_TTL_MIN {
        serverConfig.UUID_TTL = UUID_TTL_MIN
    } else if serverConfig.UUID_TTL > UUID_TTL_MAX {
        serverConfig.UUID_TTL = UUID_TTL_MAX
    }

    redis_cli.Cmd("HMSET", "settings",
        "user_login_min_length", serverConfig.UserLoginMinLength,
        "user_password_min_length", serverConfig.UserPwdMinLength,
        "uuid_ttl", serverConfig.UUID_TTL,
        "banks_ico_dir", serverConfig.BanksIcoDir)

    preloadRedisScripts(redis_cli, serverConfig.RedisScriptsDir)

    REQ_RES_LOG_TTL = serverConfig.ReqResLogTTL

    router := mux.NewRouter()
    router.HandleFunc("/ping", handlerPing).Methods("GET")
    router.HandleFunc("/user", handlerUserCreate).Methods("POST")
    router.HandleFunc("/user", handlerUserDelete).Methods("DELETE")
    router.HandleFunc("/login", handlerUserLogin).Methods("POST")
    router.HandleFunc("/towns", handlerTownList).Methods("GET")
    router.HandleFunc("/towns", handlerTownsBatch).Methods("POST")
    router.HandleFunc("/regions", handlerRegions)
    router.HandleFunc("/town/{id:[0-9]+}", handlerTown)
    router.HandleFunc("/bank/{id:[0-9]+}", handlerBank)
    router.HandleFunc("/bank/{id:[0-9]+}/ico", handlerBankIco).Methods("GET")
    router.HandleFunc("/bank", handlerBankCreate).Methods("POST")
    router.HandleFunc("/banks", handlerBankList).Methods("GET")
    router.HandleFunc("/banks", handlerBanksBatch).Methods("POST")
    router.HandleFunc("/cashpoint", handlerCashpointCreate).Methods("POST")
    router.HandleFunc("/cashpoint/{id:[0-9]+}", handlerCashpoint)
    router.HandleFunc("/town/{town_id:[0-9]+}/bank/{bank_id:[0-9]+}/cashpoints", handlerCashpointsByTownAndBank)
    router.HandleFunc("/nearby/cashpoints", handlerNearbyCashPoints).Methods("POST")
    router.HandleFunc("/nearby/towns", handlerNearbyTowns).Methods("POST")

    port := ":" + strconv.FormatUint(serverConfig.Port, 10)
    log.Println("Listening 127.0.0.1" + port)

    server := &http.Server{
        Addr:           port,
        Handler:        router,
        ReadTimeout:    10 * time.Second,
        WriteTimeout:   10 * time.Second,
        MaxHeaderBytes: 1 << 20,
    }

    if serverConfig.UseTLS {
        log.Println("Using TLS encryption")
        log.Println("Certificate path: " + certPath)
        log.Println("Private key path: " + pkeyPath)
        err = server.ListenAndServeTLS(certPath, pkeyPath)
    } else {
        err = server.ListenAndServe()
    }
    if err != nil {
        log.Fatal(err)
    }
}