func TestKVIncrDecr(t *testing.T) { c := getTestConn() defer c.Close() if n, err := redis.Int64(c.Do("incr", "n")); err != nil { t.Fatal(err) } else if n != 1 { t.Fatal(n) } if n, err := redis.Int64(c.Do("incr", "n")); err != nil { t.Fatal(err) } else if n != 2 { t.Fatal(n) } if n, err := redis.Int64(c.Do("decr", "n")); err != nil { t.Fatal(err) } else if n != 1 { t.Fatal(n) } if n, err := redis.Int64(c.Do("incrby", "n", 10)); err != nil { t.Fatal(err) } else if n != 11 { t.Fatal(n) } if n, err := redis.Int64(c.Do("decrby", "n", 10)); err != nil { t.Fatal(err) } else if n != 1 { t.Fatal(n) } }
func (c RedisCache) Decrement(key string, delta uint64) (newValue uint64, err error) { conn := c.pool.Get() defer conn.Close() // Check for existence *before* increment as per the cache contract. // redis will auto create the key, and we don't want that, hence the exists call existed, err := exists(conn, key) if err != nil { return 0, err } else if !existed { return 0, ErrCacheMiss } // Decrement contract says you can only go to 0 // so we go fetch the value and if the delta is greater than the amount, // 0 out the value currentVal, err := redis.Int64(conn.Do("GET", key)) if err != nil { return 0, err } if delta > uint64(currentVal) { tempint, err := redis.Int64(conn.Do("DECRBY", key, currentVal)) return uint64(tempint), err } tempint, err := redis.Int64(conn.Do("DECRBY", key, delta)) return uint64(tempint), err }
func (c *RedisCache) Incrby(key string, n int64) (int64, error) { conn := c.ConnGet() defer conn.Close() if n == 0 { return redis.Int64(conn.Do("GET", key)) } return redis.Int64(conn.Do("INCRBY", key, n)) }
func (c *RedisCache) Hincrby(key, field string, n int64) (int64, error) { conn := c.ConnGet() defer conn.Close() if n == 0 { return redis.Int64(conn.Do("HGET", key, field)) } return redis.Int64(conn.Do("HINCRBY", key, field, n)) }
// GetTriggerThrottlingTimestamps get throttling or scheduled notifications delay for given triggerID func (connector *DbConnector) GetTriggerThrottlingTimestamps(triggerID string) (time.Time, time.Time) { c := connector.Pool.Get() defer c.Close() next, _ := redis.Int64(c.Do("GET", fmt.Sprintf("moira-notifier-next:%s", triggerID))) beginning, _ := redis.Int64(c.Do("GET", fmt.Sprintf("moira-notifier-throttling-beginning:%s", triggerID))) return time.Unix(next, 0), time.Unix(beginning, 0) }
// deregister a hipache service and MAYBE untag consul
//
// Removes `target` from the hipache frontend list for this service; when the
// frontend is left with fewer than two entries (i.e. only the identifier
// remains), it untags consul (unless omitdc) and deletes the key entirely.
func (r *HipacheAdapter) doHipacheServiceDeRegister(service *bridge.Service, omitdc bool) error {
	//var ConsulName string
	conn := r.pool.Get()
	defer conn.Close()
	servicename := r.serviceEndpoint(service, omitdc)
	rediskey := "frontend:" + servicename
	target := r.serviceTarget(service)
	// LLEN tells us whether the frontend key exists at all.
	flen, err := redis.Int64(conn.Do("LLEN", rediskey))
	if err != nil {
		return err
	}
	if flen > 0 { // frontend exists
		// LREM with count 0 removes every occurrence of target from the list.
		res, err := redis.Int64(conn.Do("LREM", rediskey, 0, target))
		if err != nil {
			return err
		}
		if res < 1 {
			log.Print("hipache: that's odd -- the backend is already deregged: ", rediskey, target)
		}
		// Re-measure after removal to decide on detag/cleanup.
		remlen, err := redis.Int64(conn.Do("LLEN", rediskey))
		if err != nil {
			return err
		}
		if !omitdc && r.consul != nil {
			// for ONE of the dereg runs, if we are now empty, detag.
			if remlen < 2 { // only the identifier is left, or something really killed the key
				log.Print("all entries for frontend removed, detagging", rediskey)
				err = r.doConsulUnTagging(service)
				if err != nil {
					return err
				}
			}
		}
		if remlen < 2 { // key is done, nuke it
			log.Print("all entries for frontend removed, removing key", rediskey)
			_, err = conn.Do("DEL", rediskey)
			if err != nil {
				return err
			}
		}
	} else { // frontend key does not exist
		log.Print("hipache: that's strange -- you're asking us to dereg something already deregged:", rediskey)
		// detag just in case
		err = r.doConsulUnTagging(service)
		if err != nil {
			return err
		}
	}
	return nil
}
// Get returns the limit for the identifier. func (s *RedisStore) Get(key string, rate Rate) (Context, error) { ctx := Context{} key = fmt.Sprintf("%s:%s", s.Prefix, key) c := s.Pool.Get() defer c.Close() if err := c.Err(); err != nil { return Context{}, err } exists, err := redis.Bool(c.Do("EXISTS", key)) if err != nil { return ctx, err } ms := int64(time.Millisecond) if !exists { c.Do("SET", key, 1, "EX", rate.Period.Seconds()) return Context{ Limit: rate.Limit, Remaining: rate.Limit - 1, Reset: (time.Now().UnixNano()/ms + int64(rate.Period)/ms) / 1000, Reached: false, }, nil } count, err := redis.Int64(c.Do("INCR", key)) if err != nil { return ctx, nil } ttl, err := redis.Int64(c.Do("TTL", key)) if err != nil { return ctx, nil } remaining := int64(0) if count < rate.Limit { remaining = rate.Limit - count } return Context{ Limit: rate.Limit, Remaining: remaining, Reset: time.Now().Add(time.Duration(ttl) * time.Second).Unix(), Reached: count > rate.Limit, }, nil }
// Returns the number of items in the queue func (q *delayedQueue) GetSize() (int64, error) { size, err := redis.Int64(q.conn.Do("ZCARD", q.GetIndexName())) if err != nil { return 0, err } return size, nil }
// TestAnalysePool enqueues start/stop tasks for two ids, runs AnalysePool with
// an analyzer that completes each id on "stop", and asserts the WAREHOUSE_0
// queue drains to empty.
func TestAnalysePool(t *testing.T) {
	QueuesInPartision(1)
	Partitions([]string{testRedis})
	redisdb := redisPool[0].conn
	// Start from a clean queue so leftovers from other runs don't skew LLEN.
	redisdb.Do("DEL", "WAREHOUSE_0")
	AddTask(1, "start")
	AddTask(2, "start")
	AddTask(1, "stop")
	AddTask(2, "stop")
	// Per-task analyzer: consume messages until "stop", then wait for the
	// next-signal and report success.
	analyzer := func(id int, msg_channel chan string, success chan bool, next chan bool) {
		for {
			select {
			case msg := <-msg_channel:
				if msg == "stop" {
					<-next
					success <- true
					return
				}
			}
		}
	}
	AnalysePool(1, 2, true, analyzer)
	// After processing, the backing list must be empty.
	r, e := redisdb.Do("LLEN", "WAREHOUSE_0")
	s, e := redis.Int64(r, e)
	if s != 0 {
		t.Error("Queue is not empty after processing tasks: ", s)
	}
}
// Returns the number of items in the queue func (q *unackQueue) GetSize() (int64, error) { size, err := redis.Int64(q.conn.Do("HLEN", q.GetName())) if err != nil { return 0, err } return size, nil }
func (m *Manager) RemoveSubdomain(username, prefix string) error { domain, err := m.Domain(username, prefix) if err != nil { return err } // update r53 // this is the root subdomain if err := m.updateR53("DELETE", domain.Prefix, "A", []string{domain.Endpoint}, m.defaultTTL); err != nil { return err } // this is the wildcard if err := m.updateR53("DELETE", fmt.Sprintf("*.%s", domain.Prefix), "A", []string{domain.Endpoint}, m.defaultTTL); err != nil { return err } conn := m.pool.Get() defer conn.Close() // remove from alldomains if _, err := conn.Do("SREM", allDomainsKey, prefix); err != nil { return err } key := fmt.Sprintf("%s:%s:%s", domainsKey, username, prefix) res, err := redis.Int64(conn.Do("DEL", key)) if err != nil { return err } if res == 0 { return ErrDomainDoesNotExist } return nil }
// GetWithTime returns the value of the key if it is in the store // or -1 if it does not exist. It also returns the current time at // the redis server to microsecond precision. func (r *RedigoStore) GetWithTime(key string) (int64, time.Time, error) { var now time.Time key = r.prefix + key conn, err := r.getConn() if err != nil { return 0, now, err } defer conn.Close() conn.Send("TIME") conn.Send("GET", key) conn.Flush() timeReply, err := redis.Values(conn.Receive()) if err != nil { return 0, now, err } var s, us int64 if _, err := redis.Scan(timeReply, &s, &us); err != nil { return 0, now, err } now = time.Unix(s, us*int64(time.Microsecond)) v, err := redis.Int64(conn.Receive()) if err == redis.ErrNil { return -1, now, nil } else if err != nil { return 0, now, err } return v, now, nil }
func NodeInrc() (int64, error) { v, err := redis.Int64(rds.Do("INCR", "nodeid")) if err != nil { return 0, err } return v, nil }
// TestQueue_AnalysePool enqueues start/stop tasks for two ids on a Queue with
// a custom name, runs AnalysePool with an analyzer that completes each id on
// "stop", and asserts the backing redis list drains to empty.
func TestQueue_AnalysePool(t *testing.T) {
	var q Queue
	q.Urls([]string{testRedis})
	redisdb := q.pool[0]
	// Wipe the test redis so prior state cannot skew the final LLEN check.
	redisdb.Do("FLUSHALL")
	q.QueueName = "CUSTOM"
	q.AddTask(1, "start")
	q.AddTask(2, "start")
	q.AddTask(1, "stop")
	q.AddTask(2, "stop")
	// Per-task analyzer: consume messages until "stop", then wait for the
	// next-signal and report success.
	analyzer := func(id int, msg_channel chan string, success chan bool, next chan bool) {
		for {
			select {
			case msg := <-msg_channel:
				if msg == "stop" {
					<-next
					success <- true
					return
				}
			}
		}
	}
	// Tell the pool to exit as soon as it observes an empty queue.
	exitOnEmpty := func() bool {
		return true
	}
	q.AnalysePool(1, exitOnEmpty, analyzer)
	// NOTE(review): the drained list is checked under "QUEUE::0" even though
	// QueueName was set to "CUSTOM" — confirm the key naming scheme.
	r, e := redisdb.Do("LLEN", "QUEUE::0")
	s, e := redis.Int64(r, e)
	if s != 0 {
		t.Error("Queue is not empty after processing tasks: ", s)
	}
}
func Test_MigrateAllKeysWithTTLs(t *testing.T) { ClearRedis() config = Config{ Source: sourceServer.url, Dest: destServer.url, Workers: 1, Batch: 10, Prefix: "bar", } for i := 0; i < 100; i++ { key := fmt.Sprintf("bar:%d", i) sourceServer.conn.Do("SET", key, i, "EX", 600) } RunAction(migrateKeys) for i := 0; i < 100; i++ { key := fmt.Sprintf("bar:%d", i) exists, _ := redis.Bool(destServer.conn.Do("EXISTS", key)) if !exists { t.Errorf("Could not find a key %d that should have been migrated", key) } ttl, _ := redis.Int64(destServer.conn.Do("PTTL", key)) if ttl < 1 || ttl > 600000 { t.Errorf("Could not find a TTL for key %d that should have been migrated", key) } } }
// IncrementWithExpire will increment a key in redis func (r *RedisStorageManager) IncrememntWithExpire(keyName string, expire int64) int64 { db := r.pool.Get() defer db.Close() log.Debug("Incrementing raw key: ", keyName) if db == nil { log.Info("Connection dropped, connecting..") r.Connect() r.IncrememntWithExpire(keyName, expire) } else { // This function uses a raw key, so we shouldn't call fixKey fixedKey := keyName val, err := redis.Int64(db.Do("INCR", fixedKey)) log.Debug("Incremented key: ", fixedKey, ", val is: ", val) if val == 1 { log.Debug("--> Setting Expire") db.Send("EXPIRE", fixedKey, expire) } if err != nil { log.Error("Error trying to increment value:", err) } return val } return 0 }
func main() { c, err := redis.Dial("tcp", ":6379") check(err) defer c.Close() n, err := c.Do("SET", "z", "0") check(err) fmt.Println(n) v, err := c.Do("GET", "z") check(err) fmt.Printf("value: %s\n", v) n, err = c.Do("SET", "z", 0) check(err) fmt.Println(n) v, err = redis.Int64(c.Do("GET", "z")) check(err) fmt.Println("value:", v) v, _ = c.Do("GET", "z") if str, ok := v.(string); ok { if num, _ := strconv.ParseInt(str, 10, 64); err != nil { fmt.Printf("value: %d\n", num) } } }
// Create a bucket. func (s *Storage) Create(name string, capacity uint, rate time.Duration) (leakybucket.Bucket, error) { conn := s.pool.Get() defer conn.Close() if count, err := redis.Uint64(conn.Do("GET", name)); err != nil { if err != redis.ErrNil { return nil, err } // return a standard bucket if key was not found return &bucket{ name: name, capacity: capacity, remaining: capacity, reset: time.Now().Add(rate), rate: rate, pool: s.pool, }, nil } else if ttl, err := redis.Int64(conn.Do("PTTL", name)); err != nil { return nil, err } else { b := &bucket{ name: name, capacity: capacity, remaining: capacity - min(capacity, uint(count)), reset: time.Now().Add(time.Duration(ttl * millisecond)), rate: rate, pool: s.pool, } return b, nil } }
// DEL() does the "DEL" command. // // Params: // conn: redis.Conn // keys: the keys to be deleted. // Return: // n: The number of keys that were removed. // err: nil if no error occurs or specified error otherwise. func DEL(conn redis.Conn, keys []string) (n int64, err error) { msg := "" if len(keys) == 0 { return 0, errors.New("no keys") } cmd := "DEL" args := []interface{}{} for _, k := range keys { if err := CheckKey(k); err != nil { return 0, err } else { args = append(args, k) } } if n, err = redis.Int64(conn.Do(cmd, args...)); err != nil { msg = fmt.Sprintf("conn.Do(%v, %v): err: %v\n", cmd, args, err) if DEBUG { fmt.Printf(msg) } return 0, err } return n, nil }
func (c *Client) GetConfig(option string) (string, error) { interf, err := c.Do("qless", 0, "config.get", timestamp(), option) if err != nil { return "", err } var contentStr string switch interf.(type) { case []uint8: contentStr, err = redis.String(interf, nil) case int64: var contentInt64 int64 contentInt64, err = redis.Int64(interf, nil) if err == nil { contentStr = strconv.Itoa(int(contentInt64)) } default: err = errors.New("The redis return type is not []uint8 or int64") } if err != nil { return "", err } return contentStr, err }
// SortedSetsUnion creates a combined set from given list of sorted set keys. // // See: http://redis.io/commands/zunionstore func (r *RedisSession) SortedSetsUnion(destination string, keys []string, weights []interface{}, aggregate string) (int64, error) { if destination == "" { return 0, ErrDestinationNotSet } lengthOfKeys := len(keys) if lengthOfKeys == 0 { return 0, ErrKeysNotSet } prefixed := []interface{}{ r.AddPrefix(destination), lengthOfKeys, } for _, key := range keys { prefixed = append(prefixed, r.AddPrefix(key)) } if len(weights) != 0 { prefixed = append(prefixed, "WEIGHTS") prefixed = append(prefixed, weights...) } if aggregate != "" { prefixed = append(prefixed, "AGGREGATE", aggregate) } return redis.Int64(r.Do("ZUNIONSTORE", prefixed...)) }
func (p *RedisStorage) GetInt(key string) (value int64, err error) { data, err := p.do("GET", key) if err != nil || data == nil { return } return redis.Int64(data, err) }
func (c *RedisStore) Increment(key string, delta uint64) (uint64, error) { conn := c.pool.Get() defer conn.Close() // Check for existance *before* increment as per the cache contract. // redis will auto create the key, and we don't want that. Since we need to do increment // ourselves instead of natively via INCRBY (redis doesn't support wrapping), we get the value // and do the exists check this way to minimize calls to Redis val, err := conn.Do("GET", key) if val == nil { return 0, ErrCacheMiss } if err == nil { currentVal, err := redis.Int64(val, nil) if err != nil { return 0, err } var sum int64 = currentVal + int64(delta) _, err = conn.Do("SET", key, sum) if err != nil { return 0, err } return uint64(sum), nil } else { return 0, err } }
// ReadUserChatId read user's chat id func ReadUserChatId(user string) int64 { c := Pool.Get() defer c.Close() key := user + "ChatId" id, _ := redis.Int64(c.Do("GET", key)) return id }
func HubInrc() (int64, error) { v, err := redis.Int64(rds.Do("INCR", "hubid")) if err != nil { return 0, err } return v, nil }
// ReadStructFromRedis populates the struct pointed to by v from the redis
// hash stored at key: for each settable field it issues HGET key <json tag>
// and assigns the reply. Only string and int64 fields are handled; per-field
// errors (e.g. a missing hash field) silently leave the field unchanged.
//
// NOTE(review): v must be a non-nil pointer to a struct — anything else
// panics inside reflect. Confirm all callers guarantee this.
func ReadStructFromRedis(v interface{}, key string) {
	conn := Open()
	defer conn.Close()
	val := reflect.ValueOf(v).Elem()
	for i := 0; i < val.NumField(); i++ {
		valueField := val.Field(i)
		typeField := val.Type().Field(i)
		tag := typeField.Tag
		// Unexported fields cannot be assigned via reflection; skip them.
		if !valueField.CanSet() {
			continue
		}
		switch typeField.Type.Kind() {
		case reflect.String:
			// log.Printf("redis HGET: %s, field: %s", key, tag.Get("json"))
			str, err := redis.String(conn.Do("HGET", key, tag.Get("json")))
			if err != nil {
				continue
			}
			valueField.SetString(str)
		case reflect.Int64:
			integer, err := redis.Int64(conn.Do("HGET", key, tag.Get("json")))
			// log.Printf("redis HGET: %s, field: %s", key, tag.Get("json"))
			if err != nil {
				continue
			}
			valueField.SetInt(integer)
		}
	}
}
func AddUser(user *UserInfo) (rs uint32, err error) { conn := userRedisPool.Get() defer conn.Close() var nkey string = fmt.Sprintf("n_%s", user.Name) var exist bool var seq int64 if seq, err = redis.Int64(conn.Do("incr", UserIdSeq)); err != nil { return } if exist, err = redis.Bool(conn.Do("exists", nkey)); err != nil { return } if exist { err = UserExists return } var buf = make([]byte, 4) binary.LittleEndian.PutUint32(buf, uint32(seq)) if _, err = redis.String(conn.Do("set", nkey, buf)); err != nil { return } rs = uint32(seq) user.Id = rs key := fmt.Sprintf("u%d", rs) var val []byte if val, err = json.Marshal(user); err != nil { return } if _, err = redis.String(conn.Do("set", key, val)); err != nil { return } return }
// pub publishes msg on channel ch over a short-lived connection to the
// hard-coded redis instance at 192.168.176.3:6379, then prints the number of
// subscribers that received it.
//
// NOTE(review): log.Fatal on dial/publish failure terminates the entire
// process — consider returning an error instead. The address and the 100 s
// timeouts should come from configuration.
func (m MRedis) pub(ch string, msg string) {
	c, err := redis.Dial("tcp", "192.168.176.3:6379", redis.DialReadTimeout(100*time.Second), redis.DialWriteTimeout(100*time.Second))
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
	log.Printf("ch:%s msg:%s\n", ch, msg)
	// PUBLISH replies with the count of clients that received the message.
	ok, err := redis.Int64(c.Do("PUBLISH", ch, msg))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(ok)
	/*
		var wg sync.WaitGroup
		wg.Add(2)
		go func() {
			defer wg.Done()
			for {
				switch n := psc.Receive().(type) {
				case redis.Message:
					fmt.Printf("Message: %s %s\n", n.Channel, n.Data)
				case redis.PMessage:
					fmt.Printf("PMessage: %s %s %s\n", n.Pattern, n.Channel, n.Data)
				case redis.Subscription:
					fmt.Printf("Subscription: %s %s %d\n", n.Kind, n.Channel, n.Count)
					if n.Count == 0 {
						return
					}
				case error:
					fmt.Printf("error: %v\n", n)
					return
				}
			}
		}()
		go func() {
			defer wg.Done()
			psc.Subscribe("example")
			psc.PSubscribe("p*")
			// The following function calls publish a message using another
			// connection to the Redis server.
			publish("example", "hello")
			publish("example", "world")
			publish("pexample", "foo")
			publish("pexample", "bar")
			// Unsubscribe from all connections. This will cause the receiving
			// goroutine to exit.
			psc.Unsubscribe()
			psc.PUnsubscribe()
		}()
		wg.Wait()
		return
	*/
}
// IncrBy increments the value pointed by key with the delta, and return the new value. func (s *RedisStore) IncrBy(key string, delta int64) (newVal int64) { v, err := redis.Int64(s.c.Do("INCRBY", key, delta)) if err != nil { return 0 } return v }
func getPastNyanpass(c redis.Conn) int64 { var pastCount int64 var err error if pastCount, err = redis.Int64(c.Do("get", "count")); err != nil { pastCount = 0 } return pastCount }