func ScanMap(values []interface{}) (map[string]string, error) {
	results := make(map[string]string)
	var err error
	for len(values) > 0 {
		var key string
		var value string
		values, err = redis.Scan(values, &key)
		if err != nil {
			return nil, err
		}
		if len(values) > 0 {
			values, err = redis.Scan(values, &value)
			if err != nil {
				return nil, err
			}
			results[key] = value
		} else {
			fmt.Printf("Unable to find value for %s.\n", key)
			results[key] = ""
		}
	}
	return results, nil
}
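A minimal usage sketch (not from the original source) showing how a ScanMap-style helper is typically fed; the connection, key name, and wrapper function below are hypothetical.

// Hypothetical example: flatten an HGETALL reply into a map with ScanMap.
func loadProfile(conn redis.Conn, userKey string) (map[string]string, error) {
	reply, err := redis.Values(conn.Do("HGETALL", userKey))
	if err != nil {
		return nil, err
	}
	return ScanMap(reply)
}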
// get redis's slot size
func SlotsInfo(addr string, fromSlot, toSlot int) (map[int]int, error) {
	c, err := redis.DialTimeout("tcp", addr, defaultTimeout, defaultTimeout, defaultTimeout)
	if err != nil {
		return nil, err
	}
	defer c.Close()

	var reply []interface{}
	var val []interface{}
	reply, err = redis.Values(c.Do("SLOTSINFO", fromSlot, toSlot-fromSlot+1))
	if err != nil {
		return nil, err
	}

	ret := make(map[int]int)
	for len(reply) > 0 {
		if reply, err = redis.Scan(reply, &val); err != nil {
			return nil, err
		}
		var slot, keyCount int
		if _, err := redis.Scan(val, &slot, &keyCount); err != nil {
			return nil, err
		}
		ret[slot] = keyCount
	}
	return ret, nil
}
// get redis's slot size
func SlotsInfo(addr string, fromSlot int, toSlot int, auth string) (map[int]int, error) {
	c, err := newRedisConn(addr, auth)
	if err != nil {
		return nil, errors.Trace(err)
	}
	defer c.Close()

	var (
		reply []interface{}
		val   []interface{}
	)
	reply, err = redis.Values(c.Do("SLOTSINFO", fromSlot, toSlot-fromSlot+1))
	if err != nil {
		return nil, errors.Trace(err)
	}

	ret := map[int]int{}
	for len(reply) > 0 {
		if reply, err = redis.Scan(reply, &val); err != nil {
			return nil, errors.Trace(err)
		}
		var slot, keyCount int
		if _, err := redis.Scan(val, &slot, &keyCount); err != nil {
			return nil, errors.Trace(err)
		}
		ret[slot] = keyCount
	}
	return ret, nil
}
func SendOfflineMsg(client *Client, appid string) {
	// connect to Redis
	redisConn, err := redis.Dial("tcp", ":6379")
	if err != nil {
		log.Printf("Dial redis error: %s", err.Error())
		return
	}

	// get the timestamp
	var reply []interface{}
	reply, err = redis.Values(redisConn.Do("TIME"))
	if err != nil {
		log.Printf("Error on TIME: %s", err.Error())
		return
	}
	var current_time int64
	_, err = redis.Scan(reply, &current_time)

	// get offline messages for each App on this device
	key := "broadcast_msg:" + appid
	reply, err = redis.Values(redisConn.Do("ZRANGE", key, 0, -1))
	if err != nil {
		log.Printf("Error on ZRANGE: %s", err.Error())
		goto Out
	}

	for len(reply) > 0 {
		var msg_id int64
		reply, err = redis.Scan(reply, &msg_id)
		if err != nil {
			log.Printf("Error on Scan ZRANGE reply: %s", err.Error())
			goto Out
		}
		log.Printf("offline msg_id: %d", msg_id)

		key = "msg:" + strconv.FormatInt(msg_id, 10)
		var reply_msg []interface{}
		reply_msg, err = redis.Values(redisConn.Do("HMGET", key, "msg", "expire_time"))
		if err != nil {
			log.Printf("Error on HMGET: %s", err.Error())
			goto Out
		}

		var msg string
		var expire_time int64
		_, err = redis.Scan(reply_msg, &msg, &expire_time)
		if err != nil {
			log.Printf("Error on Scan HMGET reply: %s", err.Error())
			goto Out
		}
		//log.Printf("expire_time: %d, msg: %s", expire_time, msg)

		if expire_time > current_time {
			// message hasn't expired, need to send it
			client.SendMsg(msg, appid)
		}
	}

Out:
	redisConn.Close()
}
func getGroupMembers(gid uint32) []uint64 {
	members := make([]uint64, 0, 3)
	if userRedisPool != nil {
		var err error
		var reply []interface{}
		conn := userRedisPool.Get()
		defer conn.Close()
		if reply, err = redis.Values(conn.Do("sdump", fmt.Sprintf("%dFT", gid))); err != nil {
			ERROR.Println(err)
			return members
		}
		if len(reply) == 0 {
			return members
		}
		var l int
		var bs []byte
		if _, err = redis.Scan(reply, &l, &bs); err != nil {
			ERROR.Println(err)
			return members
		}
		for i := 0; i < len(bs); i += l {
			val := uint64(binary.LittleEndian.Uint16(bs[i:]))
			members = append(members, val)
		}
	}
	return members
}
func ValidateCurrentMaster() error {
	c, err := redis.Dial("tcp", GetSentinel())
	if err != nil {
		return err
	}
	defer c.Close()

	reply, err := redis.Values(c.Do("SENTINEL", "masters"))
	if err != nil {
		return err
	}

	var sentinel_info []string
	reply, err = redis.Scan(reply, &sentinel_info)
	if err != nil {
		return err
	}

	master_name := sentinel_info[1]
	ip := sentinel_info[3]
	port := sentinel_info[5]

	err = SwitchMaster(master_name, ip, port)
	return err
}
// Incr increments the specified key. If the key did not exist, it sets it to 1
// and sets it to expire after the number of seconds specified by window.
//
// It returns the new count value and the number of remaining seconds, or an error
// if the operation fails.
func (r *redisStore) Incr(key string, window time.Duration) (int, int, error) {
	conn := r.pool.Get()
	defer conn.Close()
	if err := selectDB(r.db, conn); err != nil {
		return 0, 0, err
	}
	// Atomically increment and read the TTL.
	conn.Send("MULTI")
	conn.Send("INCR", r.prefix+key)
	conn.Send("TTL", r.prefix+key)
	vals, err := redis.Values(conn.Do("EXEC"))
	if err != nil {
		conn.Do("DISCARD")
		return 0, 0, err
	}
	var cnt, ttl int
	if _, err = redis.Scan(vals, &cnt, &ttl); err != nil {
		return 0, 0, err
	}
	// If there was no TTL set, then this is a newly created key (INCR creates the key
	// if it didn't exist), so set it to expire.
	if ttl == -1 {
		ttl = int(window.Seconds())
		_, err = conn.Do("EXPIRE", r.prefix+key, ttl)
		if err != nil {
			return 0, 0, err
		}
	}
	return cnt, ttl, nil
}
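A short usage sketch (assumed, not part of the original code): a fixed-window rate limit built on the Incr method above; the function name, clientID parameter, and per-minute window are illustrative only.

// Hypothetical caller: allow at most `limit` increments per one-minute window.
func allowRequest(store *redisStore, clientID string, limit int) (bool, error) {
	count, _, err := store.Incr(clientID, time.Minute)
	if err != nil {
		return false, err
	}
	return count <= limit, nil
}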
func (s *RedisStore) Scan(partition string) <-chan *bucket.Bucket {
	retBuckets := make(chan *bucket.Bucket)
	go func(out chan *bucket.Bucket) {
		rc := s.redisPool.Get()
		defer rc.Close()
		defer close(out)
		rc.Send("MULTI")
		rc.Send("SMEMBERS", partition)
		rc.Send("DEL", partition)
		reply, err := redis.Values(rc.Do("EXEC"))
		if err != nil {
			fmt.Printf("at=%q error=%s\n", "bucket-store-scan", err)
			return
		}
		var delCount int64
		var members []string
		if _, err := redis.Scan(reply, &members, &delCount); err != nil {
			fmt.Printf("at=%q error=%s\n", "bucket-store-scan", err)
			return
		}
		for _, member := range members {
			id, err := bucket.ParseId(member)
			if err != nil {
				fmt.Printf("at=%q error=%s\n", "bucket-store-parse-key", err)
				continue
			}
			out <- &bucket.Bucket{Id: id}
		}
	}(retBuckets)
	return retBuckets
}
func (s *testProxyRouterSuite) TestMget(c *C) {
	cc := s.testDialConn(c, proxyAddr, proxyAuth)
	defer cc.Close()

	const count = 20480
	keys := make([]interface{}, count)
	for i := 0; i < count; i++ {
		k := strconv.Itoa(i)
		keys[i] = k
		_, err := cc.Do("SET", k, k)
		c.Assert(err, IsNil)
	}

	reply, err := redis.Values(cc.Do("MGET", keys...))
	c.Assert(err, IsNil)

	temp := make([]string, count)
	values := make([]interface{}, count)
	for i := 0; i < count; i++ {
		values[i] = &temp[i]
	}
	_, err = redis.Scan(reply, values...)
	c.Assert(err, IsNil)

	for i := 0; i < count; i++ {
		c.Assert(keys[i], Equals, temp[i])
	}

	s.s1.store.Reset()
	s.s2.store.Reset()
}
// Work runs an infinite loop, watching its database for new requests, starting jobs as requested,
// moving stream data back and forth, and updating job status as it changes.
func (w *Worker) Work() error {
	conn := w.pool.Get()
	defer conn.Close()
	for {
		Debugf("Waiting for job")
		// Get the list of current jobs
		// Wait for next start event
		vals, err := redis.Values(conn.Do("BLPOP", w.KeyPath("start"), "0"))
		if err != nil {
			return err
		}
		var id string
		if _, err := redis.Scan(vals[1:], &id); err != nil {
			return err
		}
		Debugf("Received instruction to start job %s", id)
		// Acquire lock on the job
		acquired, err := redis.Bool(conn.Do("SETNX", w.KeyPath(id), "me"))
		if err != nil {
			return err
		}
		Debugf("Acquiring lock for job %s... -> %v", id, acquired)
		// FIXME: set a dead man's switch with TTL & a periodic refresh
		if acquired {
			Debugf("Spawning goroutine for job %s", id)
			go func(id string) {
				if err := w.startJob(id); err != nil {
					fmt.Fprintf(os.Stderr, "Error starting job %s: %s\n", id, err)
				}
			}(id)
		}
	}
}
func (c *RedisCache) key2Mode(key string, typ reflect.Type, val reflect.Value) error {
	conn := c.ConnGet()
	defer conn.Close()

	conn.Send("MULTI")
	vals := []interface{}{}
	timeField := []int{}
	for i := 0; i < typ.NumField(); i++ {
		conn.Send("HGET", key, typ.Field(i).Name)
		switch val.Field(i).Interface().(type) {
		case time.Time:
			timeField = append(timeField, i)
			var str string
			vals = append(vals, &str)
		default:
			vals = append(vals, val.Field(i).Addr().Interface())
		}
	}
	reply, err := redis.Values(conn.Do("EXEC"))
	if err != nil {
		return err
	}
	if _, err := redis.Scan(reply, vals...); err != nil {
		return err
	}
	// Parse the time fields separately from their string representations.
	for _, n := range timeField {
		if t, e := time.Parse(time.RFC1123Z, string(reply[n].([]byte))); e == nil {
			val.Field(n).Set(reflect.ValueOf(t))
		}
	}
	return nil
}
// creates a status hash structure and returns its status id.
func createStatus(message string, uid int, c redis.Conn) (int, error) {
	var login string
	var sid int
	defer c.Close()

	c.Do("MULTI")
	// get login name for user with id
	c.Do("HGET", "user:"+strconv.Itoa(uid), "login")
	// get the incremented global status count
	c.Do("INCR", "status:id")
	// reply contains both the login for the uid and the global status count
	reply, err := redis.Values(c.Do("EXEC"))
	if err != nil {
		return -1, err
	}
	// scan reply into the local variables
	if _, err := redis.Scan(reply, &login, &sid); err != nil {
		return -1, err
	}
	// set all the appropriate values in the hash store
	if _, err := c.Do("HMSET", "status:"+strconv.Itoa(sid),
		"message", message,
		"posted", time.Now().Unix(),
		"id", sid,
		"uid", uid,
		"login", login); err != nil {
		return -1, err
	}
	// increment the user's post count
	if _, err := c.Do("HINCRBY", "user:"+strconv.Itoa(uid), "posts", 1); err != nil {
		return -1, err
	}
	return sid, nil
}
// GetWithTime returns the value of the key if it is in the store
// or -1 if it does not exist. It also returns the current time at
// the redis server to microsecond precision.
func (r *RedigoStore) GetWithTime(key string) (int64, time.Time, error) {
	var now time.Time
	key = r.prefix + key
	conn, err := r.getConn()
	if err != nil {
		return 0, now, err
	}
	defer conn.Close()

	conn.Send("TIME")
	conn.Send("GET", key)
	conn.Flush()
	timeReply, err := redis.Values(conn.Receive())
	if err != nil {
		return 0, now, err
	}
	var s, us int64
	if _, err := redis.Scan(timeReply, &s, &us); err != nil {
		return 0, now, err
	}
	now = time.Unix(s, us*int64(time.Microsecond))

	v, err := redis.Int64(conn.Receive())
	if err == redis.ErrNil {
		return -1, now, nil
	} else if err != nil {
		return 0, now, err
	}
	return v, now, nil
}
func (sr *Runner) getChecks(maxChecks int, timeout int) []stalker.Check {
	log.Debugln("Getting checks off queue")
	checks := make([]stalker.Check, 0)
	expireTime := time.Now().Add(3 * time.Second).Unix()
	for len(checks) <= maxChecks {
		// we've exceeded our try time
		if time.Now().Unix() > expireTime {
			break
		}
		rconn := sr.rpool.Get()
		res, err := redis.Values(rconn.Do("BLPOP", sr.conf.workerQueue, timeout))
		rconn.Close() // close per iteration rather than deferring inside the loop
		if err != nil {
			if err != redis.ErrNil {
				log.Errorln("Error grabbing check from queue:", err.Error())
				break
			} else {
				log.Debugln("redis result:", err)
				continue
			}
		}
		var rb []byte
		if _, err := redis.Scan(res, nil, &rb); err != nil {
			log.Errorln("Error scanning BLPOP reply:", err.Error())
			break
		}
		var check stalker.Check
		if err := json.Unmarshal(rb, &check); err != nil {
			log.Errorln("Error decoding check from queue to json:", err.Error())
			break
		}
		checks = append(checks, check)
	}
	return checks
}
// Info returns information about the lock.
func (l *RedisLock) Info() (*LockInfo, error) {
	var owner, data string
	var expire int

	l.client.conn.Send("MULTI")
	l.client.conn.Send("GET", l.key())
	l.client.conn.Send("PTTL", l.key())
	l.client.conn.Send("GET", l.dataKey())
	reply, err := redis.Values(l.client.conn.Do("EXEC"))
	if err == redis.ErrNil {
		return &LockInfo{l.name, false, "", time.Duration(0), ""}, nil
	}
	if err != nil {
		return nil, err
	}

	_, err = redis.Scan(reply, &owner, &expire, &data)
	if err != nil {
		return nil, err
	}

	ttl := time.Duration(expire) * time.Millisecond
	return &LockInfo{
		Name:     l.name,
		Acquired: ttl > 0,
		Owner:    owner,
		TTL:      ttl,
		Data:     data,
	}, nil
}
// return: whether any keys remain to be migrated, error
// slotsmgrt host port timeout slotnum count
func (m *migrater) sendRedisMigrateCmd(c redis.Conn, slotId int, toAddr string) (bool, error) {
	addrParts := strings.Split(toAddr, ":")
	if len(addrParts) != 2 {
		return false, ErrInvalidAddr
	}

	// use scan and migrate
	reply, err := redis.MultiBulk(c.Do("scan", 0))
	if err != nil {
		return false, err
	}

	var next string
	var keys []interface{}
	if _, err := redis.Scan(reply, &next, &keys); err != nil {
		return false, err
	}

	for _, key := range keys {
		if _, err := c.Do("migrate", addrParts[0], addrParts[1], key, slotId, MIGRATE_TIMEOUT); err != nil {
			// todo, try del if key exists
			return false, err
		}
	}
	return next != "0", nil
}
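A usage sketch (illustrative, assuming the surrounding migrater type): calling sendRedisMigrateCmd in a loop until it reports that no keys remain; the method name migrateSlot is hypothetical.

// Hypothetical driver loop: keep migrating until the scan cursor wraps to 0.
func (m *migrater) migrateSlot(c redis.Conn, slotId int, toAddr string) error {
	for {
		more, err := m.sendRedisMigrateCmd(c, slotId, toAddr)
		if err != nil {
			return err
		}
		if !more {
			return nil
		}
	}
}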
func ScanBuckets(mailbox string) <-chan *Bucket {
	buckets := make(chan *Bucket)
	go func(ch chan *Bucket) {
		defer utils.MeasureT("redis.scan-buckets", time.Now())
		defer close(ch)
		rc := redisPool.Get()
		defer rc.Close()
		rc.Send("MULTI")
		rc.Send("SMEMBERS", mailbox)
		rc.Send("DEL", mailbox)
		reply, err := redis.Values(rc.Do("EXEC"))
		if err != nil {
			fmt.Printf("at=%q error=%s\n", "redset-smembers", err)
			return
		}
		var delCount int64
		var members []string
		if _, err := redis.Scan(reply, &members, &delCount); err != nil {
			fmt.Printf("at=%q error=%s\n", "redset-smembers", err)
			return
		}
		for _, member := range members {
			k, err := ParseKey(member)
			if err != nil {
				fmt.Printf("at=parse-key error=%s\n", err)
				continue
			}
			ch <- &Bucket{Key: *k}
		}
	}(buckets)
	return buckets
}
func scanKeys(queue chan Task, wg *sync.WaitGroup) {
	cursor := 0
	conn := sourceConnection(config.Source)
	key_search := fmt.Sprintf("%s*", config.Prefix)
	log.Println("Starting Scan with keys", key_search)
	for {
		// we scan with our cursor offset, starting at 0
		reply, err := redis.Values(conn.Do("scan", cursor, "match", key_search, "count", config.Batch))
		if err != nil {
			log.Println("Error during SCAN:", err)
			close(queue)
			wg.Done()
			break
		}
		var tmp_keys []string
		// this func name is confusing...it actually just converts array returns to Go values
		redis.Scan(reply, &cursor, &tmp_keys)
		// put this thing in the queue
		queue <- Task{list: tmp_keys}
		// check if we need to stop...
		if cursor == 0 {
			log.Println("Finished!")
			// close the channel
			close(queue)
			wg.Done()
			break
		}
	}
}
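A consumer sketch (hypothetical, not in the original source): draining the Task channel that scanKeys closes once the SCAN cursor returns to 0. The wg.Add(1) placement and the consumeTasks name are assumptions.

// Hypothetical consumer: process key batches until scanKeys closes the queue.
func consumeTasks(queue chan Task, wg *sync.WaitGroup) {
	wg.Add(1) // matched by the wg.Done() inside scanKeys
	go scanKeys(queue, wg)
	for task := range queue {
		log.Println("got batch of", len(task.list), "keys")
	}
	wg.Wait()
}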
func projectsList(key string, from, to int) (*[]*Project, error) {
	c := redisPool.Get()
	defer c.Close()

	recentList, err := redis.Values(c.Do("LRANGE", key, from, to))
	if err != nil {
		return nil, err
	}

	var projects []*Project
	for len(recentList) > 0 {
		var pId string
		recentList, err = redis.Scan(recentList, &pId)
		if err != nil {
			return nil, err
		}
		project, err := Fetch(pId)
		if err != nil {
			log.Println(err)
		}
		projects = append(projects, project)
	}
	return &projects, nil
}
// FetchEvent waits for an event from the Db
func (connector *DbConnector) FetchEvent() (*EventData, error) {
	c := connector.Pool.Get()
	defer c.Close()

	var event EventData

	rawRes, err := c.Do("BRPOP", "moira-trigger-events", 1)
	if err != nil {
		log.Warning("Failed to wait for event: %s", err.Error())
		time.Sleep(time.Second * 5)
		return nil, nil
	}
	if rawRes != nil {
		var (
			eventBytes []byte
			key        []byte
		)
		res, _ := redis.Values(rawRes, nil)
		if _, err = redis.Scan(res, &key, &eventBytes); err != nil {
			log.Warning("Failed to parse event: %s", err.Error())
			return nil, err
		}
		if err := json.Unmarshal(eventBytes, &event); err != nil {
			log.Error(fmt.Sprintf("Failed to parse event json %s: %s", eventBytes, err.Error()))
			return nil, err
		}
		return &event, nil
	}
	return nil, nil
}
// Return a JSON with the ids and names of the Pingdom checks
func getJsonChecks(redisResponse []interface{}) []byte {
	conn := redisPool.Get() // Redis connection to get the names for the ids

	checks := make([]CheckType, len(redisResponse))
	for k := range redisResponse {
		v := ""
		redisResponse, _ = redis.Scan(redisResponse, &v)
		id, _ := strconv.ParseInt(v, 10, 64)
		// Get the name corresponding to the id, retrying on Redis errors
		n, err := redis.String(conn.Do("GET", "check:"+v))
		for err != nil {
			log.Printf("Redis error in GET check: %s\n", err)
			n, err = redis.String(conn.Do("GET", "check:"+v))
		}
		checks[k] = CheckType{id, n}
	}
	conn.Close()

	b, _ := json.MarshalIndent(Checks{checks}, "", " ")
	return b
}
func SlotsInfo(addr, passwd string, fromSlot, toSlot int) (map[int]int, error) {
	c, err := DialTo(addr, passwd)
	if err != nil {
		return nil, err
	}
	defer c.Close()

	infos, err := redis.Values(c.Do("SLOTSINFO", fromSlot, toSlot-fromSlot+1))
	if err != nil {
		return nil, errors.Trace(err)
	}

	slots := make(map[int]int)
	if infos != nil {
		for i := 0; i < len(infos); i++ {
			info, err := redis.Values(infos[i], nil)
			if err != nil {
				return nil, errors.Trace(err)
			}
			var slotid, slotsize int
			if _, err := redis.Scan(info, &slotid, &slotsize); err != nil {
				return nil, errors.Trace(err)
			}
			slots[slotid] = slotsize
		}
	}
	return slots, nil
}
func LoadUserAccessToken(token string) (int64, int64, string, error) {
	conn := redis_pool.Get()
	defer conn.Close()

	key := fmt.Sprintf("access_token_%s", token)

	var uid int64
	var appid int64
	var uname string

	exists, err := redis.Bool(conn.Do("EXISTS", key))
	if err != nil {
		return 0, 0, "", err
	}
	if !exists {
		return 0, 0, "", errors.New("token does not exist")
	}

	reply, err := redis.Values(conn.Do("HMGET", key, "user_id", "app_id", "user_name"))
	if err != nil {
		log.Info("hmget error:", err)
		return 0, 0, "", err
	}
	_, err = redis.Scan(reply, &uid, &appid, &uname)
	if err != nil {
		log.Warning("scan error:", err)
		return 0, 0, "", err
	}
	return appid, uid, uname, nil
}
func (db *Database) getDoc(c redis.Conn, path string) (*doc.Package, time.Time, error) {
	r, err := redis.Values(getDocScript.Do(c, path))
	if err == redis.ErrNil {
		return nil, time.Time{}, nil
	} else if err != nil {
		return nil, time.Time{}, err
	}

	var p []byte
	var t int64
	if _, err := redis.Scan(r, &p, &t); err != nil {
		return nil, time.Time{}, err
	}

	p, err = snappy.Decode(nil, p)
	if err != nil {
		return nil, time.Time{}, err
	}

	var pdoc doc.Package
	if err := gob.NewDecoder(bytes.NewReader(p)).Decode(&pdoc); err != nil {
		return nil, time.Time{}, err
	}

	nextCrawl := pdoc.Updated
	if t != 0 {
		nextCrawl = time.Unix(t, 0).UTC()
	}
	return &pdoc, nextCrawl, err
}
func (s *QueueSubscriber) redisPop(ch chan response) {
	var res response
	reply, err := redis.Values(s.conn.Do(QueueSubCmd, s.queue, 0))
	if err != nil {
		res.err = err
		ch <- res
		return
	}

	var qName string
	var msg string
	if _, err := redis.Scan(reply, &qName, &msg); err != nil {
		res.err = err
		ch <- res
		return
	}

	// base64 decode msg
	b, err := base64.StdEncoding.DecodeString(msg)
	if err != nil {
		res.err = err
		ch <- res
		return
	}
	res.b = b
	ch <- res
}
func (db *Database) ImportGraph(pdoc *doc.Package, level DepLevel) ([]Package, [][2]int, error) {
	// This breadth-first traversal of the package's dependencies uses the
	// Redis pipeline as queue. Links to packages with invalid import paths are
	// only included for the root package.
	c := db.Pool.Get()
	defer c.Close()
	if err := importGraphScript.Load(c); err != nil {
		return nil, nil, err
	}

	nodes := []Package{{Path: pdoc.ImportPath, Synopsis: pdoc.Synopsis}}
	edges := [][2]int{}
	index := map[string]int{pdoc.ImportPath: 0}

	for _, path := range pdoc.Imports {
		if level >= HideStandardAll && isStandardPackage(path) {
			continue
		}
		j := len(nodes)
		index[path] = j
		edges = append(edges, [2]int{0, j})
		nodes = append(nodes, Package{Path: path})
		importGraphScript.Send(c, path)
	}

	for i := 1; i < len(nodes); i++ {
		c.Flush()
		r, err := redis.Values(c.Receive())
		if err == redis.ErrNil {
			continue
		} else if err != nil {
			return nil, nil, err
		}
		var synopsis, terms string
		if _, err := redis.Scan(r, &synopsis, &terms); err != nil {
			return nil, nil, err
		}
		nodes[i].Synopsis = synopsis
		for _, term := range strings.Fields(terms) {
			if strings.HasPrefix(term, "import:") {
				path := term[len("import:"):]
				if level >= HideStandardDeps && isStandardPackage(path) {
					continue
				}
				j, ok := index[path]
				if !ok {
					j = len(nodes)
					index[path] = j
					nodes = append(nodes, Package{Path: path})
					importGraphScript.Send(c, path)
				}
				edges = append(edges, [2]int{i, j})
			}
		}
	}
	return nodes, edges, nil
}
func TestScanConversionError(t *testing.T) {
	for _, tt := range scanConversionErrorTests {
		values := []interface{}{tt.src}
		dest := reflect.New(reflect.TypeOf(tt.dest))
		values, err := redis.Scan(values, dest.Interface())
		if err == nil {
			t.Errorf("Scan(%v) did not return error", tt)
		}
	}
}
/* Get Session - auto create Session and Cookie if not found */
func (conn *SessionConnect) Session(w http.ResponseWriter, r *http.Request) *SessionCookie {
	// New cookie object
	t_sess := new(SessionCookie)
	t_sess.name = conn.session_id
	t_sess.values = make(map[string]interface{})

	// Getting cookie
	cookie, err := r.Cookie(t_sess.name)
	if err != http.ErrNoCookie && err != nil {
		log.Printf("%s", err)
	}

	if cookie == nil {
		// Setting new cookie, no cookie found
		n_cookie := &http.Cookie{
			Name:    t_sess.name,
			Value:   get_random_value(),
			Path:    "/",
			MaxAge:  Expire,
			Expires: time.Unix(time.Now().Unix()+int64(Expire), 0),
		}
		t_sess.cookie = n_cookie
	} else {
		// Cookie found, getting data from Redis
		t_sess.cookie = cookie
		do_req, err := clredis.Do("HGETALL", Prefix+t_sess.cookie.Value)
		if err != nil {
			log.Printf("%s", err)
		}
		v, err := redis.Values(do_req, err)
		if err != nil {
			log.Printf("%s", err)
		}
		for len(v) > 0 {
			var key, value string
			values, err := redis.Scan(v, &key, &value)
			if err != nil {
				log.Printf("%s", err)
			}
			v = values
			t_sess.values[key] = value
		}
		// reset expiration
		expire_sess(t_sess)
	}

	// Set cookie
	http.SetCookie(w, t_sess.cookie)
	// return SessionCookie instance
	return t_sess
}
func (s *SortedSetType) All() SortedSetItems {
	reply, err := s.client.do("ZRANGE", s.Key, 0, -1, "WITHSCORES")
	values, err := redis.Values(reply, err)
	if err != nil {
		return SortedSetItems{}
	}
	items := make([]SortedSetItem, len(values)/2)
	for i := range items {
		values, _ = redis.Scan(values, &items[i].Member, &items[i].Score)
	}
	return SortedSetItems(items)
}
func ShowOrdersController(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
	r.ParseForm()
	var accessToken string
	if len(r.Form["access_token"]) > 0 {
		accessToken = r.Form["access_token"][0]
	} else {
		accessToken = r.Header.Get("Access-Token")
	}
	if len(accessToken) <= 0 {
		w.WriteHeader(401)
		// "无效的令牌" means "invalid token"
		fmt.Fprintf(w, `{"code": "INVALID_ACCESS_TOKEN", "message": "无效的令牌"}`)
		return
	}

	lua := `local uid = redis.call("get", "u:"..KEYS[1])
local oid = redis.call("get", "u:o:"..uid)
if oid then
    local menu = redis.call("hgetall", "o:f:"..oid)
    return {uid, oid, menu}
end`
	var getScript = redis.NewScript(1, lua)

	c := rp.Get()
	defer c.Close()
	reply, _ := redis.Values(getScript.Do(c, accessToken))

	var json bytes.Buffer
	if len(reply) != 0 {
		var userId, orderId string
		var items []interface{}
		redis.Scan(reply, &userId, &orderId, &items)

		total := 0
		var j []string
		v, _ := redis.Ints(items, nil)
		for i := 0; i < len(v); i += 2 {
			fid := v[i]
			cnt := v[i+1]
			total += gfoods[fid].Price * cnt
			j = append(j, fmt.Sprintf(`{"food_id": %d, "count": %d}`, fid, cnt))
		}
		json.WriteString("[{")
		json.WriteString(fmt.Sprintf(`"id": "%s", `, orderId))
		json.WriteString(fmt.Sprintf(`"items": [%s]`, strings.Join(j, ",")))
		json.WriteString(fmt.Sprintf(`,"total": %d`, total))
		json.WriteString("}]")
	} else {
		json.WriteString("{}")
	}
	w.WriteHeader(200)
	fmt.Fprint(w, json.String())
}