func clearRedis(t *testing.T, c redis.Conn, key string) {
	reply, err := c.Do("DEL", key)
	_, err = redis.Int(reply, err)
	if err != nil {
		t.Logf("DEL %s failed, %v", key, err)
	}
}
// QueueDocsetJob will queue a job to build a docset for an artifact, if there
// is not yet one built.
func QueueDocsetJob(groupId, artifactId string, version string) error {
	redisConn := redisconn.Get()
	id := groupId + ":" + artifactId

	exists, err := redis.Bool(redisConn.Do("SISMEMBER", "docsets", id))
	if err != nil {
		return err
	}

	if exists && version != "" {
		verExists, err := redis.Bool(redisConn.Do("SISMEMBER", "docset:"+id, version))
		// A nil error with verExists == true also returns nil here: the
		// requested version is already built, so nothing is queued.
		if err != nil || verExists {
			return err
		}
	} else if exists {
		return nil
	}

	if err := QueueJob(map[string]string{
		"Job":        "build-docset",
		"ArtifactId": artifactId,
		"GroupId":    groupId,
		"Version":    version,
	}); err != nil {
		return err
	}
	return nil
}
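// A minimal sketch, not part of the original source, of the bookkeeping a
// build-docset worker would need to perform for the SISMEMBER checks in
// QueueDocsetJob to see finished work: add the "group:artifact" id to the
// "docsets" set and the version to the per-artifact "docset:<id>" set. The
// helper name markDocsetBuilt is hypothetical; reuse of redisconn.Get() is an
// assumption based on QueueDocsetJob above.
func markDocsetBuilt(groupId, artifactId, version string) error {
	conn := redisconn.Get()
	id := groupId + ":" + artifactId

	// Record that at least one docset exists for this artifact.
	if _, err := conn.Do("SADD", "docsets", id); err != nil {
		return err
	}
	// Record the specific version, if one was requested.
	if version != "" {
		if _, err := conn.Do("SADD", "docset:"+id, version); err != nil {
			return err
		}
	}
	return nil
}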
func PingRedis(c redis.Conn, t time.Time) error {
	_, err := c.Do("ping")
	if err != nil {
		log.Println("[ERROR] ping redis fail", err)
	}
	return err
}
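// A minimal sketch (an assumption, not shown in the original code) of how a
// function with PingRedis's signature is typically wired into a redigo
// redis.Pool: the pool calls TestOnBorrow on idle connections before handing
// them out and discards any connection for which it returns an error. The
// address "localhost:6379" is a placeholder.
func newPool() *redis.Pool {
	return &redis.Pool{
		MaxIdle:     3,
		IdleTimeout: 240 * time.Second,
		Dial: func() (redis.Conn, error) {
			return redis.Dial("tcp", "localhost:6379")
		},
		TestOnBorrow: PingRedis,
	}
}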
func (self *SortedSetGenerator) checkZscore(conn redis.Conn, reqnum int32, mytime *time.Timer) {
	// Run reqnum requests, reusing the entries in data_list until reqnum
	// operations have been issued or the timer fires.
	length := int32(len(self.data_list))
	var i int32 = 0
	for i < reqnum {
		values := self.data_list[i%length]
		key := values.key
		for _, value := range values.sortedlist {
			// Check the bound outside the select: a break inside a select
			// case only exits the select, not this loop.
			if i >= reqnum {
				break
			}
			select {
			case <-mytime.C:
				return
			default:
				i++
				//log.Info("redis zscore ops: [%d:%d]", key, value)
				reporter.Resultdata.AddSendQuantity()
				_, err := conn.Do("zscore", key, value)
				if err != nil {
					log.Error("redis zscore failed: [%d:%d],err %v", key, value, err)
					reporter.Resultdata.AddFailQuantity()
				} else {
					reporter.Resultdata.AddSuccQuantity()
				}
			}
		}
	}
}
func createShortURL(url string, conn redis.Conn) (Data, error) {
	var d Data
	count, err := redis.Int(conn.Do("INCR", "global:size"))
	if err != nil {
		log.Print(err)
		return d, err
	}
	log.Print("Total: ", count)

	encodedVar := base62.EncodeInt(int64(count))
	key := strings.Join([]string{encodedVar, url}, "||")

	conn.Send("MULTI")
	conn.Send("HSET", key, "count", 0)
	_, err2 := conn.Do("EXEC")
	if err2 != nil {
		log.Print(err2)
		return d, err2
	}

	d.Original = url
	d.HitCount = 0
	d.Short = encodedVar
	d.FullShort = strings.Join([]string{*base, encodedVar}, "")
	return d, err
}
func (self *SortedSetGenerator) checkZadd(conn redis.Conn, reqnum int32, mytime *time.Timer) {
	// Run reqnum requests, reusing the entries in data_list until reqnum
	// operations have been issued or the timer fires.
	length := int32(len(self.data_list))
	var i int32 = 0
	var testquantity int32 = 10
	var index int = 0
	for ; i < reqnum; i++ {
		select {
		case <-mytime.C:
			return
		default:
			values := self.data_list[testquantity%length]
			key := values.key
			data := values.sortedlist[index%len(values.sortedlist)]
			//log.Info("redis zadd ops: [%d:%d:%d]", key, index, data)
			reporter.Resultdata.AddSendQuantity()
			_, err := redis.Int(conn.Do("zadd", key, index, data))
			if err != nil {
				log.Error("redis zadd failed: [%d:%d],err %v", values.key, data, err)
				reporter.Resultdata.AddFailQuantity()
			} else {
				reporter.Resultdata.AddSuccQuantity()
			}
			testquantity++
			if testquantity%length == 0 {
				index++
			}
		}
	}
}
func (self *SortedSetGenerator) checkZcount(conn redis.Conn, req_num int32, mytime *time.Timer) {
	var pos int32 = 0
	for _, value := range self.data_list {
		// Check the bound outside the select so the break exits this loop
		// rather than just the select.
		if pos >= req_num {
			break
		}
		select {
		case <-mytime.C:
			return
		default:
			reporter.Resultdata.AddSendQuantity()
			reply, err := redis.Int(conn.Do("zcount", value.key, "-inf", "+inf"))
			//log.Info("redis operating: zcount %d -inf +inf", value.key)
			if err != nil {
				log.Error("redis operating: zcount %d error %v", value.key, err)
				reporter.Resultdata.AddFailQuantity()
			} else {
				if reply > 0 {
					reporter.Resultdata.AddSuccQuantity()
				} else {
					reporter.Resultdata.AddErrorQuantity()
				}
			}
			pos++
		}
	}
}
func popMessage(conn redis.Conn, key string) (*Message, error) {
	reply, err := redis.MultiBulk(conn.Do("BLPOP", key, DEFAULTTIMEOUT))
	if err != nil {
		return nil, err
	}
	return parseMessage(reply)
}
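// A hedged sketch of the producing side that popMessage's BLPOP expects: a
// message pushed onto the same list key with RPUSH. The JSON encoding is an
// assumption; the real Message struct and parseMessage are defined elsewhere
// in the original code and may use a different wire format.
func pushMessage(conn redis.Conn, key string, msg *Message) error {
	raw, err := json.Marshal(msg)
	if err != nil {
		return err
	}
	// RPUSH pairs with BLPOP to form a simple FIFO queue.
	_, err = conn.Do("RPUSH", key, raw)
	return err
}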
func DrainStore(r redis.Conn, id string, secret string, body string) bool {
	// STABLE AS F**K
	if !strings.Contains(body, "host heroku router - at=info method=") {
		return false
	}

	key := fmt.Sprintf("auth-%s", id)
	existing, _ := redis.String(r.Do("GET", key))
	if existing != secret {
		return false
	}

	request := make(map[string]string)
	request["app_id"] = id

	matchVerb, _ := regexp.Compile(`method=(\w+)`)
	request["verb"] = matchVerb.FindStringSubmatch(body)[1]

	matchPath, _ := regexp.Compile(`path=([^ ]+)`)
	request["path"] = matchPath.FindStringSubmatch(body)[1]

	matchStatus, _ := regexp.Compile(`status=(\d+)`)
	request["status"] = matchStatus.FindStringSubmatch(body)[1]

	raw, err := json.Marshal(request)
	if err != nil {
		panic(err)
	}
	r.Do("RPUSH", "requests", raw)
	return true
}
func (self *HashGenerator) hdel(conn redis.Conn, reqnum int32, mytime *time.Timer) {
	length := int32(len(self.data_list))
	var i, j int32 = 0, 0
	for ; j < reqnum; i++ {
		values := self.data_list[i%length]
		for _, data := range values.names {
			if j > reqnum {
				break
			}
			j++
			select {
			case <-mytime.C:
				return
			default:
				reporter.Resultdata.AddSendQuantity()
				_, err := redis.Int(conn.Do("hdel", values.key, data.name))
				if err != nil {
					log.Error("redis hdel failed: [%s:%s],err %v", values.key, data.name, err)
					reporter.Resultdata.AddFailQuantity()
				} else {
					reporter.Resultdata.AddSuccQuantity()
					//datacheck := &reporter.ObjChecker{1, reply}
					//reporter.Datasummer.AddChecker(datacheck)
				}
			}
		}
	}
}
func (r *RedisBackend) testOnBorrow(c redis.Conn, t time.Time) error {
	_, err := c.Do("PING")
	if err != nil {
		defer c.Close()
	}
	return err
}
func (self *HashGenerator) hkeys(conn redis.Conn, reqnum int32, mytime *time.Timer) {
	length := int32(len(self.data_list))
	var i int32 = 0
	for ; i < reqnum; i++ {
		select {
		case <-mytime.C:
			return
		default:
			values := self.data_list[i%length]
			key := values.key
			name_slice := make([]string, len(values.names))
			for pos, data := range values.names {
				name_slice[pos] = data.name
			}
			reporter.Resultdata.AddSendQuantity()
			reply, err := redis.Strings(conn.Do("hkeys", key))
			if err != nil {
				log.Error("redis hkeys failed: [%s],err %v", key, err)
				reporter.Resultdata.AddFailQuantity()
			} else {
				//reporter.Resultdata.AddSuccQuantity()
				datacheck := &reporter.StringArrayChecker{name_slice, reply}
				reporter.Datasummer.AddChecker(datacheck)
			}
		}
	}
}
// key name value
func (self *HashGenerator) hset(conn redis.Conn, reqnum int32, mytime *time.Timer) {
	length := int32(len(self.data_list))
	var i int32 = 0
	for ; i < reqnum; i++ {
		values := self.data_list[i%length]
		key := values.key
		for _, data := range values.names {
			select {
			case <-mytime.C:
				return
			default:
				reporter.Resultdata.AddSendQuantity()
				_, err := redis.Int(conn.Do("hset", key, data.name, data.value))
				if err != nil {
					log.Error("redis hset failed: [%s:%s:%d],err %v", key, data.name, data.value, err)
					reporter.Resultdata.AddFailQuantity()
				} else {
					reporter.Resultdata.AddSuccQuantity()
				}
			}
		}
	}
}
// findGame finds a game in the list of open games. If one doesn't exist, creates a new gameid
// returns a new Game and if it's a new game or not.
func findGame(ctx context.Context, con redis.Conn) (*Game, bool, error) {
	lc := "FindGame"

	// do we have an open game?
	gameID, err := redis.String(con.Do("RPOP", openGames))
	// ignore nil errors, since that is expected
	if err != nil && err != redis.ErrNil {
		logger.Error(ctx, lc, "Error finding open game: %v", err)
		return new(Game), false, err
	}

	// is this a brand new game?
	isNew := (gameID == "")
	if isNew {
		logger.Info(ctx, lc, "Could not find open game, creating one... ")
		u, err := uuid.NewV4()
		if err != nil {
			return nil, false, err
		}
		gameID = u.String()
	}
	return NewGame(gameID), isNew, nil
}
func (self *StringGenerator) checkGet(conn redis.Conn, reqnum int32, mytime *time.Timer) {
	// Run reqnum requests, reusing the entries in data_list until reqnum
	// operations have been issued or the timer fires.
	length := int32(len(self.data_list))
	var i int32 = 0
	for ; i < reqnum; i++ {
		select {
		case <-mytime.C:
			return
		default:
			values := self.data_list[i%length]
			reporter.Resultdata.AddSendQuantity()
			//reply, err := redis.String(conn.Do("get", values.key))
			_, err := conn.Do("get", values.key)
			//log.Info("redis operating: get %s", values.key)
			if err != nil {
				//log.Error("redis %s failed: %v, %v", cmd, values, err)
				reporter.Resultdata.AddFailQuantity()
			} else {
				/*if values.value == reply {
					reporter.Resultdata.AddSuccQuantity()
				} else {
					reporter.Resultdata.AddErrorQuantity()
				}*/
				reporter.Resultdata.AddSuccQuantity()
			}
		}
	}
}
func findSession(w http.ResponseWriter, r *http.Request, rc redis.Conn) (*Session, error) {
	session := new(Session)

	usertokenCookie, err := r.Cookie("usertoken")
	if err != nil {
		return session, lwutil.NewErr(err)
	}
	usertoken := usertokenCookie.Value

	//redis
	if rc == nil {
		rc = redisPool.Get()
		defer rc.Close()
	}

	sessionBytes, err := redis.Bytes(rc.Do("get", fmt.Sprintf("sessions/%s", usertoken)))
	if err != nil {
		return session, lwutil.NewErr(err)
	}
	err = json.Unmarshal(sessionBytes, session)
	lwutil.CheckError(err, "")

	//update session
	dt := time.Now().Sub(session.Born)
	if dt > sessionUpdateSecond*time.Second {
		newSession(w, session.Userid, session.Username, session.Appid, rc)
	}

	return session, nil
}
func (self *StringGenerator) runDel(conn redis.Conn, req_num int32, mytime *time.Timer) {
	var pos int32 = 0
	for _, values := range self.data_list {
		if pos >= req_num {
			break
		}
		reporter.Resultdata.AddSendQuantity()
		reply, err := redis.Int(conn.Do("del", values.key))
		//log.Info("redis operating: del %s", values.key)
		if err != nil {
			//log.Error("redis %s failed: %v, %v", cmd, values, err)
			reporter.Resultdata.AddFailQuantity()
		} else {
			if reply > 0 {
				reporter.Resultdata.AddSuccQuantity()
			} else {
				reporter.Resultdata.AddErrorQuantity()
			}
		}
		pos++
	}
	/*
		if pos >= req_num {
			log.Info(" %d > %d, end del", pos, req_num)
		} else {
			log.Info("del all data in data_list")
		}*/
}
func (p *poller) getJob(conn redis.Conn) (*job, error) {
	for _, queue := range p.queues(p.isStrict) {
		logger.Debugf("Checking %s", queue)

		reply, err := conn.Do("LPOP", fmt.Sprintf("%squeue:%s", namespace, queue))
		if err != nil {
			return nil, err
		}
		if reply != nil {
			logger.Debugf("Found job on %s", queue)

			job := &job{Queue: queue}
			decoder := json.NewDecoder(bytes.NewReader(reply.([]byte)))
			if useNumber {
				decoder.UseNumber()
			}
			if err := decoder.Decode(&job.Payload); err != nil {
				return nil, err
			}
			return job, nil
		}
	}
	return nil, nil
}
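// A minimal enqueue sketch to pair with the poller above: getJob LPOPs from
// "<namespace>queue:<name>" and JSON-decodes the element into job.Payload, so
// the producing side only needs to RPUSH a JSON document onto the same key.
// The function name enqueue is hypothetical, and the payload shape is an
// assumption; the original defines Payload elsewhere.
func enqueue(conn redis.Conn, queue string, payload interface{}) error {
	raw, err := json.Marshal(payload)
	if err != nil {
		return err
	}
	_, err = conn.Do("RPUSH", fmt.Sprintf("%squeue:%s", namespace, queue), raw)
	return err
}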
func (self *SortedSetGenerator) checkZrevrange(conn redis.Conn, req_num int32, mytime *time.Timer) {
	var pos int32 = 0
	for _, value := range self.data_list {
		// Check the bound outside the select so the break exits this loop
		// rather than just the select.
		if pos >= req_num {
			break
		}
		select {
		case <-mytime.C:
			return
		default:
			reporter.Resultdata.AddSendQuantity()
			reply, err := redis.Values(conn.Do("zrevrange", value.key, 0, ZRANGE_100))
			//log.Info("redis operating: zrevrange %d 0 %d", value.key, ZRANGE_100)
			if err != nil {
				log.Info("redis operating: zrevrange %d 0 %d error %v", value.key, ZRANGE_100, err)
				reporter.Resultdata.AddFailQuantity()
			} else {
				if len(reply) > 0 {
					reporter.Resultdata.AddSuccQuantity()
				} else {
					reporter.Resultdata.AddErrorQuantity()
				}
			}
			pos++
		}
	}
}
// load sbf header from redis
func LoadHeader(conn redis.Conn, refer string) (*SBFHeader, error) {
	ret, err := redis.Bytes(conn.Do("GETRANGE", refer, 0, SBF_HEADER_SIZE-1))
	if err != nil {
		return nil, err
	}
	if len(ret) == 0 {
		return nil, fmt.Errorf("SBF %s NOT FOUND.", refer)
	}

	header := new(SBFHeader)
	copy(header.Name[:], ret[0:3])
	copy(header.Version[:], ret[3:8])

	// from bytes to number
	if err := BytesToNumber(ret[8:10], &header.Count); err != nil {
		return nil, err
	}
	if err := BytesToNumber(ret[10:12], &header.FullRate); err != nil {
		return nil, err
	}
	if err := BytesToNumber(ret[12:14], &header.SliceCount); err != nil {
		return nil, err
	}
	if err := BytesToNumber(ret[14:18], &header.SliceSize); err != nil {
		return nil, err
	}
	header.Refer = refer
	return header, nil
}
func (self *SortedSetGenerator) checkZrange(conn redis.Conn, req_num int32, range_data int32, mytime *time.Timer) {
	var pos int32 = 0
	for ; pos < req_num; pos++ {
		select {
		case <-mytime.C:
			return
		default:
			value := self.data_list[pos%ZRANGE_ALL]
			reporter.Resultdata.AddSendQuantity()
			reply, err := redis.Values(conn.Do("ZRANGE", value.key, 0, range_data))
			//log.Info("redis operating: zrange %d 0 %d", value.key, range_data)
			if err != nil {
				log.Info("redis operating: zrange %d 0 %d error %v", value.key, range_data, err)
				reporter.Resultdata.AddFailQuantity()
			} else {
				//log.Info(" zrange %d 0 %d ret:%v", value.key, range_data, reply)
				datacheck := &reporter.IntArrayChecker{Myuid: value.sortedlist, Youruid: reply}
				reporter.Datasummer.AddChecker(datacheck)
			}
		}
	}
}
// DEL() does the "DEL" command.
//
// Params:
//     conn: redis.Conn
//     keys: the keys to be deleted.
// Return:
//     n: The number of keys that were removed.
//     err: nil if no error occurs or specified error otherwise.
func DEL(conn redis.Conn, keys []string) (n int64, err error) {
	if len(keys) == 0 {
		return 0, errors.New("no keys")
	}

	cmd := "DEL"
	args := []interface{}{}
	for _, k := range keys {
		if err := CheckKey(k); err != nil {
			return 0, err
		}
		args = append(args, k)
	}

	if n, err = redis.Int64(conn.Do(cmd, args...)); err != nil {
		if DEBUG {
			fmt.Printf("conn.Do(%v, %v): err: %v\n", cmd, args, err)
		}
		return 0, err
	}
	return n, nil
}
func (self *SortedSetGenerator) checkZscoreOk(conn redis.Conn, reqnum int32, mytime *time.Timer) {
	length := int32(len(self.data_list))
	var i int32 = 0
	var index int32 = -1
	for ; i < reqnum; i++ {
		// Advance to the next score entry each time a full pass over
		// data_list completes.
		if i%length == 0 {
			index++
		}
		values := self.data_list[i%length]
		key := values.key
		data := values.sortedlist[index]
		//log.Info("redis zscore ops: [%d:%d]", key, data)
		reporter.Resultdata.AddSendQuantity()
		_, err := redis.Int(conn.Do("zscore", key, data))
		if err != nil {
			log.Error("redis zscore failed: [%d:%d],err %v", key, data, err)
			reporter.Resultdata.AddFailQuantity()
		} else {
			reporter.Resultdata.AddSuccQuantity()
		}
	}
}
// HMSET() does the "HMSET" command.
//
// Params:
//     conn: redis.Conn.
//     key: key to store the hash.
//     m: map contains the specified fields and their respective values.
// Return:
//     nil if no error occurs or specified error otherwise.
func HMSET(conn redis.Conn, key string, m map[string]string) error {
	if err := CheckKey(key); err != nil {
		return err
	}
	if err := CheckMap(m); err != nil {
		return err
	}

	cmd := "HMSET"
	args := []interface{}{}
	args = append(args, key)
	for k, v := range m {
		args = append(args, k, v)
	}

	if _, err := conn.Do(cmd, args...); err != nil {
		if DEBUG {
			fmt.Printf("conn.Do(%v, %v): err: %v\n", cmd, args, err)
		}
		return err
	}
	return nil
}
func getLongURL(short string, conn redis.Conn) (Data, error) {
	var d Data
	search := strings.Join([]string{short, "||*"}, "")
	fmt.Println(search)

	n, err := redis.Strings(conn.Do("KEYS", search))
	if err != nil {
		log.Print(err)
		return d, err
	}
	if len(n) < 1 {
		log.Print("Nothing Found")
	} else {
		parts := strings.Split(n[0], "||")
		d.Short = parts[0]
		d.Original = parts[1]
		d.FullShort = strings.Join([]string{*base, parts[0]}, "")
		newCount, err := redis.Int(conn.Do("HINCRBY", n[0], "count", 1))
		if err != nil {
			log.Println(err)
		}
		d.HitCount = newCount
	}
	log.Println("Served: ", d.Original)
	return d, nil
}
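// A short usage sketch (hypothetical helper, placeholder address) showing the
// round trip through the two helpers above: createShortURL stores a
// "<short>||<url>" hash key, and getLongURL resolves the short code back to
// the original URL via a KEYS "<short>||*" lookup while bumping the hit count.
func shortenExample() {
	conn, err := redis.Dial("tcp", "localhost:6379")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	d, err := createShortURL("https://example.com/some/long/path", conn)
	if err != nil {
		log.Fatal(err)
	}
	resolved, err := getLongURL(d.Short, conn)
	if err != nil {
		log.Fatal(err)
	}
	log.Println(resolved.Original) // https://example.com/some/long/path
}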
func (tc *ExtraIncrTestCase) groupfetch(c1, c2 redis.Conn, key string) int {
	r1, e1 := c1.Do("get", key)
	r2, e2 := c2.Do("get", key)
	if e1 != nil || e2 != nil {
		Panic("groupfetch key = %s, e1 = %s, e2 = %s", key, e1, e2)
	}
	if r1 == nil && r2 == nil {
		Panic("groupfetch key = %s, r1 == nil && r2 == nil", key)
	}
	if r1 != nil && r2 != nil {
		Panic("groupfetch key = %s, r1 != nil && r2 != nil, %v %v", key, r1, r2)
	}
	if r1 != nil {
		if v, err := redis.Int(r1, nil); err != nil {
			Panic("groupfetch key = %s, error = %s", key, err)
		} else {
			return v
		}
	}
	if r2 != nil {
		if v, err := redis.Int(r2, nil); err != nil {
			Panic("groupfetch key = %s, error = %s", key, err)
		} else {
			return v
		}
	}
	return -1
}
func RedisDo(commandName string, args ...interface{}) (interface{}, error) {
	var redisConn redis.Conn
	var err error

	for i := 0; i < redisRetryCount; i++ {
		if redisConn, err = redis.Dial("tcp", redisAddress); err != nil {
			fog.Warn("redis.Dial: %s", err)
			time.Sleep(5 * time.Second)
			continue
		}

		// Assign with '=' rather than ':=' so the outer err is not shadowed
		// and the final error report reflects the last failure.
		var result interface{}
		result, err = redisConn.Do(commandName, args...)
		redisConn.Close()
		if err != nil {
			fog.Warn("RedisDo: %s", err)
			time.Sleep(1 * time.Second)
			continue
		}
		return result, nil
	}

	return nil, fmt.Errorf("RedisDo: failed after %d retries %s", redisRetryCount, err)
}
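// A usage sketch for the retrying wrapper above: because RedisDo returns the
// raw reply and error, it composes with redigo's reply helpers exactly like
// conn.Do. The key and value names are placeholders.
func redisDoExample() error {
	if _, err := RedisDo("SET", "greeting", "hello"); err != nil {
		return err
	}
	greeting, err := redis.String(RedisDo("GET", "greeting"))
	if err != nil {
		return err
	}
	log.Printf("greeting = %s", greeting)
	return nil
}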
func (out *RedisOutput) UpdateLocalTopologyMap(conn redis.Conn) {
	TopologyMapTmp := make(map[string]string)

	hostnames, err := redis.Strings(conn.Do("KEYS", "*"))
	if err != nil {
		logp.Err("Fail to get the all shippers from the topology map %s", err)
		return
	}

	for _, hostname := range hostnames {
		res, err := redis.String(conn.Do("HGET", hostname, "ipaddrs"))
		if err != nil {
			logp.Err("[%s] Fail to get the IPs: %s", hostname, err)
		} else {
			ipaddrs := strings.Split(res, ",")
			for _, addr := range ipaddrs {
				TopologyMapTmp[addr] = hostname
			}
		}
	}

	out.TopologyMap = TopologyMapTmp
	logp.Debug("output_redis", "Topology %s", TopologyMapTmp)
}
func (task *FlushTask) readValuesFromRedis(items []dbproto.FlushItem) ([][]byte, error) {
	// TODO: use MGET for performance
	count := len(items)
	values := make([][]byte, count)

	var err error
	var conn redis.Conn
	for i := 0; i < count; i++ {
		redis_key := items[i].GetMsgType() + "_" + items[i].GetKey()
		conn = task.getRedisConn(items[i].GetKey())
		values[i], err = redis.Bytes(conn.Do("GET", redis_key))
		if err != nil {
			logger.Printf("Read Key %s but ERROR: redis: %v", redis_key, err)
			values[i] = nil
			// if no data from redis, just ignore
			if err == redis.ErrNil {
				continue
			}
			// if other errors, return
			return nil, err
		}
	}
	return values, nil
}
// NewRedisCache creates a new redis connection.
func NewRedisCache(ip string, port string, password string) (*RedisCache, error) {
	var (
		c   redis.Conn
		err error
	)
	c, err = redis.DialTimeout("tcp", ip+":"+port, 0, 1*time.Second, 1*time.Second)
	if err != nil {
		glog.Error("Error:", err)
		return nil, err
	}
	if password != "" {
		_, err = c.Do("AUTH", password)
		if err != nil {
			c.Close()
			glog.Error("Error:", err)
			return nil, err
		}
	}
	return &RedisCache{
		session: c,
	}, err
}
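// A brief usage sketch for the constructor above. The address, port, and the
// way the connection is released are assumptions: this closes the session
// field directly (same-package access); a dedicated Close method, if the
// original defines one, would be preferable.
func redisCacheExample() error {
	cache, err := NewRedisCache("127.0.0.1", "6379", "")
	if err != nil {
		return err
	}
	defer cache.session.Close()
	return nil
}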