func (r *Redis) Set(key string, value interface{}, exp ...time.Duration) error { conn := r.pool.Get() defer conn.Close() switch v := value.(type) { case Marshaler: data, err := v.MarshalCache() if err != nil { return err } value = data case encoding.BinaryMarshaler: data, err := v.MarshalBinary() if err != nil { return err } value = data } args := redis.Args{}.Add(key).Add(value) if hasExpire(exp) { ms := int(exp[0] / time.Millisecond) args = args.Add("PX").Add(ms) } _, err := conn.Do("SET", args...) return err }
// Enqueue an already existing job by jobId. This can be used for fast retries func (c *RedisClient) Enqueue(jobIds ...string) error { args := redis.Args{} args.AddFlat(jobIds) _, err := c.conn.Do("ENQUEUE", args) return err }
func TestDeleteModelsBySetIdsScript(t *testing.T) { testingSetUp() defer testingTearDown() // Create and save some test models models, err := createAndSaveTestModels(5) if err != nil { t.Fatalf("Unexpected error saving test models: %s", err.Error()) } // The set of ids will contain three valid ids and two invalid ones ids := []string{} for _, model := range models[:3] { ids = append(ids, model.ModelId()) } ids = append(ids, "foo", "bar") tempSetKey := "testModelIds" conn := testPool.NewConn() defer conn.Close() saddArgs := redis.Args{tempSetKey} saddArgs = saddArgs.Add(Interfaces(ids)...) if _, err = conn.Do("SADD", saddArgs...); err != nil { t.Errorf("Unexpected error in SADD: %s", err.Error()) } // Run the script tx := testPool.NewTransaction() count := 0 tx.deleteModelsBySetIds(tempSetKey, testModels.Name(), newScanIntHandler(&count)) if err := tx.Exec(); err != nil { t.Fatalf("Unexected error in tx.Exec: %s", err.Error()) } // Check that the return value is correct if count != 3 { t.Errorf("Expected count to be 3 but got %d", count) } // Make sure the first three models were deleted for _, model := range models[:3] { modelKey, err := testModels.ModelKey(model.ModelId()) if err != nil { t.Errorf("Unexpected error in ModelKey: %s", err.Error()) } expectKeyDoesNotExist(t, modelKey) expectSetDoesNotContain(t, testModels.IndexKey(), model.ModelId()) } // Make sure the last two models were not deleted for _, model := range models[3:] { modelKey, err := testModels.ModelKey(model.ModelId()) if err != nil { t.Errorf("Unexpected error in ModelKey: %s", err.Error()) } expectKeyExists(t, modelKey) expectSetContains(t, testModels.IndexKey(), model.ModelId()) } }
func (c *client) DelJob(jobIds ...string) (int, error) { conn, err := c.get() if err != nil { return 0, err } args := redis.Args{} for _, jobId := range jobIds { args = args.Add(jobId) } return redis.Int(conn.Do("DELJOB", args...)) }
// Upsert set. If create is true, the session will be created if it does not // exist. Otherwise, the session must already exist. func (s *store) set(sessionID storage.ID, data map[string]interface{}, create bool) error { ms := &sess{ Data: data, LastSeen: time.Now(), } // Get connection from pool. conn, err := s.cfg.GetConn() if err != nil { return err } defer conn.Close() // Serialize. buf := bytes.Buffer{} buf.WriteByte(0) // Version 0 serialization scheme. err = gob.NewEncoder(&buf).Encode(ms) log.Panice(err, "encode session") // should never happen // Assemble command. expiry := s.cfg.Expiry if lt, ok := data["session_lifetime"].(time.Duration); ok { expiry = lt } expirys := int(expiry.Seconds()) args := redis.Args{} args = args.Add(s.makeKey(sessionID), buf.Bytes(), "EX", expirys) if !create { // Require key to already exist. args = args.Add("XX") } // Send command to Redis. _, err = conn.Do("SET", args...) log.Debuge(err, "set") return nil }
// GetMulti gets <count> jobs from the given queues, or times out if timeout has elapsed without // enough jobs being available. Returns a list of jobs or an error func (c *RedisClient) GetMulti(count int, timeout time.Duration, queues ...string) ([]Job, error) { if len(queues) == 0 { return nil, errors.New("disque: no queues specified") } if count < 0 { return nil, fmt.Errorf("disque: invalid count %d", count) } args := redis.Args{} if timeout > 0 { args = args.Add("TIMEOUT", int64(timeout/time.Millisecond)) } if count > 0 { args = args.Add("COUNT", count) } args = args.Add("FROM") args = args.AddFlat(queues) vals, err := redis.Values(c.conn.Do("GETJOB", args...)) if err != nil { return nil, fmt.Errorf("disque: could not get jobs: %s", err) } ret := make([]Job, 0, len(vals)) for _, v := range vals { if arr, ok := v.([]interface{}); ok { ret = append(ret, Job{ Queue: string(arr[0].([]byte)), id: string(arr[1].([]byte)), Data: arr[2].([]byte), }) } } return ret, nil }
// Add sents an ADDJOB command to disque, as specified by the AddRequest. Returns the job id or an error func (c *RedisClient) Add(r AddRequest) (string, error) { //ADDJOB queue_name job <ms-timeout> [REPLICATE <count>] [DELAY <sec>] [RETRY <sec>] [TTL <sec>] [MAXLEN <count>] [ASYNC] args := redis.Args{r.Job.Queue, r.Job.Data, int(r.Timeout / time.Millisecond)} if r.Replicate > 0 { args = args.Add("REPLICATE", r.Replicate) } if r.Delay > 0 { args = args.Add("DELAY", int64(r.Delay.Seconds())) } if r.Retry > 0 { args = args.Add("RETRY", int64(r.Retry.Seconds())) } if r.TTL > 0 { args = args.Add("TTL", int64(r.TTL.Seconds())) } if r.Maxlen > 0 { args = args.Add("MAXLEN", r.Maxlen) } if r.Async { args = args.Add("ASYNC") } id, err := redis.String(c.conn.Do("ADDJOB", args...)) if err != nil { return "", errors.New("disque: could not add job: " + err.Error()) } return id, nil }
// mainHashArgsForFields is like mainHashArgs but only returns the hash // fields which match the given fieldNames. func (mr *modelRef) mainHashArgsForFields(fieldNames []string) (redis.Args, error) { args := redis.Args{mr.key()} ms := mr.spec for _, fs := range ms.fields { // Skip fields whose names do not appear in fieldNames. if !stringSliceContains(fieldNames, fs.name) { continue } fieldVal := mr.fieldValue(fs.name) switch fs.kind { case primativeField: args = args.Add(fs.redisName, fieldVal.Interface()) case pointerField: if !fieldVal.IsNil() { args = args.Add(fs.redisName, fieldVal.Elem().Interface()) } else { args = args.Add(fs.redisName, "NULL") } case inconvertibleField: switch fieldVal.Type().Kind() { // For nilable types that are nil store NULL case reflect.Ptr, reflect.Slice, reflect.Map, reflect.Interface: if fieldVal.IsNil() { args = args.Add(fs.redisName, "NULL") continue } } // For inconvertibles, that are not nil, convert the value to bytes // using the gob package. valBytes, err := mr.spec.fallback.Marshal(fieldVal.Interface()) if err != nil { return nil, err } args = args.Add(fs.redisName, valBytes) } } return args, nil }
// HGetKvs loads the struct pointed to by each Hkv.Value from a Redis hash,
// falling back to a SQL query for any fields missing from the cache, and
// writes those fields back to Redis afterwards.
//
// The function pipelines one EXISTS + one HMGET per entry, flushes, then
// consumes the replies in the same order. Per-entry failures are recorded in
// hkvs[i].Error rather than aborting the whole batch.
//
// NOTE(review): assumes each Hkv.Value is a pointer to a struct whose field
// names match both the Redis hash fields and the SQL column names — verify
// against callers.
func HGetKvs(hkvs []Hkv) error {
	rc := redisPool.Get()
	defer rc.Close()
	// Phase 1: queue all requests on the pipelined connection.
	for _, hkv := range hkvs {
		vValue := reflect.ValueOf(hkv.Value)
		if vValue.Kind() == reflect.Ptr {
			vValue = vValue.Elem()
		}
		if vValue.Kind() != reflect.Struct {
			return NewErrStr("err_not_struct")
		}
		numField := vValue.NumField()
		args := make([]interface{}, 0, numField+1)
		key := hMakeKey(hkv.Db.Name, hkv.TableName, hkv.KeyName, hkv.KeyValue)
		args = append(args, key)
		vType := vValue.Type()
		// HMGET asks for every struct field by name.
		for i := 0; i < numField; i++ {
			args = append(args, vType.Field(i).Name)
		}
		rc.Send("exists", key)
		rc.Send("hmget", args...)
	}
	err := rc.Flush()
	if err != nil {
		return NewErr(err)
	}
	// Phase 2: consume replies in the same order they were queued.
	needWriteToRedis := false
	for ihkv, hkv := range hkvs {
		// Reply to the EXISTS command.
		existsInRedis, err := redis.Bool(rc.Receive())
		if err != nil {
			hkvs[ihkv].Error = err
			continue
		}
		vValue := reflect.ValueOf(hkv.Value)
		if vValue.Kind() == reflect.Ptr {
			vValue = vValue.Elem()
		}
		numField := vValue.NumField()
		// Collect addressable pointers to every struct field for Scan.
		args := make([]interface{}, 0, numField)
		for i := 0; i < numField; i++ {
			args = append(args, vValue.Field(i).Addr().Interface())
		}
		// Reply to the HMGET command.
		reply, err := redis.Values(rc.Receive())
		if err != nil {
			hkvs[ihkv].Error = err
			continue
		}
		redis.Scan(reply, args...)
		// Fields that came back nil were absent from the hash; they must be
		// fetched from the database instead.
		nilFieldNames := make([]string, 0, len(reply))
		nilFieldItfs := make([]interface{}, 0, len(reply))
		vType := vValue.Type()
		for i, r := range reply {
			if r == nil {
				nilFieldNames = append(nilFieldNames, vType.Field(i).Name)
				nilFieldItfs = append(nilFieldItfs, vValue.Field(i).Addr().Interface())
			}
		}
		if len(nilFieldNames) != 0 {
			// NOTE(review): hkv.KeyValue is interpolated into the SQL text via
			// %v rather than a bound parameter — confirm it is never
			// attacker-controlled.
			strSql := fmt.Sprintf("SELECT %s FROM %s WHERE %s=%v", strings.Join(nilFieldNames, ","), hkv.TableName, hkv.KeyName, hkv.KeyValue)
			err := hkv.Db.QueryRow(strSql).Scan(nilFieldItfs...)
			if err != nil {
				hkvs[ihkv].Error = err
				continue
			} else {
				// If no error, save the freshly loaded fields back to Redis.
				var args redis.Args
				key := hMakeKey(hkv.Db.Name, hkv.TableName, hkv.KeyName, hkv.KeyValue)
				args = args.Add(key)
				for i, v := range nilFieldNames {
					args = args.Add(v)
					args = args.Add(reflect.ValueOf(nilFieldItfs[i]).Elem().Interface())
				}
				err := rc.Send("hmset", args...)
				if err != nil {
					hkvs[ihkv].Error = err
					continue
				}
				needWriteToRedis = true
				// If the key did not exist in Redis before, set an expire time.
				if !existsInRedis {
					rc.Send("expire", key, CACHE_LIFE_SEC)
				}
			}
		}
	}
	// Flush any queued HMSET/EXPIRE commands in one round trip.
	if needWriteToRedis {
		err = rc.Flush()
		return NewErr(err)
	}
	return nil
}
func (c *client) AddJob(queueName string, job []byte, options AddJobOptions) (string, error) { conn, err := c.get() if err != nil { return "", err } args := redis.Args{queueName, job, int(options.Timeout.Nanoseconds() / 1000000)} if options.Replicate > 0 { args = args.Add("REPLICATE", options.Replicate.Seconds()) } if options.Delay > 0 { args = args.Add("DELAY", options.Delay.Seconds()) } if options.Retry > 0 { args = args.Add("RETRY", options.Retry.Seconds()) } if options.TTL > 0 { args = args.Add("TTL", options.TTL.Seconds()) } if options.MaxLen > 0 { args = args.Add("MAXLEN", options.MaxLen) } if options.Async == true { args = args.Add("ASYNC") } return redis.String(conn.Do("ADDJOB", args...)) }
func (c *client) QScan(options QScanOptions) ([]interface{}, error) { conn, err := c.get() if err != nil { return nil, err } args := redis.Args{} if options.Count > 0 { args = args.Add("COUNT", options.Count) } if options.BusyLoop == true { args = args.Add("BUSYLOOP") } if options.MinLen > 0 { args = args.Add("MINLEN", options.MinLen) } if options.MaxLen > 0 { args = args.Add("MAXLEN", options.MaxLen) } if options.ImportRate > 0 { args = args.Add("IMPORTRATE", options.ImportRate) } return redis.Values(conn.Do("QSCAN", args...)) }
func (c *client) GetJob(options GetJobOptions, queueNames ...string) ([]job, error) { conn, err := c.get() if err != nil { return nil, err } args := redis.Args{} if options.NoHang == true { args = args.Add("NOHANG") } if options.Timeout.Nanoseconds() > 0 { args = args.Add("TIMEOUT", int(options.Timeout.Nanoseconds()/1000000)) } if options.Count > 0 { args = args.Add("COUNT", options.Count) } if options.WithCounters == true { args = args.Add("WITHCOUNTERS") } args = args.Add("FROM") for _, queueName := range queueNames { args = args.Add(queueName) } reply, err := redis.Values(conn.Do("GETJOB", args...)) if err != nil { return nil, err } result := make([]job, 0, len(reply)) for _, v := range reply { if value, err := redis.Values(v, nil); err != nil { return nil, err } else { queueName, err := redis.String(value[0], nil) id, err := redis.String(value[1], err) body, err := redis.Bytes(value[2], err) if err != nil { return nil, err } result = append(result, job{QueueName: queueName, Id: id, Body: body}) } } return result, nil }