// GetPendingTasks returns a slice of task.Signatures waiting in the queue
func (redisBroker *RedisBroker) GetPendingTasks(queue string) ([]*signatures.TaskSignature, error) {
	conn, err := redisBroker.open()
	if err != nil {
		return nil, fmt.Errorf("Dial: %s", err)
	}
	defer conn.Close()

	if queue == "" {
		queue = redisBroker.config.DefaultQueue
	}

	reply, err := conn.Do("LRANGE", queue, 0, 10)
	if err != nil {
		fmt.Printf("Error: %v\n", err)
		return nil, err
	}
	results, err := redis.ByteSlices(reply, err)
	if err != nil {
		fmt.Printf("Error: %v\n", err)
		return nil, err
	}

	var taskSignatures []*signatures.TaskSignature
	for _, result := range results {
		var taskSignature signatures.TaskSignature
		if err := json.Unmarshal(result, &taskSignature); err != nil {
			return nil, err
		}
		taskSignatures = append(taskSignatures, &taskSignature)
	}
	return taskSignatures, nil
}
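// A minimal standalone sketch of the same LRANGE + ByteSlices + json.Unmarshal
// pattern against a bare connection, for readers who do not have the broker
// types above. The import path (github.com/gomodule/redigo/redis), the queue
// name "machinery_tasks", and the trimmed TaskSignature struct are assumptions
// for illustration only.
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/gomodule/redigo/redis"
)

type TaskSignature struct {
	Name string `json:"Name"`
}

func main() {
	conn, err := redis.Dial("tcp", "localhost:6379")
	if err != nil {
		log.Fatalf("Dial: %s", err)
	}
	defer conn.Close()

	// LRANGE replies with an array of bulk strings; ByteSlices converts the
	// reply to [][]byte so each element can be unmarshalled individually.
	raw, err := redis.ByteSlices(conn.Do("LRANGE", "machinery_tasks", 0, 10))
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range raw {
		var sig TaskSignature
		if err := json.Unmarshal(b, &sig); err != nil {
			log.Fatal(err)
		}
		fmt.Println(sig.Name)
	}
}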
func (wp *WorkerPool) startZPolling() {
	var zqstrs []string
	for _, qname := range wp.queues {
		zqstrs = append(zqstrs, scheduledQueue(qname))
	}

	for wp.running {
		now := time.Now().UTC().Unix()
		for _, zset := range zqstrs {
			conn := wp.redisPool.Get()
			res, err := redis.ByteSlices(conn.Do(zrangebyscore, zset, 0, now))
			conn.Close()
			if err != nil {
				continue
			}
			for _, jsn := range res {
				wp.workCh <- qj{zset, jsn}
				conn := wp.redisPool.Get()
				_, err := conn.Do(zremrangebyscore, zset, 0, now)
				if err != nil {
					// TODO -- try again
				}
				conn.Close()
			}
		}
	}
}
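// A hedged sketch of the producer side this poller implies: a serialized job
// is added to the scheduled sorted set with its run-at Unix time as the score,
// so the ZRANGEBYSCORE(0, now) call above only returns jobs that are due. The
// key name "scheduled:default", the JSON payload, and the import path are
// assumptions, not part of the snippet above.
package main

import (
	"log"
	"time"

	"github.com/gomodule/redigo/redis"
)

func main() {
	conn, err := redis.Dial("tcp", "localhost:6379")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	runAt := time.Now().UTC().Add(5 * time.Minute).Unix()
	payload := []byte(`{"job":"send_email","args":["user@example.com"]}`)

	// Score = the moment the job becomes runnable; member = the serialized job.
	if _, err := conn.Do("ZADD", "scheduled:default", runAt, payload); err != nil {
		log.Fatal(err)
	}
}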
// collectAndCallOnce collects the packet, sleeps the configured duration and
// calls the callback only once with a slice of packets, sorted by signal
// strength (strongest at index 0). This method exists since multiple gateways
// are able to receive the same packet, but the packet needs to be processed
// only once.
// It is important to validate the packet before calling collectAndCallOnce
// (since the MIC is part of the storage key, make sure it is valid).
// It is safe to collect the same packet received by the same gateway twice.
// Since the underlying storage type is a set, the result will always be a
// unique set per gateway MAC and packet MIC.
func collectAndCallOnce(p *redis.Pool, rxPacket models.RXPacket, callback func(packets RXPackets) error) error {
	var buf bytes.Buffer
	enc := gob.NewEncoder(&buf)
	if err := enc.Encode(rxPacket); err != nil {
		return fmt.Errorf("encode rx packet error: %s", err)
	}
	c := p.Get()
	defer c.Close()

	// store the packet in a set with CollectAndCallOnceWait expiration
	// in case the packet is received by multiple gateways, the set will contain
	// each packet.
	key := "collect_" + hex.EncodeToString(rxPacket.PHYPayload.MIC[:])
	c.Send("MULTI")
	c.Send("SADD", key, buf.Bytes())
	c.Send("PEXPIRE", key, int64(CollectAndCallOnceWait*2)/int64(time.Millisecond))
	_, err := c.Do("EXEC")
	if err != nil {
		return fmt.Errorf("add rx packet to collect set error: %s", err)
	}

	// acquire a lock on processing this packet
	_, err = redis.String(c.Do("SET", key+"_lock", "lock", "PX", int64(CollectAndCallOnceWait*2)/int64(time.Millisecond), "NX"))
	if err != nil {
		if err == redis.ErrNil {
			// the packet processing is already locked by another process
			// so there is nothing to do anymore :-)
			return nil
		}
		return fmt.Errorf("acquire lock error: %s", err)
	}

	// wait the configured amount of time, more packets might be received
	// from other gateways
	time.Sleep(CollectAndCallOnceWait)

	// collect all packets from the set
	rxPackets := make(RXPackets, 0)
	payloads, err := redis.ByteSlices(c.Do("SMEMBERS", key))
	if err != nil {
		return fmt.Errorf("get collect set members error: %s", err)
	}
	if len(payloads) == 0 {
		return errors.New("zero items in collect set")
	}
	for _, b := range payloads {
		var packet models.RXPacket
		if err := gob.NewDecoder(bytes.NewReader(b)).Decode(&packet); err != nil {
			return fmt.Errorf("decode rx packet error: %s", err)
		}
		rxPackets = append(rxPackets, packet)
	}

	sort.Sort(rxPackets)
	return callback(rxPackets)
}
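// A standalone sketch of the "SET key value PX ttl NX" locking idiom used
// above: when NX fails because another process already holds the lock, the
// reply is a nil bulk string and redis.String returns redis.ErrNil. The key
// name, value and TTL are placeholder assumptions, as is the import path.
package main

import (
	"fmt"
	"log"

	"github.com/gomodule/redigo/redis"
)

func main() {
	conn, err := redis.Dial("tcp", "localhost:6379")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	_, err = redis.String(conn.Do("SET", "collect_abcdef_lock", "lock", "PX", 400, "NX"))
	if err == redis.ErrNil {
		// Some other process acquired the lock first; nothing to do.
		fmt.Println("already locked elsewhere")
		return
	}
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("lock acquired; this process runs the callback")
}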
// Pull implements the `func Pull` from `Processor`. It pulls from the right
// side of the Redis structure in a blocking fashion, using BRPOP.
//
// If a redis.ErrNil is returned, it is silenced, and both fields are returned
// as nil. If the err is not a redis.ErrNil, but is still non-nil itself, then
// it will be returned, along with an empty []byte.
//
// If an item can successfully be removed from the keyspace, it is returned
// without error.
func (f *fifoProcessor) Pull(cnx redis.Conn, src string, timeout time.Duration) ([]byte, error) {
	slices, err := redis.ByteSlices(cnx.Do("BRPOP", src, block(timeout)))
	if err == redis.ErrNil {
		// Nothing became available before the timeout; silence the error as
		// documented above.
		return nil, nil
	}
	if err != nil {
		return nil, err
	}

	// BRPOP replies with [key, value]; only the value is of interest here.
	return slices[1], nil
}
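// A minimal FIFO round-trip sketch of the pattern the processor above relies
// on: LPUSH on the left, blocking BRPOP on the right, so the oldest item comes
// out first. The key "fifo:jobs", the 5-second timeout, and the import path
// are assumptions for illustration.
package main

import (
	"fmt"
	"log"

	"github.com/gomodule/redigo/redis"
)

func main() {
	conn, err := redis.Dial("tcp", "localhost:6379")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	if _, err := conn.Do("LPUSH", "fifo:jobs", "first", "second"); err != nil {
		log.Fatal(err)
	}

	// BRPOP replies with a two-element array: [key, value].
	reply, err := redis.ByteSlices(conn.Do("BRPOP", "fifo:jobs", 5))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("popped %q from %q\n", reply[1], reply[0])
}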
func RedisDoGetMultiByteSlice(cmd string, args ...interface{}) (data [][]byte, err error) {
	c := RedisPool.Get()
	defer c.Close()

	// data_origin, err := c.Do("keys", "channel:*")
	data, err = redis.ByteSlices(c.Do(cmd, args...))
	if err != nil {
		fmt.Println(err)
	}
	return
}
func (wp *WorkerPool) startPolling() {
	// BLPOP takes each queue name as a separate key argument (followed by the
	// timeout), so the queue names must not be joined into a single string.
	args := redis.Args{}.AddFlat(wp.queues).Add(wp.timeout.Seconds())
	for {
		conn := wp.redisPool.Get()
		res, err := redis.ByteSlices(conn.Do("BLPOP", args...))
		conn.Close()
		if err != nil {
			continue
		}
		// res[0] is the queue the item came from, res[1] is the payload.
		wp.workCh <- qj{string(res[0]), res[1]}
	}
}
func TestExpectSlice(t *testing.T) {
	connection := NewConn()
	field1 := []byte("hello")
	connection.Command("HMGET", "key", "field1", "field2").ExpectSlice(field1, nil)

	if len(connection.commands) != 1 {
		t.Fatalf("Did not register the command. Expected '1' and got '%d'", len(connection.commands))
	}

	reply, err := redis.ByteSlices(connection.Do("HMGET", "key", "field1", "field2"))
	if err != nil {
		t.Fatal(err)
	}
	if string(reply[0]) != string(field1) {
		t.Fatalf("reply[0] is not hello but %s", string(reply[0]))
	}
	if reply[1] != nil {
		t.Fatal("reply[1] is not nil")
	}
}
// StartConsuming enters a loop and waits for incoming messages
func (redisBroker *RedisBroker) StartConsuming(consumerTag string, taskProcessor TaskProcessor) (bool, error) {
	if redisBroker.retryFunc == nil {
		redisBroker.retryFunc = utils.RetryClosure()
	}

	redisBroker.pool = redisBroker.newPool()
	defer redisBroker.pool.Close()

	// Ping the server first so a broken connection triggers a retry instead of
	// a consume loop that can never receive anything.
	pingConn := redisBroker.pool.Get()
	_, err := pingConn.Do("PING")
	pingConn.Close()
	if err != nil {
		redisBroker.retryFunc()
		return redisBroker.retry, err // retry true
	}

	redisBroker.retryFunc = utils.RetryClosure()
	redisBroker.stopChan = make(chan int)
	redisBroker.stopReceivingChan = make(chan int)
	redisBroker.errorsChan = make(chan error)
	deliveries := make(chan []byte)

	redisBroker.wg.Add(1)
	go func() {
		defer redisBroker.wg.Done()

		log.Print("[*] Waiting for messages. To exit press CTRL+C")

		conn := redisBroker.pool.Get()
		defer conn.Close()

		for {
			select {
			// A way to stop this goroutine from redisBroker.StopConsuming
			case <-redisBroker.stopReceivingChan:
				return
			default:
				itemBytes, err := conn.Do("BLPOP", redisBroker.config.DefaultQueue, "1")
				if err != nil {
					redisBroker.errorsChan <- err
					return
				}
				// BLPOP with a timeout replies with nil when it times out, so a
				// nil reply just means there was nothing to consume yet and we
				// can keep iterating.
				if itemBytes == nil {
					continue
				}

				items, err := redis.ByteSlices(itemBytes, nil)
				if err != nil {
					redisBroker.errorsChan <- err
					return
				}
				if len(items) != 2 {
					log.Println("Got unexpected amount of byte arrays, ignoring")
					continue
				}

				// items[0] - queue name (key), items[1] - value
				item := items[1]

				signature := new(signatures.TaskSignature)
				if err := json.Unmarshal(item, signature); err != nil {
					redisBroker.errorsChan <- err
					return
				}

				// If the task is not registered, we requeue it,
				// there might be different workers for processing specific tasks
				if !redisBroker.IsTaskRegistered(signature.Name) {
					_, err := conn.Do("RPUSH", redisBroker.config.DefaultQueue, item)
					if err != nil {
						redisBroker.errorsChan <- err
						return
					}
					continue
				}

				deliveries <- item
			}
		}
	}()

	if err := redisBroker.consume(deliveries, taskProcessor); err != nil {
		return redisBroker.retry, err // retry true
	}

	return redisBroker.retry, nil
}
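// A standalone sketch of the BLPOP-with-timeout behaviour the consume loop
// above depends on: when nothing arrives within the timeout, the reply is nil
// rather than an error, so the caller simply polls again. The queue name
// "machinery_tasks" and the import path are assumptions.
package main

import (
	"fmt"
	"log"

	"github.com/gomodule/redigo/redis"
)

func main() {
	conn, err := redis.Dial("tcp", "localhost:6379")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	reply, err := conn.Do("BLPOP", "machinery_tasks", "1")
	if err != nil {
		log.Fatal(err)
	}
	if reply == nil {
		fmt.Println("timed out with nothing to consume; poll again")
		return
	}

	// On success the reply is [queue name, value].
	items, err := redis.ByteSlices(reply, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("queue=%s value=%s\n", items[0], items[1])
}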
func (*RedisStore) Byteses(reply interface{}, err error) ([][]byte, error) {
	return redis.ByteSlices(reply, err)
}
	ve(redis.Ints(nil, nil)),
	ve([]int(nil), redis.ErrNil),
},
{
	"strings([v1, v2])",
	ve(redis.Strings([]interface{}{[]byte("v1"), []byte("v2")}, nil)),
	ve([]string{"v1", "v2"}, nil),
},
{
	"strings(nil)",
	ve(redis.Strings(nil, nil)),
	ve([]string(nil), redis.ErrNil),
},
{
	"byteslices([v1, v2])",
	ve(redis.ByteSlices([]interface{}{[]byte("v1"), []byte("v2")}, nil)),
	ve([][]byte{[]byte("v1"), []byte("v2")}, nil),
},
{
	"byteslices(nil)",
	ve(redis.ByteSlices(nil, nil)),
	ve([][]byte(nil), redis.ErrNil),
},
{
	"values([v1, v2])",
	ve(redis.Values([]interface{}{[]byte("v1"), []byte("v2")}, nil)),
	ve([]interface{}{[]byte("v1"), []byte("v2")}, nil),
},
{
	"values(nil)",
	ve(redis.Values(nil, nil)),