Example #1
func askRedis(w http.ResponseWriter, words []string, pool *radix.Pool) {
	w.Header().Add("Server", "go-search/redis")
	const (
		_token   = "t:"
		_content = "c:"
	)
	tokens := make([]string, len(words))
	for i, word := range words {
		tokens[i] = _token + word
	}
	redis, err := pool.Get()
	if err != nil {
		http.Error(w, fmt.Sprintf("Redis error: %v", err), http.StatusInternalServerError)
		// do not return connection into the pool
		return
	}
	rIds := redis.Cmd("SINTER", tokens)
	if rIds.Err != nil {
		http.Error(w, fmt.Sprintf("Redis error: %v", rIds.Err), http.StatusInternalServerError)
		// do not return connection into the pool
		return
	}
	lIds, _ := rIds.List()
	// For each matching id, look up the stored content and write one result line.
	for _, id := range lIds {
		rContent := redis.Cmd("GET", _content+id)
		var title string
		if rContent.Err != nil {
			title = fmt.Sprintf("(redis error: %v)", rContent.Err)
		} else {
			title, _ = rContent.Str()
		}
		fmt.Fprintf(w, "%v,%v\n", id, title)
	}
	pool.Put(redis)
}
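For context, here is a minimal sketch of how a handler like askRedis above might be wired up. It assumes `radix` is an import alias for the radix.v2 `pool` package (so `*radix.Pool` is that package's connection pool); the Redis address, pool size, route, and `q` query parameter are illustrative, not taken from the original project.

package main

import (
	"log"
	"net/http"
	"strings"

	radix "github.com/mediocregopher/radix.v2/pool"
)

func main() {
	// Create a pool of 10 connections to a local Redis instance
	// (address and size are assumptions for this sketch).
	p, err := radix.New("tcp", "localhost:6379", 10)
	if err != nil {
		log.Fatalf("cannot create Redis pool: %v", err)
	}

	http.HandleFunc("/search", func(w http.ResponseWriter, r *http.Request) {
		// Split the query into words; askRedis intersects the matching
		// token sets in Redis and streams back id,title lines.
		words := strings.Fields(r.URL.Query().Get("q"))
		askRedis(w, words, p)
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}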
Example #2
// NewRedisPipe creates a new Pipe that has access to Redis.
func NewRedisPipe(redisPool pool.Pool) rabbitbeans.Pipe {
	client, err := redisPool.Get()
	if err != nil {
		// abort: without a Redis connection the Pipe cannot be built
		rabbitbeans.FailOnError(err, "Cannot get redis connection from pool")
	}
	return RedisPipe{client}
}
Example #3
func migrateRange(wg *sync.WaitGroup, pool *pool.Pool, start int64, end int64) {
	defer wg.Done()

	// Each worker checks out its own connection for the duration of its range.
	// Note that log.Fatal exits the process, so the deferred calls below will
	// not run if an error occurs.
	conn, err := pool.Get()
	if err != nil {
		log.Fatal(err)
	}
	defer pool.Put(conn)

	// LRANGE is inclusive on both ends, so this fetches keys start..end.
	keys, err := conn.Cmd("LRANGE", "image:all", start, end).List()
	if err != nil {
		log.Fatal(err)
	}

	for _, key := range keys {
		data, err := conn.Cmd("HGET", "image:file:"+key, "data").Bytes()
		if err != nil {
			log.Fatal(err)
		}
		// Move the binary payload out of Redis onto the filesystem, then
		// drop it from the hash and record the returned file id instead.
		fid := saveFileToFS(data)
		conn.Cmd("HDEL", "image:file:"+key, "data")
		conn.Cmd("HSET", "image:file:"+key, "dfid", fid)
		// Migrate the "cache" and "smart" sub-images for this key as well.
		processSubImages(conn, "cache", key)
		processSubImages(conn, "smart", key)
	}
}
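The worker above only covers one key range; a hedged sketch of how migrateRange might be fanned out across goroutines follows. The pool package path, Redis address, pool size, batchSize, and totalImages are assumptions made for illustration.

package main

import (
	"log"
	"sync"

	"github.com/mediocregopher/radix.v2/pool"
)

func main() {
	// Assumed Redis address and pool size.
	p, err := pool.New("tcp", "localhost:6379", 8)
	if err != nil {
		log.Fatal(err)
	}

	// Assumed sizes. LRANGE is inclusive on both ends, so each worker gets
	// the range [start, start+batchSize-1].
	const batchSize int64 = 1000
	const totalImages int64 = 100000

	var wg sync.WaitGroup
	for start := int64(0); start < totalImages; start += batchSize {
		wg.Add(1)
		go migrateRange(&wg, p, start, start+batchSize-1)
	}
	wg.Wait()
}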
Example #4
func (c *Cluster) resetInnerUsingPool(p *pool.Pool) error {

	// If we move the throttle check to be in here we'll have to fix the test in
	// TestReset, since it depends on being able to call Reset right after
	// initializing the cluster

	client, err := p.Get()
	if err != nil {
		return err
	}
	defer p.Put(client)

	// pools collects the connection pools for the topology reported by
	// CLUSTER SLOTS; it replaces c.pools once the reply has been processed.
	pools := map[string]*pool.Pool{}

	elems, err := client.Cmd("CLUSTER", "SLOTS").Array()
	if err != nil {
		return err
	} else if len(elems) == 0 {
		return errors.New("empty CLUSTER SLOTS response")
	}

	var start, end, port int
	var ip, slotAddr string
	var slotPool *pool.Pool
	var ok, changed bool
	// Each CLUSTER SLOTS entry is [slot-start, slot-end, [master-ip, master-port, ...], ...].
	for _, slotGroup := range elems {
		slotElems, err := slotGroup.Array()
		if err != nil {
			return err
		}
		if start, err = slotElems[0].Int(); err != nil {
			return err
		}
		if end, err = slotElems[1].Int(); err != nil {
			return err
		}
		slotAddrElems, err := slotElems[2].Array()
		if err != nil {
			return err
		}
		if ip, err = slotAddrElems[0].Str(); err != nil {
			return err
		}
		if port, err = slotAddrElems[1].Int(); err != nil {
			return err
		}

		// CLUSTER SLOTS returns a blank ip for the node we're currently
		// connected to, so fall back to the address this pool dialed.
		if ip == "" {
			slotAddr = p.Addr
		} else {
			slotAddr = ip + ":" + strconv.Itoa(port)
		}
		for i := start; i <= end; i++ {
			c.mapping[i] = slotAddr
		}
		if slotPool, ok = c.pools[slotAddr]; ok {
			pools[slotAddr] = slotPool
		} else {
			slotPool, err = c.newPool(slotAddr, true)
			if err != nil {
				return err
			}
			changed = true
			pools[slotAddr] = slotPool
		}
	}

	// Empty (close the connections of) any pools for nodes that are no longer
	// part of the cluster topology.
	for addr := range c.pools {
		if _, ok := pools[addr]; !ok {
			c.pools[addr].Empty()
			delete(c.poolThrottles, addr)
			changed = true
		}
	}
	c.pools = pools

	// Notify listeners, without blocking, that the set of pools changed.
	if changed {
		select {
		case c.ChangeCh <- struct{}{}:
		default:
		}
	}

	return nil
}