Example 1
// postConnect issues a READONLY command on a freshly established connection
// so the proxy may serve reads from slave nodes. It is a pass-through when
// the incoming err is already non-nil or when sendReadOnly is disabled.
//
// Bug fix: the original deferred `conn.Close(); conn = nil` on error, but
// because the return values are unnamed the `conn = nil` assignment never
// reached the caller — callers received a closed, non-nil connection. Errors
// now close the connection and return nil explicitly.
func (cp *ConnPool) postConnect(conn net.Conn, err error) (net.Conn, error) {
	if err != nil || !cp.sendReadOnly {
		return conn, err
	}

	// fail closes conn and returns (nil, e) so the caller can never
	// accidentally reuse a dead connection.
	fail := func(e error) (net.Conn, error) {
		conn.Close()
		return nil, e
	}

	if _, err = conn.Write(REDIS_CMD_READ_ONLY); err != nil {
		log.Error("write READONLY failed", conn.RemoteAddr().String(), err)
		return fail(err)
	}

	reader := bufio.NewReader(conn)
	data, err := resp.ReadData(reader)
	if err != nil {
		log.Error("read READONLY resp failed", conn.RemoteAddr().String(), err)
		return fail(err)
	}

	// Any error-typed reply means READONLY was rejected by the server.
	if data.T == resp.T_Error {
		log.Error("READONLY resp is not OK", conn.RemoteAddr().String())
		return fail(errors.New("post connect error: READONLY resp is not OK"))
	}
	return conn, nil
}
Example 2
// redirect re-sends the request carried by plRsp to the new server that the
// redis cluster pointed us at via a MOVED/ASK reply. When ask is true the
// ASKING command is sent first, and its reply is drained before reading the
// reply to the actual command. Any failure is recorded in plRsp.err and the
// pooled connection is marked unusable.
func (s *Session) redirect(server string, plRsp *PipelineResponse, ask bool) {
	plRsp.err = nil

	conn, err := s.connPool.GetConn(server)
	if err != nil {
		log.Error(err)
		plRsp.err = err
		return
	}
	defer func() {
		// On any protocol failure the connection state is undefined:
		// keep it out of the pool.
		if err != nil {
			log.Error(err)
			conn.(*pool.PoolConn).MarkUnusable()
		}
		conn.Close()
	}()

	reader := bufio.NewReader(conn)

	// For ASK redirection the ASKING command must precede the real command.
	if ask {
		if _, err = conn.Write(ASK_CMD_BYTES); err != nil {
			plRsp.err = err
			return
		}
	}
	if _, err = conn.Write(plRsp.ctx.cmd.Format()); err != nil {
		plRsp.err = err
		return
	}
	// Drain the ASKING reply so the next read lines up with our command.
	if ask {
		if _, err = resp.ReadData(reader); err != nil {
			plRsp.err = err
			return
		}
	}

	obj := resp.NewObject()
	if err = resp.ReadDataBytes(reader, obj); err != nil {
		plRsp.err = err
		return
	}
	plRsp.rsp = obj
}
Example 3
// CoalesceRsp merges the replies of all sub-commands into one pipeline
// response, following the semantics of each multi-key command: MGET collects
// every value into an array (positionally), MSET keeps a fixed OK reply, and
// DEL sums the per-key deletion counts. The first sub-command error (or
// error-typed reply) short-circuits the merge and becomes the whole response.
func (mc *MultiKeyCmd) CoalesceRsp() *PipelineResponse {
	cmdType := mc.CmdType()

	var merged *resp.Data
	switch cmdType {
	case MGET:
		merged = &resp.Data{T: resp.T_Array, Array: make([]*resp.Data, mc.numSubCmds)}
	case MSET:
		merged = OK_DATA
	case DEL:
		merged = &resp.Data{T: resp.T_Integer}
	default:
		panic("invalid multi key cmd name")
	}

	for i, sub := range mc.subCmdRsps {
		if sub.err != nil {
			merged = &resp.Data{T: resp.T_Error, String: []byte(sub.err.Error())}
			break
		}
		// Re-parse the raw backend reply so it can be folded into the
		// aggregate response.
		data, err := resp.ReadData(bufio.NewReader(bytes.NewReader(sub.rsp.Raw())))
		if err != nil {
			log.Errorf("re-parse response err=%s", err)
			merged = &resp.Data{T: resp.T_Error, String: []byte(err.Error())}
			break
		}
		if data.T == resp.T_Error {
			merged = data
			break
		}
		switch cmdType {
		case MGET:
			merged.Array[i] = data
		case MSET:
			// MSET keeps the fixed OK reply; nothing to accumulate.
		case DEL:
			merged.Integer += data.Integer
		default:
			panic("invalid multi key cmd name")
		}
	}

	plRsp := &PipelineResponse{}
	plRsp.rsp = resp.NewObjectFromData(merged)
	return plRsp
}
Example 4
// doReload queries CLUSTER SLOTS from the given server, then uses CLUSTER
// NODES to filter out slaves reported as failed. Depending on readPrefer it
// narrows each slot's read candidates (master only, live slaves, or live
// slaves in the local IDC), falling back to the master when no slave survives.
//
// Fixes over the original:
//   - log.Errorf was called with a non-format string plus extra args (vet
//     printf violation, garbled output) — now log.Error.
//   - a second bufio.Reader was created for the CLUSTER NODES reply,
//     silently discarding any bytes the first reader had already buffered
//     from the connection — the reader is now reused.
//   - the loop-invariant local-IDC prefix is computed once, not per slot.
func (d *Dispatcher) doReload(server string) (slotInfos []*SlotInfo, err error) {
	var conn net.Conn
	conn, err = d.connPool.GetConn(server)
	if err != nil {
		log.Error(server, err)
		return
	}
	log.Infof("query cluster slots from %s", server)
	defer func() {
		if err != nil {
			// A failed exchange leaves the connection in an undefined
			// protocol state; keep it out of the pool.
			conn.(*pool.PoolConn).MarkUnusable()
		}
		conn.Close()
	}()

	// One buffered reader for the whole conversation. Creating a fresh
	// bufio.Reader mid-stream would drop already-buffered bytes.
	r := bufio.NewReader(conn)

	if _, err = conn.Write(REDIS_CMD_CLUSTER_SLOTS); err != nil {
		log.Error("write cluster slots error", server, err)
		return
	}
	var data *resp.Data
	data, err = resp.ReadData(r)
	if err != nil {
		log.Error(server, err)
		return
	}
	slotInfos = make([]*SlotInfo, 0, len(data.Array))
	for _, info := range data.Array {
		slotInfos = append(slotInfos, NewSlotInfo(info))
	}

	// Filter slot info with cluster nodes information.
	if _, err = conn.Write(REDIS_CMD_CLUSTER_NODES); err != nil {
		log.Error("write cluster nodes error", server, err)
		return
	}
	data, err = resp.ReadData(r)
	if err != nil {
		log.Error(server, err)
		return
	}

	aliveNodes := make(map[string]bool)
	lines := strings.Split(strings.TrimSpace(string(data.String)), "\n")
	for _, line := range lines {
		// 305fa52a4ed213df3ca97a4399d9e2a6e44371d2 10.4.17.164:7704 master - 0 1440042315188 2 connected 5461-10922
		log.Debug(line)
		elements := strings.SplitN(line, " ", CLUSTER_NODES_FIELD_SPLIT_NUM)
		if len(elements) <= CLUSTER_NODES_FIELD_NUM_FLAGS {
			// Malformed/short line: skip rather than panic on indexing.
			continue
		}
		if !strings.Contains(elements[CLUSTER_NODES_FIELD_NUM_FLAGS], "fail") {
			aliveNodes[elements[CLUSTER_NODES_FIELD_NUM_IP_PORT]] = true
		} else {
			log.Warningf("node fails: %s", elements[CLUSTER_NODES_FIELD_NUM_IP_PORT])
		}
	}

	// Loop-invariant: the local "A.B." prefix used for same-IDC matching.
	// ips are regarded as in the same idc if they share the first two
	// segments, eg 10.4.x.x.
	var localIPPrefix string
	if d.readPrefer == READ_PREFER_SLAVE || d.readPrefer == READ_PREFER_SLAVE_IDC {
		localIPPrefix = LocalIP()
		if len(localIPPrefix) > 0 {
			segments := strings.SplitN(localIPPrefix, ".", 3)
			localIPPrefix = strings.Join(segments[:2], ".")
			localIPPrefix += "."
		}
	}

	for _, si := range slotInfos {
		if d.readPrefer == READ_PREFER_MASTER {
			si.read = []string{si.write}
		} else if d.readPrefer == READ_PREFER_SLAVE || d.readPrefer == READ_PREFER_SLAVE_IDC {
			var readNodes []string
			for _, node := range si.read {
				if !aliveNodes[node] {
					log.Infof("filter %s since it's not alive", node)
					continue
				}
				if d.readPrefer == READ_PREFER_SLAVE_IDC {
					if !strings.HasPrefix(node, localIPPrefix) {
						log.Infof("filter %s by read prefer slave idc", node)
						continue
					}
				}
				readNodes = append(readNodes, node)
			}
			// No live (or local-IDC) slave left: fall back to the master.
			if len(readNodes) == 0 {
				readNodes = []string{si.write}
			}
			si.read = readNodes
		}
	}
	return
}