func pipelineInsert(conn redis.Conn, keyScoreMembers []common.KeyScoreMember, maxSize int) error {
	for _, tuple := range keyScoreMembers {
		if err := insertScript.Send(
			conn,
			tuple.Key,
			tuple.Score,
			tuple.Member,
			maxSize,
		); err != nil {
			return err
		}
	}

	if err := conn.Flush(); err != nil {
		return err
	}

	for range keyScoreMembers { // TODO actually count inserts
		if _, err := conn.Receive(); err != nil {
			return err
		}
	}

	return nil
}
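// Usage sketch (not from the original source): pipelineInsert relies on redigo's
// Send/Flush/Receive pipelining. The address, key names, scores, and the maxSize
// value below are illustrative assumptions.
func examplePipelineInsert() error {
	conn, err := redis.Dial("tcp", "localhost:6379") // assumed local redis
	if err != nil {
		return err
	}
	defer conn.Close()

	members := []common.KeyScoreMember{
		{Key: "timeline:1", Score: 1.0, Member: "event-a"},
		{Key: "timeline:1", Score: 2.0, Member: "event-b"},
	}
	// maxSize caps each sorted set; 1000 is an arbitrary example value.
	return pipelineInsert(conn, members, 1000)
}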
func (p *process) close(conn redis.Conn) error {
	logger.Infof("%v shutdown", p)
	conn.Send("SREM", fmt.Sprintf("%sworkers", namespace), p)
	conn.Send("DEL", fmt.Sprintf("%sstat:processed:%s", namespace, p))
	conn.Send("DEL", fmt.Sprintf("%sstat:failed:%s", namespace, p))
	return conn.Flush()
}
func pipelineDelete(conn redis.Conn, keyScoreMembers []common.KeyScoreMember, maxSize int) error {
	for _, keyScoreMember := range keyScoreMembers {
		if err := deleteScript.Send(
			conn,
			keyScoreMember.Key,
			keyScoreMember.Score,
			keyScoreMember.Member,
			maxSize,
		); err != nil {
			return err
		}
	}

	if err := conn.Flush(); err != nil {
		return err
	}

	for range keyScoreMembers { // TODO actually count deletes
		if _, err := conn.Receive(); err != nil {
			return err
		}
	}

	return nil
}
func (self *redis_gateway) execute(c redis.Conn, commands [][]string) error {
	switch len(commands) {
	case 0:
		return nil
	case 1:
		e := self.redis_do(c, commands[0])
		if nil != e {
			return errors.New("execute `" + strings.Join(commands[0], " ") + "` failed, " + e.Error())
		}
		return nil
	default:
		for _, command := range commands {
			e := self.redis_send(c, command)
			if nil != e {
				return errors.New("execute `" + strings.Join(command, " ") + "` failed, " + e.Error())
			}
		}

		e := c.Flush()
		if nil != e {
			return e
		}

		for i := 0; i < len(commands); i++ {
			_, e = c.Receive()
			if nil != e {
				return errors.New("execute `" + strings.Join(commands[i], " ") + "` failed, " + e.Error())
			}
		}
		return nil
	}
}
// Get fetches the value of the "fooo" key through a pipelined GET. It returns
// an empty string if sending, flushing, or reading the reply fails.
func Get(conn redis.Conn) string {
	if err := conn.Send("GET", "fooo"); err != nil {
		return ""
	}
	if err := conn.Flush(); err != nil {
		return ""
	}
	result, err := redis.String(conn.Receive())
	if err != nil {
		return ""
	}
	return result
}
// Save issues a pipelined GET (the reply is currently ignored) and returns the
// reflect.Type of a pointer to obj. Because obj is an interface{} parameter,
// the returned type is always *interface {}.
func Save(obj interface{}, conn redis.Conn) reflect.Type {
	conn.Send("GET", "fooo")
	conn.Flush()
	//result, _ := redis.String(conn.Receive()) // TODO missing two err catch
	return reflect.ValueOf(&obj).Type()
}
func publishEventsPipeline(conn redis.Conn, command string) publishFn {
	return func(key outil.Selector, data []outputs.Data) ([]outputs.Data, error) {
		var okEvents []outputs.Data
		serialized := make([]interface{}, 0, len(data))
		okEvents, serialized = serializeEvents(serialized, 0, data)
		if len(serialized) == 0 {
			return nil, nil
		}

		data = okEvents[:0]
		for i, serializedEvent := range serialized {
			eventKey, err := key.Select(okEvents[i].Event)
			if err != nil {
				logp.Err("Failed to set redis key: %v", err)
				continue
			}

			data = append(data, okEvents[i])
			if err := conn.Send(command, eventKey, serializedEvent); err != nil {
				logp.Err("Failed to execute %v: %v", command, err)
				return okEvents, err
			}
		}

		if err := conn.Flush(); err != nil {
			return data, err
		}

		failed := data[:0]
		var lastErr error
		for i := range serialized {
			_, err := conn.Receive()
			if err != nil {
				if _, ok := err.(redis.Error); ok {
					logp.Err("Failed to %v event to list with %v", command, err)
					failed = append(failed, data[i])
					lastErr = err
				} else {
					logp.Err("Failed to %v multiple events to list with %v", command, err)
					failed = append(failed, data[i:]...)
					lastErr = err
					break
				}
			}
		}
		return failed, lastErr
	}
}
func (b *BGPDump) parseBGPCSV(r io.Reader, conn redis.Conn) (int, error) {
	day := b.day()
	s := bufio.NewScanner(r)
	n := 0
	var asn string
	for s.Scan() {
		cols := strings.Split(s.Text(), "|")
		if len(cols) < 7 {
			return n, ParseError{
				Message: "too few columns",
				Path:    filepath.Base(b.Path()),
				LineNum: n,
				Line:    s.Text(),
			}
		}
		block := cols[5]
		if _, ok := asn12654blocks[block]; ok {
			asn = "12654"
		} else {
			asPath := cols[6]
			asns := strings.Split(asPath, " ")
			asn = asns[len(asns)-1]
			if asn == "" {
				return n, ParseError{
					Message: "no ASPATH data",
					Path:    filepath.Base(b.Path()),
					LineNum: n,
					Line:    s.Text(),
				}
			}
		}
		conn.Send("HSET", fmt.Sprintf("i2a:%s", block), day, asn)
		n++
		if n%10000 == 0 {
			err := conn.Flush()
			if err != nil {
				return 0, err
			}
		}
	}
	conn.Send("SADD", "i2a:imported_dates", day)
	err := conn.Flush()
	if err != nil {
		return 0, err
	}
	return n, nil
}
func read(list string, conn redis.Conn, count int) {
	for i := 0; i < count; i++ {
		conn.Send("LPOP", list)
	}
	conn.Flush()
	for i := 0; i < count; i++ {
		value, err := redis.String(conn.Receive())
		if err != nil {
			os.Exit(0)
		}
		if value != "" {
			fmt.Println(value)
		}
	}
}
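// Companion sketch (assumption, not in the original source): a writer that fills
// the list drained by read above, using the same Send/Flush pipelining. The list
// name and payload format are illustrative.
func write(list string, conn redis.Conn, count int) error {
	for i := 0; i < count; i++ {
		if err := conn.Send("RPUSH", list, fmt.Sprintf("message-%d", i)); err != nil {
			return err
		}
	}
	return conn.Flush()
}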
func pipelineScore(conn redis.Conn, keyMembers []common.KeyMember) (map[common.KeyMember]Presence, error) {
	for _, keyMember := range keyMembers {
		if err := conn.Send("ZSCORE", keyMember.Key+insertSuffix, keyMember.Member); err != nil {
			return map[common.KeyMember]Presence{}, err
		}
		if err := conn.Send("ZSCORE", keyMember.Key+deleteSuffix, keyMember.Member); err != nil {
			return map[common.KeyMember]Presence{}, err
		}
	}

	if err := conn.Flush(); err != nil {
		return map[common.KeyMember]Presence{}, err
	}

	m := map[common.KeyMember]Presence{}
	for i := 0; i < len(keyMembers); i++ {
		insertReply, insertErr := conn.Receive()
		insertValue, insertErr := redis.Float64(insertReply, insertErr)
		deleteReply, deleteErr := conn.Receive()
		deleteValue, deleteErr := redis.Float64(deleteReply, deleteErr)
		switch {
		case insertErr == nil && deleteErr == redis.ErrNil:
			m[keyMembers[i]] = Presence{
				Present:  true,
				Inserted: true,
				Score:    insertValue,
			}
		case insertErr == redis.ErrNil && deleteErr == nil:
			m[keyMembers[i]] = Presence{
				Present:  true,
				Inserted: false,
				Score:    deleteValue,
			}
		case insertErr == redis.ErrNil && deleteErr == redis.ErrNil:
			m[keyMembers[i]] = Presence{
				Present: false,
			}
		default:
			return map[common.KeyMember]Presence{}, fmt.Errorf(
				"pipelineScore bad state for %v (%v/%v)",
				keyMembers[i],
				insertErr,
				deleteErr,
			)
		}
	}
	return m, nil
}
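// Usage sketch (assumption, not in the original source): how a caller might
// consume the Presence map returned by pipelineScore. The key and member values
// are made up for illustration.
func examplePipelineScore(conn redis.Conn) error {
	keyMembers := []common.KeyMember{{Key: "timeline:1", Member: "event-a"}}
	presence, err := pipelineScore(conn, keyMembers)
	if err != nil {
		return err
	}
	for km, p := range presence {
		switch {
		case p.Present && p.Inserted:
			fmt.Printf("%v present in insert set, score %v\n", km, p.Score)
		case p.Present:
			fmt.Printf("%v present in delete set, score %v\n", km, p.Score)
		default:
			fmt.Printf("%v absent\n", km)
		}
	}
	return nil
}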
func pipelineRange(conn redis.Conn, keys []string, offset, limit int) (map[string][]common.KeyScoreMember, error) {
	if limit < 0 {
		return map[string][]common.KeyScoreMember{}, fmt.Errorf("negative limit is invalid for offset-based select")
	}

	for _, key := range keys {
		if err := conn.Send(
			"ZREVRANGE",
			key+insertSuffix,
			offset,
			offset+limit-1,
			"WITHSCORES",
		); err != nil {
			return map[string][]common.KeyScoreMember{}, err
		}
	}

	if err := conn.Flush(); err != nil {
		return map[string][]common.KeyScoreMember{}, err
	}

	m := make(map[string][]common.KeyScoreMember, len(keys))
	for _, key := range keys {
		values, err := redis.Values(conn.Receive())
		if err != nil {
			return map[string][]common.KeyScoreMember{}, err
		}

		var (
			ksm             = common.KeyScoreMember{Key: key}
			keyScoreMembers = make([]common.KeyScoreMember, 0, len(values))
		)
		for len(values) > 0 {
			if values, err = redis.Scan(values, &ksm.Member, &ksm.Score); err != nil {
				return map[string][]common.KeyScoreMember{}, err
			}
			keyScoreMembers = append(keyScoreMembers, ksm)
		}
		m[key] = keyScoreMembers
	}
	return m, nil
}
func publishEventsPipeline(conn redis.Conn, command string) publishFn {
	return func(dest []byte, events []common.MapStr) ([]common.MapStr, error) {
		var args [2]interface{}
		args[0] = dest

		serialized := make([]interface{}, 0, len(events))
		events, serialized = serializeEvents(serialized, 0, events)
		if len(serialized) == 0 {
			return nil, nil
		}

		for _, event := range serialized {
			args[1] = event
			if err := conn.Send(command, args[:]...); err != nil {
				logp.Err("Failed to execute %v: %v", command, err)
				return events, err
			}
		}

		if err := conn.Flush(); err != nil {
			return events, err
		}

		failed := events[:0]
		var lastErr error
		for i := range serialized {
			_, err := conn.Receive()
			if err != nil {
				if _, ok := err.(redis.Error); ok {
					logp.Err("Failed to %v event to list (%v) with %v", command, dest, err)
					failed = append(failed, events[i])
					lastErr = err
				} else {
					logp.Err("Failed to %v multiple events to list (%v) with %v", command, dest, err)
					failed = append(failed, events[i:]...)
					lastErr = err
					break
				}
			}
		}
		return failed, lastErr
	}
}
func GetRepo(c redis.Conn, repo string) (string, bool, error) {
	c.Send("GET", repo)
	c.Send("TTL", repo)
	if err := c.Flush(); err != nil {
		return "", false, err
	}
	content, err := redis.String(c.Receive())
	if err == redis.ErrNil {
		return "", false, nil
	}
	if err != nil {
		return "", false, err
	}
	if ttl, err := redis.Int(c.Receive()); err == nil && ttl > FRESH_CACHE_LIMIT {
		return content, true, nil
	}
	return content, false, err
}
func newSession(w http.ResponseWriter, userid uint64, username string, appid uint32, rc redis.Conn) (usertoken string, err error) {
	if rc == nil {
		rc = redisPool.Get()
		defer rc.Close()
	}

	usertoken = ""
	usertokenRaw, err := rc.Do("get", fmt.Sprintf("usertokens/%d+%d", userid, appid))
	lwutil.CheckError(err, "")
	if usertokenRaw != nil {
		usertoken, err = redis.String(usertokenRaw, err)
		if err != nil {
			return usertoken, lwutil.NewErr(err)
		}
		rc.Do("del", fmt.Sprintf("sessions/%s", usertoken))
	}

	usertoken = lwutil.GenUUID()

	session := Session{userid, username, time.Now(), appid}
	jsonSession, err := json.Marshal(session)
	if err != nil {
		return usertoken, lwutil.NewErr(err)
	}

	rc.Send("setex", fmt.Sprintf("sessions/%s", usertoken), sessionLifeSecond, jsonSession)
	rc.Send("setex", fmt.Sprintf("usertokens/%d+%d", userid, appid), sessionLifeSecond, usertoken)
	if err = rc.Flush(); err != nil {
		return usertoken, lwutil.NewErr(err)
	}
	for i := 0; i < 2; i++ {
		if _, err = rc.Receive(); err != nil {
			return usertoken, lwutil.NewErr(err)
		}
	}

	// cookie
	http.SetCookie(w, &http.Cookie{Name: "usertoken", Value: usertoken, MaxAge: sessionLifeSecond, Path: "/"})

	return usertoken, err
}
func pipelineRevRange(conn redis.Conn, keys []string, offset, limit int) (map[string][]common.KeyScoreMember, error) {
	for _, key := range keys {
		if err := conn.Send(
			"ZREVRANGE",
			key+insertSuffix,
			offset,
			offset+limit-1,
			"WITHSCORES",
		); err != nil {
			return map[string][]common.KeyScoreMember{}, err
		}
	}

	if err := conn.Flush(); err != nil {
		return map[string][]common.KeyScoreMember{}, err
	}

	m := make(map[string][]common.KeyScoreMember, len(keys))
	for _, key := range keys {
		values, err := redis.Values(conn.Receive())
		if err != nil {
			return map[string][]common.KeyScoreMember{}, err
		}

		keyScoreMembers := make([]common.KeyScoreMember, 0, len(values))
		for len(values) > 0 {
			var member string
			var score float64
			if values, err = redis.Scan(values, &member, &score); err != nil {
				return map[string][]common.KeyScoreMember{}, err
			}
			keyScoreMembers = append(keyScoreMembers, common.KeyScoreMember{Key: key, Score: score, Member: member})
		}
		m[key] = keyScoreMembers
	}
	return m, nil
}
// Import stores the contents of a downloaded BGP dump into a redis server.
// -1 is returned if the dump is already imported into redis.
func (b *CIDRReport) Import(conn redis.Conn) (int, error) {
	alreadyImported, err := redis.Bool(conn.Do("SISMEMBER", "asd:imported_dates", b.day()))
	if err != nil {
		return 0, err
	}
	if alreadyImported {
		return -1, nil
	}
	file, err := os.Open(b.Path())
	if err != nil {
		return 0, err
	}
	n := 0
	day := b.day()
	err = parseReport(file, func(asd *ASDescription) error {
		conn.Send("HSET",
			fmt.Sprintf("asd:%d", asd.ASN),
			day,
			fmt.Sprintf("%s, %s", asd.Description, asd.CountryCode))
		n++
		if n%10000 == 0 {
			err := conn.Flush()
			if err != nil {
				return err
			}
		}
		return nil
	})
	if err != nil {
		return 0, err
	}
	conn.Send("SADD", "asd:imported_dates", day)
	err = conn.Flush()
	if err != nil {
		return 0, err
	}
	return n, nil
}
func SetCache(c redis.Conn, repo, content, coverage string) error {
	c.Send("SETEX", repo, CACHE_EXPIRE_TIME, content)
	c.Send("SET", repo+".coverage", coverage)
	return c.Flush()
}
func SetStats(c redis.Conn, repo string) error {
	c.Send("ZINCRBY", "top", 1, repo)
	c.Send("ZADD", "last", time.Now().Unix(), repo)
	return c.Flush()
}
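// Usage sketch (assumption, not in the original source): GetRepo, SetCache and
// SetStats look like the pieces of a simple read-through cache. A caller might
// combine them as below; fetchContent is a hypothetical helper that produces
// fresh content and coverage for a repo.
func exampleCacheLookup(c redis.Conn, repo string) (string, error) {
	if content, fresh, err := GetRepo(c, repo); err == nil && fresh {
		return content, nil // cached and still fresh
	}
	content, coverage, err := fetchContent(repo) // hypothetical regeneration step
	if err != nil {
		return "", err
	}
	if err := SetCache(c, repo, content, coverage); err != nil {
		return "", err
	}
	if err := SetStats(c, repo); err != nil {
		return "", err
	}
	return content, nil
}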
func pipelineRangeByScore(conn redis.Conn, keys []string, cursor, stopcursor common.Cursor, limit int) (map[string][]common.KeyScoreMember, error) {
	if limit < 0 {
		// TODO maybe change that
		return map[string][]common.KeyScoreMember{}, fmt.Errorf("negative limit is invalid for cursor-based select")
	}

	// pastCursor returns true when the score+member are "past" the cursor
	// (smaller score, larger lexicographically) and can therefore be included
	// in the resultset.
	pastCursor := func(score float64, member string) bool {
		if score < cursor.Score {
			return true
		}
		if score == cursor.Score && bytes.Compare([]byte(member), []byte(cursor.Member)) < 0 {
			return true
		}
		return false
	}

	// beforeStop returns true as long as the score+member are "before" the
	// stopcursor (larger score, smaller lexicographically) and can therefore
	// be included in the resultset.
	beforeStop := func(score float64, member string) bool {
		if score > stopcursor.Score {
			return true
		}
		if score == stopcursor.Score && bytes.Compare([]byte(member), []byte(stopcursor.Member)) > 0 {
			return true
		}
		return false
	}

	// An unlimited number of members may exist at cursor.Score. Luckily,
	// they're in lexicographically stable order. Walk the elements we get
	// back. For as long as element.Score == cursor.Score, and a
	// lexicographical comparison of (element.Score, cursor.Score) < 0,
	// discard the element. As soon as that condition fails, break the loop,
	// and collect elements. If we run out of elements before collecting the
	// user-requested limit, double the limit and try again, up to N times.
	var (
		keysToSelect = keys  // start with all
		selectLimit  = limit // double every time
		maxAttempts  = 3     // up to this many times (TODO could be parameterized)
		results      = map[string][]common.KeyScoreMember{}
	)
	for attempt := 0; len(keysToSelect) > 0 && attempt < maxAttempts; attempt++ {
		for _, key := range keysToSelect {
			if err := conn.Send(
				"ZREVRANGEBYSCORE",
				key+insertSuffix,
				fmt.Sprint(cursor.Score), // max
				"-inf",                   // min
				"WITHSCORES",
				"LIMIT",
				0,
				selectLimit,
			); err != nil {
				return map[string][]common.KeyScoreMember{}, err
			}
		}

		if err := conn.Flush(); err != nil {
			return map[string][]common.KeyScoreMember{}, err
		}

		m := make(map[string][]common.KeyScoreMember, len(keys))
		for _, key := range keysToSelect {
			values, err := redis.Values(conn.Receive())
			if err != nil {
				return map[string][]common.KeyScoreMember{}, err
			}

			var (
				collected = 0
				validated = make([]common.KeyScoreMember, 0, len(values))
				hitStop   = false
			)
			for len(values) > 0 {
				var member string
				var score float64
				if values, err = redis.Scan(values, &member, &score); err != nil {
					return map[string][]common.KeyScoreMember{}, err
				}
				collected++

				if !pastCursor(score, member) {
					continue // this element is still behind or at our cursor
				}
				if !beforeStop(score, member) {
					hitStop = true
					continue // this element is beyond our stop point
				}

				validated = append(validated, common.KeyScoreMember{Key: key, Score: score, Member: member})
			}

			// At this point, we know if we can use these elements, or need to
			// go back for more.
			var (
				haveEnoughElements = len(validated) >= limit
				exhaustedElements  = collected < selectLimit
			)
			if haveEnoughElements || exhaustedElements || hitStop {
				if len(validated) > limit {
					validated = validated[:limit]
				}
				m[key] = validated
			}
		}

		retryKeys := make([]string, 0, len(keysToSelect))
		for _, key := range keysToSelect {
			if a, ok := m[key]; ok {
				results[key] = a // use it
			} else {
				retryKeys = append(retryKeys, key) // try again
			}
		}
		keysToSelect = retryKeys
		selectLimit *= 2
	}

	if n := len(keysToSelect); n > 0 {
		return map[string][]common.KeyScoreMember{}, fmt.Errorf("%d key(s) failed to yield enough elements", n)
	}

	return results, nil
}
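// Usage sketch (assumption, not in the original source): cursor-based paging with
// pipelineRangeByScore. common.Cursor is assumed to carry Score and Member fields,
// mirroring how the function compares elements against it; key names and score
// bounds are illustrative.
func exampleCursorPage(conn redis.Conn) error {
	start := common.Cursor{Score: 1e15} // assumed upper bound above any stored score
	stop := common.Cursor{}             // zero cursor: don't stop early
	results, err := pipelineRangeByScore(conn, []string{"timeline:1"}, start, stop, 10)
	if err != nil {
		return err
	}
	for key, elements := range results {
		fmt.Printf("%s: got %d elements\n", key, len(elements))
	}
	return nil
}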
func (p *process) open(conn redis.Conn) error {
	conn.Send("SADD", fmt.Sprintf("%sworkers", namespace), p)
	conn.Send("SET", fmt.Sprintf("%sstat:processed:%v", namespace, p), "0")
	conn.Send("SET", fmt.Sprintf("%sstat:failed:%v", namespace, p), "0")
	return conn.Flush()
}
func (p *process) start(conn redis.Conn) error {
	conn.Send("SET", fmt.Sprintf("%sworker:%s:started", namespace, p), time.Now().String())
	return conn.Flush()
}
func (b *BGPDump) parseBGPDump(conn redis.Conn) (int, error) {
	day := b.day()
	n := 0
	f, err := os.Open(b.Path())
	if err != nil {
		return 0, err
	}
	gzipReader, err := gzip.NewReader(f)
	if err != nil {
		return n, fmt.Errorf("couldn't create gzip reader: %v", err)
	}
	scanner := bufio.NewScanner(gzipReader)
	scanner.Split(mrt.SplitMrt)
	count := 0
	indexTableCount := 0
entries:
	for scanner.Scan() {
		count++
		data := scanner.Bytes()

		hdr := &mrt.MRTHeader{}
		errh := hdr.DecodeFromBytes(data[:mrt.MRT_COMMON_HEADER_LEN])
		if errh != nil {
			return 0, errh
		}

		msg, err := mrt.ParseMRTBody(hdr, data[mrt.MRT_COMMON_HEADER_LEN:])
		if err != nil {
			log.Printf("could not parse mrt body: %v", err)
			continue entries
		}
		if msg.Header.Type != mrt.TABLE_DUMPv2 {
			return 0, fmt.Errorf("unexpected message type: %d", msg.Header.Type)
		}

		switch mtrBody := msg.Body.(type) {
		case *mrt.PeerIndexTable:
			indexTableCount++
			if indexTableCount != 1 {
				return 0, fmt.Errorf("got >1 PeerIndexTable")
			}
		case *mrt.Rib:
			prefix := mtrBody.Prefix
			if len(mtrBody.Entries) == 0 {
				return 0, fmt.Errorf("no entries")
			}
			for _, entry := range mtrBody.Entries {
			attrs:
				for _, attr := range entry.PathAttributes {
					switch attr := attr.(type) {
					case *bgp.PathAttributeAsPath:
						if len(attr.Value) < 1 {
							continue attrs
						}
						if v, ok := attr.Value[0].(*bgp.As4PathParam); ok {
							if len(v.AS) == 0 {
								continue attrs
							}
							conn.Send("HSET", fmt.Sprintf("i2a:%s", prefix), day, v.AS[len(v.AS)-1])
							n++
							if n%10000 == 0 {
								err := conn.Flush()
								if err != nil {
									return 0, err
								}
							}
							continue entries
						}
					}
				}
			}
		default:
			return 0, fmt.Errorf("unsupported message %v %s", mtrBody, spew.Sdump(msg))
		}
	}
	conn.Send("SADD", "i2a:imported_dates", day)
	err = conn.Flush()
	if err != nil {
		return 0, err
	}
	return n, nil
}
func (p *process) fail(conn redis.Conn) error {
	conn.Send("INCR", fmt.Sprintf("%sstat:failed", namespace))
	conn.Send("INCR", fmt.Sprintf("%sstat:failed:%s", namespace, p))
	return conn.Flush()
}
func (p *process) finish(conn redis.Conn) error {
	conn.Send("DEL", fmt.Sprintf("%sworker:%s", namespace, p))
	conn.Send("DEL", fmt.Sprintf("%sworker:%s:started", namespace, p))
	return conn.Flush()
}
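// Lifecycle sketch (assumption, not in the original source): the order in which
// the process helpers above appear intended to be called, with a hypothetical
// doWork step standing in for the actual job.
func exampleProcessLifecycle(p *process, conn redis.Conn) error {
	if err := p.open(conn); err != nil {
		return err
	}
	if err := p.start(conn); err != nil {
		return err
	}
	if err := doWork(); err != nil { // hypothetical unit of work
		p.fail(conn)
	}
	if err := p.finish(conn); err != nil {
		return err
	}
	return p.close(conn)
}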