func asyncFlushAndQuitOnCompletion(t *testing.T, client redis.AsyncClient) {
    // flush it
    fStat, e := client.Flushdb()
    if e != nil {
        t.Errorf("on Flushdb - %s", e)
    }
    ok, fe := fStat.Get()
    if fe != nil {
        t.Fatalf("BUG - non-Error future result get must never return error - got: %s", fe)
    }
    if !ok {
        t.Fatalf("BUG - non-Error flushdb future result must always be true")
    }

    // then quit
    fStat, e = client.Quit()
    if e != nil {
        t.Errorf("on Quit - %s", e)
    }
    ok, fe = fStat.Get()
    if fe != nil {
        t.Fatalf("BUG - non-Error future result get must never return error - got: %s", fe)
    }
    if !ok {
        t.Fatalf("BUG - non-Error quit future result must always be true")
    }
}
func load(r redis.AsyncClient, k string, w http.ResponseWriter) (obj interface{}) { f, rerr := r.Get(k) if rerr != nil { panic(rerr) } val, rerr, timeout := f.TryGet(50000000000) if rerr != nil { panic(rerr) } if timeout { loadtimeout++ log.Println("load timeout! count: ", loadtimeout) fmt.Fprintf(w, "Save failed for %s", key) return } zr, err := zlib.NewReader(bytes.NewReader(val)) if err != nil { log.Fatal("Failed to create zlib reader with error: ", err) } defer zr.Close() jd := json.NewDecoder(zr) err = jd.Decode(&obj) if err != nil { log.Fatal("Failed to decode json with error: ", err) } return }
func save(r redis.AsyncClient, key string, obj interface{}, w http.ResponseWriter) {
    // JSON-encode the object through a zlib writer into an in-memory buffer
    var b bytes.Buffer
    z := zlib.NewWriter(&b)
    defer z.Close()
    je := json.NewEncoder(z)
    err := je.Encode(obj)
    if err != nil {
        log.Fatal("Failed to json Encode with error: ", err)
    }
    z.Flush()

    f, rerr := r.Set(key, b.Bytes())
    if rerr != nil {
        panic(rerr)
    }
    _, rerr, timeout := f.TryGet(50000000000)
    if rerr != nil {
        panic(rerr)
    }
    if timeout {
        savetimeout++
        log.Println("save timeout! count: ", savetimeout)
        fmt.Fprintf(w, "Save failed for %s", key)
    }
}
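// A minimal sketch (not part of the original sources) of how load and save above
// might be wired into an HTTP handler. The handler factory name, the "key" query
// parameter, and the response encoding are assumptions for illustration only.
func makeDocHandler(r redis.AsyncClient) http.HandlerFunc {
    return func(w http.ResponseWriter, req *http.Request) {
        key := req.URL.Query().Get("key")
        switch req.Method {
        case "GET":
            // load the compressed document from redis and return it as JSON
            obj := load(r, key, w)
            if err := json.NewEncoder(w).Encode(obj); err != nil {
                log.Println("Failed to encode response with error: ", err)
            }
        case "POST":
            // decode the request body and persist it through save
            var obj interface{}
            if err := json.NewDecoder(req.Body).Decode(&obj); err != nil {
                http.Error(w, err.Error(), http.StatusBadRequest)
                return
            }
            save(r, key, obj, w)
        }
    }
}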
func doRpop(id string, signal chan int, client redis.AsyncClient, cnt int) {
    key := "list-R-" + id
    for i := 0; i < cnt; i++ {
        client.Rpop(key)
    }
    signal <- 1
}
func primeKey(key string, r redis.AsyncClient) {
    path := "document.json"
    file, err := os.Open(path)
    if err != nil {
        panic(err)
    }
    defer file.Close()

    reader := bufio.NewReader(file)
    document, _ := ioutil.ReadAll(reader)

    // compress the document before storing it
    var b bytes.Buffer
    z := zlib.NewWriter(&b)
    z.Write(document)
    z.Close()

    f, rerr := r.Set(key, b.Bytes())
    if rerr != nil {
        panic(rerr)
    }
    _, rerr, timeout := f.TryGet(50000000000)
    if rerr != nil {
        panic(rerr)
    }
    if timeout {
        savetimeout++
        log.Println("save timeout! count: ", savetimeout)
    }
}
func doGet(id string, signal chan int, client redis.AsyncClient, cnt int) {
    key := "set-" + id
    for i := 0; i < cnt; i++ {
        client.Get(key)
    }
    signal <- 1
}
func doDecr(id string, signal chan int, client redis.AsyncClient, cnt int) {
    key := "ctr-" + id
    for i := 0; i < cnt; i++ {
        client.Decr(key)
    }
    signal <- 1
}
func load(r redis.AsyncClient, k string, w http.ResponseWriter) (obj interface{}) { f, rerr := r.Get(k) if rerr != nil { panic(rerr) } val, rerr, timeout := f.TryGet(50000000000) if rerr != nil { panic(rerr) } if timeout { loadtimeout++ log.Println("load timeout! count: ", loadtimeout) fmt.Fprintf(w, "Save failed for %s", key) return } setPromise, srerr := r.Set(key, val) if srerr != nil { panic(rerr) } _, grerr, timeout := setPromise.TryGet(50000000000) if grerr != nil { panic(rerr) } if timeout { savetimeout++ log.Println("save timeout! count: ", savetimeout) fmt.Fprintf(w, "Save failed for %s", key) } return }
func doRpush(id string, signal chan int, client redis.AsyncClient, cnt int) {
    key := "list-R-" + id
    value := []byte("foo")
    for i := 0; i < cnt; i++ {
        client.Rpush(key, value)
    }
    signal <- 1
}
func doSet(id string, signal chan int, client redis.AsyncClient, cnt int) {
    key := "set-" + id
    value := []byte("foo")
    for i := 0; i < cnt; i++ {
        client.Set(key, value)
    }
    signal <- 1
}
func doPing(id string, signal chan int, client redis.AsyncClient, cnt int) {
    var fr redis.FutureBool
    for i := 0; i < cnt; i++ {
        fr, _ = client.Ping()
    }
    // block on the last future so all queued requests have been processed
    fr.Get()
    signal <- 1
}
func doDecr(id string, signal chan int, client redis.AsyncClient, cnt int) {
    var fr redis.FutureInt64
    key := "ctr-" + id
    for i := 0; i < cnt; i++ {
        fr, _ = client.Decr(key)
    }
    // block on the last future so all queued requests have been processed
    fr.Get()
    signal <- 1
}
func setup(client redis.AsyncClient) {
    fr, e := client.Flushdb()
    if e != nil {
        log.Println("Error on Flushdb: ", e)
        log.Println("fr: ", fr)
        panic(e)
    }
    frr, e2 := fr.Get()
    if e2 != nil {
        log.Println("Error on Flushdb future Get: ", e2)
        log.Println("frr: ", frr)
        panic(e2)
    }
}
func QuitAsyncClient(t *testing.T, client redis.AsyncClient) {
    // quit it
    fstat, e := client.Quit()
    if e != nil {
        t.Fatalf("on Quit - %s", e)
    }
    ok, fe := fstat.Get()
    if fe != nil {
        t.Fatalf("on fstat.Get() - %s", fe)
    }
    if !ok {
        t.Fatalf("fstat.Get() returned false")
    }
}
func doIncr(id string, signal chan int, client redis.AsyncClient, cnt int) {
    key := "ctr-" + id
    var fr redis.FutureInt64
    for i := 0; i < cnt; i++ {
        fr, _ = client.Incr(key)
    }
    // the last INCR result must equal the number of increments issued
    v, _ := fr.Get()
    if v != int64(cnt) {
        log.Fatalf("BUG: expecting counter %s to be %d but it is %d\n", key, cnt, v)
    }
    // debug sanity check
    // log.Printf("worker[%s] - last INCR result %s=%d\n", id, key, v)
    signal <- 1
}
func doPing(id string, signal chan int, client redis.AsyncClient, cnt int) {
    for i := 0; i < cnt; i++ {
        client.Ping()
    }
    signal <- 1
}
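// A minimal sketch (an assumption, not part of the original sources) of how the
// do* worker functions above are typically driven: one goroutine per worker, each
// with its own id and a shared signal channel, while the caller waits for every
// worker to report completion. The function name and the doSet choice are
// illustrative; any of the workers could be substituted.
func runWorkers(client redis.AsyncClient, workers, opsPerWorker int) {
    signal := make(chan int, workers)
    for i := 0; i < workers; i++ {
        go doSet(fmt.Sprintf("%d", i), signal, client, opsPerWorker)
    }
    // wait for all workers to signal completion
    for i := 0; i < workers; i++ {
        <-signal
    }
}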