// TestSTMConflict tests that conflicts are retried.
//
// It seeds five keys with the value "100", then runs one concurrent STM
// transaction per key; each transaction moves a random amount from its
// source key to a different randomly chosen key. Because every goroutine
// reads and writes overlapping keys, commits will conflict and the STM
// layer must retry until each apply function commits cleanly. The test
// passes if no transaction errors and the total across all keys is still
// len(keys)*100 (the transfers conserve the sum).
func TestSTMConflict(t *testing.T) {
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	// Seed every key with the same starting balance.
	etcdc := clus.RandClient()
	keys := make([]string, 5)
	for i := 0; i < len(keys); i++ {
		keys[i] = fmt.Sprintf("foo-%d", i)
		if _, err := etcdc.Put(context.TODO(), keys[i], "100"); err != nil {
			t.Fatalf("could not make key (%v)", err)
		}
	}

	// Launch one transfer transaction per key, each on a random client.
	errc := make(chan error)
	for i := range keys {
		curEtcdc := clus.RandClient()
		srcKey := keys[i]
		applyf := func(stm concurrency.STM) error {
			src := stm.Get(srcKey)
			// must be different key to avoid double-adding
			dstKey := srcKey
			for dstKey == srcKey {
				dstKey = keys[rand.Intn(len(keys))]
			}
			dst := stm.Get(dstKey)
			// Parse errors ignored: the test only ever writes decimal values.
			srcV, _ := strconv.ParseInt(src, 10, 64)
			dstV, _ := strconv.ParseInt(dst, 10, 64)
			// xfer < srcV, so the source balance stays >= 1 and
			// rand.Intn never sees a zero argument on retry.
			xfer := int64(rand.Intn(int(srcV)) / 2)
			stm.Put(srcKey, fmt.Sprintf("%d", srcV-xfer))
			stm.Put(dstKey, fmt.Sprintf("%d", dstV+xfer))
			return nil
		}
		go func() {
			_, err := concurrency.NewSTMRepeatable(context.TODO(), curEtcdc, applyf)
			errc <- err
		}()
	}

	// wait for txns
	for range keys {
		if err := <-errc; err != nil {
			t.Fatalf("apply failed (%v)", err)
		}
	}

	// ensure sum matches initial sum
	sum := 0
	for _, oldkey := range keys {
		rk, err := etcdc.Get(context.TODO(), oldkey)
		if err != nil {
			t.Fatalf("couldn't fetch key %s (%v)", oldkey, err)
		}
		v, _ := strconv.ParseInt(string(rk.Kvs[0].Value), 10, 64)
		sum += int(v)
	}
	if sum != len(keys)*100 {
		t.Fatalf("bad sum. got %d, expected %d", sum, len(keys)*100)
	}
}
func doSTM(ctx context.Context, client *v3.Client, requests <-chan stmApply) { defer wg.Done() for applyf := range requests { st := time.Now() _, err := v3sync.NewSTMRepeatable(context.TODO(), client, applyf) var errStr string if err != nil { errStr = err.Error() } results <- result{errStr: errStr, duration: time.Since(st), happened: time.Now()} bar.Increment() } }
// TestSTMPutNewKey confirms a STM put on a new key is visible after commit. func TestSTMPutNewKey(t *testing.T) { clus := NewClusterV3(t, &ClusterConfig{Size: 1}) defer clus.Terminate(t) etcdc := clus.RandClient() applyf := func(stm concurrency.STM) error { stm.Put("foo", "bar") return nil } if _, err := concurrency.NewSTMRepeatable(context.TODO(), etcdc, applyf); err != nil { t.Fatalf("error on stm txn (%v)", err) } resp, err := etcdc.Get(context.TODO(), "foo") if err != nil { t.Fatalf("error fetching key (%v)", err) } if string(resp.Kvs[0].Value) != "bar" { t.Fatalf("bad value. got %+v, expected 'bar' value", resp) } }
// TestSTMAbort tests that an aborted txn does not modify any keys. func TestSTMAbort(t *testing.T) { clus := NewClusterV3(t, &ClusterConfig{Size: 1}) defer clus.Terminate(t) etcdc := clus.RandClient() ctx, cancel := context.WithCancel(context.TODO()) applyf := func(stm concurrency.STM) error { stm.Put("foo", "baz") cancel() stm.Put("foo", "bap") return nil } if _, err := concurrency.NewSTMRepeatable(ctx, etcdc, applyf); err == nil { t.Fatalf("no error on stm txn") } resp, err := etcdc.Get(context.TODO(), "foo") if err != nil { t.Fatalf("error fetching key (%v)", err) } if len(resp.Kvs) != 0 { t.Fatalf("bad value. got %+v, expected nothing", resp) } }