Example #1
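This test exercises sereno's etcd-backed distributed counter: it seeds the value to workers * decsPerWorker, has three goroutines decrement it concurrently through separate clients, and asserts the final value is exactly zero.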
func TestCounterDec(t *testing.T) {
	runtime.GOMAXPROCS(8)
	cluster := embeddedetcd.TestClusterOf1()
	cluster.Launch()
	defer cluster.Terminate(wipe_data_onterm)

	const workers = 3
	const decsPerWorker = 100

	startbarrier := &sync.WaitGroup{}
	startbarrier.Add(1)

	alldone := &sync.WaitGroup{}
	alldone.Add(workers)

	kapi := KeyClientFromCluster(cluster)
	ctx := context.Background()
	mycntr, err := sereno.NewCounter(ctx, "counter002", kapi)
	AssertT(t, err == nil, "err should be nil, got:%v", err)
	// Seed the counter so the workers' decrements land exactly on zero.
	err = mycntr.Set(workers * decsPerWorker)
	AssertT(t, err == nil, "err should be nil, got:%v", err)

	for x := 0; x < workers; x++ {
		go func() {
			startbarrier.Wait()
			kapi := KeyClientFromCluster(cluster)
			ctx := context.Background()
			cntr, err := sereno.NewCounter(ctx, "counter002", kapi)
			AssertT(t, err == nil, "err should be nil, got:%v", err)

			for i := 0; i < decsPerWorker; i++ {
				err := cntr.Dec(1)
				AssertT(t, err == nil, "err should be nil, got:%v", err)
			}
			alldone.Done()
		}()
	}

	testtimeout := NewTestCaseTimeout(t, 10*time.Second, time.Microsecond)
	defer testtimeout.End()
	startbarrier.Done() // release all the workers at once

	alldone.Wait()

	cnt, err := mycntr.Val()
	AssertT(t, err == nil, "err should be nil, got:%v", err)
	AssertT(t, cnt == 0, "expected %v counts, got:%v", 0, cnt)
}
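
Stripped of the assertion scaffolding, the counter API exercised above reduces to the sketch below. Only NewCounter, Set, Dec, and Val are taken from the test; the wrapper function, key name, and plain error returns are illustrative assumptions.

func counterSketch() error {
	cluster := embeddedetcd.TestClusterOf1()
	cluster.Launch()
	defer cluster.Terminate(wipe_data_onterm)

	kapi := KeyClientFromCluster(cluster)
	cntr, err := sereno.NewCounter(context.Background(), "pending-jobs", kapi)
	if err != nil {
		return err
	}
	if err := cntr.Set(300); err != nil { // seed the shared value
		return err
	}
	if err := cntr.Dec(1); err != nil { // any client, on any host, can decrement
		return err
	}
	val, err := cntr.Val() // read the current value back
	if err != nil {
		return err
	}
	fmt.Println("remaining:", val)
	return nil
}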
Example #2
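A pub/sub load test, skipped by default: five publishers each send 500 messages plus an "exit" marker on one topic while a single subscriber counts messages, logs duplicates, and deliberately stalls mid-stream to force a backlog.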
func TestSignalWithEtcdLoad(t *testing.T) {
	// Skipped by default; remove the SkipNow call to run this load test.
	t.SkipNow()

	runtime.GOMAXPROCS(8)

	// Enable the library's verbose debug and trace logging.
	sereno.UseDebugdlogging = true
	sereno.Usedtracedlogging = true

	cluster := embeddedetcd.TestClusterOf1()
	cluster.Launch()
	defer func() {
		t.Log("terminating etcd cluster")
		cluster.Terminate(wipe_data_onterm)
	}()

	testtimeout := NewTestCaseTimeout(t, 260*time.Second, time.Microsecond)
	defer testtimeout.End()

	const msgcount = 500
	const workers = 5

	ready1barrier := &sync.WaitGroup{}
	ready1barrier.Add(1)

	done := &sync.WaitGroup{}
	done.Add(1)

	kapi := KeyClientFromCluster(cluster)
	ctx := context.Background()
	pub, err := sereno.NewPubSubTopic(ctx, "topic42", kapi)
	AssertT(t, err == nil, "err should be nil, got:%v", err)

	tcnt := int64(0)
	go func() {
		defer done.Done()

		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
		defer cancel()
		sub, err := sereno.NewPubSubTopic(ctx, "topic42", kapi)
		AssertT(t, err == nil, "err should be nil, got:%v", err)
		subchan, err := sub.Subscribe()
		AssertT(t, err == nil, "err should be nil, got:%v", err)

		ready1barrier.Done()
		ecnt := int64(0)
		cnt := 0
		st := time.Now()
		defer func() {
			secs := time.Since(st).Seconds()
			rate := float64(cnt) / secs
			t.Logf("Background Subscriber: %v msgs @ rate: %0.0f msg/s", cnt, rate)
		}()

		seen := map[string]bool{}

		for {
			select {
			case <-time.After(25 * time.Second):
				sub.UnSubscribe()
				return
			case msgout, open := <-subchan:
				if !open {
					// Errorf, not Fatalf: this runs outside the test goroutine.
					t.Errorf("sub chan closed unexpectedly")
					return
				}
				if msgout.Err != nil {
					err := msgout.Err
					if err == context.Canceled {
						return
					} else if err == context.DeadlineExceeded {
						return
					}
					t.Errorf("error: %v", msgout.Err)
					return
				}

				if cnt == 10 {
					// Stall the subscriber briefly so the publishers race
					// ahead and build up a backlog of messages.
					fmt.Println("sleeping")
					time.Sleep(5 * time.Second)
				}

				if string(msgout.Msg) == "exit" {
					fmt.Println("exit signaled : cnt:", cnt, " ecnt:", ecnt)
					ecnt++
				} else {
					cnt++
					atomic.AddInt64(&tcnt, 1)
					if _, ok := seen[string(msgout.Msg)]; ok {
						t.Logf("the message:%v is a duplicate", string(msgout.Msg))
					}
					seen[string(msgout.Msg)] = true
				}

				if ecnt == workers {
					// Every publisher has signaled exit; keep draining until
					// the 25-second idle timeout above fires.
					//return
				}
			}
		}
	}()

	ready1barrier.Wait()

	for i := 0; i < workers; i++ {
		go func() {
			for i := 0; i < msgcount; i++ {
				m := fmt.Sprintf("msgid:%d", i)
				err := pub.Publish([]byte(m))
				AssertT(t, err == nil, "err should be nil, got:%v", err)
			}

			time.Sleep(200 * time.Millisecond)
			err := pub.Publish([]byte("exit"))
			AssertT(t, err == nil, "err should be nil, got:%v", err)
		}()
	}
	done.Wait()
	AssertT(t, msgcount*workers == atomic.LoadInt64(&tcnt), "miscount in results %v != %v", msgcount*workers, atomic.LoadInt64(&tcnt))
	t.Log("testing done...")
}
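
Since the same topic handle serves both the publisher and subscriber roles, a minimal round trip through the pub/sub API looks roughly like this. NewPubSubTopic, Publish, Subscribe, UnSubscribe, and the Msg/Err message fields are reused from the tests; the wrapper function and error handling are illustrative.

func topicRoundTrip() error {
	cluster := embeddedetcd.TestClusterOf1()
	cluster.Launch()
	defer cluster.Terminate(wipe_data_onterm)

	kapi := KeyClientFromCluster(cluster)
	ctx := context.Background()

	sub, err := sereno.NewPubSubTopic(ctx, "topic42", kapi)
	if err != nil {
		return err
	}
	subchan, err := sub.Subscribe()
	if err != nil {
		return err
	}
	defer sub.UnSubscribe()

	pub, err := sereno.NewPubSubTopic(ctx, "topic42", kapi)
	if err != nil {
		return err
	}
	if err := pub.Publish([]byte("hello")); err != nil {
		return err
	}

	msgout := <-subchan // blocks until the published message arrives
	if msgout.Err != nil {
		return msgout.Err
	}
	fmt.Println("got:", string(msgout.Msg))
	return nil
}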
Example #3
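A pub/sub smoke test: one publisher sends 1500 messages on a topic and a background subscriber drains them, treating context cancellation or deadline expiry as a clean shutdown.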
func TestSignal(t *testing.T) {
	runtime.GOMAXPROCS(8)
	cluster := embeddedetcd.TestClusterOf1()
	cluster.Launch()
	defer func() {
		t.Log("terminating etcd cluster")
		cluster.Terminate(wipe_data_onterm)
	}()

	testtimeout := NewTestCaseTimeout(t, 60*time.Second, time.Microsecond)
	defer testtimeout.End()

	const msgcount = 1500

	ready1barrier := &sync.WaitGroup{}
	ready1barrier.Add(1)

	done := &sync.WaitGroup{}
	done.Add(1)

	kapi := KeyClientFromCluster(cluster)
	ctx := context.Background()
	pub, err := sereno.NewPubSubTopic(ctx, "topic42", kapi)
	AssertT(t, err == nil, "err should be nil, got:%v", err)

	go func() {
		defer done.Done()

		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
		defer cancel()
		sub, err := sereno.NewPubSubTopic(ctx, "topic42", kapi)
		AssertT(t, err == nil, "err should be nil, got:%v", err)
		subchan, err := sub.Subscribe()
		AssertT(t, err == nil, "err should be nil, got:%v", err)
		defer sub.UnSubscribe()

		ready1barrier.Done()

		cnt := 0
		st := time.Now()
		defer func() {
			secs := time.Since(st).Seconds()
			rate := float64(cnt) / secs
			t.Logf("Background Subscriber: %v msgs @ rate: %0.0f msg/s", cnt, rate)
		}()
		for msgout := range subchan {
			if msgout.Err != nil {
				err := msgout.Err
				if err == context.Canceled {
					return
				} else if err == context.DeadlineExceeded {
					return
				}
				t.Fatalf("error: %v", msgout.Err)
			}

			cnt++
			// Note: stops one message short of the full stream (msgcount-1).
			if cnt == msgcount-1 {
				break
			}
		}
	}()

	ready1barrier.Wait()
	for i := 0; i < msgcount; i++ {
		m := fmt.Sprintf("msgid:%d", i)
		err := pub.Publish([]byte(m))
		AssertT(t, err == nil, "err should be nil, got:%v", err)
	}

	done.Wait()
	t.Log("testing done...")
}
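
Both subscriber loops above lean on one shutdown idiom: watch errors arrive on the message's Err field, and context.Canceled or context.DeadlineExceeded mark a normal wind-down rather than a failure. A hedged sketch of that pattern, reusing only identifiers from the tests:

func subscribeUntilDeadline() error {
	cluster := embeddedetcd.TestClusterOf1()
	cluster.Launch()
	defer cluster.Terminate(wipe_data_onterm)

	kapi := KeyClientFromCluster(cluster)
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	sub, err := sereno.NewPubSubTopic(ctx, "topic42", kapi)
	if err != nil {
		return err
	}
	subchan, err := sub.Subscribe()
	if err != nil {
		return err
	}
	defer sub.UnSubscribe()

	for msgout := range subchan {
		if msgout.Err != nil {
			if msgout.Err == context.Canceled || msgout.Err == context.DeadlineExceeded {
				return nil // normal shutdown
			}
			return msgout.Err // anything else is a real failure
		}
		fmt.Println("got:", string(msgout.Msg))
	}
	return nil
}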
Example #4
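This test drives sereno's distributed WaitGroup: 100 workers are registered with Add, each signals Done through its own client after a random sleep, and the main goroutine blocks in Wait under a 10-second watchdog.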
func TestWorkGroupTest(t *testing.T) {
	runtime.GOMAXPROCS(8)
	const workers = 100

	cluster := embeddedetcd.TestClusterOf1()
	cluster.Launch()
	defer cluster.Terminate(wipe_data_onterm)

	startbarrier := &sync.WaitGroup{}
	startbarrier.Add(1)
	activeWork := int64(workers)

	kapi := KeyClientFromCluster(cluster)
	ctx := context.Background()
	dwg, err := sereno.NewWaitGroup(ctx, "wg001", kapi)
	AssertT(t, err == nil, "err should be nil, got:%v", err)

	err = dwg.Add(workers)
	AssertT(t, err == nil, "err should be nil, got:%v", err)

	for x := 0; x < workers; x++ {
		go func() {
			defer func() {
				atomic.AddInt64(&activeWork, -1)

				// Signal completion through a fresh client, just as a
				// worker on another host would.
				kapi := KeyClientFromCluster(cluster)
				ctx := context.Background()
				dwg, err := sereno.NewWaitGroup(ctx, "wg001", kapi)
				AssertT(t, err == nil, "err should be nil, got:%v", err)
				dwg.Done()
			}()
			startbarrier.Wait()

			// Simulate work: sleep a random duration of up to ~4s.
			r := rand.Int31n(4096)
			d := time.Duration(r) * time.Millisecond
			time.Sleep(d)
		}()
	}

	done := make(chan bool)
	go func() {
		time.Sleep(150 * time.Millisecond)
		startbarrier.Done()

		select {
		case <-time.After(10 * time.Second):
			c, err := sereno.WgCount(dwg)
			AssertT(t, err == nil, "testcase timed out: err should be nil, got:%v", err)
			t.Fatalf("testcase timed out: waiting on: %v", c)
		case <-done:
			t.Log("nothing to see here, all done...")
		}
	}()

	err = dwg.Wait()
	AssertT(t, err == nil, "err should be nil, got:%v", err)

	close(done)
	AssertT(t, 0 == atomic.LoadInt64(&activeWork), "active work count wasn't zero, got:%v", atomic.LoadInt64(&activeWork))

	time.Sleep(150 * time.Millisecond)
}
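
The distributed WaitGroup usage distills to the sketch below: Add the expected worker count, let each worker call Done through its own client, and block in Wait; WgCount (used by the watchdog above) reports how many completions remain outstanding. The wrapper and error handling are illustrative.

func waitGroupSketch() error {
	cluster := embeddedetcd.TestClusterOf1()
	cluster.Launch()
	defer cluster.Terminate(wipe_data_onterm)

	kapi := KeyClientFromCluster(cluster)
	dwg, err := sereno.NewWaitGroup(context.Background(), "wg001", kapi)
	if err != nil {
		return err
	}
	if err := dwg.Add(2); err != nil {
		return err
	}

	for i := 0; i < 2; i++ {
		go func() {
			// Each worker reports through its own client, just as a
			// process on another host would.
			kapi := KeyClientFromCluster(cluster)
			wg, err := sereno.NewWaitGroup(context.Background(), "wg001", kapi)
			if err == nil {
				wg.Done()
			}
		}()
	}

	if c, err := sereno.WgCount(dwg); err == nil {
		fmt.Println("outstanding:", c)
	}
	return dwg.Wait() // blocks until every registered worker is Done
}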