Code example #1
File: swarm_test.go Project: rht/bssim
func TestConnHandler(t *testing.T) {
	// t.Skip("skipping for another test")
	t.Parallel()

	ctx := context.Background()
	swarms := makeSwarms(ctx, t, 5)

	gotconn := make(chan struct{}, 10)
	swarms[0].SetConnHandler(func(conn *Conn) {
		gotconn <- struct{}{}
	})

	connectSwarms(t, ctx, swarms)

	<-time.After(time.Millisecond)
	// should've gotten 5 by now.

	swarms[0].SetConnHandler(nil)

	expect := 4
	for i := 0; i < expect; i++ {
		select {
		case <-time.After(time.Second):
			t.Fatal("failed to get connections")
		case <-gotconn:
		}
	}

	select {
	case <-gotconn:
		t.Fatalf("should have connected to %d swarms", expect)
	default:
	}
}
Code example #2
File: eventer_test.go Project: ympons/gobot
func TestEventerOnce(t *testing.T) {
	e := NewEventer()
	e.AddEvent("test")

	sem := make(chan bool)
	e.Once("test", func(data interface{}) {
		sem <- true
	})

	go func() {
		e.Publish("test", true)
	}()

	select {
	case <-sem:
	case <-time.After(10 * time.Millisecond):
		t.Errorf("Once was not called")
	}

	go func() {
		e.Publish("test", true)
	}()

	select {
	case <-sem:
		t.Errorf("Once was called twice")
	case <-time.After(10 * time.Millisecond):
	}
}
Code example #3
func mapTestConcurrent2(size int, threads int) int64 {
	//channel for getting times
	done := make(chan int64, 2*threads)

	var funcTimes int64 = 0
	start := time.Now().UnixNano()

	cMap := buildMapConcurrent2(size, threads, done)
	mapReadTestConcurrent(cMap, threads, done)

	i := 0
	for t := range done {
		funcTimes += t
		i++
		if i == 2*threads { //means I've read all of the values that will be read
			close(done)
		}
	}

	end := time.Now().UnixNano()

	fmt.Println("V2: Concurrent Test:", float64((end-start))/1000000, "ms")
	//fmt.Println("Concurrent Test additive", (funcTimes)/1000, "us")

	<-time.After(time.Second * 3)
	cMap.DestructMap()
	<-time.After(time.Second * 2)

	return end - start
}
Code example #4
File: watch_test.go Project: vsayer/etcd
func testWatchCancelRunning(t *testing.T, wctx *watchctx) {
	ctx, cancel := context.WithCancel(context.Background())
	if wctx.ch = wctx.w.Watch(ctx, "a"); wctx.ch == nil {
		t.Fatalf("expected non-nil watcher channel")
	}
	if _, err := wctx.kv.Put(ctx, "a", "a"); err != nil {
		t.Fatal(err)
	}
	cancel()
	select {
	case <-time.After(time.Second):
		t.Fatalf("took too long to cancel")
	case v, ok := <-wctx.ch:
		if !ok {
			// closed before getting put; OK
			break
		}
		// got the PUT; should close next
		select {
		case <-time.After(time.Second):
			t.Fatalf("took too long to close")
		case v, ok = <-wctx.ch:
			if ok {
				t.Fatalf("expected watcher channel to close, got %v", v)
			}
		}
	}
}
Code example #5
func main() {
	{
		errChan := make(chan error)
		go func() {
			errChan <- nil
		}()
		select {
		case v := <-errChan:
			fmt.Println("even if nil, it still receives", v)
		case <-time.After(time.Second):
			fmt.Println("time-out!")
		}
		// even if nil, it still receives <nil>
	}

	{
		errChan := make(chan error)
		errChan = nil
		go func() {
			errChan <- nil
		}()
		select {
		case v := <-errChan:
			fmt.Println("even if nil, it still receives", v)
		case <-time.After(time.Second):
			fmt.Println("time-out!")
		}
		// time-out!
	}
}
Code example #6
File: tail_test.go Project: srid/tail
func _TestReSeek(_t *testing.T, poll bool) {
	var name string
	if poll {
		name = "reseek-polling"
	} else {
		name = "reseek-inotify"
	}
	t := NewTailTest(name, _t)
	t.CreateFile("test.txt", "a really long string goes here\nhello\nworld\n")
	tail := t.StartTail(
		"test.txt",
		Config{Follow: true, ReOpen: true, Poll: poll, Location: -1})

	go t.VerifyTailOutput(tail, []string{
		"a really long string goes here", "hello", "world", "h311o", "w0r1d", "endofworld"})

	// truncate now
	<-time.After(100 * time.Millisecond)
	t.TruncateFile("test.txt", "h311o\nw0r1d\nendofworld\n")

	// Delete after a reasonable delay, to give tail sufficient time
	// to read all lines.
	<-time.After(100 * time.Millisecond)
	t.RemoveFile("test.txt")

	// Do not bother with stopping as it could kill the tomb during
	// the reading of data written above. Timings can vary based on
	// test environment.
	// tail.Stop()
}
Code example #7
File: consumer_test.go Project: adilhafeez/marshal
func (s *ConsumerSuite) TestStrictOrdering(c *C) {
	// Test that we get strict ordering semantics
	s.cn.options.StrictOrdering = true
	s.Produce("test16", 0, "m1", "m2", "m3", "m4")
	s.Produce("test16", 1, "m1", "m2", "m3", "m4")
	c.Assert(s.cn.tryClaimPartition(0), Equals, true)
	c.Assert(s.cn.tryClaimPartition(1), Equals, true)
	c.Assert(s.m.waitForRsteps(4), Equals, 4)

	msg1 := <-s.cn.messages
	c.Assert(msg1.Value, DeepEquals, []byte("m1"))
	msg2 := <-s.cn.messages
	c.Assert(msg2.Value, DeepEquals, []byte("m1"))

	// This should time out (no messages available)
	select {
	case <-s.cn.messages:
		c.Error("Expected timeout, got message.")
	case <-time.After(300 * time.Millisecond):
		// Nothing, this is good.
	}

	// Commit the first message, expect a single new message
	c.Assert(s.cn.Commit(msg1), IsNil)
	msg3 := <-s.cn.messages
	c.Assert(msg3.Value, DeepEquals, []byte("m2"))

	// This should time out (no messages available)
	select {
	case <-s.cn.messages:
		c.Error("Expected timeout, got message.")
	case <-time.After(300 * time.Millisecond):
		// Nothing, this is good.
	}
}
Code example #8
File: corebulk_test.go Project: jacqui/elastigo
func TestBulkSmallBatch(t *testing.T) {
	InitTests(true)
	c := NewTestConn()

	date := time.Unix(1257894000, 0)
	data := map[string]interface{}{"name": "smurfs", "age": 22, "date": time.Unix(1257894000, 0)}

	// Now tests small batches
	indexer := c.NewBulkIndexer(1)
	indexer.BufferDelayMax = 100 * time.Millisecond
	indexer.BulkMaxDocs = 2
	messageSets = 0
	indexer.Sender = func(buf *bytes.Buffer) error {
		messageSets += 1
		return indexer.Send(buf)
	}
	indexer.Start()
	<-time.After(time.Millisecond * 20)

	indexer.Index("users", "user", "2", "", &date, data, true)
	indexer.Index("users", "user", "3", "", &date, data, true)
	indexer.Index("users", "user", "4", "", &date, data, true)
	<-time.After(time.Millisecond * 200)
	//	indexer.Flush()
	indexer.Stop()
	assert.T(t, messageSets == 2, fmt.Sprintf("Should have sent 2 message sets %d", messageSets))

}
Code example #9
File: pool_test.go Project: anmic/pg
func (t *PoolTest) TestCloseClosesAllConnections(c *C) {
	ln, err := t.db.Listen("test_channel")
	c.Assert(err, IsNil)

	wait := make(chan struct{}, 2)
	go func() {
		wait <- struct{}{}
		_, _, err := ln.Receive()
		c.Assert(err, ErrorMatches, `^(.*use of closed network connection|EOF)$`)
		wait <- struct{}{}
	}()

	select {
	case <-wait:
		// ok
	case <-time.After(3 * time.Second):
		c.Fatal("timeout")
	}

	c.Assert(t.db.Close(), IsNil)

	select {
	case <-wait:
		// ok
	case <-time.After(3 * time.Second):
		c.Fatal("timeout")
	}

	c.Assert(t.db.Pool().Len(), Equals, 0)
	c.Assert(t.db.Pool().FreeLen(), Equals, 0)
}
Code example #10
File: dht_test.go Project: djbarber/ipfs-hack
// if minPeers or avgPeers is 0, don't test for it.
func waitForWellFormedTables(t *testing.T, dhts []*IpfsDHT, minPeers, avgPeers int, timeout time.Duration) bool {
	// test "well-formed-ness" (>= minPeers peers in every routing table)

	checkTables := func() bool {
		totalPeers := 0
		for _, dht := range dhts {
			rtlen := dht.routingTable.Size()
			totalPeers += rtlen
			if minPeers > 0 && rtlen < minPeers {
				t.Logf("routing table for %s only has %d peers (should have >%d)", dht.self, rtlen, minPeers)
				return false
			}
		}
		actualAvgPeers := totalPeers / len(dhts)
		t.Logf("avg rt size: %d", actualAvgPeers)
		if avgPeers > 0 && actualAvgPeers < avgPeers {
			t.Logf("avg rt size: %d < %d", actualAvgPeers, avgPeers)
			return false
		}
		return true
	}

	timeoutA := time.After(timeout)
	for {
		select {
		case <-timeoutA:
			log.Debugf("did not reach well-formed routing tables by %s", timeout)
			return false // failed
		case <-time.After(5 * time.Millisecond):
			if checkTables() {
				return true // succeeded
			}
		}
	}
}
Code example #11
File: cluster_test.go Project: Yogesh9692/mgo
func (s *S) TestCustomDialNew(c *C) {
	dials := make(chan bool, 16)
	dial := func(addr *mgo.ServerAddr) (net.Conn, error) {
		dials <- true
		if addr.TCPAddr().Port == 40012 {
			c.Check(addr.String(), Equals, "localhost:40012")
		}
		return net.DialTCP("tcp", nil, addr.TCPAddr())
	}
	info := mgo.DialInfo{
		Addrs:      []string{"localhost:40012"},
		DialServer: dial,
	}

	// Use hostname here rather than IP, to make things trickier.
	session, err := mgo.DialWithInfo(&info)
	c.Assert(err, IsNil)
	defer session.Close()

	const N = 3
	for i := 0; i < N; i++ {
		select {
		case <-dials:
		case <-time.After(5 * time.Second):
			c.Fatalf("expected %d dials, got %d", N, i)
		}
	}
	select {
	case <-dials:
		c.Fatalf("got more dials than expected")
	case <-time.After(100 * time.Millisecond):
	}
}
Code example #12
File: system.go Project: yanghongkjxy/go-ipfs
// Run is the main republisher loop
func (np *Republisher) Run() {
	for {
		select {
		case <-np.Publish:
			quick := time.After(np.TimeoutShort)
			longer := time.After(np.TimeoutLong)

		wait:
			var pubnowresp chan struct{}

			select {
			case <-np.ctx.Done():
				return
			case <-np.Publish:
				quick = time.After(np.TimeoutShort)
				goto wait
			case <-quick:
			case <-longer:
			case pubnowresp = <-np.pubnowch:
			}

			err := np.publish(np.ctx)
			if pubnowresp != nil {
				pubnowresp <- struct{}{}
			}
			if err != nil {
				log.Errorf("republishRoot error: %s", err)
			}

		case <-np.ctx.Done():
			return
		}
	}
}
Code example #13
File: periodic_test.go Project: musha68k/go-ipfs
func testSeqNoWait(t *testing.T, toTest intervalFunc) {
	t.Parallel()

	last := time.Now()
	times := make(chan time.Time, 10)
	wait := make(chan struct{})
	p := toTest(times, wait)

	for i := 0; i < 5; i++ {
		next := <-times
		testBetween(t, 0, next.Sub(last), interval+grace) // min of 0

		<-time.After(interval * 2) // make it wait.
		last = time.Now()          // make it now (sequential)
		wait <- struct{}{}         // release it.
	}

	go p.Close()

end:
	select {
	case wait <- struct{}{}: // drain any extras.
		goto end
	case <-p.Closed():
	case <-time.After(timeout):
		t.Error("proc failed to close")
	}
}
Code example #14
File: ctxio_test.go Project: andradeandrey/go-ipfs
func TestWritePostCancel(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	piper, pipew := io.Pipe()
	w := NewWriter(ctx, pipew)

	buf := []byte("abcdefghij")
	buf2 := make([]byte, 10)
	done := make(chan ioret)

	go func() {
		n, err := w.Write(buf)
		done <- ioret{n, err}
	}()

	piper.Read(buf2)

	select {
	case ret := <-done:
		if ret.n != 10 {
			t.Error("ret.n should be 10", ret.n)
		}
		if ret.err != nil {
			t.Error("ret.err should be nil", ret.err)
		}
		if string(buf2) != "abcdefghij" {
			t.Error("write contents differ")
		}
	case <-time.After(20 * time.Millisecond):
		t.Fatal("failed to write")
	}

	go func() {
		n, err := w.Write(buf)
		done <- ioret{n, err}
	}()

	cancel()

	select {
	case ret := <-done:
		if ret.n != 0 {
			t.Error("ret.n should be 0", ret.n)
		}
		if ret.err == nil {
			t.Error("ret.err should be ctx error", ret.err)
		}
	case <-time.After(20 * time.Millisecond):
		t.Fatal("failed to stop writing after cancel")
	}

	copy(buf, []byte("aaaaaaaaaa"))

	piper.Read(buf2)

	if string(buf2) == "aaaaaaaaaa" {
		t.Error("buffer was read from after ctx cancel")
	} else if string(buf2) != "abcdefghij" {
		t.Error("write contents differ from expected")
	}
}
Code example #15
File: tail_test.go Project: ando-masaki/tail
func TestRateLimiting(_t *testing.T) {
	t := NewTailTest("rate-limiting", _t)
	t.CreateFile("test.txt", "hello\nworld\nagain\nextra\n")
	config := Config{
		Follow:      true,
		RateLimiter: ratelimiter.NewLeakyBucket(2, time.Second)}
	leakybucketFull := "Too much log activity; waiting a second before resuming tailing"
	tail := t.StartTail("test.txt", config)

	// TODO: also verify that tail resumes after the cooloff period.
	go t.VerifyTailOutput(tail, []string{
		"hello", "world", "again",
		leakybucketFull,
		"more", "data",
		leakybucketFull})

	// Add more data only after reasonable delay.
	<-time.After(1200 * time.Millisecond)
	t.AppendFile("test.txt", "more\ndata\n")

	// Delete after a reasonable delay, to give tail sufficient time
	// to read all lines.
	<-time.After(100 * time.Millisecond)
	t.RemoveFile("test.txt")

	// tail.Stop()
	tail.Cleanup()
}
Code example #16
File: terraform_test.go Project: YuleiXiao/cockroach
// TestFiveNodesAndWriters runs a cluster and one writer per node.
// The test runs until SIGINT is received or the specified duration
// has passed.
func TestFiveNodesAndWriters(t *testing.T) {
	deadline := time.After(*flagDuration)
	f := farmer(t, "write-5n5w")
	defer f.MustDestroy()
	const size = 5
	if err := f.Resize(size, size); err != nil {
		t.Fatal(err)
	}
	if err := f.WaitReady(3 * time.Minute); err != nil {
		t.Fatal(err)
	}
	checkGossip(t, f, longWaitTime, hasPeers(size))

	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	f.Assert(t)
	for {
		select {
		case <-deadline:
			return
		case <-c:
			return
		case <-time.After(time.Minute):
			f.Assert(t)
		}
	}
}
Code example #17
File: filter_test.go Project: Pankov404/juju
func (s *FilterSuite) TestServiceDeath(c *gc.C) {
	f, err := filter.NewFilter(s.uniter, s.unit.Tag().(names.UnitTag))
	c.Assert(err, jc.ErrorIsNil)
	defer statetesting.AssertStop(c, f)
	dyingC := s.notifyAsserterC(c, f.UnitDying())
	dyingC.AssertNoReceive()

	err = s.unit.SetAgentStatus(state.StatusIdle, "", nil)
	c.Assert(err, jc.ErrorIsNil)
	err = s.wordpress.Destroy()
	c.Assert(err, jc.ErrorIsNil)

	timeout := time.After(coretesting.LongWait)
loop:
	for {
		select {
		case <-f.UnitDying():
			break loop
		case <-time.After(coretesting.ShortWait):
			s.BackingState.StartSync()
		case <-timeout:
			c.Fatalf("dead not detected")
		}
	}
	err = s.unit.Refresh()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(s.unit.Life(), gc.Equals, state.Dying)

	// Can't set s.wordpress to Dead while it still has units.
}
Code example #18
File: wsconn.go Project: ylywyn/v2ray-core
func (ws *wsconn) pingPong() {
	pongRcv := make(chan int, 1)
	ws.wsc.SetPongHandler(func(data string) error {
		pongRcv <- 0
		return nil
	})

	go func() {
		for !ws.connClosing {
			ws.wlock.Lock()
			ws.wsc.WriteMessage(websocket.PingMessage, nil)
			ws.wlock.Unlock()
			tick := time.After(time.Second * 3)

			select {
			case <-pongRcv:
			case <-tick:
				if !ws.connClosing {
					log.Debug("WS:Closing as ping is not responded~" + ws.wsc.UnderlyingConn().LocalAddr().String() + "-" + ws.wsc.UnderlyingConn().RemoteAddr().String())
				}
				ws.Close()
			}
			<-time.After(time.Second * 27)
		}

		return
	}()

}
Code example #19
File: state_test.go Project: ailispaw/docker
func TestStateTimeoutWait(t *testing.T) {
	s := NewState()
	started := make(chan struct{})
	go func() {
		s.WaitRunning(100 * time.Millisecond)
		close(started)
	}()
	select {
	case <-time.After(200 * time.Millisecond):
		t.Fatal("Start callback doesn't fire in 100 milliseconds")
	case <-started:
		t.Log("Start callback fired")
	}

	s.Lock()
	s.SetRunning(49, false)
	s.Unlock()

	stopped := make(chan struct{})
	go func() {
		s.WaitRunning(100 * time.Millisecond)
		close(stopped)
	}()
	select {
	case <-time.After(200 * time.Millisecond):
		t.Fatal("Start callback doesn't fire in 100 milliseconds")
	case <-stopped:
		t.Log("Start callback fired")
	}

}
Code example #20
File: stopper_test.go Project: alaypatel07/cockroach
func TestStopper(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := stop.NewStopper()
	running := make(chan struct{})
	waiting := make(chan struct{})
	cleanup := make(chan struct{})

	s.RunWorker(func() {
		<-running
	})

	go func() {
		<-s.ShouldStop()
		select {
		case <-waiting:
			t.Fatal("expected stopper to have blocked")
		case <-time.After(1 * time.Millisecond):
			// Expected.
		}
		close(running)
		select {
		case <-waiting:
			// Success.
		case <-time.After(100 * time.Millisecond):
			t.Fatal("stopper should have finished waiting")
		}
		close(cleanup)
	}()

	s.Stop()
	close(waiting)
	<-cleanup
}
Code example #21
File: watch_test.go Project: coreroller/coreroller
// default [templates:*go.html compile]
//
// if any go.html changes then run "templates", "compile", "default"
func TestWatchTasksWithoutSrcShouldAlwaysRun(t *testing.T) {
	trace := ""
	pass := 1
	tasks := func(p *Project) {
		p.Task("A", nil, func(*Context) {
			trace += "A"
		})
		p.Task("B", nil, func(*Context) {
			trace += "B"
		})
		p.Task("C", nil, func(*Context) {
			trace += "C"
		}).Src("test/sub/foo.txt")

		p.Task("default", S{"A", "B", "C"}, func(*Context) {
			// on watch, the task is run once before watching
			if pass == 2 {
				p.Exit(0)
			}
			pass++
		})
	}

	go func() {
		execCLI(tasks, []string{"-w"}, nil)
	}()

	<-time.After(testProjectDelay)

	touch("test/sub/foo.txt", 0)

	<-time.After(testWatchDelay)

	assert.Equal(t, "ABCABC", trace)
}
Code example #22
File: stopper_test.go Project: alaypatel07/cockroach
func TestStopperIsStopped(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := stop.NewStopper()
	bc := newBlockingCloser()
	s.AddCloser(bc)
	go s.Stop()

	select {
	case <-s.ShouldStop():
	case <-time.After(100 * time.Millisecond):
		t.Fatal("stopper should have finished waiting")
	}
	select {
	case <-s.IsStopped():
		t.Fatal("expected blocked closer to prevent stop")
	case <-time.After(1 * time.Millisecond):
		// Expected.
	}
	bc.Unblock()
	select {
	case <-s.IsStopped():
		// Expected
	case <-time.After(100 * time.Millisecond):
		t.Fatal("stopper should have finished stopping")
	}
}
Code example #23
func (clnt *client) Restore(containerID string, options ...CreateOption) error {
	w := clnt.getOrCreateExitNotifier(containerID)
	defer w.close()
	cont, err := clnt.getContainerdContainer(containerID)
	if err == nil && cont.Status != "stopped" {
		clnt.lock(cont.Id)
		container := clnt.newContainer(cont.BundlePath)
		container.systemPid = systemPid(cont)
		clnt.appendContainer(container)
		clnt.unlock(cont.Id)

		if err := clnt.Signal(containerID, int(syscall.SIGTERM)); err != nil {
			logrus.Errorf("error sending sigterm to %v: %v", containerID, err)
		}
		select {
		case <-time.After(10 * time.Second):
			if err := clnt.Signal(containerID, int(syscall.SIGKILL)); err != nil {
				logrus.Errorf("error sending sigkill to %v: %v", containerID, err)
			}
			select {
			case <-time.After(2 * time.Second):
			case <-w.wait():
				return nil
			}
		case <-w.wait():
			return nil
		}
	}
	return clnt.setExited(containerID)
}
Code example #24
File: app.go Project: Zapelini/tsuru
// Same as AcquireApplicationLock but it keeps trying to acquire the lock
// until timeout is reached.
func AcquireApplicationLockWait(appName string, owner string, reason string, timeout time.Duration) (bool, error) {
	timeoutChan := time.After(timeout)
	conn, err := db.Conn()
	if err != nil {
		return false, err
	}
	defer conn.Close()
	for {
		appLock := AppLock{
			Locked:      true,
			Reason:      reason,
			Owner:       owner,
			AcquireDate: time.Now().In(time.UTC),
		}
		err = conn.Apps().Update(bson.M{"name": appName, "lock.locked": bson.M{"$in": []interface{}{false, nil}}}, bson.M{"$set": bson.M{"lock": appLock}})
		if err == nil {
			return true, nil
		}
		if err != mgo.ErrNotFound {
			return false, err
		}
		select {
		case <-timeoutChan:
			return false, nil
		case <-time.After(300 * time.Millisecond):
		}
	}
}
Code example #25
File: consul_store_test.go Project: drcapulet/p2
func TestPublishQuitsOnInChannelCloseBeforeData(t *testing.T) {
	inCh := make(chan api.KVPairs)
	quitCh := make(chan struct{})
	defer close(quitCh)

	outCh, errCh := publishLatestRCs(inCh, quitCh)
	close(inCh)

	select {
	case _, ok := <-outCh:
		if ok {
			t.Fatalf("outCh should have closed since inCh closed")
		}
	case <-time.After(1 * time.Second):
		t.Fatalf("Timed out waiting for outCh to close")
	}

	select {
	case _, ok := <-errCh:
		if ok {
			t.Fatalf("errCh should have closed since inCh closed")
		}
	case <-time.After(1 * time.Second):
		t.Fatalf("Timed out waiting for errCh to close")
	}
}
Code example #26
File: cluster_test.go Project: cespare/prat
func (s *S) TestCustomDial(c *C) {
	dials := make(chan bool, 16)
	dial := func(addr net.Addr) (net.Conn, error) {
		tcpaddr, ok := addr.(*net.TCPAddr)
		if !ok {
			return nil, fmt.Errorf("unexpected address type: %T", addr)
		}
		dials <- true
		return net.DialTCP("tcp", nil, tcpaddr)
	}
	info := mgo.DialInfo{
		Addrs: []string{"localhost:40012"},
		Dial:  dial,
	}

	// Use hostname here rather than IP, to make things trickier.
	session, err := mgo.DialWithInfo(&info)
	c.Assert(err, IsNil)
	defer session.Close()

	const N = 3
	for i := 0; i < N; i++ {
		select {
		case <-dials:
		case <-time.After(5 * time.Second):
			c.Fatalf("expected %d dials, got %d", N, i)
		}
	}
	select {
	case <-dials:
		c.Fatalf("got more dials than expected")
	case <-time.After(100 * time.Millisecond):
	}
}
Code example #27
File: step_run.go Project: vvchik/packer
func (s *StepRun) Run(state multistep.StateBag) multistep.StepAction {
	driver := state.Get("driver").(Driver)
	ui := state.Get("ui").(packer.Ui)
	vmName := state.Get("vmName").(string)

	ui.Say("Starting the virtual machine...")

	err := driver.Start(vmName)
	if err != nil {
		err := fmt.Errorf("Error starting vm: %s", err)
		state.Put("error", err)
		ui.Error(err.Error())
		return multistep.ActionHalt
	}

	s.vmName = vmName

	if int64(s.BootWait) > 0 {
		ui.Say(fmt.Sprintf("Waiting %s for boot...", s.BootWait))
		wait := time.After(s.BootWait)
	WAITLOOP:
		for {
			select {
			case <-wait:
				break WAITLOOP
			case <-time.After(1 * time.Second):
				if _, ok := state.GetOk(multistep.StateCancelled); ok {
					return multistep.ActionHalt
				}
			}
		}
	}

	return multistep.ActionContinue
}
Code example #28
File: dpg_test.go Project: alLi1n/djdns
func TestDejePageGetter_getDoc_logging(t *testing.T) {
	buf := new(bytes.Buffer)
	pg := NewDejePageGetter(buf)
	_, topic, closer, _, dumb := setup_deje_env(t)
	defer closer()

	// Use fresh log, so hanger-on client has no access
	buf = new(bytes.Buffer)
	pg.writer = buf

	// Create client, wait for it to connect
	_, err := pg.getDoc(topic)
	if err != nil {
		t.Fatal(err)
	}
	<-time.After(50 * time.Millisecond)

	// Put something in the log, and wait for it to broadcast
	if err = dumb.Publish("foo"); err != nil {
		t.Fatal(err)
	}
	<-time.After(50 * time.Millisecond)

	heading := "client '" + topic + "': "
	expected_log := heading + "Tip is nil\n" +
		heading + "Non-{} message\n"
	got_log := buf.String()
	if got_log != expected_log {
		t.Fatalf("Log content was wrong.\nexp: '%s'\ngot: '%s'", expected_log, got_log)
	}

}
Code example #29
File: GoTimeout.go Project: caesarjuming/GoLearn
func main() {
	c1 := make(chan string, 3)
	c2 := make(chan string, 3)

	go func() {
		time.Sleep(time.Second * 2)
		c1 <- "c1 resource"
	}()

	select {
	case msg := <-c1:
		fmt.Println(msg)
	case <-time.After(time.Second * 2):
		fmt.Println("timeout 2")
	}

	go func() {
		time.Sleep(time.Second * 1)
		c2 <- "c2 resource"
	}()

	select {
	case msg := <-c2:
		fmt.Println(msg)
	case <-time.After(time.Second * 3):
		fmt.Println("timeout 3")
	}

}
Code example #30
File: end2end_test.go Project: ELMERzark/grpc-go
func testExceedMaxStreamsLimit(t *testing.T, e env) {
	// Only allows 1 live stream per server transport.
	s, cc := setUp(nil, 1, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	done := make(chan struct{})
	ch := make(chan int)
	go func() {
		for {
			select {
			case <-time.After(5 * time.Millisecond):
				ch <- 0
			case <-time.After(5 * time.Second):
				close(done)
				return
			}
		}
	}()
	// Loop until a stream creation hangs due to the new max stream setting.
	for {
		select {
		case <-ch:
			ctx, _ := context.WithTimeout(context.Background(), time.Second)
			if _, err := tc.StreamingInputCall(ctx); err != nil {
				if grpc.Code(err) == codes.DeadlineExceeded {
					return
				}
				t.Fatalf("%v.StreamingInputCall(_) = %v, want <nil>", tc, err)
			}
		case <-done:
			t.Fatalf("Client has not received the max stream setting in 5 seconds.")
		}
	}
}