Example #1
File: block_test.go Project: twmb/dash
func BenchmarkRWMutexR1(b *testing.B) {
	var mtx sync.RWMutex
	for i := 0; i < b.N; i++ {
		mtx.RLock()
		mtx.RUnlock()
	}
}
Example #2
func TestStream_Open_WithOp(t *testing.T) {
	src := newStrSrc([]string{"HELLO", "WORLD", "HOW", "ARE", "YOU"})
	snk := newStrSink()
	op1 := api.UnFunc(func(ctx context.Context, data interface{}) interface{} {
		str := data.(string)
		return len(str)
	})

	var m sync.RWMutex
	runeCount := 0
	op2 := api.UnFunc(func(ctx context.Context, data interface{}) interface{} {
		length := data.(int)
		m.Lock()
		runeCount += length
		m.Unlock()
		return nil
	})

	strm := New().From(src).Transform(op1).Transform(op2).To(snk)
	select {
	case err := <-strm.Open():
		if err != nil {
			t.Fatal(err)
		}
	case <-time.After(50 * time.Millisecond):
		t.Fatal("Waited too long ...")
	}
	m.RLock()
	if runeCount != 19 {
		t.Fatal("Data not streaming, runeCount 19, got ", runeCount)
	}
	m.RUnlock()
}
Example #3
func main() {
	var l *sync.RWMutex
	l = new(sync.RWMutex)
	l.RUnlock()
	fmt.Println("l")
	l.RLock()
}
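
A note on example #3: it calls RUnlock before ever taking the read lock, which makes a real sync.RWMutex abort the program with "sync: RUnlock of unlocked RWMutex" (the snippet is presumably a fixture for a checker). A minimal sketch of the balanced order:

package main

import (
	"fmt"
	"sync"
)

func main() {
	l := new(sync.RWMutex)
	l.RLock() // acquire the read lock first
	fmt.Println("read lock held")
	l.RUnlock() // release only what was acquired
}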
Example #4
func Task(f func(in Receiver, out Sender)) Sender {
	var running bool
	var l sync.RWMutex
	inR, inW := Pipe()
	outR, outW := Pipe()
	obj := NewServer()
	obj.OnAttach(Handler(func(msg *Message) error {
		msg.Ret.Send(&Message{Verb: Ack, Ret: inW})
		fmt.Printf("copying task output from %#v to %#v\n", outR, msg.Ret)
		defer fmt.Printf("(DONE) copying task output from %#v to %#v\n", outR, msg.Ret)
		Copy(msg.Ret, outR)
		return nil
	}))
	obj.OnStart(Handler(func(msg *Message) error {
		l.RLock()
		r := running
		l.RUnlock()
		if r {
			return fmt.Errorf("already running")
		}
		l.Lock()
		go f(inR, outW)
		running = true
		l.Unlock()
		msg.Ret.Send(&Message{Verb: Ack})
		return nil
	}))
	return obj
}
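
In example #4 the running flag is checked under RLock and then set under a separate Lock, so two Start messages arriving together can both observe running == false and both launch f. A minimal sketch of doing the check and the update under one write lock; the task type and the start name are invented for illustration:

package main

import (
	"fmt"
	"sync"
)

type task struct {
	l       sync.RWMutex
	running bool
}

// start checks and sets the flag under a single write lock, closing the
// window between the read and the write in example #4.
func (t *task) start(f func()) error {
	t.l.Lock()
	defer t.l.Unlock()
	if t.running {
		return fmt.Errorf("already running")
	}
	t.running = true
	go f()
	return nil
}

func main() {
	var t task
	fmt.Println(t.start(func() {})) // <nil>
	fmt.Println(t.start(func() {})) // already running
}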
Example #5
func TestSendReceiveWithWait(t *testing.T) {
	conn := NewConn()
	conn.ReceiveWait = true

	conn.Command("HGETALL", "person:1").ExpectMap(map[string]string{
		"name": "Mr. Johson",
		"age":  "42",
	})
	conn.Command("HGETALL", "person:2").ExpectMap(map[string]string{
		"name": "Ms. Jennifer",
		"age":  "28",
	})

	ids := []string{"1", "2"}
	for _, id := range ids {
		conn.Send("HGETALL", fmt.Sprintf("person:%s", id))
	}

	var people []Person
	var peopleLock sync.RWMutex

	go func() {
		for i := 0; i < len(ids); i++ {
			values, err := redis.Values(conn.Receive())
			if err != nil {
				t.Fatal(err)
			}

			var person Person
			err = redis.ScanStruct(values, &person)
			if err != nil {
				t.Fatal(err)
			}

			peopleLock.Lock()
			people = append(people, person)
			peopleLock.Unlock()
		}
	}()

	for i := 0; i < len(ids); i++ {
		conn.ReceiveNow <- true
	}
	time.Sleep(10 * time.Millisecond)

	peopleLock.RLock()
	defer peopleLock.RUnlock()

	if len(people) != 2 {
		t.Fatalf("Wrong number of people. Expected '2' and got '%d'", len(people))
	}

	if people[0].Name != "Mr. Johson" || people[1].Name != "Ms. Jennifer" {
		t.Error("People name order are wrong")
	}

	if people[0].Age != 42 || people[1].Age != 28 {
		t.Error("People age order are wrong")
	}
}
Example #6
// Readers-Writers
func rwlockExample() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	// rand.Seed(time.Now().UnixNano())
	// rand.Seed(0)
	l := new(sync.RWMutex)
	wg := new(sync.WaitGroup)
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(i int) {
			r := rand.Intn(10)
			time.Sleep(50 * time.Millisecond)
			if r < 5 {
				// I am reader.
				fmt.Printf("Reader waiting %d\n", i)
				l.RLock()
				fmt.Printf("go reader %d\n", i)
				l.RUnlock()
				wg.Done()
			} else {
				// I am writer
				fmt.Printf("Writer waiting %d\n", i)
				l.Lock()
				fmt.Printf("go writer %d\n", i)
				time.Sleep(50 * time.Millisecond)
				l.Unlock()
				wg.Done()
			}
		}(i)
	}
	wg.Wait()
}
Example #7
//GetCache checks whether the nodes in the lookup table have the cache.
//If found, it fetches the records.
func GetCache(background bool, c *thread.Cache) bool {
	const searchDepth = 5 // Search node size
	ns := manager.NodesForGet(c.Datfile, searchDepth)
	found := false
	var wg sync.WaitGroup
	var mutex sync.RWMutex
	dm := NewManger(c)
	for _, n := range ns {
		wg.Add(1)
		go func(n *node.Node) {
			defer wg.Done()
			if !headWithRange(n, c, dm) {
				return
			}
			if getWithRange(n, c, dm) {
				mutex.Lock()
				found = true
				mutex.Unlock()
				return
			}
		}(n)
	}
	if background {
		bg(c, &wg)
	} else {
		wg.Wait()
	}
	mutex.RLock()
	defer mutex.RUnlock()
	return found
}
Example #8
func TestNoRaceRWMutexMultipleReaders(t *testing.T) {
	var mu sync.RWMutex
	x := int64(0)
	ch := make(chan bool, 3)
	go func() {
		mu.Lock()
		defer mu.Unlock()
		x = 2
		ch <- true
	}()
	go func() {
		mu.RLock()
		y := x + 1
		_ = y
		mu.RUnlock()
		ch <- true
	}()
	go func() {
		mu.RLock()
		y := x + 2
		_ = y
		mu.RUnlock()
		ch <- true
	}()
	<-ch
	<-ch
	<-ch
}
Example #9
func TestRaceRWMutexMultipleReaders(t *testing.T) {
	var mu sync.RWMutex
	var x, y int64 = 0, 1
	ch := make(chan bool, 3)
	go func() {
		mu.Lock()
		defer mu.Unlock()
		x = 2
		ch <- true
	}()
	go func() {
		mu.RLock()
		y = x + 1
		mu.RUnlock()
		ch <- true
	}()
	go func() {
		mu.RLock()
		y = x + 2
		mu.RUnlock()
		ch <- true
	}()
	<-ch
	<-ch
	<-ch
	_ = y
}
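
Examples #8 and #9 are race-detector fixtures. #9 races because both readers write the shared y while holding only read locks; RLock deliberately lets readers run concurrently, so the two writes to y are unsynchronized. A sketch of the race-free variant, in which any goroutine that writes shared state takes the write lock:

package main

import "sync"

func main() {
	var mu sync.RWMutex
	var x, y int64 = 0, 1
	var wg sync.WaitGroup
	wg.Add(3)
	go func() {
		defer wg.Done()
		mu.Lock()
		x = 2
		mu.Unlock()
	}()
	for i := int64(1); i <= 2; i++ {
		go func(d int64) {
			defer wg.Done()
			mu.Lock() // this goroutine writes y, so it needs the write lock
			y = x + d
			mu.Unlock()
		}(i)
	}
	wg.Wait()
	_ = y
}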
Example #10
func (lm *lockManager) UnlockPolicy(lock *sync.RWMutex, lockType bool) {
	if lockType == exclusive {
		lock.Unlock()
	} else {
		lock.RUnlock()
	}
}
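
UnlockPolicy shows only the release side. A sketch of what the matching acquire side might look like; lockPolicy, the standalone functions, and the exclusive constant are assumptions modeled on the example, not the project's actual API:

package main

import "sync"

const exclusive = true

// lockPolicy is a hypothetical acquire-side counterpart to UnlockPolicy.
func lockPolicy(lock *sync.RWMutex, lockType bool) {
	if lockType == exclusive {
		lock.Lock()
	} else {
		lock.RLock()
	}
}

func unlockPolicy(lock *sync.RWMutex, lockType bool) {
	if lockType == exclusive {
		lock.Unlock()
	} else {
		lock.RUnlock()
	}
}

func main() {
	var mu sync.RWMutex
	lockPolicy(&mu, exclusive)
	unlockPolicy(&mu, exclusive)
	lockPolicy(&mu, !exclusive)
	unlockPolicy(&mu, !exclusive)
}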
Example #11
func BenchmarkRWMutexRLock(b *testing.B) {
	var l sync.RWMutex
	for i := 0; i < b.N; i++ {
		l.RLock()
		l.RUnlock()
	}
}
Example #12
func compileVariableNode(node *parse.VariableNode, dotType reflect.Type, args []parse.Node, finalType reflect.Type) (cmd command, retType reflect.Type) {
	var mu sync.RWMutex
	type key struct {
		dotType, finalType reflect.Type
	}
	cache := make(map[key]command)

	name := node.Ident[0]
	cmd = func(s state, dot, final interface{}) interface{} {
		value := s.varValue(name)
		if len(node.Ident) == 1 {
			return value.Interface()
		}

		if dotType == nil {
			dotType = reflect.ValueOf(dot).Type()
		}
		if finalType == nil && final != nil {
			finalType = reflect.ValueOf(final).Type()
		}
		k := key{dotType, finalType}

		mu.RLock()
		f, exist := cache[k]
		mu.RUnlock()
		if !exist {
			f, _ = compileFieldChain(node.Ident[1:], dotType, args, finalType)
			mu.Lock()
			cache[k] = f
			mu.Unlock()
		}
		return f(s, dot, value)
	}
	return
}
Example #13
// Returns an Action that runs the original Action when there is no cached value.
// The cached value is unset after the given ttl (time to live) duration.
// A ttl of zero or less caches the value permanently.
func (a Action) cache(ttl time.Duration) Action {
	var data map[string]interface{}
	lock := sync.RWMutex{}
	return func(r *http.Request) (map[string]interface{}, error) {
		lock.RLock()
		if data != nil {
			lock.RUnlock()
			return data, nil
		}
		lock.RUnlock()

		lock.Lock()
		defer lock.Unlock()
		var err error
		data, err = a(r)
		if data != nil {
			if ttl > 0 {
				time.AfterFunc(ttl, func() {
					lock.Lock()
					data = nil
					lock.Unlock()
				})
			}
		}
		return data, err
	}
}
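
Example #13 has a window between the RUnlock and the Lock in which another goroutine may already have filled data; the second goroutine then runs a(r) again and overwrites the cached value. A common refinement is to re-check under the write lock. A self-contained sketch of that shape (the cached type and its fields are invented; the ttl handling is omitted):

package main

import (
	"fmt"
	"sync"
)

type cached struct {
	mu   sync.RWMutex
	data map[string]interface{}
	fill func() (map[string]interface{}, error)
}

func (c *cached) get() (map[string]interface{}, error) {
	c.mu.RLock()
	if c.data != nil {
		defer c.mu.RUnlock()
		return c.data, nil
	}
	c.mu.RUnlock()

	c.mu.Lock()
	defer c.mu.Unlock()
	if c.data != nil { // re-check: another goroutine may have filled it
		return c.data, nil
	}
	var err error
	c.data, err = c.fill()
	return c.data, err
}

func main() {
	c := &cached{fill: func() (map[string]interface{}, error) {
		return map[string]interface{}{"hello": "world"}, nil
	}}
	m, _ := c.get()
	fmt.Println(m["hello"])
}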
Example #14
func compileFieldDynamic(name string, args []parse.Node) lookupFn {
	type key struct {
		valueType reflect.Type
		finalType reflect.Type
	}

	var m sync.RWMutex
	cache := make(map[key]lookupFn)
	return func(s state, value reflect.Value, final interface{}) reflect.Value {
		valueType := value.Elem().Type()
		var finalType reflect.Type
		if v := reflect.ValueOf(final); v.IsValid() {
			finalType = v.Type()
		}

		k := key{valueType, finalType}
		m.RLock()
		f, exist := cache[k]
		m.RUnlock()
		if !exist {
			f, _ = compileField(valueType, name, args, finalType)
			m.Lock()
			cache[k] = f
			m.Unlock()
		}
		return f(s, value.Elem(), final)
	}
}
Example #15
func main() {
	Case := Init(6)
	fmt.Printf("DEADLOCK\n")

	c := make(chan int)
	var mu sync.RWMutex
	done := make(chan bool)
	go func() {
		time.Sleep(sleep3[Case][0])
		mu.RLock()
		c <- 1
		mu.RUnlock()
		done <- true
	}()
	go func() {
		time.Sleep(sleep3[Case][1])
		mu.RLock()
		<-c
		mu.RUnlock()
		done <- true
	}()
	time.Sleep(sleep3[Case][2])
	mu.Lock()
	mu.Unlock()
	<-done
	<-done
}
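
Examples #15 and #17 are deadlock fixtures: the first reader blocks on c <- 1 while still holding its read lock, the main goroutine's mu.Lock() then queues as a writer, and a queued writer keeps the second reader's mu.RLock() from being granted, so the value on c is never received. The usual remedy is not to block on channel operations while a read lock is held. A minimal sketch:

package main

import "sync"

func main() {
	c := make(chan int)
	var mu sync.RWMutex
	done := make(chan bool)
	go func() {
		mu.RLock()
		// ... read shared state here ...
		mu.RUnlock()
		c <- 1 // block on the channel only after the read lock is released
		done <- true
	}()
	go func() {
		mu.RLock()
		mu.RUnlock()
		<-c
		done <- true
	}()
	mu.Lock()
	mu.Unlock()
	<-done
	<-done
}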
Example #16
//ReadFile reads the named file and returns its lines joined by commas.
func (helper *FileHelper) ReadFile(filename string) (string, error) {
	lock := new(sync.RWMutex)
	lock.RLock()

	var content string
	file, err := os.Open(filename)

	if err != nil {
		fmt.Println("file open error:", err)
		return content, err
	}

	reader := bufio.NewReader(file)
	for {
		line, _, err := reader.ReadLine()
		if err != nil {
			break
		}

		content += string(line) + ","
	}

	content = strings.TrimRight(content, ",")
	file.Close()
	lock.RUnlock()

	fmt.Println(content)
	return content, err
}
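
The lock in example #16 is created inside ReadFile, so each call locks a fresh, private mutex and nothing is actually serialized against writers. For the RWMutex to do any work it has to be shared, typically as a field of FileHelper. A sketch under that assumption; the struct definition, the field name, and the simplified file handling are all invented here:

package main

import (
	"fmt"
	"os"
	"strings"
	"sync"
)

// FileHelper here is a stand-in, not the original type.
type FileHelper struct {
	mu sync.RWMutex
}

func (helper *FileHelper) ReadFile(filename string) (string, error) {
	helper.mu.RLock() // shared lock guards against a writer elsewhere
	defer helper.mu.RUnlock()

	b, err := os.ReadFile(filename)
	if err != nil {
		fmt.Println("file open error:", err)
		return "", err
	}
	lines := strings.Split(strings.TrimRight(string(b), "\n"), "\n")
	return strings.Join(lines, ","), nil
}

func main() {
	var h FileHelper
	if content, err := h.ReadFile("example.txt"); err == nil {
		fmt.Println(content)
	}
}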
Example #17
func main() {
	Case := Init(6)
	fmt.Printf("DEADLOCK\n")

	c := make(chan int)
	var mu sync.RWMutex
	done := make(chan bool)
	go func() {
		time.Sleep(sleep3[Case][0])
		mu.RLock()
		c <- 1
		mu.RUnlock()
		done <- true
	}()
	go func() {
		time.Sleep(sleep3[Case][1])
		mu.RLock()
	loop:
		for {
			select {
			case <-c:
				break loop
			case <-time.After(time.Millisecond):
			}
		}
		mu.RUnlock()
		done <- true
	}()
	time.Sleep(sleep3[Case][2])
	mu.Lock()
	mu.Unlock()
	<-done
	<-done
}
Example #18
// verifies that the cacheWatcher.process goroutine is properly cleaned up even if
// writes to the cacheWatcher.result channel are blocked.
func TestCacheWatcherCleanupNotBlockedByResult(t *testing.T) {
	var lock sync.RWMutex
	count := 0
	filter := func(string, labels.Set, fields.Set) bool { return true }
	forget := func(bool) {
		lock.Lock()
		defer lock.Unlock()
		count++
	}
	initEvents := []watchCacheEvent{
		{Object: &api.Pod{}},
		{Object: &api.Pod{}},
	}
	// set the size of the w.result buffer to 0, so that writes to
	// w.result are blocked.
	w := newCacheWatcher(0, 0, initEvents, filter, forget)
	w.Stop()
	if err := wait.PollImmediate(1*time.Second, 5*time.Second, func() (bool, error) {
		lock.RLock()
		defer lock.RUnlock()
		return count == 2, nil
	}); err != nil {
		t.Fatalf("expected forget() to be called twice, because sendWatchCacheEvent should not be blocked by the result channel: %v", err)
	}
}
Example #19
// Serve starts an endless loop that reads FTP commands from the client and
// responds appropriately. terminated is a channel that will receive a true
// message when the connection closes. This loop will be running inside a
// goroutine, so use this channel to be notified when the connection can be
// cleaned up.
func (ftpConn *ftpConn) Serve() {
	defer func() {
		if closer, ok := ftpConn.driver.(io.Closer); ok {
			closer.Close()
		}
	}()

	ftpConn.logger.Printf("Connection Established (%s)", ftpConn.conn.RemoteAddr())
	// send welcome
	ftpConn.writeMessage(220, ftpConn.serverName)
	// read commands

	var readMutex sync.RWMutex

	cmdCh := make(chan *BoundCommand, 0)

	go func() {
		defer func() {
			if r := recover(); r != nil {
				ftpConn.logger.Printf("Recovered in ftpConn Serve: %s", r)
			}
			ftpConn.Close()
			close(cmdCh)
		}()

		for {
			readMutex.RLock()
			line, err := ftpConn.controlReader.ReadString('\n')
			readMutex.RUnlock()
			if err != nil {
				return
			} else {
				cmdObj := ftpConn.receiveLine(line)
				if cmdObj != nil {
					if !cmdObj.CmdObj.Async() {
						readMutex.Lock()
					}
					select {
					case cmdCh <- cmdObj:
						continue
					case <-time.After(10 * time.Second):
						return
					}
				}

			}
		}
	}()

	for cmd := range cmdCh {
		cmd.CmdObj.Execute(ftpConn, cmd.Param)

		if !cmd.CmdObj.Async() {
			readMutex.Unlock()
		}
	}

	ftpConn.logger.Print("Connection Terminated")
}
Example #20
func BenchmarkConcurrentRWMutex(b *testing.B) {
	var mu sync.RWMutex
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			mu.RLock()
			mu.RUnlock()
		}
	})
}
Example #21
func (con FeedUpdateNotificator) Handler(c context.Context) http.HandlerFunc {
	var mutex sync.RWMutex

	receivers := make(map[chan readeef.Feed]bool)

	go func() {
		for {
			select {
			case feed := <-con.updateFeed:
				mutex.RLock()

				readeef.Debug.Printf("Feed %s updated. Notifying %d receivers.", feed.Link, len(receivers))
				for receiver := range receivers {
					receiver <- feed
				}

				mutex.RUnlock()
			}
		}
	}()

	return func(w http.ResponseWriter, r *http.Request) {
		var err error

		receiver := make(chan readeef.Feed)

		mutex.Lock()
		receivers[receiver] = true
		mutex.Unlock()
		defer func() {
			mutex.Lock()
			delete(receivers, receiver)
			mutex.Unlock()
		}()

		f := <-receiver
		readeef.Debug.Println("Feed " + f.Link + " updated")

		resp := map[string]interface{}{"Feed": feed{
			Id: f.Id, Title: f.Title, Description: f.Description,
			Link: f.Link, Image: f.Image,
		}}

		var b []byte
		if err == nil {
			b, err = json.Marshal(resp)
		}
		if err != nil {
			webfw.GetLogger(c).Print(err)

			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		w.Write(b)
	}
}
Example #22
File: cache.go Project: pastebt/go_play
func main() {
	m := new(sync.RWMutex)
	println("new RWMutex")
	m.RLock()
	println("got RLock")
	m.RUnlock()
	m.Lock()
	println("Got Lock")
}
Example #23
// discoverAndPollChromecasts runs an infinite loop, discovering and polling
// chromecast devices in the local network for whether they are currently
// playing. The first discovered device is used.
func discoverAndPollChromecasts() {
	var chromecastsMu sync.RWMutex
	chromecasts := make(map[string]chan bool)
	entriesCh := make(chan *mdns.ServiceEntry, 5)

	go mdnsLookup(entriesCh)
	for {
		select {
		case entry := <-entriesCh:
			if !strings.Contains(entry.Name, castService) {
				continue
			}

			var deviceType chromecastDevice
			for _, field := range entry.InfoFields {
				if !strings.HasPrefix(field, "md=") {
					continue
				}
				if field == "md=Chromecast" {
					deviceType = chromecast
				} else if field == "md=Chromecast Audio" {
					deviceType = chromecastAudio
				}
			}
			hostport := fmt.Sprintf("%s:%d", entry.Addr, entry.Port)
			chromecastsMu.RLock()
			_, exists := chromecasts[hostport]
			chromecastsMu.RUnlock()
			if exists {
				continue
			}
			fmt.Printf("Found new chromecast at %q: %+v\n", hostport, entry)
			done := make(chan bool)
			chromecastsMu.Lock()
			chromecasts[hostport] = done
			chromecastsMu.Unlock()
			go func(deviceType chromecastDevice, done chan bool, hostport string) {
				err := pollChromecast(deviceType, done, hostport)
				if err != nil {
					log.Printf("Error polling chromecast %s:%d: %v\n", entry.Addr, entry.Port, err)
				}
				chromecastsMu.Lock()
				delete(chromecasts, hostport)
				chromecastsMu.Unlock()
			}(deviceType, done, hostport)
			stateMu.Lock()
			state.chromecastPlaying = false
			stateMu.Unlock()
			stateChanged.Broadcast()

		case <-time.After(10 * time.Second):
			log.Printf("Starting new MDNS lookup\n")
			go mdnsLookup(entriesCh)
		}
	}
}
Example #24
func BenchmarkStreamOp_Exec(b *testing.B) {
	ctx := context.Background()
	o := NewStreamOp(ctx)
	N := b.N

	chanSize := func() int {
		if N == 1 {
			return N
		}
		return int(float64(0.5) * float64(N))
	}()

	in := make(chan interface{}, chanSize)
	o.SetInput(in)
	go func() {
		for i := 0; i < N; i++ {
			in <- []string{
				testutil.GenWord(),
				testutil.GenWord(),
				testutil.GenWord(),
			}
		}
		close(in)
	}()

	counter := 0
	expected := N * 3
	var m sync.RWMutex

	// process output
	done := make(chan struct{})
	go func() {
		defer close(done)
		for range o.GetOutput() {
			m.Lock()
			counter++
			m.Unlock()
		}
	}()

	if err := o.Exec(); err != nil {
		b.Fatal("Error during execution:", err)
	}

	select {
	case <-done:
	case <-time.After(time.Second * 60):
		b.Fatal("Took too long")
	}
	m.RLock()
	b.Logf("Input %d, counted %d", N, counter)
	if counter != expected {
		b.Fatalf("Expected %d items processed,  got %d", expected, counter)
	}
	m.RUnlock()
}
Example #25
File: block_test.go Project: twmb/dash
func BenchmarkRWMutexR(b *testing.B) {
	b.SetParallelism(10)
	var mtx sync.RWMutex
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			mtx.RLock()
			mtx.RUnlock()
		}
	})
}
Example #26
func (p *genericPacketTransport) acceptConnections(clientMap map[string]*genericPacketClient,
	clientMapLock sync.RWMutex) {

	log.Println("Started accepting clients")

	var pkt [65535]byte
	var n int
	var err error
	var addr net.Addr

	for {
		n, addr, err = p.conn.ReadFrom(pkt[:])
		if err != nil {
			log.Printf("Error reading new packet: %s\n")
		}
		log.Printf("Got packet of length %d\n", n)

		clientMapLock.RLock()
		client, found := clientMap[addr.String()]
		clientMapLock.RUnlock()

		if !found {
			log.Printf("Got new client: %s\n", addr)

			// We create a new client that will proxy all writes to this
			// transport's underlying send channel.
			recv_ch := make(chan []byte)
			send_ch := make(chan []byte)

			// When the client is closed, it needs to remove itself from the map.
			onClose := func() {
				clientMapLock.Lock()
				delete(clientMap, addr.String())
				clientMapLock.Unlock()
			}

			client = &genericPacketClient{
				send_ch, recv_ch,
				nil, addr,
				"host", p.network,
				onClose,
			}
			go client.startAsServerConn(p.send_ch)

			clientMapLock.Lock()
			clientMap[addr.String()] = client
			clientMapLock.Unlock()

			p.accept_ch <- client
		}

		client.RecvChannel() <- pkt[0:n]
	}
}
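
Example #26 takes clientMapLock by value, so the mutex it locks is a copy rather than the one the rest of the transport uses (go vet reports this as passing a lock by value); the accept path and the onClose callback end up synchronizing on different locks. A self-contained sketch of the pattern with the lock passed by pointer; the map contents and names are invented:

package main

import (
	"fmt"
	"sync"
)

// lookup locks the caller's mutex, not a private copy, because the
// RWMutex is passed by pointer.
func lookup(clients map[string]string, mu *sync.RWMutex, addr string) (string, bool) {
	mu.RLock()
	defer mu.RUnlock()
	c, ok := clients[addr]
	return c, ok
}

func main() {
	clients := map[string]string{"10.0.0.1:9000": "client-a"}
	var mu sync.RWMutex
	fmt.Println(lookup(clients, &mu, "10.0.0.1:9000"))
}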
Example #27
func hasVisitedPage(pms []*PageMap, m *sync.RWMutex, u *url.URL) bool {
	m.RLock()
	defer m.RUnlock()

	for _, pm := range pms {
		if pm.URL.String() == u.String() {
			return true
		}
	}
	return false
}
Example #28
func BenchmarkMapGet(b *testing.B) {
	b.StopTimer()
	cache := map[string]string{"foo": "bar"}
	mu := sync.RWMutex{}
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		mu.RLock()
		_, _ = cache["foo"]
		mu.RUnlock()
	}
}
Example #29
func ConcurrentInterpret(mutex *sync.RWMutex, actives map[string]*LabelNode, rule []string) bool {
	mutex.RLock()
	for i := range rule {
		if actives[rule[i]] == nil {
			mutex.RUnlock()
			return false
		}
	}
	mutex.RUnlock()
	return true

}
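
ConcurrentInterpret has to remember to RUnlock on both the early-return and the fall-through path; deferring the unlock keeps the two paths from drifting apart. A sketch of the same shape, with LabelNode stubbed out so it runs standalone:

package main

import (
	"fmt"
	"sync"
)

// LabelNode is stubbed here; the real type comes from the example's project.
type LabelNode struct{ name string }

func ConcurrentInterpret(mutex *sync.RWMutex, actives map[string]*LabelNode, rule []string) bool {
	mutex.RLock()
	defer mutex.RUnlock() // covers the early return and the normal return
	for i := range rule {
		if actives[rule[i]] == nil {
			return false
		}
	}
	return true
}

func main() {
	var mu sync.RWMutex
	actives := map[string]*LabelNode{"a": {name: "a"}}
	fmt.Println(ConcurrentInterpret(&mu, actives, []string{"a"}))      // true
	fmt.Println(ConcurrentInterpret(&mu, actives, []string{"a", "b"})) // false
}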
Example #30
func roLock(mu *sync.RWMutex) chan<- bool {
	unlock := make(chan bool)
	mu.RLock()

	go func() {
		<-unlock

		mu.RUnlock()
	}()

	return unlock
}
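
roLock hands the caller the send side of a channel as a handle: the read lock stays held until the caller sends on (or closes) that channel, and the helper goroutine then releases it. A short usage sketch; roLock is repeated from example #30 so the snippet runs standalone, and the shared map and variable names are invented:

package main

import (
	"fmt"
	"sync"
)

// roLock as in example #30, repeated here for completeness.
func roLock(mu *sync.RWMutex) chan<- bool {
	unlock := make(chan bool)
	mu.RLock()

	go func() {
		<-unlock
		mu.RUnlock()
	}()

	return unlock
}

func main() {
	var mu sync.RWMutex
	shared := map[string]int{"x": 1}

	unlock := roLock(&mu)
	fmt.Println(shared["x"]) // read while the read lock is held
	unlock <- true           // hand the lock back; the goroutine calls RUnlock
}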