func TestStream_Open_WithOp(t *testing.T) {
	src := newStrSrc([]string{"HELLO", "WORLD", "HOW", "ARE", "YOU"})
	snk := newStrSink()
	op1 := api.UnFunc(func(ctx context.Context, data interface{}) interface{} {
		str := data.(string)
		return len(str)
	})

	var m sync.RWMutex
	runeCount := 0
	op2 := api.UnFunc(func(ctx context.Context, data interface{}) interface{} {
		length := data.(int)
		m.Lock()
		runeCount += length
		m.Unlock()
		return nil
	})

	strm := New().From(src).Transform(op1).Transform(op2).To(snk)
	select {
	case err := <-strm.Open():
		if err != nil {
			t.Fatal(err)
		}
	case <-time.After(50 * time.Millisecond):
		t.Fatal("Waited too long ...")
	}

	m.RLock()
	if runeCount != 19 {
		t.Fatal("Data not streaming; expected runeCount 19, got ", runeCount)
	}
	m.RUnlock()
}
func startJob(c *Context, w http.ResponseWriter, r *http.Request) {
	var mu sync.RWMutex
	mu.Lock()
	defer mu.Unlock()

	var job Job
	err := json.NewDecoder(r.Body).Decode(&job)
	if err != nil {
		w.Header().Set("Content-Type", "application/json; charset=UTF-8")
		w.WriteHeader(http.StatusInternalServerError)
		log.Printf("Error: %s", err)
		return
	}

	job.State = PENDING
	log.Printf("Submitting Job %s", job.Id)

	err = c.store.AddJob(&job)
	if err != nil {
		if serr, ok := err.(*StoreError); ok {
			if serr.Code == ErrExists {
				w.Header().Set("Content-Type", "application/json; charset=UTF-8")
				w.WriteHeader(http.StatusNotModified)
				return
			}
		}
		log.Printf("Could not store job %s: %s", job.Id, err)
		w.Header().Set("Content-Type", "application/json; charset=UTF-8")
		w.WriteHeader(http.StatusInternalServerError)
		return
	}

	w.Header().Set("Content-Type", "application/json; charset=UTF-8")
	w.WriteHeader(http.StatusOK)
}
func Task(f func(in Receiver, out Sender)) Sender {
	var running bool
	var l sync.RWMutex
	inR, inW := Pipe()
	outR, outW := Pipe()
	obj := NewServer()
	obj.OnAttach(Handler(func(msg *Message) error {
		msg.Ret.Send(&Message{Verb: Ack, Ret: inW})
		fmt.Printf("copying task output from %#v to %#v\n", outR, msg.Ret)
		defer fmt.Printf("(DONE) copying task output from %#v to %#v\n", outR, msg.Ret)
		Copy(msg.Ret, outR)
		return nil
	}))
	obj.OnStart(Handler(func(msg *Message) error {
		// Check and set "running" under a single write lock; checking under a
		// read lock and then upgrading leaves a window where two concurrent
		// start requests could both launch f.
		l.Lock()
		if running {
			l.Unlock()
			return fmt.Errorf("already running")
		}
		go f(inR, outW)
		running = true
		l.Unlock()
		msg.Ret.Send(&Message{Verb: Ack})
		return nil
	}))
	return obj
}
func TestSendReceiveWithWait(t *testing.T) {
	conn := NewConn()
	conn.ReceiveWait = true

	conn.Command("HGETALL", "person:1").ExpectMap(map[string]string{
		"name": "Mr. Johson",
		"age":  "42",
	})
	conn.Command("HGETALL", "person:2").ExpectMap(map[string]string{
		"name": "Ms. Jennifer",
		"age":  "28",
	})

	ids := []string{"1", "2"}
	for _, id := range ids {
		conn.Send("HGETALL", fmt.Sprintf("person:%s", id))
	}

	var people []Person
	var peopleLock sync.RWMutex
	go func() {
		for i := 0; i < len(ids); i++ {
			values, err := redis.Values(conn.Receive())
			if err != nil {
				// t.Fatal must not be called from a goroutine other than the
				// one running the test; report the error and bail out instead.
				t.Error(err)
				return
			}

			var person Person
			err = redis.ScanStruct(values, &person)
			if err != nil {
				t.Error(err)
				return
			}

			peopleLock.Lock()
			people = append(people, person)
			peopleLock.Unlock()
		}
	}()

	for i := 0; i < len(ids); i++ {
		conn.ReceiveNow <- true
	}
	time.Sleep(10 * time.Millisecond)

	peopleLock.RLock()
	defer peopleLock.RUnlock()
	if len(people) != 2 {
		t.Fatalf("Wrong number of people. Expected '2' and got '%d'", len(people))
	}
	if people[0].Name != "Mr. Johson" || people[1].Name != "Ms. Jennifer" {
		t.Error("People name order is wrong")
	}
	if people[0].Age != 42 || people[1].Age != 28 {
		t.Error("People age order is wrong")
	}
}
func compileFieldDynamic(name string, args []parse.Node) lookupFn {
	type key struct {
		valueType reflect.Type
		finalType reflect.Type
	}
	var m sync.RWMutex
	cache := make(map[key]lookupFn)
	return func(s state, value reflect.Value, final interface{}) reflect.Value {
		valueType := value.Elem().Type()
		var finalType reflect.Type
		if v := reflect.ValueOf(final); v.IsValid() {
			finalType = v.Type()
		}
		k := key{valueType, finalType}
		m.RLock()
		f, exist := cache[k]
		m.RUnlock()
		if !exist {
			f, _ = compileField(valueType, name, args, finalType)
			m.Lock()
			cache[k] = f
			m.Unlock()
		}
		return f(s, value.Elem(), final)
	}
}
// Depending on the sleep3[Case] timings this deadlocks: the first reader
// blocks on the channel send while holding the read lock, the main
// goroutine's pending Lock then prevents the second goroutine from acquiring
// a new read lock, so the send on c is never received.
func main() {
	Case := Init(6)
	fmt.Printf("DEADLOCK\n")
	c := make(chan int)
	var mu sync.RWMutex
	done := make(chan bool)

	go func() {
		time.Sleep(sleep3[Case][0])
		mu.RLock()
		c <- 1 // blocks while holding the read lock
		mu.RUnlock()
		done <- true
	}()

	go func() {
		time.Sleep(sleep3[Case][1])
		mu.RLock()
		<-c
		mu.RUnlock()
		done <- true
	}()

	time.Sleep(sleep3[Case][2])
	mu.Lock()
	mu.Unlock()
	<-done
	<-done
}
func compileVariableNode(node *parse.VariableNode, dotType reflect.Type, args []parse.Node, finalType reflect.Type) (cmd command, retType reflect.Type) {
	var mu sync.RWMutex
	type key struct {
		dotType, finalType reflect.Type
	}
	cache := make(map[key]command)
	name := node.Ident[0]
	cmd = func(s state, dot, final interface{}) interface{} {
		value := s.varValue(name)
		if len(node.Ident) == 1 {
			return value.Interface()
		}
		if dotType == nil {
			dotType = reflect.ValueOf(dot).Type()
		}
		if finalType == nil && final != nil {
			finalType = reflect.ValueOf(final).Type()
		}
		k := key{dotType, finalType}
		mu.RLock()
		f, exist := cache[k]
		mu.RUnlock()
		if !exist {
			f, _ = compileFieldChain(node.Ident[1:], dotType, args, finalType)
			mu.Lock()
			cache[k] = f
			mu.Unlock()
		}
		return f(s, dot, value)
	}
	return
}
// GetCache checks whether the nodes in the lookup table have the cache and,
// if found, gets the records.
func GetCache(background bool, c *thread.Cache) bool {
	const searchDepth = 5 // Search node size
	ns := manager.NodesForGet(c.Datfile, searchDepth)
	found := false
	var wg sync.WaitGroup
	var mutex sync.RWMutex
	dm := NewManger(c)
	for _, n := range ns {
		wg.Add(1)
		go func(n *node.Node) {
			defer wg.Done()
			if !headWithRange(n, c, dm) {
				return
			}
			if getWithRange(n, c, dm) {
				mutex.Lock()
				found = true
				mutex.Unlock()
				return
			}
		}(n)
	}
	if background {
		bg(c, &wg)
	} else {
		wg.Wait()
	}
	mutex.RLock()
	defer mutex.RUnlock()
	return found
}
func (lm *lockManager) UnlockPolicy(lock *sync.RWMutex, lockType bool) {
	if lockType == exclusive {
		lock.Unlock()
	} else {
		lock.RUnlock()
	}
}
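// A minimal usage sketch, not part of the code above: it assumes a matching
// LockPolicy counterpart and a withLock helper (both hypothetical), so callers
// acquire and release with the same exclusive/shared flag.
func (lm *lockManager) LockPolicy(lock *sync.RWMutex, lockType bool) {
	if lockType == exclusive {
		lock.Lock()
	} else {
		lock.RLock()
	}
}

func (lm *lockManager) withLock(lock *sync.RWMutex, lockType bool, fn func()) {
	lm.LockPolicy(lock, lockType)
	defer lm.UnlockPolicy(lock, lockType)
	fn()
}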
func TestNoRaceRWMutexMultipleReaders(t *testing.T) {
	var mu sync.RWMutex
	x := int64(0)
	ch := make(chan bool, 3)
	go func() {
		mu.Lock()
		defer mu.Unlock()
		x = 2
		ch <- true
	}()
	go func() {
		mu.RLock()
		y := x + 1
		_ = y
		mu.RUnlock()
		ch <- true
	}()
	go func() {
		mu.RLock()
		y := x + 2
		_ = y
		mu.RUnlock()
		ch <- true
	}()
	<-ch
	<-ch
	<-ch
}
// Unlike the test above, both readers write to the shared y while holding
// only the read lock, so this is an intentional data race.
func TestRaceRWMutexMultipleReaders(t *testing.T) {
	var mu sync.RWMutex
	var x, y int64 = 0, 1
	ch := make(chan bool, 3)
	go func() {
		mu.Lock()
		defer mu.Unlock()
		x = 2
		ch <- true
	}()
	go func() {
		mu.RLock()
		y = x + 1
		mu.RUnlock()
		ch <- true
	}()
	go func() {
		mu.RLock()
		y = x + 2
		mu.RUnlock()
		ch <- true
	}()
	<-ch
	<-ch
	<-ch
	_ = y
}
// verifies the cacheWatcher.process goroutine is properly cleaned up even if
// writes to the cacheWatcher.result channel are blocked.
func TestCacheWatcherCleanupNotBlockedByResult(t *testing.T) {
	var lock sync.RWMutex
	count := 0
	filter := func(string, labels.Set, fields.Set) bool { return true }
	forget := func(bool) {
		lock.Lock()
		defer lock.Unlock()
		count++
	}
	initEvents := []watchCacheEvent{
		{Object: &api.Pod{}},
		{Object: &api.Pod{}},
	}
	// set the size of the buffer of w.result to 0, so that writes to
	// w.result are blocked.
	w := newCacheWatcher(0, 0, initEvents, filter, forget)
	w.Stop()
	if err := wait.PollImmediate(1*time.Second, 5*time.Second, func() (bool, error) {
		lock.RLock()
		defer lock.RUnlock()
		return count == 2, nil
	}); err != nil {
		t.Fatalf("expected forget() to be called twice, because sendWatchCacheEvent should not be blocked by the result channel: %v", err)
	}
}
// A variant of the scenario above: the second reader polls the channel with a
// timeout, but the same ordering (a reader blocked on the send while a writer
// is pending) can still deadlock for some Case timings.
func main() {
	Case := Init(6)
	fmt.Printf("DEADLOCK\n")
	c := make(chan int)
	var mu sync.RWMutex
	done := make(chan bool)

	go func() {
		time.Sleep(sleep3[Case][0])
		mu.RLock()
		c <- 1 // blocks while holding the read lock
		mu.RUnlock()
		done <- true
	}()

	go func() {
		time.Sleep(sleep3[Case][1])
		mu.RLock()
	loop:
		for {
			select {
			case <-c:
				break loop
			case <-time.After(time.Millisecond):
			}
		}
		mu.RUnlock()
		done <- true
	}()

	time.Sleep(sleep3[Case][2])
	mu.Lock()
	mu.Unlock()
	<-done
	<-done
}
func doRunCommand(c *cli.Context) {
	config := ParseConfigOrDie(c.GlobalString("config"))
	client := github.NewClient(config.GitHubAPIToken)

	// Create and start monitoring queues.
	lock := sync.RWMutex{}
	queues := createQueues(client, config, &lock)
	stopChan := monitorQueues(queues)

	// Graceful stop on SIGTERM and SIGINT.
	s := make(chan os.Signal, 64)
	signal.Notify(s, syscall.SIGTERM, syscall.SIGINT)

	// Compute next tick time for the synchronization event.
	nextTickTime := resetNextTickTime(config.PeriodicSync)

	for {
		select {
		case <-stopChan:
			log.Debug("All queues exited")
			return
		case sig := <-s:
			log.WithField("signal", sig).Debug("received signal")
			for _, q := range queues {
				q.Consumer.Stop()
			}
		case <-time.After(nextTickTime):
			lock.Lock() // Take a write lock, which pauses all queue processing.
			log.Infof("Starting periodic sync")
			runPeriodicSync(client, config)
			nextTickTime = resetNextTickTime(config.PeriodicSync)
			lock.Unlock()
		}
	}
}
// BenchmarkMutableCopy benchmarks how long it takes to copy a mutable treap
// to another one when it contains 'numTicketKeys' entries.
func BenchmarkMutableCopy(b *testing.B) {
	// Populate mutable treap with a bunch of key/value pairs.
	testTreap := NewMutable()
	ticketKeys := genTicketKeys()
	for j := 0; j < len(ticketKeys); j++ {
		hashBytes := ticketKeys[j]
		value := &Value{Height: uint32(j)}
		testTreap.Put(hashBytes, value)
	}

	b.ReportAllocs()
	b.ResetTimer()

	// Copying a mutable treap requires iterating all of the entries and
	// populating them into a new treap, all with a lock held for concurrency
	// safety.
	var mtx sync.RWMutex
	for i := 0; i < b.N; i++ {
		benchTreap := NewMutable()
		mtx.Lock()
		testTreap.ForEach(func(k Key, v *Value) bool {
			benchTreap.Put(k, v)
			return true
		})
		mtx.Unlock()
	}
}
func BenchmarkRWMutexLock(b *testing.B) {
	var l sync.RWMutex
	for i := 0; i < b.N; i++ {
		l.Lock()
		l.Unlock()
	}
}
// The mutex is taken by pointer: passing a sync.RWMutex by value copies the
// lock, so callers sharing the original lock would not actually be
// synchronized with this goroutine.
func (items RDBItems) watch(ctx context.Context, sess *r.Session, lock *sync.RWMutex, table r.Term) {
	go pkg.Retry(time.Second*15, time.Minute, func() error {
		changes, err := table.Changes().Run(sess)
		if err != nil {
			pkg.LogError(errors.New(err))
			return errors.New(err)
		}
		defer changes.Close()

		var update = map[string]*RdbSchema{}
		for changes.Next(&update) {
			newVal := update["new_val"]
			oldVal := update["old_val"]
			lock.Lock()
			if newVal == nil && oldVal != nil {
				delete(items, oldVal.ID)
			} else if newVal != nil && oldVal != nil {
				delete(items, oldVal.ID)
				items[newVal.ID] = newVal
			} else {
				items[newVal.ID] = newVal
			}
			lock.Unlock()
		}

		if changes.Err() != nil {
			err = errors.New(changes.Err())
			pkg.LogError(err)
			return err
		}
		return nil
	})
}
// Readers-Writers
func rwlockExample() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	// rand.Seed(time.Now().UnixNano())
	// rand.Seed(0)
	l := new(sync.RWMutex)
	wg := new(sync.WaitGroup)
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(i int) {
			r := rand.Intn(10)
			time.Sleep(50 * time.Millisecond)
			if r < 5 {
				// I am reader.
				fmt.Printf("Reader waiting %d\n", i)
				l.RLock()
				fmt.Printf("go reader %d\n", i)
				l.RUnlock()
				wg.Done()
			} else {
				// I am writer
				fmt.Printf("Writer waiting %d\n", i)
				l.Lock()
				fmt.Printf("go writer %d\n", i)
				time.Sleep(50 * time.Millisecond)
				l.Unlock()
				wg.Done()
			}
		}(i)
	}
	wg.Wait()
}
func BenchmarkRWMutexW1(b *testing.B) {
	var mtx sync.RWMutex
	for i := 0; i < b.N; i++ {
		mtx.Lock()
		mtx.Unlock()
	}
}
// Returns an Action that runs the original Action when there is no cached value.
// The cached value is unset after the given ttl (time to live) duration.
// A non-positive ttl will cache permanently.
func (a Action) cache(ttl time.Duration) Action {
	var data map[string]interface{}
	lock := sync.RWMutex{}

	return func(r *http.Request) (map[string]interface{}, error) {
		lock.RLock()
		if data != nil {
			lock.RUnlock()
			return data, nil
		}
		lock.RUnlock()

		lock.Lock()
		defer lock.Unlock()

		// Re-check under the write lock: another request may have populated
		// the cache while we were waiting for it.
		if data != nil {
			return data, nil
		}

		var err error
		data, err = a(r)
		if data != nil {
			if ttl > 0 {
				time.AfterFunc(ttl, func() {
					lock.Lock()
					data = nil
					lock.Unlock()
				})
			}
		}
		return data, err
	}
}
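// A hypothetical usage sketch (statsAction and cachedStats are assumptions,
// not part of the code above): wrapping an Action with cache(5 * time.Minute)
// serves the first result for five minutes before recomputing it.
var statsAction Action = func(r *http.Request) (map[string]interface{}, error) {
	// stand-in for an expensive lookup
	return map[string]interface{}{"visits": 42}, nil
}

var cachedStats = statsAction.cache(5 * time.Minute)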
// Scrape reports scrape statistics for one or more files, using the HTTP format
func (h HTTPTracker) Scrape(files []data.FileRecord) []byte {
	// Response struct
	scrape := scrapeResponse{
		Files: make(map[string]scrapeFile),
	}

	// WaitGroup to wait for all scrape file entries to be generated
	var wg sync.WaitGroup
	wg.Add(len(files))

	// Mutex for safe locking on map writes
	var mutex sync.RWMutex

	// Iterate all files in parallel
	for _, f := range files {
		go func(f data.FileRecord, scrape *scrapeResponse, mutex *sync.RWMutex, wg *sync.WaitGroup) {
			// Generate scrapeFile struct
			fileInfo := scrapeFile{}
			var err error

			// Seeders count
			fileInfo.Complete, err = f.Seeders()
			if err != nil {
				log.Println(err.Error())
			}

			// Completion count
			fileInfo.Downloaded, err = f.Completed()
			if err != nil {
				log.Println(err.Error())
			}

			// Leechers count
			fileInfo.Incomplete, err = f.Leechers()
			if err != nil {
				log.Println(err.Error())
			}

			// Add hash and file info to map
			mutex.Lock()
			scrape.Files[f.InfoHash] = fileInfo
			mutex.Unlock()

			// Inform waitgroup that this file is ready
			wg.Done()
		}(f, &scrape, &mutex, &wg)
	}

	// Wait for all information to be generated
	wg.Wait()

	// Marshal struct into bencode
	buf := bytes.NewBuffer(make([]byte, 0))
	if err := bencode.Marshal(buf, scrape); err != nil {
		log.Println(err.Error())
		return h.Error(ErrScrapeFailure.Error())
	}

	return buf.Bytes()
}
func (mp *MysqlProvider) Pager(entity dal.QueryEntity) (dal.QueryPagerResult, error) {
	var qResult dal.QueryPagerResult
	if entity.ResultType != dal.QPager {
		entity.ResultType = dal.QPager
	}
	sqlText, values := mp.parseQuerySQL(entity)
	if mp.config.IsPrint {
		mp.PrintSQL(sqlText[0], values...)
		mp.PrintSQL(sqlText[1], values...)
	}

	var (
		errs []error
		mux  = new(sync.RWMutex)
		wg   = new(sync.WaitGroup)
	)
	wg.Add(2)

	// Run the row query and the count query concurrently; each goroutine
	// records errors and writes its part of the result under the mutex.
	go func(result *dal.QueryPagerResult, errs *[]error) {
		defer wg.Done()
		rData := make([]map[string]interface{}, 0)
		data, err := mp.queryData(sqlText[0], values...)
		mux.Lock()
		defer mux.Unlock()
		if err != nil {
			*errs = append(*errs, err)
			return
		}
		if len(data) > 0 {
			err = utils.NewDecoder(data).Decode(&rData)
			if err != nil {
				*errs = append(*errs, err)
				return
			}
		}
		(*result).Rows = rData
	}(&qResult, &errs)

	go func(result *dal.QueryPagerResult, errs *[]error) {
		defer wg.Done()
		var count int64
		row := GDB.QueryRow(sqlText[1], values...)
		err := row.Scan(&count)
		mux.Lock()
		defer mux.Unlock()
		if err != nil {
			*errs = append(*errs, err)
			return
		}
		(*result).Total = count
	}(&qResult, &errs)

	wg.Wait()
	if len(errs) > 0 {
		return qResult, mp.Error(errs[0].Error())
	}
	return qResult, nil
}
func (fs Filesystem) checksyncstatus(path string) error {
	path = strings.TrimPrefix(path, "/home/minio")
	path = "mnt/minio/data" + path

	var lock sync.RWMutex
	nosync := make(map[string]bool)

	kubeClient, err := client.NewInCluster()
	if err != nil {
		return fmt.Errorf("unable to create client")
	}
	pclient := kubeClient.Pods("default")
	selector, _ := labels.Parse("app=minio-sync")
	list, err := pclient.List(selector, nil)
	if err != nil {
		return fmt.Errorf("list pods failed")
	}
	for _, pod := range list.Items {
		fmt.Println(pod.Status.PodIP)
		if pod.Status.Phase == "Running" {
			nosync[pod.Status.PodIP] = false
		}
	}

	allsync := true
	var duration float64
	for duration = 1; duration < 60; duration++ {
		timeperiod := time.Duration(time.Second * time.Duration(duration))
		fmt.Println(timeperiod)
		time.Sleep(timeperiod)

		var wg sync.WaitGroup
		wg.Add(len(nosync))
		// The loop variable is named synced rather than sync to avoid
		// shadowing the sync package.
		for ip, synced := range nosync {
			go func(ip string, synced bool) {
				defer wg.Done()
				if synced {
					return
				}
				if doCurl("http://" + ip + ":3000/" + path) {
					lock.Lock()
					nosync[ip] = true
					lock.Unlock()
				} else {
					// allsync is written by several goroutines concurrently,
					// so it must be guarded by the same lock.
					lock.Lock()
					allsync = false
					lock.Unlock()
				}
			}(ip, synced)
		}
		wg.Wait()

		if allsync {
			break
		}
		allsync = true
	}

	for _, synced := range nosync {
		if !synced {
			return fmt.Errorf("sync failed: took too long")
		}
	}
	return nil
}
// Serve starts an endless loop that reads FTP commands from the client and
// responds appropriately. The loop runs inside a per-connection goroutine and
// returns once the connection closes, at which point the connection can be
// cleaned up.
func (ftpConn *ftpConn) Serve() {
	defer func() {
		if closer, ok := ftpConn.driver.(io.Closer); ok {
			closer.Close()
		}
	}()
	ftpConn.logger.Printf("Connection Established (%s)", ftpConn.conn.RemoteAddr())

	// send welcome
	ftpConn.writeMessage(220, ftpConn.serverName)

	// read commands
	var readMutex sync.RWMutex
	cmdCh := make(chan *BoundCommand)
	go func() {
		defer func() {
			if r := recover(); r != nil {
				ftpConn.logger.Printf("Recovered in ftpConn Serve: %s", r)
			}
			ftpConn.Close()
			close(cmdCh)
		}()

		for {
			readMutex.RLock()
			line, err := ftpConn.controlReader.ReadString('\n')
			readMutex.RUnlock()
			if err != nil {
				return
			}

			cmdObj := ftpConn.receiveLine(line)
			if cmdObj == nil {
				continue
			}
			// Synchronous commands take the write lock so no further lines
			// are read until the command finishes executing below.
			if !cmdObj.CmdObj.Async() {
				readMutex.Lock()
			}
			select {
			case cmdCh <- cmdObj:
				continue
			case <-time.After(10 * time.Second):
				return
			}
		}
	}()

	for cmd := range cmdCh {
		cmd.CmdObj.Execute(ftpConn, cmd.Param)
		if !cmd.CmdObj.Async() {
			readMutex.Unlock()
		}
	}
	ftpConn.logger.Print("Connection Terminated")
}
func (con FeedUpdateNotificator) Handler(c context.Context) http.HandlerFunc {
	var mutex sync.RWMutex
	receivers := make(map[chan readeef.Feed]bool)

	go func() {
		for {
			select {
			case feed := <-con.updateFeed:
				mutex.RLock()
				readeef.Debug.Printf("Feed %s updated. Notifying %d receivers.", feed.Link, len(receivers))
				for receiver := range receivers {
					receiver <- feed
				}
				mutex.RUnlock()
			}
		}
	}()

	return func(w http.ResponseWriter, r *http.Request) {
		var err error

		receiver := make(chan readeef.Feed)

		mutex.Lock()
		receivers[receiver] = true
		mutex.Unlock()

		defer func() {
			mutex.Lock()
			delete(receivers, receiver)
			mutex.Unlock()
		}()

		f := <-receiver
		readeef.Debug.Println("Feed " + f.Link + " updated")

		resp := map[string]interface{}{"Feed": feed{
			Id: f.Id, Title: f.Title, Description: f.Description,
			Link: f.Link, Image: f.Image,
		}}

		var b []byte
		if err == nil {
			b, err = json.Marshal(resp)
		}

		if err != nil {
			webfw.GetLogger(c).Print(err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		w.Write(b)
	}
}
// discoverAndPollChromecasts runs an infinite loop, discovering and polling
// chromecast devices in the local network for whether they are currently
// playing. The first discovered device is used.
func discoverAndPollChromecasts() {
	var chromecastsMu sync.RWMutex
	chromecasts := make(map[string]chan bool)

	entriesCh := make(chan *mdns.ServiceEntry, 5)
	go mdnsLookup(entriesCh)

	for {
		select {
		case entry := <-entriesCh:
			if !strings.Contains(entry.Name, castService) {
				continue
			}
			var deviceType chromecastDevice
			for _, field := range entry.InfoFields {
				if !strings.HasPrefix(field, "md=") {
					continue
				}
				if field == "md=Chromecast" {
					deviceType = chromecast
				} else if field == "md=Chromecast Audio" {
					deviceType = chromecastAudio
				}
			}
			hostport := fmt.Sprintf("%s:%d", entry.Addr, entry.Port)

			chromecastsMu.RLock()
			_, exists := chromecasts[hostport]
			chromecastsMu.RUnlock()
			if exists {
				continue
			}

			fmt.Printf("Found new chromecast at %q: %+v\n", hostport, entry)
			done := make(chan bool)
			chromecastsMu.Lock()
			chromecasts[hostport] = done
			chromecastsMu.Unlock()

			go func(deviceType chromecastDevice, done chan bool, hostport string) {
				err := pollChromecast(deviceType, done, hostport)
				if err != nil {
					log.Printf("Error polling chromecast %s:%d: %v\n", entry.Addr, entry.Port, err)
				}
				chromecastsMu.Lock()
				delete(chromecasts, hostport)
				chromecastsMu.Unlock()
			}(deviceType, done, hostport)

			stateMu.Lock()
			state.chromecastPlaying = false
			stateMu.Unlock()
			stateChanged.Broadcast()

		case <-time.After(10 * time.Second):
			log.Printf("Starting new MDNS lookup\n")
			go mdnsLookup(entriesCh)
		}
	}
}
func BenchmarkStreamOp_Exec(b *testing.B) {
	ctx := context.Background()
	o := NewStreamOp(ctx)
	N := b.N
	chanSize := func() int {
		if N == 1 {
			return N
		}
		return int(float64(0.5) * float64(N))
	}()
	in := make(chan interface{}, chanSize)
	o.SetInput(in)
	go func() {
		for i := 0; i < N; i++ {
			in <- []string{
				testutil.GenWord(),
				testutil.GenWord(),
				testutil.GenWord(),
			}
		}
		close(in)
	}()

	counter := 0
	expected := N * 3
	var m sync.RWMutex

	// process output
	done := make(chan struct{})
	go func() {
		defer close(done)
		for range o.GetOutput() {
			m.Lock()
			counter++
			m.Unlock()
		}
	}()

	if err := o.Exec(); err != nil {
		b.Fatal("Error during execution:", err)
	}

	select {
	case <-done:
	case <-time.After(time.Second * 60):
		b.Fatal("Took too long")
	}

	m.RLock()
	b.Logf("Input %d, counted %d", N, counter)
	if counter != expected {
		b.Fatalf("Expected %d items processed, got %d", expected, counter)
	}
	m.RUnlock()
}
func BenchmarkRWMutexW2(b *testing.B) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	var mtx sync.RWMutex
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			mtx.Lock()
			mtx.Unlock()
		}
	})
}
func BenchmarkRWMutexW(b *testing.B) {
	b.SetParallelism(10)
	var mtx sync.RWMutex
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			mtx.Lock()
			mtx.Unlock()
		}
	})
}
func getJob(c *Context, w http.ResponseWriter, r *http.Request) {
	var mu sync.RWMutex
	mu.Lock()
	defer mu.Unlock()

	vars := mux.Vars(r)
	jobId := vars["id"]

	job, err := c.store.GetJob(jobId)
	if err != nil {
		if serr, ok := err.(*StoreError); ok {
			if serr.Code == ErrNotFound {
				w.Header().Set("Content-Type", "application/json; charset=UTF-8")
				w.WriteHeader(http.StatusNotFound)
				return
			}
		}
		log.Printf("Error: %s", err)
		w.Header().Set("Content-Type", "application/json; charset=UTF-8")
		w.WriteHeader(http.StatusInternalServerError)
		return
	}

	// Write the status header before the body; calling WriteHeader after the
	// encoder has written to w would be a superfluous call.
	w.Header().Set("Content-Type", "application/json; charset=UTF-8")
	w.WriteHeader(http.StatusOK)
	if err := json.NewEncoder(w).Encode(job); err != nil {
		log.Printf("Error: %s", err)
		panic(err)
	}
}