func BenchmarkRWMutexR1(b *testing.B) {
    var mtx sync.RWMutex
    for i := 0; i < b.N; i++ {
        mtx.RLock()
        mtx.RUnlock()
    }
}
func BenchmarkRWMutexLock(b *testing.B) {
    var l sync.RWMutex
    for i := 0; i < b.N; i++ {
        l.Lock()
        l.Unlock()
    }
}
// Scrape reports scrape statistics for one or more files, using the HTTP tracker format
func (h HTTPTracker) Scrape(files []data.FileRecord) []byte {
    // Response struct
    scrape := scrapeResponse{
        Files: make(map[string]scrapeFile),
    }

    // WaitGroup to wait for all scrape file entries to be generated
    var wg sync.WaitGroup
    wg.Add(len(files))

    // Mutex for safe locking on map writes
    var mutex sync.RWMutex

    // Iterate all files in parallel
    for _, f := range files {
        go func(f data.FileRecord, scrape *scrapeResponse, mutex *sync.RWMutex, wg *sync.WaitGroup) {
            // Generate scrapeFile struct
            fileInfo := scrapeFile{}
            var err error

            // Seeders count
            fileInfo.Complete, err = f.Seeders()
            if err != nil {
                log.Println(err.Error())
            }

            // Completion count
            fileInfo.Downloaded, err = f.Completed()
            if err != nil {
                log.Println(err.Error())
            }

            // Leechers count
            fileInfo.Incomplete, err = f.Leechers()
            if err != nil {
                log.Println(err.Error())
            }

            // Add hash and file info to map
            mutex.Lock()
            scrape.Files[f.InfoHash] = fileInfo
            mutex.Unlock()

            // Inform waitgroup that this file is ready
            wg.Done()
        }(f, &scrape, &mutex, &wg)
    }

    // Wait for all information to be generated
    wg.Wait()

    // Marshal struct into bencode
    buf := bytes.NewBuffer(make([]byte, 0))
    if err := bencode.Marshal(buf, scrape); err != nil {
        log.Println(err.Error())
        return h.Error(ErrScrapeFailure.Error())
    }

    return buf.Bytes()
}
func New(wd string, ss Strategizer, gro ...GrabberOption) (*Grabber, error) {
    if wd == "" {
        return nil, MissingWorkDirError
    }
    mx := new(sync.RWMutex)
    g := &Grabber{
        wd:          wd,
        s:           ss,
        writeState:  mx,
        readState:   mx.RLocker(),
        qIn:         make(chan Segmenter, 100), // TODO(negz): Determine best buffer len.
        qOut:        make(chan Segmenter, 100),
        maxRetry:    3,
        doneMx:      new(sync.Mutex),
        pp:          make(chan bool),
        decoder:     yenc.NewDecoder, // TODO(negz): Detect encoding.
        fileCreator: createSegmentFile,
        grabT:       new(tomb.Tomb),
        enqueueT:    new(tomb.Tomb),
    }
    for _, o := range gro {
        if err := o(g); err != nil {
            return nil, err
        }
    }
    if g.name == "" {
        return nil, MissingNameError
    }
    g.hash = util.HashString(g.name)
    return g, nil
}
// BenchmarkMutableCopy benchmarks how long it takes to copy a mutable treap
// to another one when it contains 'numTicketKeys' entries.
func BenchmarkMutableCopy(b *testing.B) {
    // Populate mutable treap with a bunch of key/value pairs.
    testTreap := NewMutable()
    ticketKeys := genTicketKeys()
    for j := 0; j < len(ticketKeys); j++ {
        hashBytes := ticketKeys[j]
        value := &Value{Height: uint32(j)}
        testTreap.Put(hashBytes, value)
    }

    b.ReportAllocs()
    b.ResetTimer()

    // Copying a mutable treap requires iterating all of the entries and
    // populating them into a new treap, all with a lock held for concurrency
    // safety.
    var mtx sync.RWMutex
    for i := 0; i < b.N; i++ {
        benchTreap := NewMutable()
        mtx.Lock()
        testTreap.ForEach(func(k Key, v *Value) bool {
            benchTreap.Put(k, v)
            return true
        })
        mtx.Unlock()
    }
}
// The mutex is taken as a pointer so the caller and this watcher share a
// single lock; passing a sync.RWMutex by value would copy it and guard nothing.
func (items RDBItems) watch(ctx context.Context, sess *r.Session, lock *sync.RWMutex, table r.Term) {
    go pkg.Retry(time.Second*15, time.Minute, func() error {
        changes, err := table.Changes().Run(sess)
        if err != nil {
            pkg.LogError(errors.New(err))
            return errors.New(err)
        }
        defer changes.Close()

        var update = map[string]*RdbSchema{}
        for changes.Next(&update) {
            newVal := update["new_val"]
            oldVal := update["old_val"]
            lock.Lock()
            if newVal == nil && oldVal != nil {
                delete(items, oldVal.ID)
            } else if newVal != nil && oldVal != nil {
                delete(items, oldVal.ID)
                items[newVal.ID] = newVal
            } else {
                items[newVal.ID] = newVal
            }
            lock.Unlock()
        }

        if changes.Err() != nil {
            err = errors.New(changes.Err())
            pkg.LogError(err)
            return err
        }
        return nil
    })
}
func main() {
    var l *sync.RWMutex
    l = new(sync.RWMutex)
    // RLock must be held before RUnlock; unlocking an unheld RWMutex is a
    // fatal runtime error.
    l.RLock()
    fmt.Println("l")
    l.RUnlock()
}
// ReadFile reads a file and returns its contents joined into a comma-separated string.
func (helper *FileHelper) ReadFile(filename string) (string, error) {
    lock := new(sync.RWMutex)
    lock.RLock()
    var content string
    file, err := os.Open(filename)
    if err != nil {
        fmt.Println("file open error:", err)
        return content, err
    }
    reader := bufio.NewReader(file)
    for {
        line, _, err := reader.ReadLine()
        if err != nil {
            break
        }
        content += string(line) + ","
    }
    content = strings.TrimRight(content, ",")
    file.Close()
    lock.RUnlock()
    fmt.Println(content)
    return content, err
}
func doRunCommand(c *cli.Context) {
    config := ParseConfigOrDie(c.GlobalString("config"))
    client := github.NewClient(config.GitHubAPIToken)

    // Create and start monitoring queues.
    lock := sync.RWMutex{}
    queues := createQueues(client, config, &lock)
    stopChan := monitorQueues(queues)

    // Graceful stop on SIGTERM and SIGINT.
    s := make(chan os.Signal, 64)
    signal.Notify(s, syscall.SIGTERM, syscall.SIGINT)

    // Compute next tick time for the synchronization event.
    nextTickTime := resetNextTickTime(config.PeriodicSync)

    for {
        select {
        case <-stopChan:
            log.Debug("All queues exited")
            return
        case sig := <-s:
            log.WithField("signal", sig).Debug("received signal")
            for _, q := range queues {
                q.Consumer.Stop()
            }
        case <-time.After(nextTickTime):
            lock.Lock() // Take a write lock, which pauses all queue processing.
            log.Infof("Starting periodic sync")
            runPeriodicSync(client, config)
            nextTickTime = resetNextTickTime(config.PeriodicSync)
            lock.Unlock()
        }
    }
}
func (lm *lockManager) UnlockPolicy(lock *sync.RWMutex, lockType bool) {
    if lockType == exclusive {
        lock.Unlock()
    } else {
        lock.RUnlock()
    }
}
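// A hypothetical companion to UnlockPolicy above, added here for illustration
// and not part of the original source: a minimal sketch of how the matching
// acquire side might look, assuming the same `exclusive` constant selects
// between an exclusive write lock and a shared read lock.
func (lm *lockManager) lockPolicySketch(lock *sync.RWMutex, lockType bool) {
    if lockType == exclusive {
        lock.Lock() // exclusive: blocks all other readers and writers
    } else {
        lock.RLock() // shared: multiple readers may hold the lock at once
    }
}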
func startJob(c *Context, w http.ResponseWriter, r *http.Request) {
    var mu sync.RWMutex
    mu.Lock()
    defer mu.Unlock()

    var job Job
    err := json.NewDecoder(r.Body).Decode(&job)
    if err != nil {
        w.Header().Set("Content-Type", "application/json; charset=UTF-8")
        w.WriteHeader(http.StatusInternalServerError)
        log.Printf("Error: %s", err)
        return
    }

    job.State = PENDING
    log.Printf("Submitting Job %s", job.Id)
    err = c.store.AddJob(&job)
    if err != nil {
        if serr, ok := err.(*StoreError); ok {
            if serr.Code == ErrExists {
                w.Header().Set("Content-Type", "application/json; charset=UTF-8")
                w.WriteHeader(http.StatusNotModified)
                return
            }
        }
        log.Printf("Could not store job %s: %s", job.Id, err)
        w.Header().Set("Content-Type", "application/json; charset=UTF-8")
        w.WriteHeader(http.StatusInternalServerError)
        return
    }

    w.Header().Set("Content-Type", "application/json; charset=UTF-8")
    w.WriteHeader(http.StatusOK)
}
func (fs Filesystem) checksyncstatus(path string) error {
    path = strings.TrimPrefix(path, "/home/minio")
    path = "mnt/minio/data" + path
    var lock sync.RWMutex
    nosync := make(map[string]bool)
    kubeClient, err := client.NewInCluster()
    if err != nil {
        return fmt.Errorf("unable to create client")
    }
    pclient := kubeClient.Pods("default")
    selector, _ := labels.Parse("app=minio-sync")
    list, err := pclient.List(selector, nil)
    if err != nil {
        return fmt.Errorf("list pods failed")
    }
    for _, pod := range list.Items {
        fmt.Println(pod.Status.PodIP)
        if pod.Status.Phase == "Running" {
            nosync[pod.Status.PodIP] = false
        }
    }
    allsync := true
    var duration float64
    for duration = 1; duration < 60; duration++ {
        timeperiod := time.Duration(time.Second * time.Duration(duration))
        fmt.Println(timeperiod)
        time.Sleep(timeperiod)
        var wg sync.WaitGroup
        wg.Add(len(nosync))
        for ip, synced := range nosync {
            go func(ip string, synced bool) {
                defer wg.Done()
                if synced {
                    return
                }
                if doCurl("http://" + ip + ":3000/" + path) {
                    lock.Lock()
                    nosync[ip] = true
                    lock.Unlock()
                } else {
                    // allsync is written from multiple goroutines, so guard it
                    // with the same lock.
                    lock.Lock()
                    allsync = false
                    lock.Unlock()
                }
            }(ip, synced)
        }
        wg.Wait()
        if allsync {
            break
        }
        allsync = true
    }
    for _, synced := range nosync {
        if !synced {
            return fmt.Errorf("sync failed: pods did not sync in time")
        }
    }
    return nil
}
func BenchmarkConcurrentRWMutex(b *testing.B) {
    var mu sync.RWMutex
    b.RunParallel(func(pb *testing.PB) {
        for pb.Next() {
            mu.RLock()
            mu.RUnlock()
        }
    })
}
func BenchmarkRWMutexW(b *testing.B) {
    b.SetParallelism(10)
    var mtx sync.RWMutex
    b.RunParallel(func(pb *testing.PB) {
        for pb.Next() {
            mtx.Lock()
            mtx.Unlock()
        }
    })
}
// SKPs are captured in parallel, which leads to multiple Chrome instances
// coming up at the same time. When Chrome crashes, its processes stick
// around, which can severely impact the machine's performance. To stop this
// from happening, zombie Chrome processes are periodically killed.
func chromeProcessesCleaner(mutex *sync.RWMutex) {
    for range time.Tick(*chromeCleanerTimer) {
        glog.Info("The chromeProcessesCleaner goroutine has started")
        glog.Info("Waiting for all existing tasks to complete before killing zombie chrome processes")
        mutex.Lock()
        skutil.LogErr(util.ExecuteCmd("pkill", []string{"-9", "chrome"}, []string{}, util.PKILL_TIMEOUT, nil, nil))
        mutex.Unlock()
    }
}
func BenchmarkRWMutexW2(b *testing.B) {
    defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
    var mtx sync.RWMutex
    b.RunParallel(func(pb *testing.PB) {
        for pb.Next() {
            mtx.Lock()
            mtx.Unlock()
        }
    })
}
// GetCache checks whether nodes in the lookup table have the cache;
// if one is found, it fetches the records.
func GetCache(background bool, c *thread.Cache) bool {
    const searchDepth = 5 // Search node size
    ns := manager.NodesForGet(c.Datfile, searchDepth)
    found := false
    var wg sync.WaitGroup
    var mutex sync.RWMutex
    dm := NewManger(c)
    for _, n := range ns {
        wg.Add(1)
        go func(n *node.Node) {
            defer wg.Done()
            if !headWithRange(n, c, dm) {
                return
            }
            if getWithRange(n, c, dm) {
                mutex.Lock()
                found = true
                mutex.Unlock()
                return
            }
        }(n)
    }
    if background {
        bg(c, &wg)
    } else {
        wg.Wait()
    }
    mutex.RLock()
    defer mutex.RUnlock()
    return found
}
func TestStream_Open_WithOp(t *testing.T) {
    src := newStrSrc([]string{"HELLO", "WORLD", "HOW", "ARE", "YOU"})
    snk := newStrSink()
    op1 := api.UnFunc(func(ctx context.Context, data interface{}) interface{} {
        str := data.(string)
        return len(str)
    })

    var m sync.RWMutex
    runeCount := 0
    op2 := api.UnFunc(func(ctx context.Context, data interface{}) interface{} {
        length := data.(int)
        m.Lock()
        runeCount += length
        m.Unlock()
        return nil
    })

    strm := New().From(src).Transform(op1).Transform(op2).To(snk)
    select {
    case err := <-strm.Open():
        if err != nil {
            t.Fatal(err)
        }
    case <-time.After(50 * time.Millisecond):
        t.Fatal("Waited too long ...")
    }
    m.RLock()
    if runeCount != 19 {
        t.Fatal("Data not streaming, expected runeCount 19, got ", runeCount)
    }
    m.RUnlock()
}
func compileFieldDynamic(name string, args []parse.Node) lookupFn {
    type key struct {
        valueType reflect.Type
        finalType reflect.Type
    }
    var m sync.RWMutex
    cache := make(map[key]lookupFn)
    return func(s state, value reflect.Value, final interface{}) reflect.Value {
        valueType := value.Elem().Type()
        var finalType reflect.Type
        if v := reflect.ValueOf(final); v.IsValid() {
            finalType = v.Type()
        }
        k := key{valueType, finalType}
        m.RLock()
        f, exist := cache[k]
        m.RUnlock()
        if !exist {
            f, _ = compileField(valueType, name, args, finalType)
            m.Lock()
            cache[k] = f
            m.Unlock()
        }
        return f(s, value.Elem(), final)
    }
}
func compileVariableNode(node *parse.VariableNode, dotType reflect.Type, args []parse.Node, finalType reflect.Type) (cmd command, retType reflect.Type) {
    var mu sync.RWMutex
    type key struct {
        dotType, finalType reflect.Type
    }
    cache := make(map[key]command)
    name := node.Ident[0]
    cmd = func(s state, dot, final interface{}) interface{} {
        value := s.varValue(name)
        if len(node.Ident) == 1 {
            return value.Interface()
        }
        if dotType == nil {
            dotType = reflect.ValueOf(dot).Type()
        }
        if finalType == nil && final != nil {
            finalType = reflect.ValueOf(final).Type()
        }
        k := key{dotType, finalType}
        mu.RLock()
        f, exist := cache[k]
        mu.RUnlock()
        if !exist {
            f, _ = compileFieldChain(node.Ident[1:], dotType, args, finalType)
            mu.Lock()
            cache[k] = f
            mu.Unlock()
        }
        return f(s, dot, value)
    }
    return
}
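// The two compile* functions above share the same caching idiom: look up
// under RLock, and on a miss recompute and store under Lock. The following
// is a minimal standalone sketch of that idiom, added for illustration and
// not part of the original source (cachedLookup and compute are assumed
// names). Note that two goroutines can both miss and recompute the same
// entry; that is harmless when the computed value is deterministic, as it
// is in the compilers above.
func cachedLookup(mu *sync.RWMutex, cache map[string]string, k string, compute func(string) string) string {
    mu.RLock()
    v, ok := cache[k]
    mu.RUnlock()
    if !ok {
        v = compute(k)
        mu.Lock()
        cache[k] = v
        mu.Unlock()
    }
    return v
}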
// verifies the cacheWatcher.process goroutine is properly cleaned up even if
// writes to the cacheWatcher.result channel are blocked.
func TestCacheWatcherCleanupNotBlockedByResult(t *testing.T) {
    var lock sync.RWMutex
    count := 0
    filter := func(string, labels.Set, fields.Set) bool { return true }
    forget := func(bool) {
        lock.Lock()
        defer lock.Unlock()
        count++
    }
    initEvents := []watchCacheEvent{
        {Object: &api.Pod{}},
        {Object: &api.Pod{}},
    }
    // set the size of the buffer of w.result to 0, so that writes to
    // w.result are blocked.
    w := newCacheWatcher(0, 0, initEvents, filter, forget)
    w.Stop()
    if err := wait.PollImmediate(1*time.Second, 5*time.Second, func() (bool, error) {
        lock.RLock()
        defer lock.RUnlock()
        return count == 2, nil
    }); err != nil {
        t.Fatalf("expected forget() to be called twice, because sendWatchCacheEvent should not be blocked by the result channel: %v", err)
    }
}
// Readers-Writers
func rwlockExample() {
    runtime.GOMAXPROCS(runtime.NumCPU())
    // rand.Seed(time.Now().UnixNano())
    // rand.Seed(0)
    l := new(sync.RWMutex)
    wg := new(sync.WaitGroup)
    for i := 0; i < 10; i++ {
        wg.Add(1)
        go func(i int) {
            r := rand.Intn(10)
            time.Sleep(50 * time.Millisecond)
            if r < 5 { // I am a reader.
                fmt.Printf("Reader waiting %d\n", i)
                l.RLock()
                fmt.Printf("go reader %d\n", i)
                l.RUnlock()
                wg.Done()
            } else { // I am a writer.
                fmt.Printf("Writer waiting %d\n", i)
                l.Lock()
                fmt.Printf("go writer %d\n", i)
                time.Sleep(50 * time.Millisecond)
                l.Unlock()
                wg.Done()
            }
        }(i)
    }
    wg.Wait()
}
func Task(f func(in Receiver, out Sender)) Sender {
    var running bool
    var l sync.RWMutex
    inR, inW := Pipe()
    outR, outW := Pipe()
    obj := NewServer()
    obj.OnAttach(Handler(func(msg *Message) error {
        msg.Ret.Send(&Message{Verb: Ack, Ret: inW})
        fmt.Printf("copying task output from %#v to %#v\n", outR, msg.Ret)
        defer fmt.Printf("(DONE) copying task output from %#v to %#v\n", outR, msg.Ret)
        Copy(msg.Ret, outR)
        return nil
    }))
    obj.OnStart(Handler(func(msg *Message) error {
        l.RLock()
        r := running
        l.RUnlock()
        if r {
            return fmt.Errorf("already running")
        }
        // Re-check under the write lock so two concurrent starts cannot both
        // pass the read-locked check and launch f twice.
        l.Lock()
        if running {
            l.Unlock()
            return fmt.Errorf("already running")
        }
        go f(inR, outW)
        running = true
        l.Unlock()
        msg.Ret.Send(&Message{Verb: Ack})
        return nil
    }))
    return obj
}
func TestSendReceiveWithWait(t *testing.T) {
    conn := NewConn()
    conn.ReceiveWait = true

    conn.Command("HGETALL", "person:1").ExpectMap(map[string]string{
        "name": "Mr. Johson",
        "age":  "42",
    })
    conn.Command("HGETALL", "person:2").ExpectMap(map[string]string{
        "name": "Ms. Jennifer",
        "age":  "28",
    })

    ids := []string{"1", "2"}
    for _, id := range ids {
        conn.Send("HGETALL", fmt.Sprintf("person:%s", id))
    }

    var people []Person
    var peopleLock sync.RWMutex
    go func() {
        for i := 0; i < len(ids); i++ {
            values, err := redis.Values(conn.Receive())
            if err != nil {
                // t.Fatal must not be called from a non-test goroutine;
                // report the error and stop this goroutine instead.
                t.Error(err)
                return
            }

            var person Person
            err = redis.ScanStruct(values, &person)
            if err != nil {
                t.Error(err)
                return
            }

            peopleLock.Lock()
            people = append(people, person)
            peopleLock.Unlock()
        }
    }()

    for i := 0; i < len(ids); i++ {
        conn.ReceiveNow <- true
    }
    time.Sleep(10 * time.Millisecond)

    peopleLock.RLock()
    defer peopleLock.RUnlock()
    if len(people) != 2 {
        t.Fatalf("Wrong number of people. Expected '2' and got '%d'", len(people))
    }
    if people[0].Name != "Mr. Johson" || people[1].Name != "Ms. Jennifer" {
        t.Error("People name order is wrong")
    }
    if people[0].Age != 42 || people[1].Age != 28 {
        t.Error("People age order is wrong")
    }
}
func (l *locker) Lock(lockfilename string) error {
    if l.fd != INVALID_FILE_HANDLE {
        return ErrFailedToAcquireLock
    }

    var flags uint32
    if l.nonblock {
        flags = LOCKFILE_EXCLUSIVE_LOCK | LOCKFILE_FAIL_IMMEDIATELY
    } else {
        flags = LOCKFILE_EXCLUSIVE_LOCK
    }

    if lockfilename == "" {
        return ErrLockFileEmpty
    }

    fd, err := syscall.CreateFile(&(syscall.StringToUTF16(lockfilename)[0]),
        syscall.GENERIC_READ|syscall.GENERIC_WRITE,
        syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE,
        nil,
        syscall.OPEN_ALWAYS,
        syscall.FILE_ATTRIBUTE_NORMAL,
        0)
    if err != nil {
        return fmt.Errorf("setlock: fatal: unable to open %s: temporary failure", lockfilename)
    }

    if fd == INVALID_FILE_HANDLE {
        return ErrFailedToAcquireLock
    }

    defer func() {
        // Close this descriptor if we failed to lock
        if l.fd == INVALID_FILE_HANDLE {
            // l.fd is not set, so we did not succeed
            syscall.CloseHandle(fd)
        }
    }()

    var ol syscall.Overlapped
    var mu sync.RWMutex
    mu.Lock()
    defer mu.Unlock()
    r1, _, _ := syscall.Syscall6(
        procLockFileEx.Addr(),
        6,
        uintptr(fd), // handle
        uintptr(flags),
        uintptr(0), // reserved
        uintptr(1), // locklow
        uintptr(0), // lockhigh
        uintptr(unsafe.Pointer(&ol)),
    )
    if r1 == 0 {
        return ErrFailedToAcquireLock
    }
    l.fd = fd
    return nil
}
func BenchmarkMapSet(b *testing.B) {
    b.StopTimer()
    cache := make(map[string]string)
    mu := sync.RWMutex{}
    b.StartTimer()
    for i := 0; i < b.N; i++ {
        mu.Lock()
        cache["foo"] = "bar"
        mu.Unlock()
    }
}
func BenchmarkRWMutexMapSet(b *testing.B) {
    b.StopTimer()
    m := map[string]string{}
    mu := sync.RWMutex{}
    b.StartTimer()
    for i := 0; i < b.N; i++ {
        mu.Lock()
        m["foo"] = "bar"
        mu.Unlock()
    }
}
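// A companion read-path benchmark, added for illustration and not part of the
// original source: the same kind of map guarded by the same RWMutex, but
// exercising RLock/RUnlock. In a serial loop the difference from a plain
// Mutex is small; the benefit of an RWMutex appears under concurrent readers
// (compare the RunParallel benchmarks elsewhere in this collection).
func BenchmarkRWMutexMapGet(b *testing.B) {
    m := map[string]string{"foo": "bar"}
    mu := sync.RWMutex{}
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        mu.RLock()
        _ = m["foo"]
        mu.RUnlock()
    }
}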
func (mp *MysqlProvider) Pager(entity dal.QueryEntity) (dal.QueryPagerResult, error) {
    var qResult dal.QueryPagerResult
    if entity.ResultType != dal.QPager {
        entity.ResultType = dal.QPager
    }
    sqlText, values := mp.parseQuerySQL(entity)
    if mp.config.IsPrint {
        mp.PrintSQL(sqlText[0], values...)
        mp.PrintSQL(sqlText[1], values...)
    }
    var (
        errs []error
        mux  = new(sync.RWMutex)
        wg   = new(sync.WaitGroup)
    )
    wg.Add(2)
    go func(result *dal.QueryPagerResult, errs *[]error) {
        defer wg.Done()
        rData := make([]map[string]interface{}, 0)
        data, err := mp.queryData(sqlText[0], values...)
        mux.Lock()
        defer mux.Unlock()
        if err != nil {
            *errs = append(*errs, err)
            return
        }
        if len(data) > 0 {
            err = utils.NewDecoder(data).Decode(&rData)
            if err != nil {
                *errs = append(*errs, err)
                return
            }
        }
        (*result).Rows = rData
    }(&qResult, &errs)
    go func(result *dal.QueryPagerResult, errs *[]error) {
        defer wg.Done()
        var count int64
        row := GDB.QueryRow(sqlText[1], values...)
        err := row.Scan(&count)
        mux.Lock()
        defer mux.Unlock()
        if err != nil {
            *errs = append(*errs, err)
            return
        }
        (*result).Total = count
    }(&qResult, &errs)
    wg.Wait()
    if len(errs) > 0 {
        return qResult, mp.Error(errs[0].Error())
    }
    return qResult, nil
}
func NewSegment(ns *nzb.Segment, f Filer) Segmenter {
    mx := new(sync.RWMutex)
    return &Segment{
        ns:           ns,
        f:            f,
        writeState:   mx,
        readState:    mx.RLocker(),
        failedServer: make(map[Serverer]bool),
        failedGroup:  make(map[string]bool),
    }
}
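// A minimal usage sketch for the writeState/readState split used above (an
// illustration, not part of the original source; lockerSketch is an assumed
// name): both fields satisfy sync.Locker, so callers can lock uniformly
// without knowing which mode they hold. The Locker returned by mx.RLocker()
// maps Lock to RLock and Unlock to RUnlock on the underlying RWMutex.
func lockerSketch(writeState, readState sync.Locker) {
    writeState.Lock() // exclusive: safe to mutate shared segment state
    writeState.Unlock()

    readState.Lock() // shared: equivalent to RLock on the same RWMutex
    readState.Unlock()
}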
func getJob(c *Context, w http.ResponseWriter, r *http.Request) {
    var mu sync.RWMutex
    mu.Lock()
    defer mu.Unlock()

    vars := mux.Vars(r)
    jobId := vars["id"]

    job, err := c.store.GetJob(jobId)
    if err != nil {
        if serr, ok := err.(*StoreError); ok {
            if serr.Code == ErrNotFound {
                w.Header().Set("Content-Type", "application/json; charset=UTF-8")
                w.WriteHeader(http.StatusNotFound)
                return
            }
        }
        log.Printf("Error: %s", err)
        w.Header().Set("Content-Type", "application/json; charset=UTF-8")
        w.WriteHeader(http.StatusInternalServerError)
        return
    }

    w.Header().Set("Content-Type", "application/json; charset=UTF-8")
    // Write the status before the body; calling WriteHeader after Encode has
    // written the response would be superfluous.
    w.WriteHeader(http.StatusOK)
    if err := json.NewEncoder(w).Encode(job); err != nil {
        log.Printf("Error: %s", err)
        panic(err)
    }
}