func filter(idx int, vals, bits *list.List) []interface{} { if vals == nil || bits == nil { return nil } var shifted uint = 0 valsPerCase := make([]interface{}, vals.Len()) for i, v, b := 0, vals.Front(), bits.Front(); v != nil && b != nil; v, b = v.Next(), b.Next() { valIdx := idx >> shifted mask := int(math.Pow(2, float64(b.Value.(uint))) - 1) valIdx &= mask if valIdx >= len(v.Value.([]interface{})) { return nil } valsPerCase[i] = v.Value.([]interface{})[valIdx] i++ shifted += b.Value.(uint) } return valsPerCase }
func StrList2StrArray(l *list.List) []string { output := make([]string, 0) for i := l.Front(); i != nil; i = i.Next() { output = append(output, i.Value.(string)) } return output }
// bootstrapStores bootstraps uninitialized stores once the cluster
// and node IDs have been established for this node. Store IDs are
// allocated via a sequence id generator stored at a system key per
// node.
func (n *Node) bootstrapStores(bootstraps *list.List, stopper *stop.Stopper) {
	log.Infof("bootstrapping %d store(s)", bootstraps.Len())
	// The cluster ID must be known before a StoreIdent is persisted.
	if n.ClusterID == "" {
		panic("ClusterID missing during store bootstrap of auxiliary store")
	}
	// Bootstrap all waiting stores by allocating a new store id for
	// each and invoking store.Bootstrap() to persist.
	inc := int64(bootstraps.Len())
	// Reserve a contiguous block of store IDs, one per waiting store.
	firstID, err := allocateStoreIDs(n.Descriptor.NodeID, inc, n.ctx.DB)
	if err != nil {
		log.Fatal(err)
	}
	sIdent := roachpb.StoreIdent{
		ClusterID: n.ClusterID,
		NodeID:    n.Descriptor.NodeID,
		StoreID:   firstID,
	}
	for e := bootstraps.Front(); e != nil; e = e.Next() {
		s := e.Value.(*storage.Store)
		if err := s.Bootstrap(sIdent, stopper); err != nil {
			log.Fatal(err)
		}
		if err := s.Start(stopper); err != nil {
			log.Fatal(err)
		}
		n.stores.AddStore(s)
		// Consecutive IDs out of the reserved block.
		sIdent.StoreID++
		log.Infof("bootstrapped store %s", s)
		// Done regularly in Node.startGossip, but this cuts down the time
		// until this store is used for range allocations.
		s.GossipStore()
	}
}
func convertArgumentsToSlice(arguments *list.List) []string { argumentSlice := make([]string, 0, arguments.Len()) for e := arguments.Front(); e != nil; e = e.Next() { argumentSlice = append(argumentSlice, e.Value.(string)) } return argumentSlice }
func outOfOrder(l *list.List) { iTotal := 25 if iTotal > l.Len() { iTotal = l.Len() } ll := make([]*list.List, iTotal) for i := 0; i < iTotal; i++ { ll[i] = list.New() } r := rand.New(rand.NewSource(time.Now().UnixNano())) for e := l.Front(); e != nil; e = e.Next() { fpath, ok := e.Value.(string) if !ok { panic("The path is invalid string") } if rand.Int()%2 == 0 { ll[r.Intn(iTotal)].PushFront(fpath) } else { ll[r.Intn(iTotal)].PushBack(fpath) } } r0 := rand.New(rand.NewSource(time.Now().UnixNano())) l.Init() for i := 0; i < iTotal; i++ { if r0.Intn(2) == 0 { l.PushBackList(ll[i]) } else { l.PushFrontList(ll[i]) } ll[i].Init() } }
func copyList(l *list.List) *list.List { n := list.New() for e := l.Front(); e != nil; e = e.Next() { n.PushBack(e.Value.(string)) } return n }
// initSynsetVector fills pv, the initial probability vector over KB
// vertices, from the sentences in ls.
//
// Pass 1 collects one representative *Word per unique key
// "<lowercased form>#<first letter of lowercased tag>" among words
// whose tag matches this.RE_wnpos; nw counts those unique words.
// Pass 2 distributes a total mass of 1/nw per unique word evenly
// across the KB vertices of its senses (1/nsyn per sense); senses
// whose synset is missing from the KB are warned about and skipped.
func (this *UKB) initSynsetVector(ls *list.List, pv []float64) {
	nw := 0
	uniq := make(map[string]*Word)
	for s := ls.Front(); s != nil; s = s.Next() {
		for w := s.Value.(*Sentence).Front(); w != nil; w = w.Next() {
			if this.RE_wnpos.MatchString(w.Value.(*Word).getTag(0)) {
				// Key example: "dog#n" (lemma + PoS initial).
				key := w.Value.(*Word).getLCForm() + "#" + strings.ToLower(w.Value.(*Word).getTag(0))[0:1]
				if uniq[key] == nil {
					nw++
					uniq[key] = w.Value.(*Word)
				}
			}
		}
	}
	for _, u := range uniq {
		lsen := u.getSenses(0)
		nsyn := lsen.Len()
		for s := lsen.Front(); s != nil; s = s.Next() {
			syn := this.wn.getVertex(s.Value.(FloatPair).first)
			if syn == VERTEX_NOT_FOUND {
				LOG.Warn("Unknown synset " + s.Value.(FloatPair).first + " ignored. Please check consistency between sense dictionary and KB")
			} else {
				// Each unique word contributes 1/nw total, split evenly
				// over its nsyn senses.
				pv[syn] += (1.0 / float64(nw)) * (1.0 / float64(nsyn))
			}
		}
	}
}
// houseKeeping runs the recycler loop for a pool of reusable objects.
// Objects returned on give are stacked for reuse; get is served from
// the top of the stack, and factory mints a new object whenever the
// stack is empty (makes — a package-level counter — tracks mints).
// Entries idle for more than a minute are evicted when the timer
// fires. Runs forever; intended to be launched as a goroutine.
func houseKeeping(get, give chan interface{}, factory func() interface{}) {
	q := new(list.List)
	for {
		if q.Len() == 0 {
			// Pool empty: mint a fresh object and record the mint.
			atomic.AddInt64(&makes, 1)
			q.PushFront(queued{when: time.Now(), data: factory()})
		}
		element := q.Front()
		timeout := time.NewTimer(time.Minute)
		select {
		case b := <-give:
			timeout.Stop()
			// An object came back; restack it with a fresh timestamp.
			q.PushFront(queued{when: time.Now(), data: b})
		case get <- element.Value.(queued).data:
			timeout.Stop()
			q.Remove(element)
		case <-timeout.C:
			// Evict everything unused for over a minute. Next is saved
			// before Remove because Remove clears the element's links.
			e := q.Front()
			for e != nil {
				n := e.Next()
				if time.Since(e.Value.(queued).when) > time.Minute {
					q.Remove(e)
					e.Value = nil
				}
				e = n
			}
		}
	}
}
func computeAverages(execution_results *list.List) (float64, float64, int) { average_concurrent_users, average_response_time := 0.0, 0.0 var result *httpActionResult var avg_rt_counter time.Duration max_concurrent_users := 0 avg_cu_counter := 0.0 counter := 0.0 if execution_results.Len() > 0 { for elem := execution_results.Front(); elem != nil; elem = elem.Next() { result = elem.Value.(*httpActionResult) avg_cu_counter += float64(result.concurrent_users) if result.concurrent_users > max_concurrent_users { max_concurrent_users = result.concurrent_users } if result.is_client_error == false && result.is_server_error == false && result.has_timed_out == false { avg_rt_counter += result.response_time counter++ } } average_concurrent_users = avg_cu_counter / float64(execution_results.Len()) numerator := avg_rt_counter.Seconds() * 1000.0 average_response_time = float64(numerator) / counter } return average_concurrent_users, average_response_time, max_concurrent_users }
func loadInputTemplate(name string, loader TemplateLoader) (Template, error) { t, err := loader.LoadTemplate(name) if err != nil { return Template{}, nil } load_tracker := map[string]bool{name: true} var load_queue list.List for _, new_name := range t.InputDependencies { if !load_tracker[new_name] { load_tracker[new_name] = true load_queue.PushBack(new_name) } } for e := load_queue.Front(); e != nil; e = e.Next() { template_name := e.Value.(string) new_template, err := loader.LoadTemplate(template_name) if err != nil { return Template{}, err } t.Inputs = append(t.Inputs, new_template.Inputs...) for _, new_name := range new_template.InputDependencies { if !load_tracker[new_name] { load_tracker[new_name] = true load_queue.PushBack(new_name) } } } return t, nil }
// main demonstrates a channel-based buffer recycler: a goroutine keeps
// a stack of free buffers, minting a 100-byte buffer whenever the
// stack is empty, and serves get/give requests.
func main() {
	give := make(chan []byte)
	get := make(chan []byte)

	go func() {
		free := new(list.List)
		for {
			if free.Len() == 0 {
				free.PushFront(make([]byte, 100))
			}
			top := free.Front()
			select {
			case returned := <-give:
				// A buffer came back; stack it for reuse.
				free.PushFront(returned)
			case get <- top.Value.([]byte):
				// The top buffer was handed out.
				free.Remove(top)
			}
		}
	}()

	// Gets a new buffer from the recycler.
	buffer := <-get
	// Give it back to the recycler.
	give <- buffer
	// Get the recycled buffer again.
	buffer = <-get
}
func LoadTemplates(name string, loader TemplateLoader) (*template.Template, error) { load_tracker := map[string]bool{name: true} var load_queue list.List load_queue.Init() load_queue.PushBack(name) t := template.New(name).Funcs(builtins) for e := load_queue.Front(); e != nil; e = e.Next() { template_name := e.Value.(string) new_template, err := loader.LoadTemplate(template_name) if err != nil { return nil, err } if _, err := t.Parse(new_template.Data); err != nil { return nil, err } if t.Lookup(template_name) == nil { return nil, fmt.Errorf(`template "%s"load failed.`, template_name) } for _, new_name := range new_template.Dependencies { if !load_tracker[new_name] { load_tracker[new_name] = true load_queue.PushBack(new_name) } } } return t, nil }
// listen performs one scheduling step for the given stage: while jobs
// remain it waits for either a worker response (recording completion)
// or an available worker (dispatching the next job id via RPC);
// once the job list is empty it only drains a single response.
func listen(mr *MapReduce, stage JobType, completed *int, jobs *list.List) {
	// NumOther is the task count on the other side of the shuffle,
	// forwarded to workers inside DoJobArgs.
	NumOther := 0
	switch stage {
	case Map:
		NumOther = mr.nReduce
	case Reduce:
		NumOther = mr.nMap
	}
	if jobs.Len() != 0 {
		select {
		//wait for worker responses
		case r := <-mr.responses:
			HandleResponse(r, completed, jobs)
		//wait for available if none are available
		case id := <-mr.available:
			w := mr.Workers[id]
			//pop off a job id
			j := jobs.Remove(jobs.Front()).(int)
			args := &DoJobArgs{mr.file, stage, j, NumOther}
			// Dispatch asynchronously. NOTE(review): presumably SendRPC
			// re-queues j on failure — confirm in its implementation.
			go SendRPC(mr, w, args, j)
		}
	} else {
		// No jobs left to hand out: just wait for an outstanding
		// worker to report back.
		r := <-mr.responses
		HandleResponse(r, completed, jobs)
	}
}
// closestContacts returns up to amount contacts closest to searchID by
// XOR distance, skipping excludedID. Buckets are visited in search
// order; each bucket's contacts are insertion-sorted by distance
// before being appended, and the walk stops as soon as the result
// slice is full. NOTE(review): the per-bucket insertion sort is
// O(k^2) in bucket size — acceptable for small k-buckets.
func (k *Kademlia) closestContacts(searchID ID, excludedID ID, amount int) (contacts []Contact) {
	contacts = make([]Contact, 0)
	k.doInSearchOrder(searchID, func(index int) bool {
		// add as many contacts from bucket i as possible,
		currentBucket := k.Buckets[index].contacts
		sortedList := new(list.List)
		//sort that list |suspect|
		for e := currentBucket.Front(); e != nil; e = e.Next() {
			insertSorted(sortedList, e.Value.(Contact), func(first Contact, second Contact) int {
				firstDistance := first.NodeID.Xor(searchID)
				secondDistance := second.NodeID.Xor(searchID)
				return firstDistance.Compare(secondDistance)
			})
		}
		// (^._.^)~ kirby says add as much as you can to output slice
		for e := sortedList.Front(); e != nil; e = e.Next() {
			c := e.Value.(Contact)
			if !c.NodeID.Equals(excludedID) {
				contacts = append(contacts, c)
				// if the slice is full, break
				if len(contacts) == amount {
					// Returning false stops doInSearchOrder.
					return false
				}
			}
		}
		// slice isn't full, do on the next index
		return true
	})
	return
}
// pushMetric adds the metric to the end of the list and returns a comma separated string of the // previous 61 entries. We return 61 instead of 60 (an hour) because the chart on the client // tracks deltas between these values - there is nothing to compare the first value against. func pushMetric(history *list.List, ev expvar.Var) string { history.PushBack(ev.String()) if history.Len() > 61 { history.Remove(history.Front()) } return JoinStringList(history) }
// ParseTokens converts a flat slice of s-expression tokens into a
// nested structure of *list.List values: "(" opens a new child list
// (appended to the current one), ")" pops back to the parent, and any
// other token is appended as a plain string. Calls Error on unbalanced
// parentheses and returns the outermost list.
func ParseTokens(t []string) *list.List {
	// Mirror the slice into a list so the parser can walk elements.
	tokens := new(list.List)
	for i := range t {
		tokens.PushBack(t[i])
	}
	n := 0 // open-paren depth counter; nonzero at the end means unbalanced
	// s is the list currently being filled; o is the stack of parent
	// lists used to restore s when a ")" is seen.
	s, o := new(list.List), new(list.List)
	for e := tokens.Front(); e != nil; e = e.Next() {
		if e.Value.(string) == "(" {
			n++
			// Append a fresh child list, remember the parent, descend.
			listAppend(s, new(list.List))
			listAppend(o, s)
			s = s.Back().Value.(*list.List)
		} else if e.Value.(string) == ")" {
			n--
			// Ascend to the saved parent and pop it off the stack.
			s = o.Back().Value.(*list.List)
			listPop(o)
		} else {
			listAppend(s, e.Value.(string))
		}
	}
	if n != 0 {
		// [sic] — typo "parantheses" is preserved in the message.
		Error("unbalanced parantheses")
	}
	return s
}
func NewTComInputBitstream(buf *list.List) *TComInputBitstream { // std::vector<uint8_t>* buf); if buf != nil { return &TComInputBitstream{buf, make(map[uint]uint), 0, buf.Front(), 0, 0, 0} } return &TComInputBitstream{nil, make(map[uint]uint), 0, nil, 0, 0, 0} }
// removeUnacceptedChanges filters l down to the changes permitted by
// the allowAdd/allowDelete/allowUpdate flags, returning a new list —
// or l itself when everything is allowed. Disallowed updates are
// converted in place to adds when adds are allowed and the change path
// is deeper than one segment; otherwise they are dropped. Nil entries
// and values that are not *dm.Change are skipped silently.
func (p *Pipeline) removeUnacceptedChanges(l *list.List, allowAdd, allowDelete, allowUpdate bool) *list.List {
	// Fast path: nothing needs filtering.
	if allowAdd && allowDelete && allowUpdate {
		return l
	}
	n := list.New()
	for iter := l.Front(); iter != nil; iter = iter.Next() {
		if iter.Value == nil {
			continue
		}
		ch, _ := iter.Value.(*dm.Change)
		if ch == nil {
			continue
		}
		if !allowAdd && ch.ChangeType == dm.CHANGE_TYPE_ADD {
			// Dropped: adds are not allowed.
			fmt.Printf("[PIPELINE]: Removing add change for path of %#v\n", ch.Path)
		} else if !allowDelete && ch.ChangeType == dm.CHANGE_TYPE_DELETE {
			// Dropped: deletes are not allowed.
			fmt.Printf("[PIPELINE]: Removing delete change for path of %#v\n", ch.Path)
		} else if !allowUpdate && ch.ChangeType == dm.CHANGE_TYPE_UPDATE {
			if !allowAdd || ch.Path == nil || len(ch.Path) <= 1 {
				// Dropped: cannot convert to an add.
				fmt.Printf("[PIPELINE]: Removing update for path of %#v and allowAdd %v\n", ch.Path, allowAdd)
			} else {
				// Downgrade the update to an add; note this mutates the
				// shared *dm.Change in place.
				fmt.Printf("[PIPELINE]: Changing update to add for path of %#v and allowAdd %v\n", ch.Path, allowAdd)
				ch.ChangeType = dm.CHANGE_TYPE_ADD
				ch.OriginalValue = nil
				n.PushBack(ch)
			}
		} else {
			// Change type is permitted; keep it.
			n.PushBack(ch)
		}
	}
	return n
}
// fillCSRTables builds the compressed-sparse-row representation of the
// relation graph over nv vertices: rels holds (source, target) pairs,
// which are sorted by source and flattened into edges / firstEdge /
// numEdges, with outCoef[n] = 1/outdegree(n).
func (this *CSRKB) fillCSRTables(nv int, rels *list.List) {
	// Sort the pairs via a slice round-trip; the parameter is
	// rebound to the sorted copy (the caller's list is untouched).
	tmpA := List2IntPairsArray(rels)
	sort.Sort(IntPairsArray(tmpA))
	rels = IntPairsArray2List(tmpA)
	this.edges = make([]int, rels.Len())
	this.firstEdge = make([]int, nv)
	this.numEdges = make([]int, nv)
	this.outCoef = make([]float64, nv)
	n := 0 // current source vertex
	r := 0 // next free slot in this.edges
	p := rels.Front()
	for p != nil && n < nv {
		this.firstEdge[n] = r
		// Consume the run of edges whose source is n (list is sorted).
		for p != nil && p.Value.(IntPair).first == n {
			this.edges[r] = p.Value.(IntPair).second
			r++
			p = p.Next()
		}
		this.numEdges[n] = r - this.firstEdge[n]
		// NOTE(review): a vertex with no outgoing edges has
		// numEdges == 0, making outCoef +Inf — presumably every vertex
		// has at least one relation; confirm with the KB loader.
		this.outCoef[n] = 1 / float64(this.numEdges[n])
		n++
	}
}
func Delete(e Elem, L *list.List) bool { ret := false if L.Len() == 0 { return ret } back := L.Back() if e.GetTime() > back.Value.(Elem).GetTime() { return ret } el := L.Front() Loop: for i := 0; el != nil; i++ { elt := el.Value.(Elem).GetTime() if elt > e.GetTime() { break Loop } else if e.IsEqual(el.Value.(Elem)) { L.Remove(el) ret = true break Loop } el = el.Next() } return ret }
func List2IntPairsArray(ls *list.List) []IntPair { out := make([]IntPair, ls.Len()) for i, l := 0, ls.Front(); i < ls.Len() && l != nil; i, l = i+1, l.Next() { out[i] = l.Value.(IntPair) } return out }
func GetMinTime(L *list.List) Time { if L.Len() == 0 { return NOTIME } front := L.Front() return front.Value.(Elem).GetTime() }
// Render takes an ast list and renders it to the screen func Render(ast list.List) { for e := ast.Front(); e != nil; e = e.Next() { node, ok := e.Value.(Node) if !ok { panic(ok) } if node.Type == newline { fmt.Println() } else { if node.Type == heading { PrintHeading(node.Content, node.Heading) } else if node.Type == blockquote { fmt.Print("\t") } else if node.Type == italic { PrintItalic(node.Content) } else if node.Type == strikethrough { PrintStrikethrough(node.Content) } else if node.Type == bold { PrintBold(node.Content) } else { Print(node.Content) } } } }
func Insert(e Elem, L *list.List) int { if L.Len() == 0 { L.PushFront(e) return L.Len() } front := L.Front() if e.GetTime() < front.Value.(Elem).GetTime() { L.InsertBefore(e, front) return L.Len() } el := L.Back() Loop: for { if el.Value.(Elem).GetTime() > e.GetTime() { el = el.Prev() } else { break Loop } } L.InsertAfter(e, el) return L.Len() }
func List2Array(l *list.List) []interface{} { output := make([]interface{}, 0) for i := l.Front(); i != nil; i = i.Next() { output = append(output, i.Value) } return output }
// GetShowTime fetches showtimes for every cinema in lcinema from the
// "wangpiao" source, converts each wire record into a ShowTime, and
// persists each cinema's list via InsertShowTimeList.
// NOTE(review): the query date "2015-10-15" is hard-coded — presumably
// a placeholder; confirm whether it should be the current date.
func GetShowTime(lcinema *list.List) {
	for e := lcinema.Front(); e != nil; e = e.Next() {
		lshowtimes := list.New().Init()
		cinema := e.Value.(*TCinema)
		showtimes := getShowTimeSingleCinema(cinema.TypeIndex, "2015-10-15")
		for i := 0; i < len(showtimes); i++ {
			jshowtime := showtimes[i]
			// Map the wire record onto the storage type field by field.
			showtime := new(ShowTime)
			showtime.SeatCount = jshowtime.SeatCount
			showtime.Type = 0
			showtime.TypeCinemaIndex = jshowtime.CinemaID
			showtime.TypeCityIndex = jshowtime.CityID
			showtime.TypeHallID = jshowtime.HallID
			showtime.TypeMovieIndex = jshowtime.FilmID
			showtime.TypeMovieName = jshowtime.FilmName
			showtime.TypeName = "wangpiao"
			// showtime.TypeSaleEndTimeS = jshowtime.SaleEndTime
			// Parse error is deliberately ignored; ts is the zero time
			// on failure.
			ts, _ := time.Parse("2006-01-02 15:04:05", jshowtime.SaleEndTime)
			showtime.TypeSaleEndTimeS = ts
			showtime.TypeSaleEndTime = ts.Unix()
			showtime.TypeShowIndex = jshowtime.ShowIndex
			lshowtimes.PushBack(showtime)
		}
		InsertShowTimeList(lshowtimes)
	}
}
// ValidateCommitsWithEmails checks if authors' e-mails of commits are corresponding to users. func ValidateCommitsWithEmails(oldCommits *list.List) *list.List { var ( u *User emails = map[string]*User{} newCommits = list.New() e = oldCommits.Front() ) for e != nil { c := e.Value.(*git.Commit) if v, ok := emails[c.Author.Email]; !ok { u, _ = GetUserByEmail(c.Author.Email) emails[c.Author.Email] = u } else { u = v } newCommits.PushBack(UserCommit{ User: u, Commit: c, }) e = e.Next() } return newCommits }
// Performs a scan against the Log.
// For each x509 certificate found, |foundCert| will be called with the
// index of the entry and certificate itself as arguments. For each precert
// found, |foundPrecert| will be called with the index of the entry and the raw
// precert string as the arguments.
//
// This method blocks until the scan is complete.
func (s *Scanner) Scan(foundCert func(*ct.LogEntry), foundPrecert func(*ct.LogEntry)) error {
	s.Log("Starting up...\n")
	// Reset per-run counters.
	s.certsProcessed = 0
	s.precertsSeen = 0
	s.unparsableEntries = 0
	s.entriesWithNonFatalErrors = 0
	// The STH fixes the tree size this scan will cover.
	latestSth, err := s.logClient.GetSTH()
	if err != nil {
		return err
	}
	s.Log(fmt.Sprintf("Got STH with %d certs", latestSth.TreeSize))
	ticker := time.NewTicker(time.Second)
	startTime := time.Now()
	// fetches carries index ranges to fetcher workers; jobs carries
	// fetched entries on to matcher workers.
	fetches := make(chan fetchRange, 1000)
	jobs := make(chan matcherJob, 100000)
	// Progress reporter. NOTE(review): the ticker is never stopped, so
	// this goroutine outlives the scan; and while certsProcessed is 0
	// the throughput is 0, making the ETA division yield +Inf before
	// an implementation-defined int conversion.
	go func() {
		for range ticker.C {
			throughput := float64(s.certsProcessed) / time.Since(startTime).Seconds()
			remainingCerts := int64(latestSth.TreeSize) - int64(s.opts.StartIndex) - s.certsProcessed
			remainingSeconds := int(float64(remainingCerts) / throughput)
			remainingString := humanTime(remainingSeconds)
			s.Log(fmt.Sprintf("Processed: %d certs (to index %d). Throughput: %3.2f ETA: %s\n", s.certsProcessed,
				s.opts.StartIndex+int64(s.certsProcessed), throughput, remainingString))
		}
	}()
	// Partition [StartIndex, TreeSize) into BatchSize-wide ranges.
	var ranges list.List
	for start := s.opts.StartIndex; start < int64(latestSth.TreeSize); {
		end := min(start+int64(s.opts.BatchSize), int64(latestSth.TreeSize)) - 1
		ranges.PushBack(fetchRange{start, end})
		start = end + 1
	}
	var fetcherWG sync.WaitGroup
	var matcherWG sync.WaitGroup
	// Start matcher workers
	for w := 0; w < s.opts.NumWorkers; w++ {
		matcherWG.Add(1)
		go s.matcherJob(w, jobs, foundCert, foundPrecert, &matcherWG)
	}
	// Start fetcher workers
	for w := 0; w < s.opts.ParallelFetch; w++ {
		fetcherWG.Add(1)
		go s.fetcherJob(w, fetches, jobs, &fetcherWG)
	}
	// Feed all ranges, then shut the pipeline down stage by stage:
	// close fetches, wait for fetchers, close jobs, wait for matchers.
	for r := ranges.Front(); r != nil; r = r.Next() {
		fetches <- r.Value.(fetchRange)
	}
	close(fetches)
	fetcherWG.Wait()
	close(jobs)
	matcherWG.Wait()
	s.Log(fmt.Sprintf("Completed %d certs in %s", s.certsProcessed, humanTime(int(time.Since(startTime).Seconds()))))
	s.Log(fmt.Sprintf("Saw %d precerts", s.precertsSeen))
	s.Log(fmt.Sprintf("%d unparsable entries, %d non-fatal errors", s.unparsableEntries, s.entriesWithNonFatalErrors))
	return nil
}
// Workers report back how many RPCs they have processed in the Shutdown reply. // Check that they processed at least 1 RPC. func checkWorker(t *testing.T, l *list.List) { for e := l.Front(); e != nil; e = e.Next() { if e.Value == 0 { t.Fatalf("Some worker didn't do any work\n") } } }
// removes RCB element from a linked list func rcbListRemove(r *RCB, ls *list.List) { for e := ls.Front(); e != nil; e = e.Next() { if e.Value.(*RCB).RID == r.RID { ls.Remove(e) } } }