func searchRiverStates(queue *list.List) {
	current := queue.Back().Value.(itemState)
	if current.isFinal() {
		// Already in the final state.
		printRiver(queue)
	} else {
		if current.currentAction.direct == 0 {
			for i := 0; i < 5; i++ {
				next := current.move(backAction[i])
				//next.printState()
				if next.validate() && !isProcessedRiverState(queue, next) {
					queue.PushBack(next)
					searchRiverStates(queue)
					queue.Remove(queue.Back())
				}
			}
		} else {
			for i := 0; i < 5; i++ {
				next := current.move(goAction[i])
				//next.printState()
				if next.validate() && !isProcessedRiverState(queue, next) {
					queue.PushBack(next)
					searchRiverStates(queue)
					queue.Remove(queue.Back())
				}
			}
		}
	}
}
func DecomposeRecordLayer(tlsPayload []byte) list.List {
	if len(tlsPayload) < 5 {
		return list.List{}
	}
	log.Println("Parsing one packet......")
	var tlsLayerlist list.List
	total := uint16(len(tlsPayload))
	var offset uint16 = 0
	// Each iteration needs at least the 5-byte record header.
	for offset+5 <= total {
		var p TLSHandshakeDecoder.TLSRecordLayer
		p.ContentType = uint8(tlsPayload[0+offset])
		p.Version = uint16(tlsPayload[1+offset])<<8 | uint16(tlsPayload[2+offset])
		p.Length = uint16(tlsPayload[3+offset])<<8 | uint16(tlsPayload[4+offset])
		p.Fragment = make([]byte, p.Length)
		// Clamp the copy to the bytes actually available so a bad length
		// field cannot slice past the end of the payload.
		start := int(offset) + 5
		end := start + int(p.Length)
		if end > int(total) {
			end = int(total)
		}
		l := copy(p.Fragment, tlsPayload[start:end])
		tlsLayerlist.PushBack(p)
		log.Println("Length: ", p.Length, "Type: ", p.ContentType)
		offset += 5 + p.Length
		if l < int(p.Length) {
			log.Printf("Payload too short: copied %d, expected %d.", l, p.Length)
		}
	}
	return tlsLayerlist
}
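A minimal usage sketch for the decoder above (not part of the original source): dumpRecords is a hypothetical helper that walks the returned record list, assuming a raw TLS payload in payload and the same TLSHandshakeDecoder types.

func dumpRecords(payload []byte) {
	records := DecomposeRecordLayer(payload)
	for e := records.Front(); e != nil; e = e.Next() {
		// Each element is one record-layer entry with its copied fragment.
		rec := e.Value.(TLSHandshakeDecoder.TLSRecordLayer)
		log.Printf("type=%d version=%d fragment=%d bytes", rec.ContentType, rec.Version, len(rec.Fragment))
	}
}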
// queueUnseen scans a package's imports and adds any new ones to the
// processing queue.
func (r *Resolver) queueUnseen(pkg string, queue *list.List) error {
	// A pkg is marked "seen" as soon as we have inspected it the first time.
	// Seen means that we have added all of its imports to the list.

	// Already queued indicates that we've either already put it into the queue
	// or intentionally not put it in the queue for fatal reasons (e.g. no
	// buildable source).

	deps, err := r.imports(pkg)
	if err != nil && !strings.HasPrefix(err.Error(), "no buildable Go source") {
		msg.Error("Could not find %s: %s", pkg, err)
		return err
		// NOTE: If we uncomment this, we get lots of "no buildable Go source" errors,
		// which don't ever seem to be helpful. They don't actually indicate an error
		// condition, and it's perfectly okay to run into that condition.
		//} else if err != nil {
		//	msg.Warn(err.Error())
	}

	for _, d := range deps {
		if _, ok := r.alreadyQ[d]; !ok {
			r.alreadyQ[d] = true
			queue.PushBack(d)
		}
	}
	return nil
}
func (s system) NewTasksFromConfig(config map[string]interface{}) (*list.List, error) {
	tasks, ok := config["tasks"]
	if !ok {
		return nil, errors.New("The field tasks was not found")
	}
	switch vt := tasks.(type) {
	case []interface{}:
		fmt.Printf("vt: %v\n", vt)
		tasks := new(list.List)
		for t, val := range vt {
			fmt.Printf("%v == %v\n", t, val)
			mt, ok := val.(map[string]interface{})
			if ok {
				task, err := s.NewTask(mt)
				if err == nil {
					log.Printf("Adding %v", task)
					tasks.PushBack(task)
				} else {
					log.Printf("Could not add %v, %v", val, err)
				}
			}
		}
		return tasks, nil
	default:
		return nil, errors.New("tasks field was wrong type")
	}
}
// pushMetric adds the metric to the end of the list and returns a comma separated string of the
// previous 61 entries. We return 61 instead of 60 (an hour) because the chart on the client
// tracks deltas between these values - there is nothing to compare the first value against.
func pushMetric(history *list.List, ev expvar.Var) string {
	history.PushBack(ev.String())
	if history.Len() > 61 {
		history.Remove(history.Front())
	}
	return JoinStringList(history)
}
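A hedged usage sketch (not from the original source), assuming pushMetric and the package's JoinStringList helper are in scope along with the container/list, expvar and log imports: an expvar counter is sampled into a rolling history list.

func sampleCounter() {
	history := list.New()
	requests := expvar.NewInt("requests") // *expvar.Int satisfies expvar.Var
	for i := 0; i < 3; i++ {
		requests.Add(10)
		// Each call appends the counter's current value; the list is trimmed to 61 entries.
		csv := pushMetric(history, requests)
		log.Println(csv)
	}
}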
func (server *Server) buildArgumentsMonochrome(arguments *list.List, params imageserver.Params) error {
	monochrome, _ := params.GetBool("monochrome")
	if monochrome {
		arguments.PushBack("-monochrome")
	}
	return nil
}
func ParseTokens(t []string) *list.List {
	tokens := new(list.List)
	for i := range t {
		tokens.PushBack(t[i])
	}
	n := 0
	s, o := new(list.List), new(list.List)
	for e := tokens.Front(); e != nil; e = e.Next() {
		if e.Value.(string) == "(" {
			n++
			listAppend(s, new(list.List))
			listAppend(o, s)
			s = s.Back().Value.(*list.List)
		} else if e.Value.(string) == ")" {
			n--
			s = o.Back().Value.(*list.List)
			listPop(o)
		} else {
			listAppend(s, e.Value.(string))
		}
	}
	if n != 0 {
		Error("unbalanced parentheses")
	}
	return s
}
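A small usage sketch (an assumption-laden illustration, not part of the original code): the input is expected to already be split so that parentheses are standalone tokens, and the result is a list whose elements are strings or nested *list.List values.

func parseExample() *list.List {
	// Equivalent to the s-expression (+ 1 (* 2 3)).
	tokens := []string{"(", "+", "1", "(", "*", "2", "3", ")", ")"}
	return ParseTokens(tokens)
}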
func testContainers() {
	fmt.Println("\n\n")
	fmt.Println("****************************************************")
	fmt.Println("Containers, lists, sorts")
	fmt.Println("****************************************************")

	var x list.List
	for i := 0; i < 10; i++ {
		x.PushBack(rand.Int() % 20)
	}
	fmt.Println("A list")
	for e := x.Front(); e != nil; e = e.Next() {
		fmt.Println(e.Value.(int))
	}

	// Sort
	kids := []Person{
		{"Kara", 2},
		{"Bethany", 1},
		{"Zach", 3},
	}
	fmt.Println("People:", kids)
	sort.Sort(ByName(kids))
	fmt.Println("Sorted People by Name:", kids)
	sort.Sort(ByAge(kids))
	fmt.Println("Sorted People by Age", kids)
}
func prime_gen(out chan<- int64) {
	out <- int64(2)
	out <- int64(3)
	known_primes := new(list.List)
	known_primes.PushBack(int64(2))
	known_primes.PushBack(int64(3))
	for try := int64(5); ; try += int64(2) {
		try_is_prime := true
		for e := known_primes.Front(); try_is_prime && e != nil; e = e.Next() {
			p, _ := e.Value.(int64)
			if try%p == 0 {
				try_is_prime = false
			} else if (p + p) > try {
				break
			}
		}
		if try_is_prime {
			out <- try
			known_primes.PushBack(try)
		}
	}
}
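A possible consumer for prime_gen (a sketch, not from the original source): run the generator in a goroutine and read the first n primes off the channel.

func firstPrimes(n int) []int64 {
	out := make(chan int64)
	go prime_gen(out)
	primes := make([]int64, 0, n)
	for i := 0; i < n; i++ {
		primes = append(primes, <-out)
	}
	// The generator goroutine keeps running; acceptable for a short-lived sketch.
	return primes
}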
func GetAndParseXML(_xml_url string, _queue *list.List) int {
	if _queue == nil {
		logger.Alwaysln("Error: queue is nil")
		return 0
	}

	// Get .xml file from server
	xmlfile, err := http.Get(_xml_url)
	if err != nil {
		logger.Alwaysln("Get \"" + _xml_url + "\": " + err.Error())
		return 0
	}
	defer xmlfile.Body.Close()
	logger.Moreln("Get \"" + _xml_url + "\": OK")

	xmltext, err := ioutil.ReadAll(xmlfile.Body)
	if err != nil {
		logger.Alwaysln("Read \"" + _xml_url + "\" failed: " + err.Error())
		return 0
	}

	// Parse the .xml file.
	var xmldoc XMLSTRUCT
	err = xml.Unmarshal(xmltext, &xmldoc)
	if err != nil {
		logger.Alwaysln("Unmarshal \"" + _xml_url + "\" failed: " + err.Error())
		return 0
	}
	logger.Moreln("Unmarshal \"" + _xml_url + "\": OK ")

	count := len(xmldoc.Urls)
	for index := 0; index < count; index++ {
		if !ShouldItBeDownloaded(xmldoc.Urls[index]) {
			logger.Debugln(xmldoc.Urls[index] + " already downloaded")
			continue
		}
		_queue.PushBack(xmldoc.Urls[index])
	}
	logger.Alwaysln(_xml_url + " OK")
	return count
}
func collectNewVideo(endVideoId string, endDateTime string, videos *list.List) {
	count := 0
	breakCount := 0
	limit := 300
	next := true
	for pageNo := 1; next; pageNo++ {
		doc := getSearchResultDoc(pageNo)
		doc.Find(".thumb_col_1").Each(func(_ int, s *goquery.Selection) {
			videoLink := s.Find(".watch")
			rawVideoId, _ := videoLink.Attr("href")
			videoId := regexp.MustCompile("[0-9]+").FindString(rawVideoId)
			postDatetime := regexp.MustCompile("[年月日 /:]").ReplaceAllString(s.Find(".thumb_num strong").Text(), "")
			title, _ := videoLink.Attr("title")
			if len(postDatetime) == 12 {
				// NOP
			} else if len(postDatetime) == 10 {
				postDatetime = "20" + postDatetime
			} else if len(postDatetime) == 8 {
				postMonth, _ := strconv.Atoi(postDatetime[0:2])
				nowMonth := int(time.Now().Month())
				if nowMonth < postMonth {
					postDatetime = fmt.Sprint(time.Now().AddDate(-1, 0, 0).Year()) + postDatetime
				} else {
					postDatetime = fmt.Sprint(time.Now().Year()) + postDatetime
				}
			} else {
				panic("unexpected post datetime length")
			}
			if postDatetime < endDateTime {
				breakCount++
			}
			// Decide whether to stop loading further results.
			if (endVideoId != "" && 100 <= breakCount) || (limit != 0 && limit <= count) {
				next = false
				count++
				return
			}
			isNewVideo := true
			for vi := videos.Front(); vi != nil; vi = vi.Next() {
				viMap := vi.Value.(map[string]string)
				if videoId == viMap["id"] {
					isNewVideo = false
					continue
				}
			}
			if isNewVideo {
				videoMap := map[string]string{"id": videoId, "datetime": postDatetime, "title": title}
				videos.PushBack(videoMap)
			}
			count++
		})
	}
}
// our simplified version of MapReduce does not supply a
// key to the Map function, as in the paper; only a value,
// which is a part of the input file contents.
// Map returns a list of KeyValue pairs, one per distinct word, giving the
// word's total number of occurrences in this split of the input file.
func Map(value string) *list.List {
	f := func(c rune) bool {
		return !unicode.IsLetter(c) && !unicode.IsNumber(c)
	}
	words := strings.FieldsFunc(value, f)
	kvs := make(map[string]int)
	for i := 0; i < len(words); i++ {
		word := words[i]
		_, ok := kvs[word]
		if !ok {
			kvs[word] = 1
		} else {
			kvs[word] += 1
		}
	}
	var r list.List
	for key, value := range kvs {
		r.PushBack(mapreduce.KeyValue{key, strconv.Itoa(value)})
	}
	return &r
}
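A quick sketch of exercising this Map in isolation (not part of the original framework code); element values are printed as-is to avoid assuming the exact KeyValue field names.

func wordCountExample() {
	kvs := Map("the quick brown fox jumps over the lazy dog the")
	for e := kvs.Front(); e != nil; e = e.Next() {
		fmt.Println(e.Value) // one KeyValue per distinct word
	}
}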
func testScatterDeleteMulti(tree T, t *testing.T) {
	name := "test"
	pointNum := 1000
	points := fillView(tree.View(), pointNum)
	for i, p := range points {
		for d := 0; d < dups; d++ {
			tree.Insert(p.x, p.y, name+strconv.Itoa(i)+"_"+strconv.Itoa(d))
		}
	}
	delView := subView(tree.View())
	expDel := new(list.List)
	expCol := new(list.List)
	for i, p := range points {
		if delView.contains(p.x, p.y) {
			for d := 0; d < dups; d++ {
				expDel.PushBack(name + strconv.Itoa(i) + "_" + strconv.Itoa(d))
			}
		} else {
			for d := 0; d < dups; d++ {
				expCol.PushBack(name + strconv.Itoa(i) + "_" + strconv.Itoa(d))
			}
		}
	}
	pred, deleted := CollectingDelete()
	testDelete(tree, delView, pred, deleted, expDel, t, "Scatter Insert and Delete Under Area With Three Elements Per Location")
	fun, results := SimpleSurvey()
	testSurvey(tree, tree.View(), fun, results, expCol, t, "Scatter Insert and Delete Under Area With Three Elements Per Location")
}
// Tests a very limited deletion scenario. Here we will insert every element in 'insert' into the tree at a
// single random point. Then we will delete every element in delete from the tree.
// If exact == true then the view used to delete covers exactly the insertion point. Otherwise, it covers the
// entire tree.
// We assert that every element of delete has been deleted from the tree (testDelete)
// We assert that every element in insert but not in delete is still in the tree (testSurvey)
// errPrfx is used to distinguish the error messages from different tests using this method.
func testDeleteSimple(tree T, insert, delete []interface{}, exact bool, errPrfx string, t *testing.T) {
	x, y := randomPosition(tree.View())
	for _, e := range insert {
		tree.Insert(x, y, e)
	}
	expCol := new(list.List)
OUTER_LOOP:
	for _, i := range insert {
		for _, d := range delete {
			if i == d {
				continue OUTER_LOOP
			}
		}
		expCol.PushBack(i)
	}
	expDel := new(list.List)
	for _, d := range delete {
		expDel.PushBack(d)
	}
	pred, deleted := makeDelClosure(delete)
	delView := tree.View()
	if exact {
		delView = NewViewP(x, x, y, y)
	}
	testDelete(tree, delView, pred, deleted, expDel, t, errPrfx)
	fun, collected := SimpleSurvey()
	testSurvey(tree, tree.View(), fun, collected, expCol, t, errPrfx)
}
func loadInputTemplate(name string, loader TemplateLoader) (Template, error) {
	t, err := loader.LoadTemplate(name)
	if err != nil {
		return Template{}, err
	}
	load_tracker := map[string]bool{name: true}
	var load_queue list.List
	for _, new_name := range t.InputDependencies {
		if !load_tracker[new_name] {
			load_tracker[new_name] = true
			load_queue.PushBack(new_name)
		}
	}
	for e := load_queue.Front(); e != nil; e = e.Next() {
		template_name := e.Value.(string)
		new_template, err := loader.LoadTemplate(template_name)
		if err != nil {
			return Template{}, err
		}
		t.Inputs = append(t.Inputs, new_template.Inputs...)
		for _, new_name := range new_template.InputDependencies {
			if !load_tracker[new_name] {
				load_tracker[new_name] = true
				load_queue.PushBack(new_name)
			}
		}
	}
	return t, nil
}
func (this *SSNDB) scanPosts(posts *list.List, rows *sql.Rows) {
	for rows.Next() {
		post := new(Post)
		rows.Scan(
			&post.Id,
			&post.Message,
			&post.CreatedAt,
			&post.UpdatedAt,
			&post.DeletedAt,
			&post.TTL,
			&post.Published,
			&post.OriginatorId,
			&post.AuthorId,
			&post.PostedAt,
			&post.PublishedAt,
			&post.RemotePublishedAt,
			&post.Hash,
			&post.ParentId)
		post.ParentHash, _ = this.GetPostHashById(post.ParentId)
		post.Originator = this.getOnionById(post.OriginatorId)
		post.Author = this.getOnionById(post.AuthorId)
		posts.PushBack(post)
	}
}
func (server *Server) buildArgumentsGravity(arguments *list.List, params imageserver.Params) error {
	gravity, _ := params.GetString("gravity")
	var translatedGravity string
	if gravity != "" {
		switch gravity {
		case "n":
			translatedGravity = "North"
		case "s":
			translatedGravity = "South"
		case "e":
			translatedGravity = "East"
		case "w":
			translatedGravity = "West"
		case "ne":
			translatedGravity = "NorthEast"
		case "se":
			translatedGravity = "SouthEast"
		case "nw":
			translatedGravity = "NorthWest"
		case "sw":
			translatedGravity = "SouthWest"
		}
		if translatedGravity == "" {
			return &imageserver.ParamError{Param: "gravity", Message: "gravity should be n, s, e, w, ne, se, nw or sw"}
		}
	} else {
		// Default gravity is center.
		translatedGravity = "Center"
	}
	arguments.PushBack("-gravity")
	arguments.PushBack(translatedGravity)
	return nil
}
func explore(visited map[Vertex]bool, queue *list.List, v Vertex) {
	_, ok := visited[v]
	if accessible(v) && !ok {
		visited[v] = true
		queue.PushBack(v)
	}
}
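explore only marks and enqueues a single vertex; a breadth-first driver around it might look like the following sketch, where start and neighbours are hypothetical (a starting vertex and a function returning adjacent vertices) and not part of the original code.

func bfs(start Vertex) map[Vertex]bool {
	visited := make(map[Vertex]bool)
	queue := list.New()
	explore(visited, queue, start)
	for queue.Len() > 0 {
		front := queue.Front()
		v := front.Value.(Vertex)
		queue.Remove(front)
		for _, n := range neighbours(v) { // neighbours is a hypothetical helper
			explore(visited, queue, n)
		}
	}
	return visited
}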
func (server *Server) buildArgumentsFlop(arguments *list.List, params imageserver.Params) error {
	flop, _ := params.GetBool("flop")
	if flop {
		arguments.PushBack("-flop")
	}
	return nil
}
/**
 * Manage I/O for a client connected to the server socket
 *
 * @param connection - Socket between client and server
 * @param messageChannel - The shared message bus between all clients
 * @param clients - The list of all connected clients
 */
func ClientHandler(connection net.Conn, messageChannel chan string, clients *list.List) {
	// Create new client instance
	newClient := &Client{make(chan string), messageChannel, connection, make(chan bool), clients}
	go ClientSender(newClient)   // Manage sending messages
	go ClientReceiver(newClient) // Manage receiving messages
	clients.PushBack(*newClient) // Register client in the server's list of connected clients

	// Send messages to the current user only
	newClient.SendMessage(fmt.Sprintf("Connection to Pilebones's Backdoor, Welcome %s\n", connection.RemoteAddr().String()))
	newClient.SendMessage(fmt.Sprintf("To log out: type \"/quit\"\n"))
	newClient.SendMessage(fmt.Sprintf("List of all connected users:\n"))

	for element := clients.Front(); element != nil; element = element.Next() {
		client := element.Value.(Client)
		isCurrentUser := client.Connection.RemoteAddr().String() == newClient.Connection.RemoteAddr().String()
		if !isCurrentUser {
			// Notify other clients of the new connection
			client.SendMessage(fmt.Sprintf("Another client has joined the server %s\n", connection.RemoteAddr().String()))
		}
		message := fmt.Sprintf("- " + client.Connection.RemoteAddr().String())
		if isCurrentUser {
			message += " (you)"
		}
		newClient.SendMessage(message + "\n")
	}

	// Notify all clients of the new connection
	// messageChannel <- fmt.Sprintf("Another client has joined the server %s\n", connection.RemoteAddr().String())
}
func (s *Surface) filterPixel(p Coord2D, used, unUsed *list.List) {
	if s.IsUsed(p.X, p.Y) {
		used.PushBack(p)
	} else {
		unUsed.PushBack(p)
	}
}
func main() {
	fmt.Println("strings:")
	fmt.Println(strings.Contains("test", "es"))               // true
	fmt.Println(strings.Replace("abcdabcdabcd", "a", "A", 2)) // AbcdAbcdabcd
	fmt.Println(strings.Join([]string{"a", "b"}, "-"))        // a-b
	fmt.Println(strings.Split("a-b-c", "-"))                  // []string{"a", "b", "c"}

	fmt.Println("\nlists:")
	var x list.List
	x.PushBack(1)
	x.PushBack(2)
	x.PushBack(3)
	for e := x.Front(); e != nil; e = e.Next() {
		fmt.Println(e.Value.(int))
	}

	fmt.Println("\nsort:")
	kids := []Person{
		{"Jill", 9},
		{"Jack", 10},
	}
	fmt.Println("unsorted: ", kids)
	sort.Sort(ByName(kids))
	fmt.Println("sorted by name:", kids)
	sort.Sort(ByAge(kids))
	fmt.Println("sorted by Age:", kids)

	fmt.Println("\nhashing:")
	h := sha1.New()
	h.Write([]byte("test"))
	bs := h.Sum([]byte{})
	fmt.Println(bs)
}
// Performs a scan against the Log.
// For each x509 certificate found, |foundCert| will be called with the
// index of the entry and certificate itself as arguments. For each precert
// found, |foundPrecert| will be called with the index of the entry and the raw
// precert string as the arguments.
//
// This method blocks until the scan is complete.
func (s *Scanner) Scan(foundCert func(*ct.LogEntry), foundPrecert func(*ct.LogEntry)) error {
	s.Log("Starting up...\n")
	s.certsProcessed = 0
	s.precertsSeen = 0
	s.unparsableEntries = 0
	s.entriesWithNonFatalErrors = 0

	latestSth, err := s.logClient.GetSTH()
	if err != nil {
		return err
	}
	s.Log(fmt.Sprintf("Got STH with %d certs", latestSth.TreeSize))

	ticker := time.NewTicker(time.Second)
	startTime := time.Now()
	fetches := make(chan fetchRange, 1000)
	jobs := make(chan matcherJob, 100000)

	go func() {
		for range ticker.C {
			throughput := float64(s.certsProcessed) / time.Since(startTime).Seconds()
			remainingCerts := int64(latestSth.TreeSize) - int64(s.opts.StartIndex) - s.certsProcessed
			remainingSeconds := int(float64(remainingCerts) / throughput)
			remainingString := humanTime(remainingSeconds)
			s.Log(fmt.Sprintf("Processed: %d certs (to index %d). Throughput: %3.2f ETA: %s\n",
				s.certsProcessed, s.opts.StartIndex+int64(s.certsProcessed), throughput, remainingString))
		}
	}()

	var ranges list.List
	for start := s.opts.StartIndex; start < int64(latestSth.TreeSize); {
		end := min(start+int64(s.opts.BatchSize), int64(latestSth.TreeSize)) - 1
		ranges.PushBack(fetchRange{start, end})
		start = end + 1
	}

	var fetcherWG sync.WaitGroup
	var matcherWG sync.WaitGroup
	// Start matcher workers
	for w := 0; w < s.opts.NumWorkers; w++ {
		matcherWG.Add(1)
		go s.matcherJob(w, jobs, foundCert, foundPrecert, &matcherWG)
	}
	// Start fetcher workers
	for w := 0; w < s.opts.ParallelFetch; w++ {
		fetcherWG.Add(1)
		go s.fetcherJob(w, fetches, jobs, &fetcherWG)
	}
	for r := ranges.Front(); r != nil; r = r.Next() {
		fetches <- r.Value.(fetchRange)
	}
	close(fetches)
	fetcherWG.Wait()
	close(jobs)
	matcherWG.Wait()

	s.Log(fmt.Sprintf("Completed %d certs in %s", s.certsProcessed, humanTime(int(time.Since(startTime).Seconds()))))
	s.Log(fmt.Sprintf("Saw %d precerts", s.precertsSeen))
	s.Log(fmt.Sprintf("%d unparsable entries, %d non-fatal errors", s.unparsableEntries, s.entriesWithNonFatalErrors))
	return nil
}
func addPixelToQueue(pixelToAdd workSurface.Coord2D, queue *list.List, pixelQueueMap [][]bool) {
	if pixelQueueMap[pixelToAdd.X][pixelToAdd.Y] {
		return
	}
	pixelQueueMap[pixelToAdd.X][pixelToAdd.Y] = true
	queue.PushBack(pixelToAdd)
}
/**
 * construct functions
 */
func MakePattern(a ...Data) *list.List {
	pattern := new(list.List)
	for _, v := range a {
		pattern.PushBack(v)
	}
	return pattern
}
func fetchMoves(curr_state string, moves *list.List, steps int) {
	if len(curr_state) <= steps-1 {
		return
	}
	val := []byte(curr_state)
	for i := range val {
		if i+steps-1 < len(val) {
			success := true
			for k := 0; k < steps; k++ {
				if val[i+k] == '0' {
					success = false
				}
			}
			if success {
				for k := 0; k < steps; k++ {
					val[i+k] = '0'
				}
				moves.PushBack(string(val))
				for k := 0; k < steps; k++ {
					val[i+k] = '1'
				}
			}
		}
	}
}
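A short driving sketch (not from the original source): starting from a row of five "coins" ("11111"), collect every state reachable by turning two adjacent 1s into 0s.

func movesExample() {
	moves := list.New()
	fetchMoves("11111", moves, 2)
	for e := moves.Front(); e != nil; e = e.Next() {
		fmt.Println(e.Value.(string)) // "00111", "10011", "11001", "11100"
	}
}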
func DecomposeHandshakes(data []byte) list.List {
	if len(data) < 4 {
		return list.List{}
	}
	log.Println("Parsing one TLSLayer.......")
	var handshakelist list.List
	total := uint32(len(data))
	var offset uint32 = 0
	// Each iteration needs at least the 4-byte handshake header.
	for offset+4 <= total {
		var p TLSHandshakeDecoder.TLSHandshake
		p.HandshakeType = uint8(data[0+offset])
		p.Length = uint32(data[1+offset])<<16 | uint32(data[2+offset])<<8 | uint32(data[3+offset])
		p.Body = make([]byte, p.Length)
		if p.Length < 2048 {
			// Clamp the copy to the bytes actually available so a bad length
			// field cannot slice past the end of the data.
			start := int(offset) + 4
			end := start + int(p.Length)
			if end > int(total) {
				end = int(total)
			}
			l := copy(p.Body, data[start:end])
			if l < int(p.Length) {
				log.Printf("Payload too short: copied %d, expected %d.", l, p.Length)
			}
			offset += 4 + p.Length
		} else {
			p.HandshakeType = 99
			p.Length = 0
			offset = total
		}
		log.Printf("Handshake Type: %d, length: %d ", p.HandshakeType, p.Length)
		handshakelist.PushBack(p)
	}
	return handshakelist
}
func LoadTemplates(name string, loader TemplateLoader) (*template.Template, error) {
	load_tracker := map[string]bool{name: true}
	var load_queue list.List
	load_queue.Init()
	load_queue.PushBack(name)
	t := template.New(name).Funcs(builtins)
	for e := load_queue.Front(); e != nil; e = e.Next() {
		template_name := e.Value.(string)
		new_template, err := loader.LoadTemplate(template_name)
		if err != nil {
			return nil, err
		}
		if _, err := t.Parse(new_template.Data); err != nil {
			return nil, err
		}
		if t.Lookup(template_name) == nil {
			return nil, fmt.Errorf(`template "%s" load failed`, template_name)
		}
		for _, new_name := range new_template.Dependencies {
			if !load_tracker[new_name] {
				load_tracker[new_name] = true
				load_queue.PushBack(new_name)
			}
		}
	}
	return t, nil
}
func doStructMembers(fields []*ast.Field, pkg string, importer Importer, fn func(*ast.Object), q *list.List) {
	// Go Spec: For a value x of type T or *T where T is not an interface type, x.f
	// denotes the field or method at the shallowest depth in T where there
	// is such an f.
	// Thus we traverse shallower fields first, pushing anonymous fields
	// onto the queue for later.
	for _, f := range fields {
		if len(f.Names) > 0 {
			for _, fname := range f.Names {
				fn(fname.Obj)
			}
		} else {
			m := unnamedFieldName(f.Type)
			fn(m.Obj)
			// The unnamed field's Decl points to the
			// original type declaration.
			_, typeNode := splitDecl(m.Obj, nil)
			obj, typ := exprType(typeNode, false, pkg, importer)
			if typ.Kind == ast.Typ {
				q.PushBack(typ)
			} else {
				debugp("unnamed field kind %v (obj %v) not a type; %v", typ.Kind, obj, typ.Node)
			}
		}
	}
}
func main() {
	lst := list.New()

	listener, err := net.Listen("tcp", "0.0.0.0:8989")
	if err != nil {
		log.Println(fmt.Sprintf("listen failed: %s", err.Error()))
		return
	}
	defer listener.Close()
	log.Println("Listen...")

	for {
		conn, err := listener.Accept()
		if err != nil {
			log.Println(fmt.Sprintf("accept failed: %s", err.Error()))
			return
		}
		log.Println("get a connect")
		lst.PushBack(conn)
		go handleRequest(conn, lst)
	}
}