// Process messages from the AMQP queues.
func processAmqp(username, amqpAddress string) {
	// Open the queue and then begin processing messages.
	// If we drop out of the processing function, wait
	// a little while and try again.
	for {
		fmt.Printf("######################################################################################################\n")
		fmt.Printf("UTM-API service (%s) REST interface opening %s...\n", globals.LogTag, amqpAddress)
		q, err := OpenQueue(username, amqpAddress)
		if err == nil {
			fmt.Printf("%s [server] --> connection opened.\n", globals.LogTag)
			downlinkMessages = q.Downlink
			// The meat is in here.
			processDatagrams(q)
			// Close explicitly: a deferred Close inside this endless loop would
			// never run and would leak a connection on every retry.
			q.Close()
		} else {
			globals.Dbg.PrintfTrace("%s [server] --> error opening AMQP queue (%s).\n", globals.LogTag, err.Error())
		}
		amqpRetryCount++
		globals.Dbg.PrintfTrace("%s [server] --> waiting before trying again...\n", globals.LogTag)
		time.Sleep(time.Second * 10)
	}
}
func handlePacket(buffer []byte) {
	parser := rfc5424.NewParser(buffer)
	err := parser.Parse()
	if err != nil {
		fmt.Printf("Error reading syslog message %s\n", err)
		return
	}

	log := parser.Dump()
	log["@timestamp"] = log["timestamp"]
	log["facility_label"] = FACILITY_LABELS[(log["facility"]).(int)]
	log["severity_label"] = SEVERITY_LABELS[(log["severity"]).(int)]
	log["type"] = "syslog"

	now := time.Now()
	index := "logstash-" + now.Format("2006.01.02")
	_, err = elasticSearch.Index(true, index, "logs", "", log)
	if err != nil {
		fmt.Printf("Error indexing message %s\n", err)
		return
	}
	fmt.Println("Logged")
}
func main() {
	n := 0
	if len(os.Args) > 1 {
		n, _ = strconv.Atoi(os.Args[1])
	}

	minDepth := 4
	maxDepth := minDepth + 2
	if maxDepth < n {
		maxDepth = n
	}
	stretchDepth := maxDepth + 1

	check := create(0, stretchDepth).Check()
	fmt.Printf("stretch tree of depth %d\t check: %d\n", stretchDepth, check)

	longLivedTree := create(0, maxDepth)

	for depth := minDepth; depth <= maxDepth; depth += 2 {
		iterations := 1 << uint(maxDepth-depth+minDepth)
		check = 0
		for i := 1; i <= iterations; i++ {
			check += create(i, depth).Check()
			check += create(-i, depth).Check()
		}
		fmt.Printf("%d\t trees of depth %d\t check: %d\n", 2*iterations, depth, check)
	}
	fmt.Printf("long lived tree of depth %d\t check: %d\n", maxDepth, longLivedTree.Check())
}
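// The create and Check helpers used above are not shown in this snippet.
// A minimal sketch of what the binary-trees benchmark typically assumes;
// the node layout and Check semantics in the original may differ.
type Node struct {
	left, right *Node
}

// create builds a complete binary tree of the given depth.
func create(item, depth int) *Node {
	if depth <= 0 {
		return &Node{}
	}
	return &Node{left: create(item, depth-1), right: create(item, depth-1)}
}

// Check walks the tree and returns its node count.
func (n *Node) Check() int {
	if n.left == nil {
		return 1
	}
	return 1 + n.left.Check() + n.right.Check()
}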
func TestCalibrateThreshold(t *testing.T) {
	if !*calibrate {
		t.Log("not calibrating, use -calibrate to do so.")
		return
	}

	lower := int(1e3)   // math/big is faster at this size.
	upper := int(300e3) // FFT is faster at this size.

	big, fft := measureMul(lower)
	lowerX := float64(big) / float64(fft)
	fmt.Printf("speedup at size %d: %.2f\n", lower, lowerX)
	big, fft = measureMul(upper)
	upperX := float64(big) / float64(fft)
	fmt.Printf("speedup at size %d: %.2f\n", upper, upperX)

	for {
		mid := (lower + upper) / 2
		big, fft := measureMul(mid)
		X := float64(big) / float64(fft)
		fmt.Printf("speedup at size %d: %.2f\n", mid, X)
		switch {
		case X < 0.98:
			lower = mid
			lowerX = X
		case X > 1.02:
			upper = mid
			upperX = X
		default:
			fmt.Printf("speedup at size %d: %.2f\n", lower, lowerX)
			fmt.Printf("speedup at size %d: %.2f\n", upper, upperX)
			return
		}
	}
}
func TestChan3() {
	fmt.Println("@@@@@@@@@@@@ TestChan 3")
	fmt.Printf("cpu num: %d\n", runtime.NumCPU()) // 8-core CPU

	// Goroutines execute concurrently, but not necessarily in parallel. Unless
	// Go is told otherwise, only one goroutine runs at a time.
	// runtime.GOMAXPROCS(n) controls how many goroutines may run in parallel:
	// it sets the maximum number of CPUs executing simultaneously and returns
	// the previous setting.
	val := runtime.GOMAXPROCS(runtime.NumCPU() * 4)
	fmt.Printf("last goroutine num: %d\n", val) // 8

	fmt.Printf("goroutine num: %d\n", runtime.NumGoroutine()) // 4 goroutines running concurrently

	var ch1 chan int = make(chan int, 0)
	var ch2 chan int = make(chan int, 0)
	var ch3 chan int = make(chan int, 0)

	go write(ch1, 22)
	go write(ch2, 33)
	go write(ch3, 44)

	go read(ch1)
	go read(ch2)
	go read(ch3)

	fmt.Printf("goroutine num: %d\n", runtime.NumGoroutine()) // 10 goroutines running concurrently

	sleep("TestChan3", 3)
}
//line fitted_type.got:17
func drawFittedTableQLetters(rSeq, qSeq alphabet.QLetters, index alphabet.Index, table []int, a [][]int) {
	tw := tabwriter.NewWriter(os.Stdout, 0, 0, 0, ' ', tabwriter.AlignRight|tabwriter.Debug)
	fmt.Printf("rSeq: %s\n", rSeq)
	fmt.Printf("qSeq: %s\n", qSeq)
	fmt.Fprint(tw, "\tqSeq\t")
	for _, l := range qSeq {
		fmt.Fprintf(tw, "%c\t", l)
	}
	fmt.Fprintln(tw)

	r, c := rSeq.Len()+1, qSeq.Len()+1
	fmt.Fprint(tw, "rSeq\t")
	for i := 0; i < r; i++ {
		if i != 0 {
			fmt.Fprintf(tw, "%c\t", rSeq[i-1].L)
		}

		for j := 0; j < c; j++ {
			p := pointerFittedQLetters(rSeq, qSeq, i, j, table, index, a, c)
			if p != "" {
				fmt.Fprintf(tw, "%s % 3v\t", p, table[i*c+j])
			} else {
				fmt.Fprintf(tw, "%v\t", table[i*c+j])
			}
		}
		fmt.Fprintln(tw)
	}
	tw.Flush()
}
func urlShortenerMain(client *http.Client, argv []string) {
	if len(argv) != 1 {
		fmt.Fprintf(os.Stderr, "Usage: urlshortener http://goo.gl/xxxxx (to look up details)\n")
		fmt.Fprintf(os.Stderr, "       urlshortener http://example.com/long (to shorten)\n")
		return
	}

	svc, _ := urlshortener.New(client)
	urlstr := argv[0]

	// short -> long
	if strings.HasPrefix(urlstr, "http://goo.gl/") || strings.HasPrefix(urlstr, "https://goo.gl/") {
		url, err := svc.Url.Get(urlstr).Do()
		if err != nil {
			log.Fatalf("URL Get: %v", err)
		}
		fmt.Printf("Lookup of %s: %s\n", urlstr, url.LongUrl)
		return
	}

	// long -> short
	url, err := svc.Url.Insert(&urlshortener.Url{
		Kind:    "urlshortener#url", // Not really needed
		LongUrl: urlstr,
	}).Do()
	if err != nil {
		log.Fatalf("URL Insert: %v", err)
	}
	fmt.Printf("Shortened %s => %s\n", urlstr, url.Id)
}
func work(c *replicant.Client, C <-chan time.Time, stop chan bool, done chan bool, dl *ygor.DataLogger) {
	defer func() { done <- true }()
	for {
		select {
		case <-C:
			// break only exits the select; the call below runs on every tick.
			break
		case <-stop:
			return
		}

		start := time.Now()
		_, err := c.Call("echo", "echo", []byte("hello world"), 0)
		end := time.Now()

		if err.Status == replicant.SUCCESS {
			when := uint64(end.UnixNano())
			data := uint64(end.Sub(start).Nanoseconds())
			er := dl.Record(1, when, data)
			if er != nil {
				fmt.Printf("error: %s\n", er)
				os.Exit(1)
			}
		} else {
			fmt.Printf("error: %s\n", err)
			os.Exit(1)
		}
	}
}
func main() {
	var f float64
	fmt.Printf("Enter value in Fahrenheit : ")
	fmt.Scanf("%f", &f)
	fmt.Printf("Value in Celsius : %f\n", ftoc.FtoC(f))
}
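// ftoc.FtoC is not shown in this snippet. A minimal sketch of the assumed
// helper in the ftoc package, using the standard formula C = (F - 32) * 5/9:

// FtoC converts a temperature from degrees Fahrenheit to degrees Celsius.
func FtoC(f float64) float64 {
	return (f - 32) * 5 / 9
}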
func transfer(dst_name string, src io.Reader) error {
	config := goftp.Config{
		User:               username,
		Password:           password,
		ConnectionsPerHost: 10,
		Timeout:            10 * time.Second,
		Logger:             nil,
	}

	fmt.Printf("Dialing into %s... ", host)
	client, err := goftp.DialConfig(config, host)
	if err != nil {
		return err
	}
	fmt.Println("done.")

	fmt.Printf("Writing to file %s... ", dst_name)
	err = client.Store(dst_name, src)
	if err != nil {
		return err
	}
	fmt.Println("done.")

	return nil
}
func quicksort(low, high int, buf []int) {
	pivot := buf[low+(high-low)/2] // Here the pivot is simply the middle element of buf.
	l, r := low, high
	fmt.Printf("start: buf = %v, pivot = %d, l(low) = %d, r(high) = %d\n", buf, pivot, l, r)

	for {
		for pivot > buf[l] { // Fine while element < pivot.
			l++
		}
		for pivot < buf[r] { // Fine while pivot < element.
			r--
		}
		if l >= r {
			fmt.Printf("for break: buf = %v, pivot = %d, l(low) = %d, r(high) = %d\n", buf, pivot, l, r)
			break // Exit the loop once the left and right indices cross.
		}
		// Otherwise, swap the two elements.
		buf[l], buf[r] = buf[r], buf[l]
		l++
		r--
	}

	// The trick is the l-1 and r+1 boundaries:
	// recurse with quicksort() on each partition delimited by them.
	fmt.Printf("recurrence? buf = %v, low < l - 1 ? %v < %v - 1, high > r + 1 ? %v > %v + 1\n", buf, low, l, high, r)
	if low < l-1 {
		fmt.Println()
		quicksort(low, l-1, buf)
	}
	if high > r+1 {
		fmt.Println()
		quicksort(r+1, high, buf)
	}
}
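// A minimal usage sketch (assumed, not part of the original snippet):
// quicksort is called with the inclusive low/high indices of the slice
// and rearranges buf in place.
func main() {
	buf := []int{5, 2, 9, 1, 7, 3}
	quicksort(0, len(buf)-1, buf)
	fmt.Printf("sorted: %v\n", buf)
}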
func decodeStopRecord() {
	jsonStr := `{
		"type": "flv",
		"stream_event": "flv",
		"ori_url": "rtmp://pushyf.hifun.mobi/live/10016593_4VZIi6Cnwxdev",
		"domain": "send.a.com",
		"app": "live",
		"stream": "10016593_4VZIi6Cnwxdev",
		"uri": "hls.a.com/live/hls_bew000/hls_bew000_20160707150625_20160707175817.m3u8",
		"start_time": 1470306194,
		"stop_time": 1470306497,
		"duration": 275,
		"size": 8987799,
		"cdn_url": "http://hls.a.com/live/hls_bew000/hls_bew000_20160707150625_20160707175817.m3u8"
	}`

	rec := YFRecCallback{}
	if err := json.Unmarshal([]byte(jsonStr), &rec); err != nil {
		fmt.Printf("json unmarshal err:%v\n", err)
	} else {
		param["liveid"] = rec.Stream
		param["url"] = rec.CdnUrl
		param["duration"] = strconv.Itoa(int(rec.Duration))
		param["size"] = strconv.Itoa(rec.Size)
		fmt.Printf("decodeStopRecord rec : %v \n%v \n", rec, param)
	}
}
func main() {
	origin := "http://localhost:8000/"
	url := os.Args[1]
	secret := os.Args[2]
	clients, _ := strconv.Atoi(os.Args[3])
	fmt.Printf("clients: %d\n", clients)

	timestamp := strconv.FormatInt(time.Now().Unix(), 10)
	token := auth.GenerateClientToken(secret, "test", timestamp, "")

	connectMessage := fmt.Sprintf("{\"params\": {\"timestamp\": \"%s\", \"token\": \"%s\", \"user\": \"test\"}, \"method\": \"connect\"}", timestamp, token)
	subscribeMessage := "{\"params\": {\"channel\": \"test\"}, \"method\": \"subscribe\"}"

	done := make(chan struct{})

	for i := 0; i < clients; i += 1 {
		chSub := make(chan struct{})
		go subscriber(chSub, url, origin, connectMessage, subscribeMessage)
		<-chSub
		fmt.Printf("\r%d", i+1)
	}

	// Just run until interrupted, keeping connections open.
	<-done
}
func Output(ps Profiles, c Config) {
	if c.Tsv {
		if !c.NoHeaders {
			fmt.Printf("Count\tMin\tMax\tSum\tAvg\tP1\tP50\tP99\tStddev\tMin(Body)\tMax(Body)\tSum(Body)\tAvg(Body)\tMethod\tUri")
			fmt.Println("")
		}

		for _, p := range ps {
			// One tab-separated value per header column (15 columns).
			fmt.Printf("%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v",
				p.Cnt, Round(p.Min), Round(p.Max), Round(p.Sum), Round(p.Avg),
				Round(p.P1), Round(p.P50), Round(p.P99), Round(p.Stddev),
				Round(p.MinBody), Round(p.MaxBody), Round(p.SumBody), Round(p.AvgBody),
				p.Method, p.Uri)
			fmt.Println("")
		}
	} else {
		table := tablewriter.NewWriter(os.Stdout)
		table.SetHeader([]string{"Count", "Min", "Max", "Sum", "Avg",
			"P1", "P50", "P99", "Stddev",
			"Min(Body)", "Max(Body)", "Sum(Body)", "Avg(Body)",
			"Method", "Uri"})

		for _, p := range ps {
			data := []string{
				fmt.Sprint(p.Cnt), Round(p.Min), Round(p.Max), Round(p.Sum), Round(p.Avg),
				Round(p.P1), Round(p.P50), Round(p.P99), Round(p.Stddev),
				Round(p.MinBody), Round(p.MaxBody), Round(p.SumBody), Round(p.AvgBody),
				p.Method, p.Uri}
			table.Append(data)
		}
		table.Render()
	}
}
func main() {
	flag.Parse()

	b, err := parseFile(fmt.Sprintf("%s/%s.txt", *dir, *month))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("Total:", b.Total)
	fmt.Println("Remaining:", b.Remaining)

	mon := *month
	year, err := strconv.Atoi(mon[0:4])
	if err != nil {
		log.Fatal(err)
	}
	m, err := strconv.Atoi(mon[4:6])
	if err != nil {
		log.Fatal(err)
	}

	rpd := b.Remaining / float64(daysIn(time.Month(m), year)-time.Now().Day())
	fmt.Printf("Remaining/day: %.2f\n", rpd)

	top := map[string]float64{}
	for _, t := range b.Transactions {
		top[t.Name] += t.Cost
	}
	fmt.Println("Top costs:")
	pl := sortMapByValue(top)
	for _, p := range pl {
		fmt.Printf("\t%s: %.2f\n", p.Key, p.Value)
	}
}
func testAccCheckDatadogMetricAlertDestroy(s *terraform.State) error {
	client := testAccProvider.Meta().(*datadog.Client)

	for _, rs := range s.RootModule().Resources {
		for _, v := range strings.Split(rs.Primary.ID, "__") {
			if v == "" {
				fmt.Printf("Could not parse IDs. %s", v)
				return fmt.Errorf("Id not set.")
			}
			ID, iErr := strconv.Atoi(v)
			if iErr != nil {
				fmt.Printf("Received error converting string %s", iErr)
				return iErr
			}
			_, err := client.GetMonitor(ID)
			if err != nil {
				// 404 is what we want, anything else is an error. Sadly our API will return a string like so:
				// return errors.New("API error: " + resp.Status)
				// For now we'll use EqualFold :|
				if strings.EqualFold(err.Error(), "API error: 404 Not Found") {
					continue
				} else {
					return fmt.Errorf("Received an error retrieving monitor %s", err)
				}
			} else {
				return fmt.Errorf("Monitor still exists.")
			}
		}
	}
	return nil
}
func (loop *SimpleLoop) Dump(indent int) {
	for i := 0; i < indent; i++ {
		fmt.Printf(" ")
	}

	// No ? operator ?
	fmt.Printf("loop-%d nest: %d depth %d ",
		loop.counter, loop.nestingLevel, loop.depthLevel)
	if !loop.isReducible {
		fmt.Printf("(Irreducible) ")
	}

	// must have > 0
	if len(loop.children) > 0 {
		fmt.Printf("Children: ")
		for ll := range loop.Children() {
			fmt.Printf("loop-%d", ll.Counter())
		}
	}
	if len(loop.basicBlocks) > 0 {
		fmt.Printf("(")
		for bb := range loop.basicBlocks {
			fmt.Printf("BB#%03d ", bb.Name())
			if loop.header == bb {
				fmt.Printf("*")
			}
		}
		fmt.Printf("\b)")
	}
	fmt.Printf("\n")
}
func clockOut(usr user) error {
	fmt.Printf("Clocking user %s out.\n", usr.ID)
	//Do other checking?

	punch, err := getLastTimepunch(usr.ID)
	if err != nil {
		return err
	}

	//If the last punch exists, has an in, but not an out,
	//complete the punch.
	if (!punch.In.Equal(time.Time{})) && (punch.Out.Equal(time.Time{})) {
		punch.Out = time.Now()
		punch.Duration = punch.Out.Sub(punch.In)

		err = updatePunch(punch)
		if err != nil {
			return err
		}
	} else {
		//In every other case, we just want to create a new punch.
		err = createPunch(timePunch{UID: usr.ID, Out: time.Now()})
		if err != nil {
			return err
		}
	}
	fmt.Printf("Done.\n")
	return setUserStatus(usr.ID, false)
}
func display(r io.Reader) error {
	data, err := ioutil.ReadAll(r)
	if err != nil {
		return err
	}
	width, height := widthAndHeight()

	// iTerm2 inline-image escape sequence: arguments are semicolon-separated
	// key=value pairs, followed by ':' and the base64-encoded file contents.
	fmt.Print("\033]1337;")
	fmt.Printf("File=inline=1")
	if width != "" || height != "" {
		if width != "" {
			fmt.Printf(";width=%s", width)
		}
		if height != "" {
			fmt.Printf(";height=%s", height)
		}
	}
	if *preserveAspectRatio {
		fmt.Print(";preserveAspectRatio=1")
	}
	fmt.Print(":")
	fmt.Printf("%s", base64.StdEncoding.EncodeToString(data))
	fmt.Print("\a\n")
	return nil
}
//Returns nil if successful, else returns an error.
func clockIn(usr user) error {
	fmt.Printf("Clocking user %s in.\n", usr.ID)

	status, err := getStatus(usr.ID)
	if err != nil {
		fmt.Printf("Done. Error %s.\n", err.Error())
		return err
	}

	//Can't clock in if we're already in.
	//TODO: Think about this? Do we want to allow users to clock in anyway and
	//create a missed punch like thing?
	if status {
		err := errors.New("Could not clock in, already clocked in.")
		fmt.Printf("Done. Error %s.\n", err.Error())
		return err
	}

	err = createPunch(timePunch{UID: usr.ID, In: time.Now()})
	if err != nil {
		return err
	}

	//Do other checking?

	err = setUserStatus(usr.ID, true)
	if err != nil {
		return err
	}
	return nil
}
func TestUnreliableOneKey(t *testing.T) {
	const nservers = 3
	cfg := make_config(t, "onekey", nservers, true, -1)
	defer cfg.cleanup()

	ck := cfg.makeClient(cfg.All())

	fmt.Printf("Test: Concurrent Append to same key, unreliable ...\n")

	ck.Put("k", "")

	const nclient = 5
	const upto = 10
	spawn_clients_and_wait(t, cfg, nclient, func(me int, myck *Clerk, t *testing.T) {
		n := 0
		for n < upto {
			myck.Append("k", "x "+strconv.Itoa(me)+" "+strconv.Itoa(n)+" y")
			n++
		}
	})

	var counts []int
	for i := 0; i < nclient; i++ {
		counts = append(counts, upto)
	}

	vx := ck.Get("k")
	checkConcurrentAppends(t, vx, counts)

	fmt.Printf("  ... Passed\n")
}
func showVersion() {
	if utils.ExperimentalBuild() {
		fmt.Printf("Docker version %s, build %s, experimental\n", dockerversion.Version, dockerversion.GitCommit)
	} else {
		fmt.Printf("Docker version %s, build %s\n", dockerversion.Version, dockerversion.GitCommit)
	}
}
func main() {
	consumer := consumer.New(dopplerAddress, &tls.Config{InsecureSkipVerify: true}, nil)
	consumer.SetDebugPrinter(ConsoleDebugPrinter{})

	messages, err := consumer.RecentLogs(appGuid, authToken)
	if err != nil {
		fmt.Printf("===== Error getting recent messages: %v\n", err)
	} else {
		fmt.Println("===== Recent logs")
		for _, msg := range messages {
			fmt.Println(msg)
		}
	}

	fmt.Println("===== Streaming metrics")
	msgChan, errorChan := consumer.Stream(appGuid, authToken)
	go func() {
		for err := range errorChan {
			fmt.Fprintf(os.Stderr, "%v\n", err.Error())
		}
	}()

	for msg := range msgChan {
		fmt.Printf("%v \n", msg)
	}
}
func (manager *SectorManager) Create(x int64, y int64, name string) (*Sector, bool) {
	var coords = SectorCoords{X: x, Y: y}
	var key = coords.String()

	sector, ok := manager.Sectors[key]
	if ok {
		fmt.Printf("Sector exists %s\n", key)
		return sector, false
	}

	var db = manager.context.DB()
	var success = false
	db.Transact(func(t *loge.Transaction) {
		if !t.Exists("sector", loge.LogeKey(key)) {
			sector = &Sector{
				Coords: coords,
				Name:   name,
			}
			t.Set("sector", loge.LogeKey(key), sector)
			success = true
		}
	}, 0)

	fmt.Printf("Create success: %v\n", success)

	if success {
		return sector, true
	}
	return nil, false
}
func tcpserver(laddr string) {
	listener, err := net.Listen("tcp4", laddr)
	if err != nil {
		panic("Could not ListenTCP")
	}
	for {
		conn, err := listener.Accept()
		if err != nil {
			fmt.Printf("Error accepting\n")
			continue
		}
		go func(conn net.Conn) {
			buffer := make([]byte, 1024)
			rlen, err := conn.Read(buffer)
			if err != nil {
				fmt.Printf("Error reading from tcp connection\n")
				return
			}
			conn.Close()
			// Hand off the message without its trailing byte (typically the newline).
			go handlePacket(buffer[:rlen-1])
		}(conn)
	}
}
func (n *node) debug() {
	if len(n.string) == 1 {
		fmt.Printf("%v %v '%v' %d\n", n.id, TypeMap[n.Type], n.string, n.string[0])
	} else {
		fmt.Printf("%v %v '%v'\n", n.id, TypeMap[n.Type], n.string)
	}
}
func main() {
	fmt.Printf("Factorial of %d is %d\n", 10, factorial(10))
	fmt.Printf("Factorial of %d is %d\n", 3, factorial(3))
	fmt.Printf("Factorial of %d is %d\n", 5, factorial(5))
	fmt.Printf("Factorial of %d is %d\n", 0, factorial(0))
	fmt.Printf("Factorial of %d is %d\n", 1, factorial(1))
}
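// factorial is not shown in this snippet. A minimal recursive sketch of the
// assumed helper (0! = 1! = 1, n! = n * (n-1)!):
func factorial(n int) int {
	if n <= 1 {
		return 1
	}
	return n * factorial(n-1)
}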
func PrintBytes(label string, data []byte) {
	fmt.Printf("%s: ", label)
	for i := 0; i < len(data); i++ {
		fmt.Printf("0x%02x,", data[i])
	}
	fmt.Printf("\n")
}
func (obj *Idcsv) addItem(key string, value string) {
	if key == "" {
		fmt.Println("add no key item")
		return
	}

	item := obj._dict[key]
	if item != nil {
		if item.Value != value {
			fmt.Printf("already has %v : #%v# --- #%v#\n", key, item.Value, value)
		}
		return
	}

	if value == "" {
		fmt.Printf("add no value item #%v#\n", key)
		return
	}

	item = new(tool.Item)
	*item = tool.Item{
		Key:   0,
		Desc:  key,
		Value: value,
	}
	obj._dict[key] = item
	// fmt.Printf("add item %v\n", item)
	return
}
// Applies the context to an authorizable controller.
func (ac *AuthContext) ApplyContext(controller web.Controller, response http.ResponseWriter, request *http.Request, chain []web.ChainableContext) {
	ac.isInit = true
	ac.request = request
	ac.response = response

	v, ok := controller.(AuthorizableController)
	if ok {
		if err := v.SetAuthContext(ac); err != nil {
			fmt.Printf("Error setting authorization context: %s \n", err.Error())
		}
	} else {
		fmt.Printf("Tried to wrap a controller that is not AuthContext aware \n")
	}

getSession:
	for i := 0; i < len(chain); i++ {
		v, ok := chain[i].(SessionChainLink)
		if ok {
			ac.session = v // access session safely in here for user_id perm checks.
			break getSession
		}
	}
}