// Using Panamax data, loop through the checks that are delayed, executing them
// sequentially. This method is intended to run in the background.
func (g *GoDutch) runDelayedChecks() {
	var name string
	var lastRun int64
	var req *Request
	var err error

	if g.lastRunThreshold <= 0 {
		log.Println("[GoDutch] lastRunThreshold is disabled, no auto-run.")
		return
	}

	log.Printf("[GoDutch] Checks will run automatically after: %ds", g.lastRunThreshold)

	for {
		time.Sleep(10 * time.Second)
		log.Println("[GoDutch] Sleep done, looking for delayed checks.")

		for name, lastRun = range g.p.ChecksRunReport(g.lastRunThreshold) {
			log.Printf("[GoDutch] Executing '%s', last run at %ds ago (%ds threshold)",
				name, lastRun, g.lastRunThreshold)
			if req, err = NewRequest(name, []string{}); err != nil {
				log.Fatalf("[GoDutch] Error on creating request to: '%s'", name)
			}
			if _, err = g.p.Execute(req); err != nil {
				log.Println("[GoDutch] Error on execute: ", err)
			}
		}
	}
}
// Run runs the tunnel session.
func (s *Session) Run() (err error) {
	defer s.recoverPanic("Session.Run")

	go func() {
		defer s.recoverPanic("Session.mux.Wait")
		code, err, debug := s.mux.Wait()
		// s.Info("Session mux shutdown with code %v error %v debug %v", code, err, debug)
		log.Printf("[INFO] Session mux shutdown with code %v error %v debug %v", code, err, debug)
	}()
	defer s.mux.Close()

	// A tunnel session starts with an auth stream.
	if err = s.handleAuth(); err != nil {
		return
	}

	// Then we handle new streams sent from the client.
	for {
		stream, err := s.mux.Accept()
		if err != nil {
			s.Shutdown()
			err = fmt.Errorf("Failed to accept stream: %v", err)
			log.Printf("[ERROR] %v", err)
			return err
		}
		go s.handleStream(conn.Wrap(stream, "stream", s.id))
	}
}
func DefaultHandler(w http.ResponseWriter, r *http.Request) {
	log.Println("Serving index.html to ", r.RemoteAddr)

	tmpl, err := template.ParseFiles("templates/index.html")
	if err != nil {
		Write404(w, r)
		log.Println("error loading index.html!")
		return
	}

	// Guard both the update and the insert with the mutex so concurrent
	// requests do not race on clientList.
	mutex.Lock()
	if client, ok := clientList[r.RemoteAddr]; ok {
		client.LastUpdate = time.Now()
		clientList[r.RemoteAddr] = client
	} else {
		client := clientInfo{
			NodeID:     "SC-0.1-" + strconv.Itoa(GNodeID),
			Address:    net.ParseIP(r.RemoteAddr),
			LastUpdate: time.Now(),
		}
		GNodeID++
		clientList[r.RemoteAddr] = client
	}
	mutex.Unlock()

	tmpl.Execute(w, struct {
		NodeID   string
		NumNodes int
	}{clientList[r.RemoteAddr].NodeID, len(clientList)})
}
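// Hedged sketch of the rendering step above in isolation: Execute fills a parsed
// template from an anonymous struct, so index.html can reference {{.NodeID}} and
// {{.NumNodes}}. The inline template text below is an assumption; the real
// templates/index.html is not part of this snippet.
package main

import (
	"html/template"
	"os"
)

func main() {
	tmpl := template.Must(template.New("index").Parse(
		"node {{.NodeID}} of {{.NumNodes}}\n"))

	tmpl.Execute(os.Stdout, struct {
		NodeID   string
		NumNodes int
	}{"SC-0.1-1", 3})
}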
func (tnt *TnTServer) CopyFileFromPeer(srv int, path string, dest string) error {
	/*
		(1) Call 'GetFile' RPC on server 'srv'
		(2) Write the file preserving permissions
	*/
	args := &GetFileArgs{FilePath: path}
	var reply GetFileReply

	ok := call(tnt.servers[srv], "TnTServer.GetFile", args, &reply)
	if ok {
		if reply.Err != nil {
			log.Println("CopyFileFromPeer:", tnt.me, ": Error opening file:", reply.Err)
		} else {
			err := ioutil.WriteFile(tnt.root+dest, reply.Content, reply.Perm)
			if err != nil {
				log.Println("CopyFileFromPeer:", tnt.me, ": Error writing file:", err)
				return err
			}
		}
	} else {
		log.Println(tnt.me, ": GetFile RPC failed")
	}
	return reply.Err
}
func (self *SourceManager) monitorFlume() {
	for self.isRunning {
		time.Sleep(1 * time.Second)

		monitor := "FLUME_TPS|"
		for k, v := range self.sourceServers {
			succ, fail := v.monitor()
			monitor += fmt.Sprintf("%s|%d/%d \t", k, succ, fail)
		}
		log.Println(monitor)

		mk := make([]string, 0)
		monitor = "FLUME_POOL|\n"
		for k := range self.hp2flumeClientPool {
			mk = append(mk, k.Host+":"+strconv.Itoa(k.Port))
		}
		sort.Strings(mk)

		for _, hp := range mk {
			v, ok := self.hp2flumeClientPool[config.NewHostPort(hp)]
			if !ok {
				continue
			}
			active, core, max := v.FlumePool.MonitorPool()
			monitor += fmt.Sprintf("%s|%d/%d/%d\n", hp, active, core, max)
		}
		log.Println(monitor)
	}
}
func connectToDiscord() {
	log.Println("Connecting to discord")
	//var err error
	c := config.Get()
	dg, err := discordgo.New(c.Email, c.Password, c.Token)
	if err != nil {
		log.Println(err.Error())
		return
	}

	// Register messageCreate as a callback for the OnMessageCreate event.
	dg.AddHandler(messageCreate)

	// Retry after broken websocket
	dg.ShouldReconnectOnError = true

	// Open websocket connection
	err = dg.Open()
	if err != nil {
		log.Println(err)
		return
	}
	log.Println("Connected")
}
// stream receiver
func (s *server) recv(stream TunService_StreamServer, sess_die chan struct{}) chan []byte {
	ch := make(chan []byte)
	go func() {
		defer func() {
			close(ch)
		}()
		decoder, err := rc4.NewCipher([]byte(_key_recv))
		if err != nil {
			log.Println(err)
			return
		}
		for {
			in, err := stream.Recv()
			if err == io.EOF { // client closed
				log.Println(err)
				return
			}
			if err != nil {
				log.Println(err)
				return
			}
			decoder.XORKeyStream(in.Message, in.Message)
			select {
			case ch <- in.Message:
			case <-sess_die:
			}
		}
	}()
	return ch
}
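// Hedged sketch (not from the original source): the receive path above decrypts
// in.Message with an RC4 keystream, so a matching sender would need its own
// cipher instance initialized with the same key. The key value and stream types
// here are assumptions; this standalone example only shows that crypto/rc4
// encryption and decryption are the same XOR operation, each side keeping an
// independent cipher state.
package main

import (
	"crypto/rc4"
	"fmt"
	"log"
)

func main() {
	key := []byte("example-shared-key") // hypothetical key, stands in for _key_recv

	encoder, err := rc4.NewCipher(key)
	if err != nil {
		log.Fatal(err)
	}
	decoder, err := rc4.NewCipher(key) // separate instance, same key
	if err != nil {
		log.Fatal(err)
	}

	msg := []byte("hello tunnel")
	encoder.XORKeyStream(msg, msg) // encrypt in place
	decoder.XORKeyStream(msg, msg) // decrypt in place
	fmt.Println(string(msg))       // prints "hello tunnel"
}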
// buildExternal downloads and builds external packages, and
// reports their build status to the dashboard.
// It will re-build all packages after pkgBuildInterval nanoseconds or
// a new release tag is found.
func (b *Builder) buildExternal() {
	var prevTag string
	var nextBuild int64
	for {
		time.Sleep(waitInterval)
		err := run(nil, goroot, "hg", "pull", "-u")
		if err != nil {
			log.Println("hg pull failed:", err)
			continue
		}
		hash, tag, err := firstTag(releaseRe)
		if err != nil {
			log.Println(err)
			continue
		}
		if *verbose {
			log.Println("latest release:", tag)
		}
		// Don't rebuild if there's no new release
		// and it's been less than pkgBuildInterval
		// nanoseconds since the last build.
		if tag == prevTag && time.Nanoseconds() < nextBuild {
			continue
		}
		// build will also build the packages
		if err := b.buildHash(hash); err != nil {
			log.Println(err)
			continue
		}
		prevTag = tag
		nextBuild = time.Nanoseconds() + pkgBuildInterval
	}
}
func (s *websocketService) Run() {
	defer s.conn.Close()
	defer s.Quit()

	log.Println("Upgrading connection to websocket")

	websocketErrors := make(chan error)
	go func(conn *websocket.Conn, errChan chan error) {
		for {
			var message interface{}
			err := conn.ReadJSON(&message)
			if err != nil {
				errChan <- err
				conn.Close()
				return
			}
		}
	}(s.conn, websocketErrors)

	for {
		select {
		case d := <-s.messages:
			log.Printf("Received a message: %s", d)
			s.conn.WriteJSON(d)
		case err := <-websocketErrors:
			if err != nil {
				log.Println("Closing websocket connection")
				return
			}
		}
	}
}
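// Hedged sketch: the service above logs "Upgrading connection to websocket" but
// the upgrade itself is not shown. Assuming gorilla/websocket (ReadJSON/WriteJSON
// match that API), s.conn is typically obtained in an HTTP handler like the one
// below. The route and the newWebsocketService constructor are hypothetical.
package main

import (
	"log"
	"net/http"

	"github.com/gorilla/websocket"
)

var upgrader = websocket.Upgrader{
	ReadBufferSize:  1024,
	WriteBufferSize: 1024,
}

func wsHandler(w http.ResponseWriter, r *http.Request) {
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		log.Println("upgrade failed:", err)
		return
	}
	// Hypothetical constructor: hand the upgraded connection to the service.
	// s := newWebsocketService(conn)
	// go s.Run()
	_ = conn
}

func main() {
	http.HandleFunc("/ws", wsHandler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}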
// Save saves all lines to the text file, except when:
// + it starts with some space
// + it is an empty line
func (h *history) Save() (err error) {
	if _, err = h.file.Seek(0, 0); err != nil {
		return
	}

	out := bufio.NewWriter(h.file)
	element := h.li.Front() // Get the first element.

	for i := 0; i < h.li.Len(); i++ {
		line := element.Value.(string)

		if strings.HasPrefix(line, " ") {
			goto _next
		}
		if line = strings.TrimSpace(line); line == "" {
			goto _next
		}
		if _, err = out.WriteString(line + "\n"); err != nil {
			log.Println("history.Save:", err)
			break
		}

	_next:
		if element = element.Next(); element == nil {
			continue
		}
	}

	if err = out.Flush(); err != nil {
		log.Println("history.Save:", err)
	}
	h.close()
	return
}
func HandleIssues(b []byte) {
	defer PanicHandler(true)

	payload, err := UnmarshalIssues(b)
	if err != nil {
		log.Println("Error decoding Issues payload:", err)
		return
	}

	gu := payload.Sender
	gr := payload.Repo
	o := payload.Organization

	p, ta, err := FindIssuesPointsAndTrophyAction(payload)
	if err != nil {
		log.Println("Issues payload error:", err)
		return
	}

	user, _ := entities.NewUserWithGithubData(gu)
	repo, _ := entities.NewRepoWithGithubData(gr)
	org, _ := entities.NewOrganizationWithGithubData(o)

	err = DistributeScores(p, user, repo, org)
	if err != nil {
		log.Println("Error distributing scores:", err)
	}

	err = RegisterAction(ta, user)
	if err != nil {
		log.Println("Error registering action:", err)
	}
}
func (s *Server) ListenAndServe() error {
	uaddr, err := net.ResolveUDPAddr("udp", s.Addr)
	if err != nil {
		return err
	}
	conn, err := net.ListenUDP("udp", uaddr)
	if err != nil {
		return err
	}
	defer conn.Close()
	log.Println("listening on", uaddr)

	newmsg := make(chan Message)
	go messageReceiver(s, newmsg)

	for {
		b := make([]byte, 1024)
		n, addr, err := conn.ReadFrom(b)
		if err != nil {
			log.Printf("error %v", err)
			continue
		}
		heartbeat := Message{From: addr.String()}
		b = bytes.TrimLeft(b[:n], "\n") // remove newline
		heartbeat.extract(b)
		newmsg <- heartbeat
	}
}
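// Hedged sketch: a minimal client that could feed the UDP listener above. The
// address and message body are placeholders; the real heartbeat format is
// whatever Message.extract expects, which is not shown in this snippet.
package main

import (
	"log"
	"net"
)

func main() {
	conn, err := net.Dial("udp", "127.0.0.1:9999") // address is an assumption
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// One datagram per heartbeat; the server reads up to 1024 bytes of it.
	if _, err := conn.Write([]byte("heartbeat\n")); err != nil {
		log.Println("write failed:", err)
	}
}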
func (t *timer) watchdog(remove chan Message) {
	alertcount := 0
loop:
	for {
		select {
		case ctrl := <-t.ctrl:
			switch ctrl {
			case RESET:
				t.Handler.Feed(t.Message)
				t.relight()
				alertcount = 0
			case STOP:
				break loop
			}
		case b := <-t.cupdate:
			log.Println("got update Message")
			t.Message.change(b)
		case <-t.Fuse:
			log.Println("Timeout reached", t.Message)
			t.Handler.Starve(t.Message)
			t.relight()
			alertcount++
			if alertcount > t.Alertlimit || t.Alertonce {
				log.Printf("Alert limit %d reached", t.Alertlimit)
				break loop
			}
		}
	}
	remove <- t.Message
}
func getFile(filename string, cp chan *cachePutRequest, cacheCheck chan *cacheCheckRequest, fileSent chan bool) (io.Reader, error) {
	cacheReq := new(cacheCheckRequest)
	cacheReq.name = filename
	cacheReq.isNotCached = make(chan bool)
	cacheReq.isCached = make(chan *cachePutRequest)

	// send cache check request
	cacheCheck <- cacheReq

	// handle cache request
	select {
	case c := <-cacheReq.isCached:
		log.Println("Cache hit sending", c.name, "to the client")
		return c.file, nil
	case <-cacheReq.isNotCached:
		log.Println("Cache miss sending", filename, "to the client")
		fd, err := os.Open(filename)
		if err != nil {
			return fd, err
		}
		// when cacheFileReader.Read is called, responseFileReader will be written to
		responseFileReader := new(bytes.Buffer)
		cacheFileReader := io.TeeReader(fd, responseFileReader)

		cachePut := new(cachePutRequest)
		cachePut.name = filename
		cachePut.file = responseFileReader
		// the cache needs to know when the file has been read so it can write it to the cache
		cachePut.fileIsSent = fileSent
		cp <- cachePut
		return cacheFileReader, nil
	}
}
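// Hedged sketch of the caching pattern above in isolation: io.TeeReader copies
// every byte the client reads from the file into a bytes.Buffer, so the cache
// only receives the data once the response has actually been streamed.
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

func main() {
	src := strings.NewReader("file contents") // stands in for the opened file
	var cached bytes.Buffer

	tee := io.TeeReader(src, &cached)

	// Reading from tee (as the HTTP response writer would) fills the buffer.
	out, _ := io.ReadAll(tee)
	fmt.Printf("sent: %q, cached: %q\n", out, cached.String())
}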
// buildOrBench checks for a new commit for this builder
// and builds or benchmarks it if one is found.
// It returns true if a build/benchmark was attempted.
func (b *Builder) buildOrBench() bool {
	var kinds []string
	if *doBuild {
		kinds = append(kinds, "build-go-commit")
	}
	if *doBench {
		kinds = append(kinds, "benchmark-go-commit")
	}
	kind, hash, benchs, err := b.todo(kinds, "", "")
	if err != nil {
		log.Println(err)
		return false
	}
	if hash == "" {
		return false
	}
	switch kind {
	case "build-go-commit":
		if err := b.buildHash(hash); err != nil {
			log.Println(err)
		}
		return true
	case "benchmark-go-commit":
		if err := b.benchHash(hash, benchs); err != nil {
			log.Println(err)
		}
		return true
	default:
		log.Printf("Unknown todo kind %v", kind)
		return false
	}
}
func (s *ChatServer) Run() {
	ticks := time.Tick(time.Second * 1)
	go muduo.ServeTcp(s.listener, s, "chat")
	for {
		select {
		case c := <-s.register:
			s.conns[c] = true
		case c := <-s.unregister:
			delete(s.conns, c)
			close(c.send)
		case m := <-s.boardcast:
			for c := range s.conns {
				select {
				case c.send <- m:
				default:
					delete(s.conns, c)
					close(c.send)
					log.Println("kick slow connection")
				}
			}
		case <-ticks:
			log.Println(len(s.conns), runtime.NumGoroutine())
		}
	}
}
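// Hedged sketch of the "kick slow connection" pattern above: a select with a
// default case makes the broadcast send non-blocking, so a consumer whose buffer
// is full is dropped instead of stalling every other connection.
package main

import "fmt"

func main() {
	fast := make(chan string, 1)
	slow := make(chan string) // unbuffered and never read: simulates a slow client

	for _, ch := range []chan string{fast, slow} {
		select {
		case ch <- "hello":
			fmt.Println("delivered")
		default:
			fmt.Println("would block: kick slow connection")
		}
	}
}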
func setExchange(q *anyq.Rabbitmq) {
	log.Println("declaring Exchange: ", ex)
	if err := q.ExchangeDeclare(ex, "direct", false, false, false, false, nil); err != nil {
		log.Fatal(err)
	}
	log.Println("declared Exchange")
}
func (self *Table) insert(peer *Peer, offset int) bool {
	if peer == nil {
		log.Printf("PeerNil %v", offset)
		return false
	}
	if !peer.isDead() && offset < len(self.peers) {
		for i := len(self.peers) - 1 - offset; i >= 0; i-- {
			peernode := self.peers[i]
			mask := (uint64(1) << uint(i))
			if peernode == nil || (peernode.isDead() ||
				peernode.Key()^(self.center^mask) > peer.Key()^(self.center^mask)) {
				self.insert(self.peers[i], len(self.peers)-i)
				self.peers[i] = peer
				//log.Println(peer.Addr, pos)
				return true
			} else if peer.Key() == peernode.Key() {
				if peer != peernode {
					log.Println("killed because same", peer.Addr, offset)
					peer.kill()
				}
				return false
			}
		}
	}
	log.Println("kill cause no fit", peer.Addr)
	peer.evict()
	return false
}
// build checks for a new commit for this builder
// and builds it if one is found.
// It returns true if a build was attempted.
func (b *Builder) build() bool {
	defer func() {
		err := recover()
		if err != nil {
			log.Println(b.name, "build:", err)
		}
	}()
	hash, err := b.todo()
	if err != nil {
		log.Println(err)
		return false
	}
	if hash == "" {
		return false
	}
	// Look for hash locally before running hg pull.
	if _, err := fullHash(hash[:12]); err != nil {
		// Don't have hash, so run hg pull.
		if err := run(nil, goroot, "hg", "pull"); err != nil {
			log.Println("hg pull failed:", err)
			return false
		}
	}
	err = b.buildHash(hash)
	if err != nil {
		log.Println(err)
	}
	return true
}
// write goroutine
func (c *CopyOnWriteSlice) loopWrite(clean_interval time.Duration) {
	clean_ticker := time.NewTicker(clean_interval)
	defer clean_ticker.Stop()

	for {
		select {
		case _, ok := <-c.closed:
			if !ok {
				log.Println("---------loopWrite closed--------")
				return
			}
		case e, ok := <-c.addChan:
			if ok {
				c.copyOnAdd(e)
				log.Println("< copyOnAdd actioned in loopWrite >", e)
			}
		case <-clean_ticker.C:
			select {
			case _, ok := <-c.cleanMark:
				if ok {
					c.copyOnClean()
					log.Println("< ------------Clean actioned in loopWrite >")
				}
			default:
				log.Println("+++ skip clean")
			}
		}
	}
	log.Println("loopWrite ended")
}
// endpoint receiver
func (s *server) endpoint(sess_die chan struct{}) (c net.Conn, ch_endpoint <-chan []byte) {
	conn, err := net.Dial("tcp", "localhost:1194")
	if err != nil {
		log.Println(err)
		return
	}

	ch := make(chan []byte)
	go func() {
		defer func() {
			close(ch)
		}()
		for {
			bts := make([]byte, 512)
			n, err := conn.Read(bts)
			if err != nil {
				log.Println(err)
				return
			}
			select {
			case ch <- bts[:n]:
			case <-sess_die:
			}
		}
	}()
	return conn, ch
}
func addTree(w *fsnotify.Watcher, root string) error {
	err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
		isDir, err := IsDirectory(path)
		if err != nil {
			log.Println(err)
			return nil
		}
		switch {
		case isDir && IsHidden(path):
			log.Println(path)
			return filepath.SkipDir
		case isDir:
			log.Println(path)
			if err := w.Add(path); err != nil {
				return err
			}
		default:
			return nil
		}
		return nil
	})
	if err != nil {
		return err
	}
	return nil
}
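// Hedged sketch: addTree only registers directories with the watcher, so a caller
// still needs an event loop. Assuming github.com/fsnotify/fsnotify, usage could
// look roughly like this; IsDirectory and IsHidden are helpers from the
// surrounding package and are not reimplemented here.
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	// addTree(watcher, ".") // would walk and register every visible directory
	if err := watcher.Add("."); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case event := <-watcher.Events:
			if event.Op&fsnotify.Write != 0 {
				log.Println("modified:", event.Name)
			}
		case err := <-watcher.Errors:
			log.Println("watch error:", err)
		}
	}
}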
func writeInput(conn *net.TCPConn) {
	fmt.Print("Enter username: ")
	// read the username from standard input
	reader := bufio.NewReader(os.Stdin)
	username, err := reader.ReadString('\n')
	if err != nil {
		log.Fatal(err)
	}
	username = strings.TrimSpace(username)

	str, err := json.Marshal(map[string]string{"username": string(username)})
	if err != nil {
		fmt.Println("It is not property name")
		return
	}
	err = common.WriteMsg(conn, string(str))
	if err != nil {
		log.Println(err)
	}

	fmt.Println("Enter text: ")
	for {
		text, err := reader.ReadString('\n')
		if err != nil {
			log.Fatal(err)
		}
		err = common.WriteMsg(conn, username+": "+text)
		if err != nil {
			log.Println(err)
		}
	}
}
func main() {
	flag.Parse()
	file := flag.Arg(0)

	f, err := os.Open(file)
	if err != nil {
		log.Fatalln("Can't open file: ", file)
	}

	img, err := jpeg.Decode(f, &jpeg.DecoderOptions{})
	if img == nil {
		log.Fatalln("Got nil")
	}
	if err != nil {
		log.Fatalf("Got Error: %v", err)
	}

	//
	// write your code here ...
	//

	switch img.(type) {
	case *image.YCbCr:
		log.Println("decoded YCbCr")
	case *image.Gray:
		log.Println("decoded Gray")
	default:
		log.Println("unknown format")
	}
}
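// Hedged sketch: the snippet above appears to use a libjpeg binding whose Decode
// takes a *jpeg.DecoderOptions. With the standard library image/jpeg the same
// type switch works, since Decode also returns an image.Image. The file name is
// a placeholder.
package main

import (
	"image"
	"image/jpeg"
	"log"
	"os"
)

func main() {
	f, err := os.Open("input.jpg") // hypothetical file name
	if err != nil {
		log.Fatalln("Can't open file:", err)
	}
	defer f.Close()

	img, err := jpeg.Decode(f)
	if err != nil {
		log.Fatalf("Got Error: %v", err)
	}

	switch img.(type) {
	case *image.YCbCr:
		log.Println("decoded YCbCr")
	case *image.Gray:
		log.Println("decoded Gray")
	default:
		log.Println("unknown format")
	}
}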
func (s *Session) handleUnbind(stream conn.Conn, unbind *proto.Unbind) (err error) {
	// s.Debug("Unbinding tunnel")
	log.Println("[DEBUG] Unbinding tunnel")

	// remove it from the list of tunnels
	t, ok := s.delTunnel(unbind.Url)
	if !ok {
		err := fmt.Errorf("Failed to unbind tunnel %s: no tunnel found.", unbind.Url)
		log.Println("[ERROR]", err)
		// return s.Error("Failed to unbind tunnel %s: no tunnel found.", unbind.Url)
		return err
	}

	if err = t.shutdown(); err != nil {
		err := fmt.Errorf("Failed to unbind tunnel %s: %v", unbind.Url, err)
		log.Println("[ERROR]", err)
		// return s.Error("Failed to unbind tunnel %s: %v", unbind.Url, err)
		return err
	}

	// acknowledge success
	unbindResp := &proto.UnbindResp{}
	if err = proto.WriteMsg(stream, unbindResp); err != nil {
		err := fmt.Errorf("Failed to write unbind resp: %v", err)
		// return s.Error("Failed to write unbind resp: %v", err)
		log.Println("[ERROR]", err)
		return err
	}
	return
}
// Search and sort data from the storage
func (storage *Record) Search(query string) (answer string, ok bool) {
	ok = false
	answer = "not found\n"

	if len(strings.TrimSpace(query)) == 0 {
		log.Println("Empty query")
	} else {
		mapp, err := storage.LoadMapper(strings.TrimSpace(query))
		if err != nil {
			log.Println("Query:", query, err.Error())
		} else {
			if mapp == nil || mapp.Count() == 0 {
				return answer, ok
			}
			ok = true
			// get the keys of the map and sort them
			keys := make([]string, 0, mapp.Count())
			for key := range mapp.Fields {
				keys = append(keys, key)
			}
			sort.Strings(keys)
			answer = prepareAnswer(mapp, keys)
		}
	}
	return answer, ok
}
func (plug Autoban) Ban(msg *IRCMessage, match bool, spam bool) {
	logMsg, banMsg := plug.computeReasonAndTime(msg, match, spam)

	log.Println(logMsg)
	plug.write <- IRCMessage{
		Channel:   "Rodya",
		Msg:       logMsg,
		User:      msg.User,
		When:      msg.When,
		Unlimited: true,
	}

	if len(msg.Mask) < 3 {
		log.Printf("msg.Mask too short to ban! %s", msg.Mask)
		return
	}

	log.Println(banMsg)
	plug.write <- IRCMessage{
		Channel:   "ChanServ",
		Msg:       banMsg,
		User:      msg.User,
		When:      msg.When,
		Unlimited: true,
	}
}
func (c *Correspondent) StartReporting() {
	tick := time.NewTicker(time.Second * c.refreshInterval).C
	expectedRepliant := ""

	for {
		select {
		case <-tick:
			log.Println("Getting new news...")
			// step 1, add news to cache
			news := c.agent.getNews()
			if len(news.AgentId) > 0 {
				entry := Entry{IpAddress: c.wireService.GetAddress(), Timestamp: time.Now(), News: news}
				c.cache.addEntries(entry)

				// step 2, find a random peer
				peer := findPeer(c.peers.getAll())

				// step 3, send cache to peer
				log.Println("Sending cache to: " + peer)
				c.wireService.SendNews(peer, c.cache.getEntries())

				// keep track of who we sent to, so we can expect a response
				expectedRepliant = peer
			}
		case ch := <-c.repliantRequest:
			ch <- expectedRepliant
			expectedRepliant = ""
		case <-c.done:
			log.Println("Done")
			return
		}
	}
}
func (ths *ImageTagServer) add_tag(w http.ResponseWriter, r *http.Request) {
	tag := r.URL.Query().Get("tag")
	desc := r.URL.Query().Get("description")

	db := ths.get_db()
	if db == nil {
		http.Error(w, "DataBase error.", http.StatusInternalServerError)
		return
	}
	defer db.Close()

	// Use a parameterized query so tag and description are escaped by the driver
	// instead of being concatenated into the SQL string.
	res, resErr := db.Exec("INSERT INTO Tags(Tag, Description) VALUES(?, ?)", tag, desc)
	if resErr != nil {
		log.Println(resErr.Error())
		http.Error(w, resErr.Error(), http.StatusInternalServerError)
		return
	}

	c, _ := res.RowsAffected()
	if c < 1 {
		log.Println("No tag added")
		http.Error(w, "No tag added", http.StatusInternalServerError)
		return
	}
	log.Println("Tag: " + tag + " added with success.")
}
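// Hedged sketch of the parameterized insert above in isolation. The driver, DSN,
// and table are assumptions; the point is that placeholders let the driver escape
// tag and description rather than splicing them into the SQL text. Note that the
// placeholder style is driver-specific ("?" for MySQL/SQLite, "$1" for PostgreSQL).
package main

import (
	"database/sql"
	"log"

	_ "github.com/mattn/go-sqlite3" // hypothetical driver choice
)

func addTag(db *sql.DB, tag, desc string) error {
	res, err := db.Exec("INSERT INTO Tags(Tag, Description) VALUES(?, ?)", tag, desc)
	if err != nil {
		return err
	}
	if n, _ := res.RowsAffected(); n < 1 {
		log.Println("No tag added")
	}
	return nil
}

func main() {
	db, err := sql.Open("sqlite3", "tags.db")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := addTag(db, "landscape", "outdoor scenery"); err != nil {
		log.Println(err)
	}
}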
func main() {
	jenkins_address := os.Getenv("JENKINS_URL")
	listen_address := os.Getenv("LISTEN_ADDRESS")
	if listen_address == "" {
		listen_address = "[::]:8080"
	}
	if jenkins_address == "" {
		log.Fatalln("Use environment variables JENKINS_URL and LISTEN_ADDRESS (default \"[::]:8080\")")
	}

	remote, err := url.Parse(jenkins_address)
	if err != nil {
		log.Panic(err)
	}

	proxy := httputil.NewSingleHostReverseProxy(remote)
	http.HandleFunc("/", handler(proxy))

	log.Println("jenkins-authentication-proxy", version, "starting")
	log.Println("Authentication endpoint:", planio_url)

	err = http.ListenAndServe(listen_address, nil)
	if err != nil {
		log.Panic(err)
	}
}
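// Hedged sketch: handler(proxy) is not shown in this snippet. A wrapper of that
// shape typically performs the authentication step and then delegates to the
// reverse proxy, roughly as below. The header and credential handling here are
// illustrative assumptions, not the proxy's actual logic.
package main

import (
	"net/http"
	"net/http/httputil"
)

func handler(proxy *httputil.ReverseProxy) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		// e.g. validate the caller, then forward upstream
		if _, _, ok := r.BasicAuth(); !ok {
			w.Header().Set("WWW-Authenticate", `Basic realm="jenkins"`)
			http.Error(w, "authentication required", http.StatusUnauthorized)
			return
		}
		proxy.ServeHTTP(w, r)
	}
}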