func messageListener(o *gomegle.Omegle, logger *log.Logger) { for { err := o.ShowTyping() if err != nil { logger.Print(err) } reader := bufio.NewReader(os.Stdin) text, err := reader.ReadString('\n') if err != nil { err = o.Disconnect() if err != nil { logger.Fatal(err) } fmt.Println("- Disconnected") ret := o.GetID() if ret != nil { logger.Fatal(ret) } continue } err = o.StopTyping() if err != nil { logger.Print(err) } err = o.SendMessage(text) if err != nil { logger.Fatal(err) continue } } }
// DefaultFormat returns a middleware that logs http requests // to the given logger using the default log format. func DefaultFormat(l *log.Logger) httpmux.MiddlewareFunc { return func(next http.HandlerFunc) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { start := time.Now() rw := NewResponseWriter(w) next(rw, r) b := getBuffer() b.WriteString(r.Proto) b.Write([]byte(" ")) b.WriteString(strconv.Itoa(rw.Code())) b.Write([]byte(" ")) b.WriteString(r.Method) b.Write([]byte(" ")) b.WriteString(r.URL.RequestURI()) b.Write([]byte(" from ")) b.WriteString(r.RemoteAddr) b.Write([]byte(" ")) fmt.Fprintf(b, "%q", r.Header.Get("User-Agent")) b.Write([]byte(" ")) b.WriteString(strconv.Itoa(rw.Bytes())) b.Write([]byte(" bytes in ")) b.WriteString(time.Since(start).String()) if err := httpmux.Context(r).Value(ErrorID); err != nil { fmt.Fprintf(b, " err: %v", err) } l.Print(b.String()) putBuffer(b) } } }
// Print calls Print on a logger or the default logger. // Arguments are handled in the manner of fmt.Print. func Print(l *log.Logger, v ...interface{}) { if l == nil { log.Print(v...) } else { l.Print(v...) } }
func StartWorker(CacheInvalidation bool, WorkQueue chan File, log *log.Logger) { var err error for { select { case f := <-WorkQueue: startTime := time.Now().UTC() jobId := "b-" + randomString(5) + " " log.SetPrefix(jobId) log.Print("Batch process starting: " + f.Tag + ", " + f.Filename) // Simulate some processing time if f.MediaType() == "image" { err = f.GenerateImage(75, 75, true) if err != nil { log.Print(err) } err = f.GenerateImage(1140, 0, false) if err != nil { log.Print(err) } if CacheInvalidation { if err := f.Purge(); err != nil { log.Print(err) } } } finishTime := time.Now().UTC() elapsedTime := finishTime.Sub(startTime) log.Println("Completed in: " + elapsedTime.String()) } } }
func notify(config localConfig, hook hookEvent, ws, logDir string, notification grimNotification, logger *log.Logger) error { if hook.EventName != "push" && hook.EventName != "pull_request" { return nil } ghErr := setRefStatus(config.gitHubToken(), hook.Owner, hook.Repo, hook.StatusRef, notification.GithubRefStatus(), "", "") context := buildContext(hook, ws, logDir) message, color, err := notification.HipchatNotification(context, config) logger.Print(message) if config.hipChatToken() != "" && config.hipChatRoom() != "" { if err != nil { logger.Printf("Hipchat: Error while rendering message: %v", err) return err } err = sendMessageToRoom(config.hipChatToken(), config.hipChatRoom(), config.grimServerID(), message, color) if err != nil { logger.Printf("Hipchat: Error while sending message to room: %v", err) return err } } else { logger.Print("HipChat: config.hipChatToken and config.hitChatRoom not set") } return ghErr }
func LoginHandler(w http.ResponseWriter, r *http.Request, session sessions.Session, log *log.Logger) string { c, err := config.ReadDefault("users.cfg") if err != nil { return "Can't login. Problems reading user and password." } username := r.FormValue("username") password := r.FormValue("password") allowedUsername, _ := c.RawStringDefault("user") allowedPassword, _ := c.RawStringDefault("password") if username == allowedUsername && password == allowedPassword { log.Print("User WAS logged in.") session.Set("username", username) session.Set("password", password) http.Redirect(w, r, "/home", http.StatusFound) return "OK" } log.Print("User wasn't logged in. User " + username + " and password " + password) http.Redirect(w, r, "/login", http.StatusFound) return "Username or password incorrect" }
func addCategories(files []fileData, client *mwclient.Client, verbose *log.Logger, catFileLimit int32, allCategories map[string]bool, catCounts map[string]int32, stats *stats) { for i := range files { if files[i].processed { continue } // The cat size limit needs to be checked again, since adding // previous files in the batch may have pushed it over the // limit. if catFileLimit > 0 && catCounts[files[i].catMapped] >= catFileLimit { stats.populated++ verbose.Print(files[i].title, "\n", "Already populated: ", files[i].catMapped) } else { // Identifying emtpy categories helps identify // when we are adding a file to a redirect page // for a renamed category. if catCounts[files[i].catMapped] == 0 { warn.Print(files[i].title, "\n", "Adding to empty ", files[i].catMapped) files[i].warning = "Added to empty category" stats.warnings++ } else { verbose.Printf("%s\nAdding to %s (%d files)", files[i].title, files[i].catMapped, int(catCounts[files[i].catMapped])) } stats.edited++ addCategory(files[i].title, files[i].catMapped, client) incCatCount(files[i].catMapped, catCounts) } files[i].processed = true } }
// Determine if any of cats (a file's current categories) match either the // Exif target category, any known target category, or any unknown category // that's named like a target category. func matchCategories(file *fileData, cats []string, mapped string, verbose *log.Logger, ignoreCurrentCats bool, allCategories map[string]bool, stats *stats) bool { result := false for _, cat := range cats { if mapped == cat { stats.inCat++ verbose.Print(file.title, "\n", "Already in mapped: ", mapped) result = true break } if !ignoreCurrentCats { if allCategories[cat] { result = true stats.inCat++ verbose.Print(file.title, "\n", "Already in known: ", cat) break } if strings.HasPrefix(cat, "Category:Taken ") || strings.HasPrefix(cat, "Category:Scanned ") { result = true warn.Print(file.title, "\n", "Already in unknown: ", cat) file.warning = "In unknown " + cat stats.warnings++ break } } } return result }
// createContainer initializes a struct needed to call docker.client.CreateContainer() func createContainer(ctx *ExecContext, task *structs.Task, logger *log.Logger) docker.CreateContainerOptions { if task.Resources == nil { panic("task.Resources is nil and we can't constrain resource usage. We shouldn't have been able to schedule this in the first place.") } hostConfig := createHostConfig(task) logger.Printf("[DEBUG] driver.docker: using %d bytes memory for %s", hostConfig.Memory, task.Config["image"]) logger.Printf("[DEBUG] driver.docker: using %d cpu shares for %s", hostConfig.CPUShares, task.Config["image"]) // Setup port mapping (equivalent to -p on docker CLI). Ports must already be // exposed in the container. if len(task.Resources.Networks) == 0 { logger.Print("[WARN] driver.docker: No networks are available for port mapping") } else { network := task.Resources.Networks[0] dockerPorts := map[docker.Port][]docker.PortBinding{} for _, port := range network.ListStaticPorts() { dockerPorts[docker.Port(strconv.Itoa(port)+"/tcp")] = []docker.PortBinding{docker.PortBinding{HostIP: network.IP, HostPort: strconv.Itoa(port)}} dockerPorts[docker.Port(strconv.Itoa(port)+"/udp")] = []docker.PortBinding{docker.PortBinding{HostIP: network.IP, HostPort: strconv.Itoa(port)}} logger.Printf("[DEBUG] driver.docker: allocated port %s:%d -> %d (static) %s\n", network.IP, port, port) } for label, port := range network.MapDynamicPorts() { // If the label is numeric we expect that there is a service // listening on that port inside the container. In this case we'll // setup a mapping from our random host port to the label port. // // Otherwise we'll setup a direct 1:1 mapping from the host port to // the container, and assume that the process inside will read the // environment variable and bind to the correct port. 
if _, err := strconv.Atoi(label); err == nil { dockerPorts[docker.Port(label+"/tcp")] = []docker.PortBinding{docker.PortBinding{HostIP: network.IP, HostPort: strconv.Itoa(port)}} dockerPorts[docker.Port(label+"/udp")] = []docker.PortBinding{docker.PortBinding{HostIP: network.IP, HostPort: strconv.Itoa(port)}} logger.Printf("[DEBUG] driver.docker: allocated port %s:%d -> %s (mapped)", network.IP, port, label) } else { dockerPorts[docker.Port(strconv.Itoa(port)+"/tcp")] = []docker.PortBinding{docker.PortBinding{HostIP: network.IP, HostPort: strconv.Itoa(port)}} dockerPorts[docker.Port(strconv.Itoa(port)+"/udp")] = []docker.PortBinding{docker.PortBinding{HostIP: network.IP, HostPort: strconv.Itoa(port)}} logger.Printf("[DEBUG] driver.docker: allocated port %s:%d -> %d for label %s\n", network.IP, port, port, label) } } hostConfig.PortBindings = dockerPorts } config := &docker.Config{ Env: TaskEnvironmentVariables(ctx, task).List(), Image: task.Config["image"], } // If the user specified a custom command to run, we'll inject it here. if command, ok := task.Config["command"]; ok { config.Cmd = strings.Split(command, " ") } return docker.CreateContainerOptions{ Config: config, HostConfig: hostConfig, } }
func AddServiceEndpointLoggingMiddleware(logger log.Logger) endpoint.Middleware { return func(next endpoint.Endpoint) endpoint.Endpoint { return func(ctx context.Context, request interface{}) (interface{}, error) { logger.Print("calling endpoint") defer logger.Print("called endpoint") return next(ctx, request) } } }
// Returns a new server with the given name and empty cache // It serves the files in the directory given when is created func NewWebServer(dir string, logger log.Logger) *Server { server := &Server{} server.pwd = dir server.cache = NewCache() // server.sessions server.logger = logger logger.Print("Server created") return server }
// runTriggers executes the given triggers for action (e.g. "start" or
// "stop") by running "service <name> <action>" inside the parent's
// mount namespace. It returns true if any command failed. When
// *disableTriggers is set, each action is logged with a "Disabled: "
// prefix but not executed.
func runTriggers(triggers []*triggers.Trigger, action string, logger *log.Logger) bool {
	hadFailures := false
	needRestart := false
	logPrefix := ""
	if *disableTriggers {
		logPrefix = "Disabled: "
	}
	// For "start" action, if there is a reboot trigger, just do that one.
	if action == "start" {
		for _, trigger := range triggers {
			if trigger.Service == "reboot" {
				logger.Print(logPrefix, "Rebooting")
				if *disableTriggers {
					return hadFailures
				}
				if !runCommand(logger, "reboot") {
					hadFailures = true
				}
				// A reboot supersedes every other trigger, so return
				// immediately.
				return hadFailures
			}
		}
	}
	// The parent's PID is passed so commands run in its mount namespace.
	ppid := fmt.Sprint(os.Getppid())
	for _, trigger := range triggers {
		// Never reboot on a "stop" action.
		if trigger.Service == "reboot" && action == "stop" {
			continue
		}
		if trigger.Service == "subd" {
			// Never kill myself, just restart.
			if action == "start" {
				needRestart = true
			}
			continue
		}
		logger.Printf("%sAction: service %s %s\n", logPrefix, trigger.Service, action)
		if *disableTriggers {
			continue
		}
		if !runCommand(logger, "run-in-mntns", ppid, "service", trigger.Service, action) {
			hadFailures = true
		}
	}
	// Restart our own service last, after all other triggers have run.
	if needRestart {
		logger.Printf("%sAction: service subd restart\n", logPrefix)
		if !runCommand(logger, "run-in-mntns", ppid, "service", "subd", "restart") {
			hadFailures = true
		}
	}
	return hadFailures
}
func fwdSignal(logger *log.Logger, command *exec.Cmd, sig os.Signal) { switch sig { case os.Interrupt, syscall.SIGTERM, syscall.SIGCHLD: return } if err := command.Process.Signal(sig); err != nil { logger.Printf("glock: Error while sending signal %s: %s", sig, err.Error()) } if sig == syscall.SIGTSTP { logger.Print("glock: SIGTSTP sent to child process, but will continue to send heartbeats for the locks") } }
// ApacheCommonFormat returns a middleware that logs http requests // to the given logger using the Apache Common log format. func ApacheCommonFormat(l *log.Logger) httpmux.MiddlewareFunc { return func(next http.HandlerFunc) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { start := time.Now() rw := NewResponseWriter(w) next(rw, r) b := apacheCommonLog(rw, r, start) l.Print(b.String()) putBuffer(b) } } }
func saveToFile(data com.MasterData, masterLogger log.Logger) { file, err := os.Create("backupData.json") if err != nil { masterLogger.Print(err) } buf, err := json.Marshal(data) if err != nil { masterLogger.Print(err) } file.Write(buf) file.Close() }
// DumpClientEvents reads client to server events from r, and dumps them as // text to l. func DumpClientEvents(r io.Reader, l *log.Logger, maxFrameLen int) error { var ger artproto.ClientEventReader ger.Init(r, maxFrameLen) var ev ClientEventsDumper ev.Init(l) for ger.FramingErr == nil { if err := ger.Next(&ev, nil); err != nil { l.Print("Error: ", err) } } return ger.FramingErr }
// DumpServerEvents reads server to client events from r, and dumps them as // text to l. valueIDFn if not nil, is called with values from object updates. func DumpServerEvents(r io.Reader, valueIDFn ValueIDFn, l *log.Logger, maxFrameLen int) error { var ger artproto.ServerEventReader ger.Init(r, maxFrameLen) var ev ServerEventsDumper ev.Init(l, valueIDFn) for ger.FramingErr == nil { if err := ger.Next(&ev, nil); err != nil { l.Print("Error: ", err) } } return ger.FramingErr }
func (lw *LogWriter) logTrace(traceId string, logString string) bool { var logger *log.Logger lw.traceMu.RLock() tl := lw.traceFileMap[traceId] lw.traceMu.RUnlock() if tl == nil { filePath := getDefaultPath() + pathSeparator() + "trace_" + traceId + ".log" file, err := os.OpenFile(filePath, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) if err != nil { lw.logger.Print("Logger: Unable to create trace file %s, Error %s", filePath, err.Error()) return false } logger = log.New(file, "", log.Lmicroseconds) fileLockPath := getDefaultPath() + pathSeparator() + "trace_" + traceId + ".lock" fl, _ := lockfile.New(fileLockPath) tl = &TraceLogger{file: file, logger: logger, counter: lw.logCounter, fileLock: fl} lw.traceMu.Lock() lw.traceFileMap[traceId] = tl lw.traceMu.Unlock() if lw.cleanerRunning == false { // restart the cleaner go cleanupMap(lw) } } else { logger = tl.(*TraceLogger).logger tl.(*TraceLogger).counter = lw.logCounter } var locked bool err := tl.(*TraceLogger).fileLock.TryLock() if err != nil { // busy wait a few times before giving up for i := 0; i < MAX_LOCK_RETRY; i++ { time.Sleep(10 * time.Millisecond) err = tl.(*TraceLogger).fileLock.TryLock() if err == nil { locked = true break } } // unable to acquire lock if locked == false { return false } } defer tl.(*TraceLogger).fileLock.Unlock() logger.Print(logString) return true }
func UDPInit(master bool, sendChannel, receiveChannel chan UDPMessage, networkLogger log.Logger) { var localPort, broadcastPort string if master { networkLogger.Print("Connecting as master") localPort = masterPort broadcastPort = slavePort } else { networkLogger.Print("Connecting as slave") localPort = slavePort broadcastPort = masterPort } laddr, err := net.ResolveUDPAddr("udp", ":"+localPort) if err != nil { networkLogger.Print(err) } conn, err := net.ListenUDP("udp", laddr) if err != nil { networkLogger.Print("Failed to connect") return } defer conn.Close() go listenServer(conn, receiveChannel, networkLogger) broadcastServer(conn, broadcastPort, sendChannel, networkLogger) }
func logMaybeMap(l *log.Logger, args ...interface{}) { msg, mok := args[0].(string) fields, fok := args[1].(map[string]interface{}) if !(mok && fok) { l.Println(args) return } msg = msg + ": " for k, v := range fields { msg = fmt.Sprintf("%s %s = %v", msg, k, v) } l.Print(msg) return }
func logWithLongFile(l *log.Logger, format string, v ...interface{}) { _, file, line, _ := runtime.Caller(2) // Shorten the path. // From // /builddir/build/BUILD/heketi-3f4a5b1b6edff87232e8b24533c53b4151ebd9c7/src/github.com/heketi/heketi/apps/glusterfs/volume_entry.go // to // src/github.com/heketi/heketi/apps/glusterfs/volume_entry.go i := strings.Index(file, "/src/") if i == -1 { i = 0 } l.Print(fmt.Sprintf("%v:%v: ", file[i:], line) + fmt.Sprintf(format, v...)) }
func displaySummary(logger *log.Logger, start time.Time, files, rawLines, validLines int) { defer T.Un(T.Trace("")) delta := time.Since(start) summary := fmt.Sprintf("Parsed %s/%s(%.4f%s) lines in %d files within %s [%.1f lines per second]\n", size.Comma(int64(validLines)), size.Comma(int64(rawLines)), 100*float64(validLines)/float64(rawLines), "%%", files, delta, float64(rawLines)/delta.Seconds()) // render to both log and stderr logger.Print(summary) fmt.Fprintf(os.Stderr, summary) }
// ApacheCombinedFormat returns a middleware that logs http requests // to the given logger using the Apache Combined log format. func ApacheCombinedFormat(l *log.Logger) httpmux.MiddlewareFunc { return func(next http.HandlerFunc) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { start := time.Now() rw := NewResponseWriter(w) next(rw, r) b := apacheCommonLog(rw, r, start) b.Write([]byte(" ")) fmt.Fprintf(b, "%q %q", r.Header.Get("Referer"), r.Header.Get("User-Agent"), ) l.Print(b.String()) putBuffer(b) } } }
// checkPkgPath checks the directories of compiled packages taking an import or
// filesystem path.
// Returns "pkgTowatch", the absolute path of compiled packages; and "pkgFiles",
// for systems without kernel subsystem like inotify.
func checkPkgPath(path []string, logg *log.Logger) (pkgTowatch, pkgFiles []string, err error) {
	var hasError bool
	pkgTowatch = make([]string, len(path))
	for i, p := range path {
		pkg, err := build.Import(p, build.Default.GOPATH, build.AllowBinary)
		// NOTE(review): the Import error check below is deliberately
		// commented out, so pkg is used even when Import fails — confirm
		// this is intended (a zero build.Package is not a command, so
		// processing falls through to the Stat/Glob path below).
		/*if err != nil {
			logg.Print("FAIL! checkPkgPath: at getting directory: ", err)
			hasError = true
			continue
		}*/
		// Commands (binaries) cannot be watched, only packages.
		if pkg.IsCommand() {
			logg.Print("FAIL! no package: ", p)
			hasError = true
			continue
		}
		// e.g. <PkgRoot>/linux_amd64/<ImportPath>
		pkgPath := filepath.Join(pkg.PkgRoot, runtime.GOOS+"_"+runtime.GOARCH, pkg.ImportPath)
		_, err = os.Stat(pkgPath + _EXT_COMPILED)
		if err != nil && os.IsNotExist(err) {
			// Get compiled packages
			switch files, err := filepath.Glob(filepath.Join(pkgPath, "*"+_EXT_COMPILED)); {
			case err != nil:
				logg.Print("FAIL! checkPkgPath: at getting compiled packages: ", err)
				hasError = true
			case len(files) == 0:
				logg.Printf("FAIL! checkPkgPath: no compiled packages in directory %q", p)
				hasError = true
			case !USE_KERNEL:
				// there is to get all files to watching
				pkgFiles = append(pkgFiles, files...)
			}
		} else {
			pkgPath += _EXT_COMPILED
		}
		// Once any path has failed, later entries are left empty; the
		// function returns an error below anyway.
		if !hasError {
			pkgTowatch[i] = pkgPath
		}
	}
	if hasError {
		return nil, nil, errWatcher
	}
	return
}
// Process files where the category is missing or already populated. func filterCatLimit(files []fileData, client *mwclient.Client, verbose *log.Logger, catFileLimit int32, catCounts map[string]int32, stats *stats) { for i := range files { if !files[i].processed && files[i].catMapped != "" { count, found := catCounts[files[i].catMapped] if !found { warn.Print(files[i].title, "\n", "Mapped category doesn't exist: ", files[i].catMapped) files[i].warning = files[i].catMapped + " doesn't exist" stats.warnings++ files[i].processed = true continue } if catFileLimit > 0 && count >= catFileLimit { stats.populated++ verbose.Print(files[i].title, "\n", "Already populated: ", files[i].catMapped) files[i].processed = true continue } } } }
// sysWatcher starts the watcher. func sysWatcher(cmdTocompile string, pkgTowatch []string, logg *log.Logger) (*pkgWatcher, error) { watcher, err := inotify.NewWatcher() if err != nil { logg.Print("FAIL! sysWatcher: ", err) return nil, errWatcher } ok := true // Watch every path for _, path := range pkgTowatch { if err = watcher.AddWatch(path, inotify.IN_MODIFY); err != nil { logg.Print("FAIL! sysWatcher: ", err) ok = false } } if !ok { return nil, errWatcher } return &pkgWatcher{watcher, logg, cmdTocompile}, nil }
// InitMaster runs the master role: it waits for a slave to volunteer as
// backup — falling back to using itself after backupDeadline elapses —
// then hands control to masterLoop for each usable backup, resuming the
// wait (and re-arming the deadline) whenever masterLoop returns.
// This function never returns.
func InitMaster(events com.MasterEvent, initialOrders []order.Order, initialSlaves map[network.IP]com.Slave, masterLogger log.Logger) {
	backupDeadlineTimer := time.NewTimer(backupDeadline)
	selfAsBackup := false
	orders := initialOrders
	slaves := initialSlaves
	masterLogger.Print("Waiting for backup")
	for {
		select {
		case <-backupDeadlineTimer.C:
			masterLogger.Print("Not contacted by external slave within deadline. Can now use self as backup.")
			selfAsBackup = true
		case message := <-events.FromSlaves:
			// Ignore payloads that do not decode as slave messages.
			_, err := com.DecodeSlaveMessage(message.Data)
			if err != nil {
				break
			}
			// External slaves are accepted immediately; our own address
			// only counts as backup once the deadline has passed.
			if (selfAsBackup && message.Address == myIP) || message.Address != myIP {
				orders, slaves = masterLoop(events, message.Address, orders, slaves, masterLogger)
				masterLogger.Print("Waiting for new backup")
				backupDeadlineTimer.Reset(backupDeadline)
			}
		}
	}
}
/* Sort every line which arrives through reader onto one of the channels from
 * s, depending on the line's content. */
func (s stream_channels) sort_stream(conn *net.Conn, logger *log.Logger) {
	reader := bufio.NewReader(*conn)
	for {
		str, err := reader.ReadString('\n')
		if err != nil {
			//something went wrong, stop sorting
			return
		}
		if len(str) > 0 {
			if strings.HasPrefix(str, ">") {
				/* end of a shell command execution, create clean newline */
				s.rcv_other <- ">\n"
				/* remove > from str*/
				str = strings.TrimPrefix(str, "> ")
			}
			// NOTE(review): when err == nil, str still carries the
			// trailing '\n' from ReadString, so this comparison appears
			// unable to match — likely dead code, consistent with the
			// TODO below; confirm the intended EOF framing.
			if str == "EOF" {
				// TODO properly
				fmt.Println("found an eof")
				break
			}
			/* If there's something left, log, check line content and sort */
			if len(str) > 0 {
				logger.Print(str)
				switch get_content_type(str) {
				case CONTENT_TYPE_JSON:
					/* this line contains a JSON */
					s.rcv_json <- str
				case CONTENT_TYPE_OTHER:
					/* this line contains something else */
					s.rcv_other <- str
				}
			}
		}
	}
}
func StartWorker(WorkQueue chan File, log *log.Logger) { for { select { case f := <-WorkQueue: startTime := time.Now().UTC() jobId := "b-" + randomString(5) + " " log.SetPrefix(jobId) log.Print("Batch process starting: " + f.Tag + ", " + f.Filename) // Simulate some processing time if f.MediaType() == "image" { err := f.GenerateThumbnail() if err != nil { log.Print(err) } } finishTime := time.Now().UTC() elapsedTime := finishTime.Sub(startTime) log.Println("Completed in: " + elapsedTime.String()) } } }
// mapCategories determines each file's Commons category from its
// imageinfo (Exif) camera make/model, if possible. Files without
// camera details are marked processed immediately; otherwise catMapped
// is filled in (or left blank so a later stage can inspect the file's
// current categories before warning).
func mapCategories(files []fileData, verbose *log.Logger, categoryMap map[string]string, catRegex []catRegex, stats *stats) {
	for i := range files {
		var err error
		files[i].title, err = files[i].pageObj.GetString("title")
		if err != nil {
			// A page object without a title breaks an invariant of the
			// upstream query result, so treat it as a programmer error.
			panic(err)
		}
		imageinfo, err := files[i].pageObj.GetObjectArray("imageinfo")
		if err == nil {
			files[i].make, files[i].model = extractCamera(imageinfo[0])
		}
		if err != nil || (files[i].make == "" && files[i].model == "") {
			verbose.Print(files[i].title, "\n", "No camera details in Exif")
			files[i].processed = true
			continue
		}
		stats.withCamera++
		// Category mapping: first try the simple map for an exact
		// match (which is fast), when try each regex match in turn.
		// If mapping fails, processing continues with blank catMapped
		// to determine file's current categories before displaying a
		// warning.
		key := files[i].make + files[i].model
		var found bool
		files[i].catMapped, found = categoryMap[key]
		if !found {
			files[i].catMapped = applyRegex(key, catRegex)
		}
		// Some Canon models need further disambiguation from imageinfo.
		if files[i].catMapped == "Category:CanonS100 (special case)" {
			files[i].catMapped = mapCanonS100(imageinfo)
		} else if files[i].catMapped == "Category:CanonS110 (special case)" {
			files[i].catMapped = mapCanonS110(imageinfo)
		}
	}
}