func TestLoadDaemonCliConfigWithLogLevel(t *testing.T) {
	c := &daemon.Config{}
	common := &cli.CommonFlags{}
	f, err := ioutil.TempFile("", "docker-config-")
	if err != nil {
		t.Fatal(err)
	}

	configFile := f.Name()
	f.Write([]byte(`{"log-level": "warn"}`))
	f.Close()

	flags := mflag.NewFlagSet("test", mflag.ContinueOnError)
	flags.String([]string{"-log-level"}, "", "")

	loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile)
	if err != nil {
		t.Fatal(err)
	}
	if loadedConfig == nil {
		t.Fatalf("expected configuration %v, got nil", c)
	}
	if loadedConfig.LogLevel != "warn" {
		t.Fatalf("expected warn log level, got %v", loadedConfig.LogLevel)
	}
	if logrus.GetLevel() != logrus.WarnLevel {
		t.Fatalf("expected warn log level, got %v", logrus.GetLevel())
	}
}
// ReadBinary reads bytes into a Report.
//
// Will decompress the binary if gzipped is true, and will use the given
// codecHandle to decode it.
func (rep *Report) ReadBinary(r io.Reader, gzipped bool, codecHandle codec.Handle) error {
	var err error
	var compressedSize, uncompressedSize uint64

	// We have historically had trouble with reports being too large. We are
	// keeping this instrumentation around to help us implement
	// weaveworks/scope#985.
	if log.GetLevel() == log.DebugLevel {
		r = byteCounter{next: r, count: &compressedSize}
	}
	if gzipped {
		r, err = gzip.NewReader(r)
		if err != nil {
			return err
		}
	}
	if log.GetLevel() == log.DebugLevel {
		r = byteCounter{next: r, count: &uncompressedSize}
	}
	if err := codec.NewDecoder(r, codecHandle).Decode(&rep); err != nil {
		return err
	}
	log.Debugf(
		"Received report sizes: compressed %d bytes, uncompressed %d bytes (%.2f%%)",
		compressedSize,
		uncompressedSize,
		float32(compressedSize)/float32(uncompressedSize)*100,
	)
	return nil
}
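// The byteCounter type is not part of this snippet; below is a minimal
// sketch consistent with the call sites above (the field names next/count
// are taken from those call sites, the Read implementation is an assumption):
type byteCounter struct {
	next  io.Reader
	count *uint64
}

// Read forwards to the wrapped reader and tallies how many bytes passed through.
func (c byteCounter) Read(p []byte) (n int, err error) {
	n, err = c.next.Read(p)
	*c.count += uint64(n)
	return n, err
}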
func TestDisableDebug(t *testing.T) {
	DisableDebug()
	if os.Getenv("DEBUG") != "" {
		t.Fatalf("expected DEBUG=\"\", got %s\n", os.Getenv("DEBUG"))
	}
	if logrus.GetLevel() != logrus.InfoLevel {
		t.Fatalf("expected log level %v, got %v\n", logrus.InfoLevel, logrus.GetLevel())
	}
}
func (s *ConfigTestSuite) TestDebugMode(c *C) {
	type AnonConfig struct {
		Debug bool `json:"Debug"`
	}
	utils.WriteJsonFile(AnonConfig{Debug: true}, s.configPath)
	LoadSettingsFromFile()
	c.Assert(log.GetLevel(), Equals, log.DebugLevel)

	utils.WriteJsonFile(AnonConfig{Debug: false}, s.configPath)
	// No need to reset first: the config file exists and loading it will
	// overwrite the previous level.
	LoadSettingsFromFile()
	c.Assert(log.GetLevel(), Equals, log.InfoLevel)
}
func TestClientDebugEnabled(t *testing.T) {
	defer utils.DisableDebug()

	clientFlags.Common.FlagSet.Parse([]string{"-D"})
	clientFlags.PostParse()

	if os.Getenv("DEBUG") != "1" {
		t.Fatal("expected debug enabled, got false")
	}
	if logrus.GetLevel() != logrus.DebugLevel {
		t.Fatalf("expected logrus debug level, got %v", logrus.GetLevel())
	}
}
func TestEnableDebug(t *testing.T) {
	defer func() {
		os.Setenv("DEBUG", "")
		logrus.SetLevel(logrus.InfoLevel)
	}()
	EnableDebug()
	if os.Getenv("DEBUG") != "1" {
		t.Fatalf("expected DEBUG=1, got %s\n", os.Getenv("DEBUG"))
	}
	if logrus.GetLevel() != logrus.DebugLevel {
		t.Fatalf("expected log level %v, got %v\n", logrus.DebugLevel, logrus.GetLevel())
	}
}
func (n *node) rquloop() {
	for {
		time.Sleep(rqudelay)
		now := time.Now()
		n.pqs.mux.RLock()
		for k, v := range n.pqs.queues {
			v.L.Lock()
			_, exist := v.queue[v.waitingSeqid]
			if v.maxseqid > v.waitingSeqid && !exist && v.waitTime.Before(now.Add(-rqudelay)) {
				senderid, connid := unpacketKey(k)
				waiting := v.waitingSeqid
				v.waitTime = now.Add(rqudelay)
				go func() {
					n.write(&packet{
						Senderid: senderid,
						Connid:   connid,
						Seqid:    waiting,
						Cmd:      rqu,
						Time:     now.UnixNano(),
					})
					if logrus.GetLevel() >= logrus.DebugLevel {
						logrus.WithFields(logrus.Fields{
							"Connid":       connid,
							"StillWaiting": waiting,
							"role":         n.role(),
						}).Debugln("send packet request")
					}
				}()
			}
			v.L.Unlock()
		}
		n.pqs.mux.RUnlock()
	}
}
func (c *Command) findContainers(client *docker.Client) ([]docker.APIContainers, error) {
	results, err := client.ListContainers(docker.ListContainersOptions{})
	if err != nil {
		return nil, err
	}

	stopped := make([]docker.APIContainers, 0)
	for _, container := range results {
		if log.GetLevel() >= log.DebugLevel {
			b, err := json.Marshal(&container)
			if err == nil {
				log.Debugln("check container: ", string(b))
			}
		}
		if c.Name != "" && !strings.HasPrefix(container.ID, c.Name) {
			continue
		} else if c.Image != "" && container.Image != c.Image {
			continue
		}
		log.WithFields(log.Fields{
			"id": container.ID,
		}).Debugln("find target container.")
		stopped = append(stopped, container)
	}
	return stopped, nil
}
// EnvmanRun ...
func EnvmanRun(envstorePth, workDirPth string, cmd []string) (int, error) {
	logLevel := log.GetLevel().String()
	args := []string{"--loglevel", logLevel, "--path", envstorePth, "run"}
	args = append(args, cmd...)

	return cmdex.RunCommandInDirAndReturnExitCode(workDirPth, "envman", args...)
}
// TestWithLevel runs callable with changed logging output and log level.
func TestWithLevel(level string, callable func(*bytes.Buffer)) {
	originalLevel := logrus.GetLevel()
	defer logrus.SetLevel(originalLevel)
	SetLevel(level)

	Test(callable)
}
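// A hedged usage sketch for TestWithLevel: assuming Test redirects logrus
// output into the buffer it hands to the callback, a caller can assert on
// log output at a given level (the message text here is made up):
func exampleTestWithLevel() {
	TestWithLevel("debug", func(out *bytes.Buffer) {
		logrus.Debug("visible at debug level")
		fmt.Println(strings.Contains(out.String(), "visible at debug level")) // true
	})
}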
func (s *serv) listen() {
	switch s.proto {
	case tcp:
		ln, err := net.Listen("tcp", s.addr)
		if err != nil {
			logrus.Fatalln("net.Listen error", s.addr, err)
		}
		s.setalive()
		if logrus.GetLevel() >= logrus.DebugLevel {
			logrus.Debugln("listen to", s.addr)
		}
		go acceptTCP(ln, s.tcphandler)
	case udp:
		udpaddr, err := net.ResolveUDPAddr("udp", s.addr)
		if err != nil {
			logrus.Fatalln("net.ResolveUDPAddr error", s.addr, err)
		}
		udpconn, err := net.ListenUDP("udp", udpaddr)
		if err != nil {
			logrus.Fatalln("net.ListenUDP error", udpaddr, err)
		}
		s.setalive()
		go func() {
			for {
				s.udphandler(udpconn)
			}
		}()
	}
}
func runCharon(logFile string) {
	// Ignore error
	os.Remove("/var/run/charon.vici")

	args := []string{}
	for _, i := range strings.Split("dmn|mgr|ike|chd|cfg|knl|net|asn|tnc|imc|imv|pts|tls|esp|lib", "|") {
		args = append(args, "--debug-"+i)
		if logrus.GetLevel() == logrus.DebugLevel {
			args = append(args, "3")
		} else {
			args = append(args, "1")
		}
	}

	cmd := exec.Command("charon", args...)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr

	if logFile != "" {
		output, err := os.OpenFile(logFile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
		if err != nil {
			logrus.Fatalf("Failed to log to file %s: %v", logFile, err)
		}
		defer output.Close()
		cmd.Stdout = output
		cmd.Stderr = output
	}

	cmd.SysProcAttr = &syscall.SysProcAttr{
		Pdeathsig: syscall.SIGTERM,
	}

	logrus.Fatalf("charon exited: %v", cmd.Run())
}
// ExecuteWithOutput executes a command. If logrus's verbosity level is set to
// debug, it will continuously output the command's output while it waits.
func ExecuteWithOutput(cmd *exec.Cmd) (outStr string, err error) {
	// connect to stdout and stderr for filtering purposes
	errPipe, err := cmd.StderrPipe()
	if err != nil {
		log.WithFields(log.Fields{
			"cmd": cmd.Args,
		}).Fatal("Couldn't connect to command's stderr")
	}
	outPipe, err := cmd.StdoutPipe()
	if err != nil {
		log.WithFields(log.Fields{
			"cmd": cmd.Args,
		}).Fatal("Couldn't connect to command's stdout")
	}
	_ = bufio.NewReader(errPipe)
	outReader := bufio.NewReader(outPipe)

	// start the command and filter the output
	if err = cmd.Start(); err != nil {
		return "", err
	}
	outScanner := bufio.NewScanner(outReader)
	for outScanner.Scan() {
		outStr += outScanner.Text() + "\n"
		if log.GetLevel() == log.DebugLevel {
			fmt.Println(outScanner.Text())
		}
	}
	err = cmd.Wait()
	return outStr, err
}
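// A hedged usage sketch for ExecuteWithOutput: with the level at debug, each
// line of stdout is echoed as it arrives, while the returned string still
// collects everything (the echo command is an arbitrary illustration):
func exampleExecuteWithOutput() {
	log.SetLevel(log.DebugLevel)
	out, err := ExecuteWithOutput(exec.Command("echo", "hello"))
	if err != nil {
		log.WithError(err).Error("command failed")
		return
	}
	fmt.Print(out) // "hello\n"
}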
func (server *Server) SendMessage(ctx context.Context, in *pb.Message) (*pb.SendMessageResponse, error) {
	if in.Language == "" {
		in.Language = server.Config.DefaultLanguage
	}
	n := len(in.Targets)
	logrus.Debugf("SendMessage with event='%s' and language='%s' to #%d target(s)", in.Event, in.Language, n)

	results := make([]*pb.MessageTargetResponse, 0)
	ch := make(chan drivers.DriverResult, 1)
	go server.send(ctx, in, ch)

	for i := 0; i < n; i++ {
		r := <-ch
		resp := &pb.MessageTargetResponse{
			Target: string(r.Type),
			Output: "Success",
		}
		if r.Err != nil {
			resp.Output = r.Err.Error()
		}
		results = append(results, resp)
	}

	if logrus.GetLevel() >= logrus.DebugLevel {
		for _, t := range results {
			logrus.Debugf("SendMessage output[%s]= %s", t.Target, t.Output)
		}
	}
	return pb.NewMessageResponse(results), nil
}
func (s *CacheHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	var uid string

	if session, ok := SessionFromContext(req.Context()); ok {
		uid = session.Token.UidString()
	} else {
		sendRequestProblem(w, req, http.StatusBadRequest, errors.New("CacheHandler no UID"))
		return
	}

	if req.Method == "GET" && infoCollectionsRoute.MatchString(req.URL.Path) { // info/collections
		s.infoCollection(uid, w, req)
	} else if req.Method == "GET" && infoConfigurationRoute.MatchString(req.URL.Path) { // info/configuration
		s.infoConfiguration(uid, w, req)
	} else {
		// clear the cache for the user
		if req.Method == "POST" || req.Method == "PUT" || req.Method == "DELETE" {
			if log.GetLevel() == log.DebugLevel {
				log.WithFields(log.Fields{
					"uid": uid,
				}).Debug("CacheHandler clear")
			}
			s.cache.Set(uid, nil)
		}
		s.handler.ServeHTTP(w, req)
		return
	}
}
func debugCmdFuncInfo(c *cli.Context) {
	if log.GetLevel() < log.DebugLevel {
		return
	}

	// get function name
	dbgMsg := ""
	pc, _, _, ok := runtime.Caller(1)
	if ok {
		dbgMsg = runtime.FuncForPC(pc).Name()
		i := strings.LastIndex(dbgMsg, "/")
		if i != -1 {
			dbgMsg = dbgMsg[i+1:]
		}
	} else {
		dbgMsg = "<unknown function name>"
	}
	dbgMsg = fmt.Sprintf("func %s", dbgMsg)

	// get used flags
	for _, flag := range c.FlagNames() {
		dbgMsg = fmt.Sprintf("%s\n\t%s=%+v", dbgMsg, flag, c.Generic(flag))
	}
	// use Debug, not Debugf: dbgMsg may contain % characters from flag values
	log.Debug(dbgMsg)
}
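// A standalone sketch of the runtime.Caller technique used above: resolve
// the name of the calling function and trim it to its last path element
// (the helper name callerName is hypothetical):
func callerName() string {
	pc, _, _, ok := runtime.Caller(1)
	if !ok {
		return "<unknown function name>"
	}
	name := runtime.FuncForPC(pc).Name()
	if i := strings.LastIndex(name, "/"); i != -1 {
		name = name[i+1:]
	}
	return name
}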
// Initialize the repo to be used to announce/write config.
// A separate repo is initialized to read incoming announcements.
func initConfigWrite(networkCidr *net.IPNet, hostIface, gitRepoURL string) {
	var err error
	if !pathExists(EndpointPushSubDir) {
		log.Debugf("[ %s ] dir not found, creating it..", EndpointPushSubDir)
		if err = CreatePaths(EndpointPushSubDir); err != nil {
			log.Fatalf("Could not create the directory [ %s ]: %s", EndpointPushSubDir, err)
		} else {
			log.Warnf("Successfully created the config path [ %s ]", EndpointPushSubDir)
		}
	}
	// Create the cache subdirectories
	time.Sleep(1 * time.Second)
	localEndpointIP, _ := getIfaceAddrStr(hostIface)
	// Plain concatenation avoids the reflection cost of an fmt verb here.
	endpointFile := localEndpointIP + dotjson
	log.Debugf("The endpoint file name is [ %s ] ", endpointFile)
	log.Debugf("Announcing this endpoint using the source [ %s ] and advertising network [ %s ] to datastore file [ %s ]",
		localEndpointIP, networkCidr, endpointFile)
	endpointConfig := &LocalEndpoint{
		Endpoint: localEndpointIP,
		Network:  networkCidr.String(),
		Meta:     "",
	}
	var configAnnounce []LocalEndpoint
	configAnnounce = append(configAnnounce, *endpointConfig)
	marshallConfig(configAnnounce, configFormat, endpointFile)
	if log.GetLevel().String() == "debug" {
		printPretty(configAnnounce, "json")
	}
	// Push the announced config to the repo
	defer gitPushConfig()
}
// EnvmanEnvstoreTest ...
func EnvmanEnvstoreTest(pth string) error {
	logLevel := log.GetLevel().String()
	args := []string{"--loglevel", logLevel, "--path", pth, "print"}

	cmd := exec.Command("envman", args...)
	cmd.Stderr = os.Stderr

	return cmd.Run()
}
func createBitriseCallArgs(bitriseCommandToUse, inventoryBase64, configBase64, runParamJSONBase64, workflowNameOrTriggerPattern string) []string {
	logLevel := log.GetLevel().String()
	retArgs := []string{
		"--loglevel", logLevel,
	}

	if len(runParamJSONBase64) > 0 {
		// new style, all params in one (Base64 encoded) JSON
		retArgs = append(retArgs, bitriseCommandToUse, "--json-params-base64", runParamJSONBase64)
	} else {
		// old style, separate params
		retArgs = append(retArgs, bitriseCommandToUse, workflowNameOrTriggerPattern)
	}

	// config / bitrise.yml
	retArgs = append(retArgs, "--config-base64", configBase64)

	// inventory / secrets
	if inventoryBase64 != "" {
		retArgs = append(retArgs, "--inventory-base64", inventoryBase64)
	}

	return retArgs
}
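// An illustrative call in the old (separate params) style; the values are
// placeholders, not real Base64 payloads:
//
//	args := createBitriseCallArgs("run", "", "Y29uZmln", "", "primary")
//	// -> [--loglevel <current level> run primary --config-base64 Y29uZmln]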
// handleWithGlobalMiddlewares wraps the handler function for a request with
// the server's global middlewares. The order of the middlewares is backwards,
// meaning that the first in the list will be evaluated last.
func (s *Server) handleWithGlobalMiddlewares(handler httputils.APIFunc) httputils.APIFunc {
	next := handler

	handleVersion := middleware.NewVersionMiddleware(dockerversion.Version, api.DefaultVersion, api.MinVersion)
	next = handleVersion(next)

	if s.cfg.EnableCors {
		handleCORS := middleware.NewCORSMiddleware(s.cfg.CorsHeaders)
		next = handleCORS(next)
	}

	handleUserAgent := middleware.NewUserAgentMiddleware(s.cfg.Version)
	next = handleUserAgent(next)

	// Only want this on debug level
	if s.cfg.Logging && logrus.GetLevel() == logrus.DebugLevel {
		next = middleware.DebugRequestMiddleware(next)
	}

	if len(s.cfg.AuthorizationPluginNames) > 0 {
		s.authZPlugins = authorization.NewPlugins(s.cfg.AuthorizationPluginNames)
		handleAuthorization := middleware.NewAuthorizationMiddleware(s.authZPlugins)
		next = handleAuthorization(next)
	}

	return next
}
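// A minimal sketch of the wrapping pattern above, reduced to plain
// http.Handler middlewares (the chain helper and middleware signature are
// hypothetical, not Docker's API): because each middleware wraps the
// previous value of next, the last one applied sees the request first.
func chain(h http.Handler, middlewares ...func(http.Handler) http.Handler) http.Handler {
	next := h
	for _, mw := range middlewares {
		next = mw(next)
	}
	return next
}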
func (n nfsDriver) mountVolume(name, source, dest string, version int) error {
	var cmd string

	options := merge(n.mountm.GetOptions(name), n.nfsopts)
	opts := ""
	if val, ok := options[NfsOptions]; ok {
		opts = val
	}

	mountCmd := "mount"
	if log.GetLevel() == log.DebugLevel {
		mountCmd = mountCmd + " -v"
	}

	switch version {
	case 3:
		log.Debugf("Mounting with NFSv3 - src: %s, dest: %s", source, dest)
		if len(opts) < 1 {
			opts = DefaultNfsV3
		}
		cmd = fmt.Sprintf("%s -t nfs -o %s %s %s", mountCmd, opts, source, dest)
	default:
		log.Debugf("Mounting with NFSv4 - src: %s, dest: %s", source, dest)
		if len(opts) > 0 {
			cmd = fmt.Sprintf("%s -t nfs4 -o %s %s %s", mountCmd, opts, source, dest)
		} else {
			cmd = fmt.Sprintf("%s -t nfs4 %s %s", mountCmd, source, dest)
		}
	}
	log.Debugf("exec: %s\n", cmd)
	return run(cmd)
}
// infoCollection caches a user's info/collections data. It will clear
// the cached data when a POST, PUT, or DELETE request comes through.
func (s *CacheHandler) infoCollection(uid string, w http.ResponseWriter, req *http.Request) {
	// cache hit
	if data, err := s.cache.Get(uid); err == nil && len(data) > 0 {
		// TODO: change this
		lastModified := string(data[:lastModifiedBytes])
		if log.GetLevel() == log.DebugLevel {
			log.WithFields(log.Fields{
				"uid":      uid,
				"modified": lastModified,
				"data_len": len(data) - lastModifiedBytes,
			}).Debug("CacheHandler HIT")
		}

		modified, _ := ConvertTimestamp(lastModified)
		if sentNotModified(w, req, modified) {
			return
		}

		// add the X-Last-Modified header
		w.Header().Set("Content-Type", "application/json")
		w.Header().Set("X-Last-Modified", lastModified)
		io.Copy(w, bytes.NewReader(data[lastModifiedBytes:]))
		return
	}

	// cache miss...
	cacheWriter := newCacheResponseWriter(w)
	s.handler.ServeHTTP(cacheWriter, req)

	// cache the results for next time if successful response
	if cacheWriter.code == http.StatusOK {
		data := make([]byte, cacheWriter.Len()+lastModifiedBytes)
		copy(data, w.Header().Get("X-Last-Modified"))
		copy(data[lastModifiedBytes:], cacheWriter.Bytes())
		s.cache.Set(uid, data)
		if log.GetLevel() == log.DebugLevel {
			log.WithFields(log.Fields{
				"uid":      uid,
				"modified": w.Header().Get("X-Last-Modified"),
			}).Debug("CacheHandler MISS")
		}
	}
}
// RunEnvmanRunInDir ...
func RunEnvmanRunInDir(dir string, cmd []string, logLevel string) error {
	if logLevel == "" {
		logLevel = log.GetLevel().String()
	}
	args := []string{"--loglevel", logLevel, "run"}
	args = append(args, cmd...)

	return RunCommandInDir(dir, "envman", args...)
}
func TestClientDebugEnabled(t *testing.T) {
	defer utils.DisableDebug()

	cmd := newDockerCommand(&command.DockerCli{})
	cmd.Flags().Set("debug", "true")

	if err := cmd.PersistentPreRunE(cmd, []string{}); err != nil {
		t.Fatalf("Unexpected error: %s", err.Error())
	}

	if os.Getenv("DEBUG") != "1" {
		t.Fatal("expected debug enabled, got false")
	}
	if logrus.GetLevel() != logrus.DebugLevel {
		t.Fatalf("expected logrus debug level, got %v", logrus.GetLevel())
	}
}
// RegisterReportPostHandler registers the handler for report submission
func RegisterReportPostHandler(a Adder, router *mux.Router) {
	post := router.Methods("POST").Subrouter()
	post.HandleFunc("/api/report", requestContextDecorator(func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
		var (
			rpt                              report.Report
			reader                           io.Reader = r.Body
			err                              error
			compressedSize, uncompressedSize uint64
		)
		if log.GetLevel() == log.DebugLevel {
			reader = byteCounter{next: reader, count: &compressedSize}
		}
		if strings.Contains(r.Header.Get("Content-Encoding"), "gzip") {
			reader, err = gzip.NewReader(reader)
			if err != nil {
				http.Error(w, err.Error(), http.StatusBadRequest)
				return
			}
		}
		if log.GetLevel() == log.DebugLevel {
			reader = byteCounter{next: reader, count: &uncompressedSize}
		}

		// pick a decoder based on the Content-Type header; gob is the default
		decoder := gob.NewDecoder(reader).Decode
		if strings.HasPrefix(r.Header.Get("Content-Type"), "application/json") {
			decoder = codec.NewDecoder(reader, &codec.JsonHandle{}).Decode
		} else if strings.HasPrefix(r.Header.Get("Content-Type"), "application/msgpack") {
			decoder = codec.NewDecoder(reader, &codec.MsgpackHandle{}).Decode
		}
		if err := decoder(&rpt); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}

		log.Debugf(
			"Received report sizes: compressed %d bytes, uncompressed %d bytes (%.2f%%)",
			compressedSize,
			uncompressedSize,
			float32(compressedSize)/float32(uncompressedSize)*100,
		)
		a.Add(ctx, rpt)
		w.WriteHeader(http.StatusOK)
	}))
}
func main() {
	_, err := flags.Parse(&opts)
	if err != nil {
		os.Exit(1)
	}
	if opts.Help {
		showUsage()
		os.Exit(1)
	}

	// Check required fields (log.Fatal exits the process itself)
	if opts.GitRepoFlag == "" {
		showUsage()
		log.Fatal("Required repo name is missing")
	} else {
		control.GitDatastoreURL = opts.GitRepoFlag // Bind to a global var
	}
	if opts.TimeIntervalFlag < control.DefaultIntervalMin {
		showUsage()
		log.Fatal("The minimum polling interval is 10 seconds.")
	}
	// Bind opts to a couple global vars for convenience
	if opts.BaseDirectoryFlag != "" {
		control.BaseDirectory = opts.BaseDirectoryFlag
	}

	// var timeInterval int
	// timeInterval = opts.TimeIntervalFlag
	// if opts.TimeIntervalFlag == 0 {
	// 	timeInterval = control.DefaultInterval
	// 	log.Debug("Polling interval not specified, setting it to 90 seconds")
	// }

	// Set logrus logging level, default is Info
	switch opts.LogLevelFlag {
	case "debug":
		log.SetLevel(log.DebugLevel)
	case "info":
		log.SetLevel(log.InfoLevel)
	case "warn":
		log.SetLevel(log.WarnLevel)
	case "error":
		log.SetLevel(log.ErrorLevel)
	default:
		log.SetLevel(log.InfoLevel)
	}
	log.Debug("Logging level is set to : ", log.GetLevel())

	// control.Run(opts.GitRepoFlag, timeInterval, opts.Daemon)
	// if opts.Daemon == true {
	// 	control.RunGit() // run as a daemon for every (n) seconds
	// } else {
	// 	control.RunGit() // TODO: if false, run one time and exit
	// }
	// g := control.gitNet(opts.GitRepoFlag, timeInterval)
}
func (d *DB) OpenWithConfig(conf *Config) (err error) {
	d.db, err = sql.Open("sqlite3", d.Path)
	if err != nil {
		return
	}

	// settings to apply to the database
	pragmas := []string{
		"PRAGMA page_size=4096;",
		"PRAGMA journal_mode=WAL;",
	}

	if conf != nil {
		if log.GetLevel() == log.DebugLevel {
			log.WithFields(log.Fields{
				"cache_size": conf.CacheSize,
			}).Debug("db config")
		}
		pragmas = append(pragmas, fmt.Sprintf("PRAGMA cache_size=%d;", conf.CacheSize))
	}

	for _, p := range pragmas {
		if _, err = d.db.Exec(p); err != nil {
			return errors.Wrapf(err, "Could not set PRAGMA: %s", p)
		}
	}

	// Initialize Schema 0 if it doesn't exist
	sqlCheck := "SELECT name from sqlite_master WHERE type='table' AND name=?"
	var name string
	if err := d.db.QueryRow(sqlCheck, "KeyValues").Scan(&name); err == sql.ErrNoRows {
		tx, err := d.db.Begin()
		if err != nil {
			return err
		}
		if _, err := tx.Exec(SCHEMA_0); err != nil {
			if rollbackErr := tx.Rollback(); rollbackErr != nil {
				return rollbackErr
			}
			return err
		}
		log.WithFields(log.Fields{
			"path": d.Path,
		}).Debug("DB initialized")
		if err := tx.Commit(); err != nil {
			return err
		}
	}

	return nil
}
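// A hypothetical call site for OpenWithConfig (the DB and Config types are
// the ones from this snippet; the path and cache size are made-up values):
func exampleOpenWithConfig() {
	db := &DB{Path: "/tmp/example.db"}
	if err := db.OpenWithConfig(&Config{CacheSize: 2000}); err != nil {
		log.WithError(err).Fatal("could not open db")
	}
}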
func (e *engine) runJobNotify(r *Task, client dockerclient.Client) error {
	name := fmt.Sprintf("drone_build_%d_notify", r.Build.ID)

	defer func() {
		client.KillContainer(name, "9")
		client.RemoveContainer(name, true, true)
	}()

	// encode the build payload to write to stdin
	// when launching the build container
	in, err := encodeToLegacyFormat(r)
	if err != nil {
		log.Errorf("failure to marshal work. %s", err)
		return err
	}

	args := DefaultNotifyArgs
	args = append(args, "--")
	args = append(args, string(in))

	conf := &dockerclient.ContainerConfig{
		Image:      DefaultAgent,
		Entrypoint: DefaultEntrypoint,
		Cmd:        args,
		Env:        e.envs,
		HostConfig: dockerclient.HostConfig{
			Binds:            []string{"/var/run/docker.sock:/var/run/docker.sock"},
			MemorySwappiness: -1,
		},
		Volumes: map[string]struct{}{
			"/var/run/docker.sock": struct{}{},
		},
	}

	log.Infof("preparing container %s", name)
	info, err := docker.Run(client, conf, name)
	if err != nil {
		log.Errorf("Error starting notification container %s. %s", name, err)
	}

	// for debugging purposes we print a failed notification execution's
	// output to the logs. Otherwise we have no way to troubleshoot failed
	// notifications. This is temporary code until I've come up with
	// a better solution.
	if info != nil && info.State.ExitCode != 0 && log.GetLevel() >= log.InfoLevel {
		var buf bytes.Buffer
		rc, err := client.ContainerLogs(name, docker.LogOpts)
		if err == nil {
			defer rc.Close()
			stdcopy.StdCopy(&buf, &buf, io.LimitReader(rc, 50000))
		}
		log.Infof("Notification container %s exited with %d", name, info.State.ExitCode)
		log.Infoln(buf.String())
	}

	return err
}
func (t *trafcacc) Status() {
	// print status
	s := new(runtime.MemStats)
	runtime.ReadMemStats(s)

	fields := logrus.Fields{
		"NumGoroutine": runtime.NumGoroutine(),
		"Alloc":        humanize.Bytes(s.Alloc),
		"HeapObjects":  s.HeapObjects,
	}

	if logrus.GetLevel() >= logrus.DebugLevel {
		t.pool.RLock()
		// var us, ts, ur, tr string
		var su, st, ru, rt uint64
		var total, alived int
		var latency string
		for _, v := range t.pool.pool {
			total++
			if v.isAlive() {
				alived++
			}
			s := atomic.LoadUint64(&v.sent)
			r := atomic.LoadUint64(&v.recv)
			if v.proto == udp {
				su += s
				ru += r
				// us += humanbyte(s) + ","
				// ur += humanbyte(r) + ","
			} else {
				st += s
				rt += r
				// ts += humanbyte(s) + ","
				// tr += humanbyte(r) + ","
			}
			lc := int(atomic.LoadInt64(&v.latency) / int64(time.Millisecond))
			if lc > 100 {
				latency += strconv.Itoa(lc) + ","
			}
		}
		t.pool.RUnlock()

		fields["Sent(U)"] = humanbyte(su) // + "(" + strings.TrimRight(us, ",") + ")"
		fields["Recv(U)"] = humanbyte(ru) // + "(" + strings.TrimRight(ur, ",") + ")"
		fields["Sent(T)"] = humanbyte(st) // + "(" + strings.TrimRight(ts, ",") + ")"
		fields["Recv(T)"] = humanbyte(rt) // + "(" + strings.TrimRight(tr, ",") + ")"
		fields["POP(T)"] = humanbyte(atomic.LoadUint64(&t.pconn.pq().poptcp))
		fields["POP(U)"] = humanbyte(atomic.LoadUint64(&t.pconn.pq().popudp))
		fields["PQLEN"] = t.pconn.pq().len()
		fields["LATENCY"] = latency
		fields["ALIVE"] = strconv.Itoa(alived) + "/" + strconv.Itoa(total)
	}

	logrus.WithFields(fields).Infoln(t.roleString(), "status")
}
// run sends req and returns the http response from the server.
func (api *OssApi) run(req *request) (*http.Response, error) {
	u, err := req.url()
	if err != nil {
		return nil, err
	}

	hreq := &http.Request{
		URL:        u,
		Method:     req.method,
		ProtoMajor: 1,
		ProtoMinor: 1,
		Close:      true,
		Header:     req.headers,
	}

	if v, ok := req.headers["Content-Length"]; ok {
		hreq.ContentLength, _ = strconv.ParseInt(v[0], 10, 64)
		delete(req.headers, "Content-Length")
	}

	if req.payload != nil {
		hreq.Body = ioutil.NopCloser(bytes.NewReader(req.payload))
	}

	if log.GetLevel() == log.DebugLevel {
		dump, _ := httputil.DumpRequestOut(hreq, false)
		log.Debugf("request -> %s\n", dump)
	}

	hresp, err := http.DefaultClient.Do(hreq)
	if err != nil {
		return nil, err
	}

	if log.GetLevel() == log.DebugLevel {
		dump, _ := httputil.DumpResponse(hresp, false)
		log.Debugf("response -> %s\n", dump)
	}

	if hresp.StatusCode < 200 || hresp.StatusCode >= 300 {
		return nil, buildError(hresp)
	}
	return hresp, err
}
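// A standalone sketch of the gated dump pattern above: only pay the cost of
// httputil.DumpRequestOut/DumpResponse when debug logging is enabled (the
// function name debugRoundTrip is hypothetical):
func debugRoundTrip(req *http.Request) (*http.Response, error) {
	if log.GetLevel() == log.DebugLevel {
		if dump, err := httputil.DumpRequestOut(req, false); err == nil {
			log.Debugf("request -> %s", dump)
		}
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	if log.GetLevel() == log.DebugLevel {
		if dump, err := httputil.DumpResponse(resp, false); err == nil {
			log.Debugf("response -> %s", dump)
		}
	}
	return resp, nil
}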