//Simple pusher for testing func PusherProto(count int, finished chan int, msg *messaging.Food, port int) { log.Info("Starting pusher") socket, err := nano.NewPushSocket() if nil != err { log.Error(err) } defer socket.Close() socket.SetSendTimeout(500 * time.Millisecond) sport := strconv.Itoa(port) _, err = socket.Connect("tcp://localhost:" + sport) if nil != err { log.Error(err) return } log.Info("Connected and ready to send data") tot := 0 for { bytes, _ := msg.Marshal() _, err := socket.Send(bytes, 0) //blocking if nil != err { log.Error(err) continue } else { tot++ } if tot >= count { break } } log.Info("Finished sending data exiting") finished <- tot }
//Simple pusher for testing func Pusher(count int, finished chan int, port int) { socket, err := nano.NewPushSocket() if nil != err { log.Error(err) } defer socket.Close() sport := strconv.Itoa(port) _, err = socket.Connect("tcp://localhost:" + sport) if nil != err { log.Error(err) return } log.Info("Connected and ready to send data") tot := 0 bytes := []byte{'h', 'e', 'l', 'l', '\xc3', '\xb8', 'x', 'f', 'c', 'x', 'f'} for { _, err := socket.Send(bytes, 0) //blocking if nil != err { log.Error(err) } else { tot++ } if tot >= count { break } } finished <- tot }
func (s *ESAPIV0) Bulk(data *bytes.Buffer) { if data == nil || data.Len() == 0 { return } data.WriteRune('\n') url := fmt.Sprintf("%s/_bulk", s.Host) client := &http.Client{} reqest, _ := http.NewRequest("POST", url, data) if s.Auth != nil { reqest.SetBasicAuth(s.Auth.User, s.Auth.Pass) } resp, errs := client.Do(reqest) if errs != nil { log.Error(errs) return } body, err := ioutil.ReadAll(resp.Body) if err != nil { log.Error(err) return } log.Trace(url, string(body)) defer resp.Body.Close() defer data.Reset() if resp.StatusCode != 200 { log.Errorf("bad bulk response: %s %s", body, resp.StatusCode) return } }
func setInitLogging(logLevel string) { logLevel = strings.ToLower(logLevel) testConfig := ` <seelog type="sync" minlevel="` testConfig = testConfig + logLevel testConfig = testConfig + `"> <outputs formatid="main"> <filter levels="error"> <file path="./log/gopa.log"/> </filter> <console formatid="main" /> </outputs> <formats> <format id="main" format="[%Date(01-02) %Time] [%LEV] [%File:%Line,%FuncShort] %Msg%n"/> </formats> </seelog>` logger, err := log.LoggerFromConfigAsString(testConfig) if err != nil { log.Error("init config error,", err) } err = log.ReplaceLogger(logger) if err != nil { log.Error("init config error,", err) } }
func (this *RESTTransceiver) routine() { errors := 0 onHTTPError := func(err error) { log.Error(err) errors += 1 time.Sleep(time.Duration(errors) * time.Second) } onActionError := func(err error) { log.Error(err) } for { action, err := getAction(this.Client, this.OrchestratorURL, this.EntityID) if err != nil { onHTTPError(err) continue } err = deleteAction(this.Client, this.OrchestratorURL, action) if err != nil { onHTTPError(err) continue } err = this.onAction(action) if err != nil { onActionError(err) continue } errors = 0 } }
func main() { f, err := os.Open("ok.txt") if err != nil { log.Info(err) return } defer f.Close() stat, err := f.Stat() if err != nil { log.Error("stat err") } data := make([]byte, stat.Size()) _, err = f.Read(data) if err != nil { log.Error("read err") } dataStr := string(data) log.Info(dataStr) dirInfo() demoList() }
// initLogger initializes the global Raven (Sentry) client when cfg.Sentry is
// set, and replaces the global seelog logger from the cfg.Logger config file,
// wiring in a custom "sentry" receiver that forwards records to Raven.
func initLogger() {
	var err error
	// Initialize the raven (Sentry) client.
	if cfg.Sentry != "" {
		Raven, err = raven.NewClient(cfg.Sentry, nil)
		if err != nil {
			// Non-fatal: logging continues without Sentry reporting.
			log.Error("Init Sentry Error:", err)
		}
	}
	// Initialize the logger.
	if cfg.Logger != "" {
		// Register a custom seelog receiver that forwards records to Raven.
		// Note: Raven may be nil here if the Sentry init above failed.
		receiver := &RavenReciver{Client: Raven}
		parseParams := &log.CfgParseParams{
			CustomReceiverProducers: map[string]log.CustomReceiverProducer{
				"sentry": func(log.CustomReceiverInitArgs) (log.CustomReceiver, error) {
					return receiver, nil
				},
			},
		}
		if logger, err := log.LoggerFromParamConfigAsFile(cfg.Logger, parseParams); err == nil {
			log.ReplaceLogger(logger)
		} else {
			log.Error("Parse Logger Error: ", err)
		}
	}
}
func (m *Migrator) NewFileReadWorker(pb *pb.ProgressBar, wg *sync.WaitGroup) { log.Debug("start reading file") f, err := os.Open(m.Config.DumpInputFile) if err != nil { log.Error(err) return } defer f.Close() r := bufio.NewReader(f) lineCount := 0 for { line, err := r.ReadString('\n') if io.EOF == err || nil != err { break } lineCount += 1 js := map[string]interface{}{} //log.Trace("reading file,",lineCount,",", line) err = json.Unmarshal([]byte(line), &js) if err != nil { log.Error(err) continue } m.DocChan <- js pb.Increment() } defer f.Close() log.Debug("end reading file") close(m.DocChan) wg.Done() }
func GetConfig() (currConfig config, err error) { //Open the config file. Defer to closing it when the function goes out of scope configFile, err := os.Open("config/env.json") defer configFile.Close() //If we had an error log it and return our error if err != nil { log.Error("Problem grabbing configuration", err) return } // Create a json parser for this file jsonParser := json.NewDecoder(configFile) //This is a good golang pattern to use: // grab the error from the Decode call and if the err is not null log if err = jsonParser.Decode(&currConfig); err != nil { log.Error("Problem parsing configuration", err) return } return currConfig, err }
func processFile(req uploadRequest, db *database.DB, store *storage.Store) { defer req.file.Close() epub, err := openMultipartEpub(req.file) if err != nil { log.Warn("Not valid epub uploaded file ", req.filename, ": ", err) return } defer epub.Close() book, id := parseFile(epub, store) req.file.Seek(0, 0) size, err := store.Store(id, req.file, EPUB_FILE) if err != nil { log.Error("Error storing book (", id, "): ", err) return } book["filesize"] = size err = db.AddBook(book) if err != nil { log.Error("Error storing metadata (", id, "): ", err) return } log.Info("File uploaded: ", req.filename) }
func (s *ESAPIV0) NextScroll(scrollTime string, scrollId string) (*Scroll, error) { // curl -XGET 'http://es-0.9:9200/_search/scroll?scroll=5m' id := bytes.NewBufferString(scrollId) url := fmt.Sprintf("%s/_search/scroll?scroll=%s&scroll_id=%s", s.Host, scrollTime, id) resp, body, errs := Get(url, s.Auth, s.HttpProxy) if errs != nil { log.Error(errs) return nil, errs[0] } if resp.StatusCode != 200 { return nil, errors.New(body) } defer resp.Body.Close() log.Trace("next scroll,", url, body) // decode elasticsearch scroll response scroll := &Scroll{} err := json.Unmarshal([]byte(body), &scroll) if err != nil { log.Error(body) log.Error(err) return nil, err } return scroll, nil }
func (emailer *EmailNotifier) sendConsumerGroupStatusNotify() error { var bytesToSend bytes.Buffer log.Debug("send email") msgs := make([]Message, len(emailer.Groups)) i := 0 for group, msg := range emailer.groupMsgs { msgs[i] = msg delete(emailer.groupMsgs, group) i++ } err := emailer.template.Execute(&bytesToSend, struct { From string To string Results []Message }{ From: emailer.From, To: emailer.To, Results: msgs, }) if err != nil { log.Error("Failed to assemble email:", err) return err } err = smtp.SendMail(fmt.Sprintf("%s:%v", emailer.Server, emailer.Port), emailer.auth, emailer.From, []string{emailer.To}, bytesToSend.Bytes()) if err != nil { log.Error("Failed to send email message:", err) return err } return nil }
func (fs *GDriveFileSystem) Get(p string) (webdav.StatusCode, io.ReadCloser, int64) { pFile := fs.getFile(p, false) if pFile == nil { return webdav.StatusCode(404), nil, -1 } f := pFile.file downloadUrl := f.DownloadUrl log.Debug("downloadUrl=", downloadUrl) if downloadUrl == "" { log.Error("No download url: ", f) return webdav.StatusCode(500), nil, -1 } req, err := http.NewRequest("GET", downloadUrl, nil) if err != nil { log.Error("NewRequest ", err) return webdav.StatusCode(500), nil, -1 } resp, err := fs.transport.RoundTrip(req) if err != nil { log.Error("RoundTrip ", err) return webdav.StatusCode(500), nil, -1 } return webdav.StatusCode(200), resp.Body, f.FileSize }
func dial() *net.UDPConn { err, host := config.GetStringMapString("udp", "host") if err != nil { log.Error("can't find udp host") os.Exit(0) } err, port := config.GetStringMapString("udp", "port") if err != nil { log.Error("can't find udp port") os.Exit(0) } raddr, err := net.ResolveUDPAddr("udp", host+":"+port) if err != nil { log.Error("connect udp %s:%s error\n", host, port) os.Exit(0) } conn, err := net.DialUDP("udp", nil, raddr) if err != nil { log.Error("connect udp %s:%s error\n", host, port) os.Exit(0) } // bump up the packet size for large log lines err = conn.SetWriteBuffer(writeBuffer) if err != nil { log.Error("connect udp %s:%s error\n", host, port) os.Exit(0) } return conn }
func (h *handler) handleCopy(r *http.Request) StatusCode { var err error p := url2path(r.URL) strDepth := r.Header.Get("Depth") depth := 9999999 if strDepth != "" && strDepth != "infinity" { depth, err = strconv.Atoi(strDepth) if err != nil { log.Error("Can't parse depth ", err) return StatusCode(500) } } dest, err := urlstring2path(r.Header.Get("Destination")) if err != nil { log.Error("Can't parse dest ", err) return StatusCode(500) } overwrite := true if r.Header.Get("Overwrite") == "F" { overwrite = false } log.Debug("Copy from ", p, " to ", dest, " depth=", depth, " overwrite=", overwrite) return StatusCode(h.fs.Copy(p, dest, depth, overwrite)) }
func InitTokens() (err error) { signBytes, err := ioutil.ReadFile("config/rsaKey") if err != nil { log.Error("Error reading private key from file: ", err) return } signKey, err = jwt.ParseRSAPrivateKeyFromPEM(signBytes) if err != nil { log.Error("Error parsing private key from file: ", err) return } verifyBytes, err := ioutil.ReadFile("config/pubKey") if err != nil { log.Error("Error reading public key from file: ", err) return } verifyKey, err = jwt.ParseRSAPublicKeyFromPEM(verifyBytes) if err != nil { log.Error("Error parsing public key from file: ", err) return } return }
func (s *ESAPIV5) NewScroll(indexNames string, scrollTime string, docBufferCount int, query string, slicedId, maxSlicedCount int) (scroll *Scroll, err error) { url := fmt.Sprintf("%s/%s/_search?scroll=%s&size=%d", s.Host, indexNames, scrollTime, docBufferCount) jsonBody := "" if len(query) > 0 || maxSlicedCount > 0 { queryBody := map[string]interface{}{} if len(query) > 0 { queryBody["query"] = map[string]interface{}{} queryBody["query"].(map[string]interface{})["query_string"] = map[string]interface{}{} queryBody["query"].(map[string]interface{})["query_string"].(map[string]interface{})["query"] = query } if maxSlicedCount > 1 { log.Tracef("sliced scroll, %d of %d", slicedId, maxSlicedCount) queryBody["slice"] = map[string]interface{}{} queryBody["slice"].(map[string]interface{})["id"] = slicedId queryBody["slice"].(map[string]interface{})["max"] = maxSlicedCount } jsonArray, err := json.Marshal(queryBody) if err != nil { log.Error(err) } else { jsonBody = string(jsonArray) } } resp, body, errs := Post(url, s.Auth, jsonBody, s.HttpProxy) if errs != nil { log.Error(errs) return nil, errs[0] } defer resp.Body.Close() if resp.StatusCode != 200 { return nil, errors.New(body) } log.Trace("new scroll,", body) if err != nil { log.Error(err) return nil, err } scroll = &Scroll{} err = json.Unmarshal([]byte(body), scroll) if err != nil { log.Error(err) return nil, err } return scroll, err }
func errHndlr(err error, severity int) { if err != nil { switch { case severity == ERROR: log.Error(err) case severity == FATAL: log.Error(err) panic(err) } } }
func writeSummaryStats(config StartupConfig, statsSummary traffic_ops.StatsSummary) { to, err := traffic_ops.Login(config.ToURL, config.ToUser, config.ToPasswd, true) if err != nil { newErr := fmt.Errorf("Could not store summary stats! Error logging in to %v: %v", config.ToURL, err) log.Error(newErr) return } err = to.AddSummaryStats(statsSummary) if err != nil { log.Error(err) } }
func handleCredentials(apiVersion, subpath string, c *ContainerService, w http.ResponseWriter, r *http.Request) { resp, err := instanceServiceClient.RoundTrip(NewGET(baseUrl + "/" + apiVersion + "/meta-data/iam/security-credentials/")) if err != nil { log.Error("Error requesting creds path for API version ", apiVersion, ": ", err) w.WriteHeader(http.StatusInternalServerError) return } resp.Body.Close() if resp.StatusCode != http.StatusOK { w.WriteHeader(resp.StatusCode) return } clientIP := remoteIP(r.RemoteAddr) role, err := c.RoleForIP(clientIP) if err != nil { log.Error(clientIP, " ", err) http.Error(w, "An unexpected error getting container role", http.StatusInternalServerError) return } roleName := role.Arn.RoleName() if len(subpath) == 0 { w.Write([]byte(roleName)) } else if !strings.HasPrefix(subpath, roleName) || (len(subpath) > len(roleName) && subpath[len(roleName)-1] != '/') { // An idiosyncrasy of the standard EC2 metadata service: // Subpaths of the role name are ignored. So long as the correct role name is provided, // it can be followed by a slash and anything after the slash is ignored. w.WriteHeader(http.StatusNotFound) } else { creds, err := json.Marshal(&MetadataCredentials{ Code: "Success", LastUpdated: role.LastUpdated, Type: "AWS-HMAC", AccessKeyId: role.Credentials.AccessKey, SecretAccessKey: role.Credentials.SecretKey, Token: role.Credentials.Token, Expiration: role.Credentials.Expiration, }) if err != nil { log.Error("Error marshaling credentials: ", err) w.WriteHeader(http.StatusInternalServerError) } else { w.Write(creds) } } }
// syncContainers rebuilds the service's lookup tables from the currently
// running docker containers: an IP→ContainerInfo map and a containerID→IP
// map. Containers whose role cannot be determined are still recorded, with
// the error attached so later credential requests can report it.
func (t *ContainerService) syncContainers() {
	log.Info("Synchronizing state with running docker containers")
	apiContainers, err := t.docker.ListContainers(docker.ListContainersOptions{
		All:    false, // only running containers
		Size:   false, // do not need size information
		Limit:  0,     // all running containers
		Since:  "",    // not applicable
		Before: "",    // not applicable
	})
	if err != nil {
		// Keep the previous maps on failure rather than wiping state.
		log.Error("Error listing running containers: ", err)
		return
	}
	containerIPMap := make(map[string]*ContainerInfo)
	containerIdMap := make(map[string]string)
	for _, apiContainer := range apiContainers {
		container, err := t.docker.InspectContainer(apiContainer.ID)
		if err != nil {
			log.Error("Error inspecting container: ", apiContainer.ID, ": ", err)
			continue
		}
		// Abbreviated id used only for readable log lines.
		shortContainerId := apiContainer.ID[:6]
		containerIP := container.NetworkSettings.IPAddress
		// Role comes from the container's environment, falling back to the
		// service-wide default; a missing role becomes an error on the entry.
		roleArn, roleErr := getRoleArnFromEnv(container.Config.Env, t.defaultRoleArn)
		if roleArn.Empty() && roleErr == nil {
			roleErr = fmt.Errorf("No role defined for container %s: image=%s", shortContainerId, container.Config.Image)
		}
		log.Infof("Container: id=%s image=%s role=%s", shortContainerId, container.Config.Image, roleArn)
		containerIPMap[containerIP] = &ContainerInfo{
			ContainerId:      apiContainer.ID,
			ShortContainerId: shortContainerId,
			SessionName:      generateSessionName(container),
			LastUpdated:      time.Time{}, // zero value: credentials not fetched yet
			Error:            roleErr,
			RoleArn:          roleArn,
		}
		containerIdMap[apiContainer.ID] = containerIP
	}
	// Swap in the freshly built maps.
	// NOTE(review): these assignments are not synchronized; presumably
	// callers serialize syncContainers with readers — confirm.
	t.containerIPMap = containerIPMap
	t.containerIdMap = containerIdMap
}
// main wires up the docker EC2 metadata proxy: requests matching credsRegex
// are answered locally with per-container credentials; every other path is
// proxied verbatim to the real EC2 metadata service.
func main() {
	kingpin.CommandLine.Help = "Docker container EC2 metadata service."
	kingpin.Parse()
	defer log.Flush()
	configureLogging(*verboseOpt)
	auth, err := aws.GetAuth("", "", "", time.Time{})
	if err != nil {
		// Without host credentials the proxy cannot assume roles; abort.
		panic(err)
	}
	containerService := NewContainerService(dockerClient(), *defaultRole, auth)
	// Proxy non-credentials requests to primary metadata service
	http.HandleFunc("/", logHandler(func(w http.ResponseWriter, r *http.Request) {
		match := credsRegex.FindStringSubmatch(r.URL.Path)
		if match != nil {
			// match[1] = API version, match[2] = subpath after the
			// security-credentials segment.
			handleCredentials(match[1], match[2], containerService, w, r)
			return
		}
		proxyReq, err := http.NewRequest(r.Method, fmt.Sprintf("%s%s", baseUrl, r.URL.Path), r.Body)
		if err != nil {
			log.Error("Error creating proxy http request: ", err)
			http.Error(w, "An unexpected error occurred communicating with Amazon", http.StatusInternalServerError)
			return
		}
		copyHeaders(proxyReq.Header, r.Header)
		resp, err := instanceServiceClient.RoundTrip(proxyReq)
		if err != nil {
			log.Error("Error forwarding request to EC2 metadata service: ", err)
			http.Error(w, "An unexpected error occurred communicating with Amazon", http.StatusInternalServerError)
			return
		}
		defer resp.Body.Close()
		copyHeaders(w.Header(), resp.Header)
		w.WriteHeader(resp.StatusCode)
		if _, err := io.Copy(w, resp.Body); err != nil {
			// Headers have already been sent; all we can do is log.
			log.Warn("Error copying response content from EC2 metadata service: ", err)
		}
	}))
	// ListenAndServe blocks; it only returns on a fatal server error.
	log.Critical(http.ListenAndServe(*serverAddr, nil))
}
func Request(method string, r string, auth *Auth, body *bytes.Buffer, proxy string) (string, error) { var client *http.Client client = &http.Client{} if len(proxy) > 0 { proxyURL, err := url.Parse(proxy) if err != nil { log.Error(err) } else { transport := &http.Transport{Proxy: http.ProxyURL(proxyURL)} client = &http.Client{Transport: transport} } } var reqest *http.Request if body != nil { reqest, _ = http.NewRequest(method, r, body) } else { reqest, _ = newDeleteRequest(client, method, r) } if auth != nil { reqest.SetBasicAuth(auth.User, auth.Pass) } resp, errs := client.Do(reqest) if errs != nil { log.Error(errs) return "", errs } if resp.StatusCode != 200 { b, _ := ioutil.ReadAll(resp.Body) return "", errors.New("server error: " + string(b)) } respBody, err := ioutil.ReadAll(resp.Body) if err != nil { log.Error(err) return string(respBody), err } log.Trace(r, string(respBody)) if err != nil { return string(respBody), err } defer resp.Body.Close() return string(respBody), nil }
func Apps(w http.ResponseWriter, r *http.Request) { app, err := util.ReadBodyApp(r.Body) if err != nil { log.Error(err) } if app.Image == "" { log.Error("app name can't be empty") } if app.Version == "" { app.Version = "latest" } store.CreateApp(app) w.Write([]byte("Gorilla!\n")) }
// Start runs the inspector's main loop: it connects a transceiver to the
// orchestrator, binds a ZeroMQ PAIR socket for hookswitch traffic, then
// processes incoming ethernet frames until a ZMQ error occurs (the only
// return path from the loop).
func (this *HookSwitchInspector) Start() error {
	log.Debugf("Initializing Ethernet Inspector %#v", this)
	var err error
	if this.EnableTCPWatcher {
		this.tcpWatcher = tcpwatcher.New()
	}
	this.trans, err = transceiver.NewTransceiver(this.OrchestratorURL, this.EntityID)
	if err != nil {
		return err
	}
	this.trans.Start()
	zmqSocket, err := zmq.NewSocket(zmq.Pair)
	if err != nil {
		return err
	}
	// NOTE(review): Bind's error is ignored here — a failed bind would
	// leave the loop reading from a dead socket; confirm intended.
	zmqSocket.Bind(this.HookSwitchZMQAddr)
	defer zmqSocket.Close()
	this.zmqChannels = zmqSocket.Channels()
	for {
		select {
		case msgBytes := <-this.zmqChannels.In():
			meta, ethBytes, err := this.decodeZMQMessageBytes(msgBytes)
			if err != nil {
				log.Error(err)
				continue
			}
			eth, ip, tcp := parseEthernetBytes(ethBytes)
			// note: tcpwatcher is not thread-safe
			if this.EnableTCPWatcher && this.tcpWatcher.IsTCPRetrans(ip, tcp) {
				// TCP retransmissions are dropped synchronously, bypassing
				// the message handler entirely.
				meta.Op = hookswitch.Drop
				err = this.sendZMQMessage(*meta, nil)
				if err != nil {
					log.Error(err)
				}
				continue
			}
			// All other frames are handled asynchronously; handler errors
			// are logged, not propagated.
			go func() {
				if err := this.onHookSwitchMessage(*meta, eth, ip, tcp); err != nil {
					log.Error(err)
				}
			}()
		case err := <-this.zmqChannels.Errors():
			return err
		}
	}
	// NOTREACHED
}
// Start runs the session manager's event loop: it listens for incoming peer
// connections and multiplexes session create/start/stop requests until
// quitChan fires. It returns early only if listening fails.
func (sm *TaskSessionMgnt) Start() error {
	conChan, listener, err := StartListen(sm.g.cfg)
	if err != nil {
		log.Error("Couldn't listen for peers connection: ", err)
		return err
	}
	defer listener.Close()
	for {
		select {
		case task := <-sm.createSessChan:
			// Create a new p2p session and initialize it on its own goroutine.
			if ts, err := NewTaskSession(sm.g, task, sm.stopSessChan); err != nil {
				log.Error("Could not create p2p task session.", err)
			} else {
				log.Infof("[%s] Created p2p task session", task.TaskID)
				sm.sessions[ts.taskID] = ts
				go func(s *TaskSession) {
					s.Init()
				}(ts)
			}
		case task := <-sm.startSessChan:
			// Start an already-created session; unknown ids are logged only.
			if ts, ok := sm.sessions[task.TaskID]; ok {
				ts.Start(task)
			} else {
				log.Errorf("[%s] Not find p2p task session", task.TaskID)
			}
		case taskID := <-sm.stopSessChan:
			log.Infof("[%s] Stop p2p task session", taskID)
			if ts, ok := sm.sessions[taskID]; ok {
				delete(sm.sessions, taskID)
				ts.Quit()
			}
		case <-sm.quitChan:
			// Shut down every session concurrently and exit the loop.
			for _, ts := range sm.sessions {
				go ts.Quit()
			}
			log.Info("Closed all sessions")
			return nil
		case c := <-conChan:
			// Route a newly accepted peer connection to its session, or
			// drop it if no session exists for the task.
			log.Infof("[%s] New p2p connection, peer addr %s", c.taskID, c.remoteAddr.String())
			if ts, ok := sm.sessions[c.taskID]; ok {
				ts.AcceptNewPeer(c)
			} else {
				log.Errorf("[%s] Not find p2p task session", c.taskID)
				c.conn.Close() // TODO: let the client reconnect
			}
		}
	}
}
func (s *ESAPIV0) NewScroll(indexNames string, scrollTime string, docBufferCount int, query string, slicedId, maxSlicedCount int) (scroll *Scroll, err error) { // curl -XGET 'http://es-0.9:9200/_search?search_type=scan&scroll=10m&size=50' url := fmt.Sprintf("%s/%s/_search?search_type=scan&scroll=%s&size=%d", s.Host, indexNames, scrollTime, docBufferCount) jsonBody := "" if len(query) > 0 { queryBody := map[string]interface{}{} queryBody["query"] = map[string]interface{}{} queryBody["query"].(map[string]interface{})["query_string"] = map[string]interface{}{} queryBody["query"].(map[string]interface{})["query_string"].(map[string]interface{})["query"] = query jsonArray, err := json.Marshal(queryBody) if err != nil { log.Error(err) } else { jsonBody = string(jsonArray) } } resp, body, errs := Post(url, s.Auth, jsonBody, s.HttpProxy) if err != nil { log.Error(errs) return nil, errs[0] } defer resp.Body.Close() log.Trace("new scroll,", url, body) if err != nil { log.Error(err) return nil, err } if resp.StatusCode != 200 { return nil, errors.New(body) } scroll = &Scroll{} err = json.Unmarshal([]byte(body), scroll) if err != nil { log.Error(err) return nil, err } return scroll, err }
func uploadPostHandler(h handler) { problem := false h.r.ParseMultipartForm(20000000) filesForm := h.r.MultipartForm.File["epub"] for _, f := range filesForm { file, err := f.Open() if err != nil { log.Error("Can not open uploaded file ", f.Filename, ": ", err) h.sess.Notify("Upload problem!", "There was a problem with book "+f.Filename, "error") problem = true continue } uploadChannel <- uploadRequest{file, f.Filename} } if !problem { if len(filesForm) > 0 { h.sess.Notify("Upload successful!", "Thank you for your contribution", "success") } else { h.sess.Notify("Upload problem!", "No books where uploaded.", "error") } } uploadHandler(h) }
func (s *ESAPIV0) UpdateIndexSettings(name string, settings map[string]interface{}) error { log.Debug("update index: ", name, settings) cleanSettings(settings) url := fmt.Sprintf("%s/%s/_settings", s.Host, name) if _, ok := settings["settings"].(map[string]interface{})["index"]; ok { if set, ok := settings["settings"].(map[string]interface{})["index"].(map[string]interface{})["analysis"]; ok { log.Debug("update static index settings: ", name) staticIndexSettings := getEmptyIndexSettings() staticIndexSettings["settings"].(map[string]interface{})["index"].(map[string]interface{})["analysis"] = set Post(fmt.Sprintf("%s/%s/_close", s.Host, name), s.Auth, "", s.HttpProxy) body := bytes.Buffer{} enc := json.NewEncoder(&body) enc.Encode(staticIndexSettings) bodyStr, err := Request("PUT", url, s.Auth, &body, s.HttpProxy) if err != nil { log.Error(bodyStr, err) panic(err) return err } delete(settings["settings"].(map[string]interface{})["index"].(map[string]interface{}), "analysis") Post(fmt.Sprintf("%s/%s/_open", s.Host, name), s.Auth, "", s.HttpProxy) } } log.Debug("update dynamic index settings: ", name) body := bytes.Buffer{} enc := json.NewEncoder(&body) enc.Encode(settings) _, err := Request("PUT", url, s.Auth, &body, s.HttpProxy) return err }
func (r *Result) log() { json, err := jsonutil.Encode(r) if err != nil { log.Error(err) } if r.Failed { log.Error(json) } else if r.Changed { log.Info(json) } else if !r.Skipped { log.Debug(json) } else { log.Trace(json) } }