// Capabilities returns the supported capabilities of this agent / docker-client pair.
// Currently, the following capabilities are possible:
//
//    com.amazonaws.ecs.capability.privileged-container
//    com.amazonaws.ecs.capability.docker-remote-api.1.17
//    com.amazonaws.ecs.capability.docker-remote-api.1.18
//    com.amazonaws.ecs.capability.docker-remote-api.1.19
//    com.amazonaws.ecs.capability.docker-remote-api.1.20
//    com.amazonaws.ecs.capability.logging-driver.json-file
//    com.amazonaws.ecs.capability.logging-driver.syslog
//    com.amazonaws.ecs.capability.logging-driver.fluentd
//    com.amazonaws.ecs.capability.logging-driver.journald
//    com.amazonaws.ecs.capability.logging-driver.gelf
//    com.amazonaws.ecs.capability.selinux
//    com.amazonaws.ecs.capability.apparmor
//    com.amazonaws.ecs.capability.ecr-auth
//    com.amazonaws.ecs.capability.task-iam-role
//    com.amazonaws.ecs.capability.task-iam-role-network-host
func (engine *DockerTaskEngine) Capabilities() []string {
	capabilities := []string{}
	if !engine.cfg.PrivilegedDisabled {
		capabilities = append(capabilities, capabilityPrefix+"privileged-container")
	}

	versions := make(map[dockerclient.DockerVersion]bool)
	for _, version := range engine.client.SupportedVersions() {
		capabilities = append(capabilities, capabilityPrefix+"docker-remote-api."+string(version))
		versions[version] = true
	}

	for _, loggingDriver := range engine.cfg.AvailableLoggingDrivers {
		requiredVersion := dockerclient.LoggingDriverMinimumVersion[loggingDriver]
		if _, ok := versions[requiredVersion]; ok {
			capabilities = append(capabilities, capabilityPrefix+"logging-driver."+string(loggingDriver))
		}
	}

	if engine.cfg.SELinuxCapable {
		capabilities = append(capabilities, capabilityPrefix+"selinux")
	}
	if engine.cfg.AppArmorCapable {
		capabilities = append(capabilities, capabilityPrefix+"apparmor")
	}

	if _, ok := versions[dockerclient.Version_1_19]; ok {
		capabilities = append(capabilities, capabilityPrefix+"ecr-auth")
	}

	if engine.cfg.TaskIAMRoleEnabled {
		// The "task-iam-role" capability is supported for docker v1.7.x onwards
		// Refer https://github.com/docker/docker/blob/master/docs/reference/api/docker_remote_api.md
		// to look up the table of docker versions to API versions
		if _, ok := versions[dockerclient.Version_1_19]; ok {
			capabilities = append(capabilities, capabilityPrefix+capabilityTaskIAMRole)
		} else {
			seelog.Warn("Task IAM Role not enabled due to unsupported Docker version")
		}
	}

	if engine.cfg.TaskIAMRoleEnabledForNetworkHost {
		// The "task-iam-role-network-host" capability is supported for docker v1.7.x onwards
		if _, ok := versions[dockerclient.Version_1_19]; ok {
			capabilities = append(capabilities, capabilityPrefix+capabilityTaskIAMRoleNetHost)
		} else {
			seelog.Warn("Task IAM Role for Host Network not enabled due to unsupported Docker version")
		}
	}

	return capabilities
}
// parseAuthData normalizes all auth types into a uniform 'dockerAuths' type.
// On error, any appropriate information will be logged and an empty dockerAuths will be returned.
func parseAuthData(authType string, authData json.RawMessage) dockerAuths {
	intermediateAuthData := make(dockerAuths)
	switch authType {
	case "docker":
		err := json.Unmarshal(authData, &intermediateAuthData)
		if err != nil {
			seelog.Warn("Could not parse 'docker' type auth config")
			return dockerAuths{}
		}
	case "dockercfg":
		var base64dAuthInfo dockercfgData
		err := json.Unmarshal(authData, &base64dAuthInfo)
		if err != nil {
			seelog.Warn("Could not parse 'dockercfg' type auth config")
			return dockerAuths{}
		}
		for registry, auth := range base64dAuthInfo {
			data, err := base64.StdEncoding.DecodeString(auth.Auth)
			if err != nil {
				seelog.Warnf("Malformed auth data for registry %v", registry)
				continue
			}
			usernamePass := strings.SplitN(string(data), ":", 2)
			if len(usernamePass) != 2 {
				seelog.Warnf("Malformed auth data for registry %v; must contain ':'", registry)
				continue
			}
			intermediateAuthData[registry] = docker.AuthConfiguration{
				Username: usernamePass[0],
				Password: usernamePass[1],
			}
		}
	case "":
		// not set; no warning needed
		return dockerAuths{}
	default:
		seelog.Warnf("Unknown auth configuration: %v", authType)
		return dockerAuths{}
	}

	// Normalize intermediate registry keys into not having a schema
	output := make(dockerAuths)
	for key, val := range intermediateAuthData {
		output[stripRegistrySchema(key)] = val
	}
	return output
}
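// A minimal sketch of the two payload shapes parseAuthData accepts (illustrative,
// not taken from the source). The registry host and credentials below are
// hypothetical; the "dockercfg" entry carries base64("user:pass") -- here
// "dXNlcjpwYXNz" -- in its "auth" field, which parseAuthData decodes and splits on ':'.
const (
	exampleDockerAuth    = `{"my.registry.example.com": {"username": "user", "password": "pass"}}`
	exampleDockercfgAuth = `{"my.registry.example.com": {"auth": "dXNlcjpwYXNz"}}`
)

// Both calls would be expected to produce the same normalized map, keyed by the
// registry host with its schema stripped (see stripRegistrySchema):
//
//	parseAuthData("docker", json.RawMessage(exampleDockerAuth))
//	parseAuthData("dockercfg", json.RawMessage(exampleDockercfgAuth))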
func testFuncException() {
	fmt.Println("testFuncException")
	testConfig := `
<seelog type="sync" minlevel="info">
	<exceptions>
		<exception funcpattern="*main.test*Except*" minlevel="error"/>
	</exceptions>
	<outputs>
		<console/>
	</outputs>
</seelog>`
	logger, _ := log.LoggerFromConfigAsBytes([]byte(testConfig))
	log.ReplaceLogger(logger)

	log.Trace("NOT Printed")
	log.Debug("NOT Printed")
	log.Info("NOT Printed")
	log.Warn("NOT Printed")
	log.Error("Printed")
	log.Critical("Printed")

	log.Current.Trace("NOT Printed")
	log.Current.Debug("NOT Printed")
	log.Current.Info("NOT Printed")
	log.Current.Warn("NOT Printed")
	log.Current.Error("Printed")
	log.Current.Critical("Printed")
}
func (this *ProcInspector) Start() error {
	log.Debugf("Initializing Process Inspector %#v", this)
	var err error
	this.trans, err = transceiver.NewTransceiver(this.OrchestratorURL, this.EntityID)
	if err != nil {
		return err
	}
	this.trans.Start()

	for {
		<-time.After(this.WatchInterval)
		procs, err := procutil.DescendantLWPs(this.RootPID)
		if err != nil {
			// this happens frequently, but does not matter.
			// e.g. "open /proc/11193/task/11193/children: no such file or directory"
			log.Warn(err)
			continue
		}
		if err = this.onWatch(procs); err != nil {
			log.Error(err)
		}
	}
	// NOTREACHED
}
func processFile(req uploadRequest, db *database.DB, store *storage.Store) {
	defer req.file.Close()

	epub, err := openMultipartEpub(req.file)
	if err != nil {
		log.Warn("Uploaded file ", req.filename, " is not a valid epub: ", err)
		return
	}
	defer epub.Close()

	book, id := parseFile(epub, store)
	req.file.Seek(0, 0)
	size, err := store.Store(id, req.file, EPUB_FILE)
	if err != nil {
		log.Error("Error storing book (", id, "): ", err)
		return
	}

	book["filesize"] = size
	err = db.AddBook(book)
	if err != nil {
		log.Error("Error storing metadata (", id, "): ", err)
		return
	}
	log.Info("File uploaded: ", req.filename)
}
// closeInMass closes all the objects at once and waits for them to finish via a channel.
func (d *Death) closeInMass(closable ...Closable) {
	count := len(closable)

	// call close asynchronously
	done := make(chan bool, count)
	for _, c := range closable {
		go d.closeObjects(c, done)
	}

	// wait on the channel for notifications
	timer := time.NewTimer(d.timeout)
	for {
		select {
		case <-timer.C:
			log.Warn(count, " object(s) remaining but timer expired.")
			return
		case <-done:
			count--
			log.Debug(count, " object(s) left")
			if count == 0 {
				log.Debug("Finished closing objects")
				return
			}
		}
	}
}
func (s *server) start(trans transport.Transport) (*tomb.Tomb, error) {
	s.workerTombM.Lock()
	if s.workerTomb != nil {
		s.workerTombM.Unlock()
		return nil, ErrAlreadyRunning
	}
	tm := new(tomb.Tomb)
	s.workerTomb = tm
	s.workerTombM.Unlock()

	stop := func() {
		trans.StopListening(s.Name())
		s.workerTombM.Lock()
		s.workerTomb = nil
		s.workerTombM.Unlock()
	}

	var inbound chan tmsg.Request
	connect := func() error {
		select {
		case <-trans.Ready():
			inbound = make(chan tmsg.Request, 500)
			return trans.Listen(s.Name(), inbound)
		case <-time.After(connectTimeout):
			log.Warnf("[Mercury:Server] Timed out after %s waiting for transport readiness", connectTimeout.String())
			return ttrans.ErrTimeout
		}
	}

	// Block here purposefully (deliberately not in the goroutine below, because we want to report a connection
	// error to the caller)
	if err := connect(); err != nil {
		stop()
		return nil, err
	}

	tm.Go(func() error {
		defer stop()
		for {
			select {
			case req, ok := <-inbound:
				if !ok {
					// Received because the channel closed; try to reconnect
					log.Warn("[Mercury:Server] Inbound channel closed; trying to reconnect…")
					if err := connect(); err != nil {
						log.Criticalf("[Mercury:Server] Could not reconnect after channel close: %s", err)
						return err
					}
				} else {
					go s.handle(trans, req)
				}
			case <-tm.Dying():
				return tomb.ErrDying
			}
		}
	})
	return tm, nil
}
// getDHOffset1 returns the DH byte offset derived from handshake bytes 768-771.
func getDHOffset1(handshakeBytes []byte) int {
	offset := int(handshakeBytes[768]) + int(handshakeBytes[769]) + int(handshakeBytes[770]) + int(handshakeBytes[771])
	offset = (offset % 632) + 8
	if offset+128 >= 1536 {
		log.Warn("Invalid DH offset")
	}
	return offset
}
// getDHOffset0 returns the DH byte offset derived from handshake bytes 1532-1535.
func getDHOffset0(handshakeBytes []byte) int {
	offset := int(handshakeBytes[1532]) + int(handshakeBytes[1533]) + int(handshakeBytes[1534]) + int(handshakeBytes[1535])
	offset = (offset % 632) + 772
	if offset+128 >= 1536 {
		log.Warn("Invalid DH offset")
	}
	return offset
}
// Warn outputs a log message at the warn level.
//
// param: msg the message(s) to output; multiple values are concatenated into one message.
func Warn(msg ...interface{}) {
	if !valid {
		return
	}
	locker.Lock(lockTimeout)
	defer locker.Unlock()
	seelog.Warn(msg...)
}
// Warn outputs a warn level message.
func Warn(msg ...interface{}) {
	if !isValid {
		return
	}
	mutex.Lock()
	defer mutex.Unlock()
	seelog.Warn(msg...)
}
// CreateComplexJson builds a JSON response containing a success flag, a message and a result payload.
func CreateComplexJson(success bool, msg string, rs interface{}) []byte {
	retMap := map[string]interface{}{"success": success, "msg": msg, "result": rs}
	retJson, err := json.Marshal(retMap)
	if err != nil {
		seelog.Warn(fmt.Sprintf("CreateComplexJson : %v", err))
	}
	return retJson
}
// CreateQueryJson builds a JSON response for a query operation, containing only the result payload.
func CreateQueryJson(rs interface{}) []byte {
	retMap := map[string]interface{}{"result": rs}
	retJson, err := json.Marshal(retMap)
	if err != nil {
		seelog.Warn(fmt.Sprintf("CreateQueryJson : %v", err))
	}
	return retJson
}
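// A minimal usage sketch for the two helpers above (illustrative only; the payload
// is hypothetical). Note that on a marshalling error the error is only logged and a
// nil slice is returned, so callers should be prepared for that case.
func exampleJsonResponses() ([]byte, []byte) {
	users := []string{"alice", "bob"} // hypothetical query result

	// {"msg":"ok","result":["alice","bob"],"success":true}
	complexResp := CreateComplexJson(true, "ok", users)

	// {"result":["alice","bob"]}
	queryResp := CreateQueryJson(users)

	return complexResp, queryResp
}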
func main() {
	defer log.Flush()

	logger, err := log.LoggerFromConfigAsFile("seelog.xml")
	if nil != err {
		log.Warn("Failed to load config", err)
	}
	log.ReplaceLogger(logger)

	flag.Parse()

	statsTransformChannel := make(chan *diskStat.DiskStat, 10)
	statsOutputChannel := make(chan *diskStat.ExtendedIoStats, 10)

	var output outputInterface.Output
	proto := PStdOut
	switch *protocolType {
	case "protobuffers":
		proto = PProtoBuffers
	case "json":
		proto = PJson
	default:
		if *outputType == "zmq" {
			proto = PProtoBuffers
		} else if *outputType == "stdout" {
			proto = PStdOut
		}
	}

	switch *outputType {
	case "zmq":
		output, err = zmqOutput.NewZmqOutput(queueUrl, proto)
	case "nano":
		output, err = nanoMsgOutput.NewNanoMsgOutput(queueUrl, proto)
	default:
		output = &logOutput.LogOutput{proto}
	}
	if nil != err {
		log.Error("Failed to setup output ", err)
	}

	go ioStatTransform.TransformStat(statsTransformChannel, statsOutputChannel)
	go statsOutput.Output(statsOutputChannel, output)

	for {
		readAndSendStats(statsTransformChannel)
		time.Sleep(time.Second * time.Duration(*interval))
	}

	close(statsTransformChannel)
	close(statsOutputChannel)
}
// handshake1 reads the RTMP C0/C1 handshake bytes, validates them, and dispatches
// to either the simple or the complex handshake.
func handshake1(rw *bufio.ReadWriter) bool {
	b := ReadBuf(rw, HANDSHAKE_SIZE+1)
	if b[0] != 0x3 {
		log.Warn("C0 error ", b[0])
		return false
	}
	if len(b[1:]) != HANDSHAKE_SIZE {
		log.Warn("C1 error ", len(b[1:]))
		return false
	}
	input := make([]byte, HANDSHAKE_SIZE)
	copy(input, b[1:])
	ver := input[4] & 0xff
	if ver == 0 {
		return simple_handshake(rw, input)
	}
	return complex_handshake(rw, input)
}
// getDigestOffset0 returns a digest byte offset derived from pBuffer bytes 8-11.
//
// pBuffer is the source for the digest data.
func getDigestOffset0(pBuffer []byte) int {
	offset := int(pBuffer[8]&0xff) + int(pBuffer[9]&0xff) + int(pBuffer[10]&0xff) + int(pBuffer[11]&0xff)
	offset = (offset % 728) + 8 + 4
	if offset+32 >= 1536 {
		log.Warn("Invalid digest offset")
	}
	log.Debug("digest offset", offset)
	return offset
}
// getDigestOffset1 returns a digest byte offset derived from pBuffer bytes 772-775.
//
// pBuffer is the source for the digest data.
func getDigestOffset1(pBuffer []byte) int {
	offset := int(pBuffer[772]&0xff) + int(pBuffer[773]&0xff) + int(pBuffer[774]&0xff) + int(pBuffer[775]&0xff)
	offset = (offset % 728) + 772 + 4
	if offset+32 >= 1536 {
		log.Warn("Invalid digest offset")
	}
	log.Debug("digest offset", offset)
	return offset
}
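// A small worked example of the offset arithmetic above (illustrative, not from the
// source): with pBuffer[8..11] = 0x00, 0x01, 0x02, 0x03, getDigestOffset0 computes
// (0+1+2+3) % 728 + 8 + 4 = 18, so the 32-byte digest would sit at bytes 18..49 of
// the 1536-byte handshake, comfortably inside the buffer.
func exampleDigestOffset() {
	buf := make([]byte, 1536)
	copy(buf[8:12], []byte{0x00, 0x01, 0x02, 0x03})
	offset := getDigestOffset0(buf) // 18
	log.Debug("example digest offset", offset)
}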
func readStartHandler(h handler) {
	id := mux.Vars(h.r)["id"]
	e, _ := openReadEpub(h)
	if e == nil {
		log.Warn("Opening the epub returned an empty file")
		notFound(h)
		return
	}
	defer e.Close()

	it, err := e.Spine()
	if err != nil {
		log.Warn("No spine in the epub")
		notFound(h)
		return
	}
	http.Redirect(h.w, h.r, "/read/"+id+"/"+it.URL(), http.StatusTemporaryRedirect)
}
func (p *Process) Fire(name string, args ...string) { // {{{
	select {
	case p.Events <- &Event{
		Name: name,
		Args: args,
	}:
	default:
		log.Warn("Lost event: ", name, " args:", args)
	}
} // }}}
func (s *DefaultSubscriber) loadFromConfig() {
	for {
		err := s.doLoad()
		if err == nil {
			break
		}
		log.Warn(err)
		time.Sleep(time.Second)
	}
}
// validateClient checks the client handshake against validation scheme 1 first,
// then falls back to scheme 0.
func validateClient(input []byte) (result bool, scheme int, challenge []byte, digest []byte) {
	if result, scheme, challenge, digest = validateClientScheme(input, 1); result {
		return
	}
	if result, scheme, challenge, digest = validateClientScheme(input, 0); result {
		return
	}
	log.Warn("Unable to validate client")
	return
}
func (p *Process) stderrMonitor(cmd *exec.Cmd) { // {{{
	stderr, _ := cmd.StderrPipe()

	// Register with the wait group before starting the goroutine, so a racing
	// Wait() cannot run before Add is called.
	p.waitGroup.Add(1)
	go func() {
		defer p.waitGroup.Done()
		scanner := bufio.NewScanner(stderr)
		for scanner.Scan() {
			select {
			case p.StdErr <- scanner.Text():
			default:
				log.Warn("OPENVPN stderr: ", scanner.Text())
			}
		}
		if err := scanner.Err(); err != nil {
			log.Warn("OPENVPN stderr: (failed to read ", err, ")")
			return
		}
	}()
} // }}}
// newDisconnectionTimer creates a new timer object, with a callback to
// disconnect from ACS on inactivity.
func newDisconnectionTimer(client wsclient.ClientServer, _time ttime.Time, timeout time.Duration, jitter time.Duration) ttime.Timer {
	timer := _time.AfterFunc(utils.AddJitter(timeout, jitter), func() {
		seelog.Warn("ACS Connection hasn't had any activity for too long; closing connection")
		closeErr := client.Close()
		if closeErr != nil {
			seelog.Warnf("Error disconnecting: %v", closeErr)
		}
	})
	return timer
}
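// A minimal usage sketch (not taken from the source): the caller keeps the
// connection open by resetting the timer whenever a message arrives, assuming
// ttime.Timer mirrors time.Timer's Stop/Reset methods. The names handleMessagesSketch,
// incomingMessages, process, heartbeatTimeout and heartbeatJitter are hypothetical.
func handleMessagesSketch(client wsclient.ClientServer, _time ttime.Time) {
	timer := newDisconnectionTimer(client, _time, heartbeatTimeout, heartbeatJitter)
	defer timer.Stop()

	for message := range incomingMessages(client) { // hypothetical message source
		// Any activity on the connection pushes the disconnection deadline back.
		timer.Reset(utils.AddJitter(heartbeatTimeout, heartbeatJitter))
		process(message) // hypothetical handler
	}
}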
func (c *client) addCall(cc clientCall) {
	select {
	case <-c.execC:
		log.Warn("[Mercury:Client] Request added after client execution; discarding")
		return
	default:
	}

	c.Lock()
	defer c.Unlock()
	c.calls[cc.uid] = cc
}
func (p *Broadcast) start() {
	//p.terminal = newTerminal()
	go func(p *Broadcast) {
		defer func() {
			if e := recover(); e != nil {
				log.Critical(e)
			}
			log.Info("Broadcast " + p.path + " stopped")
		}()
		log.Info("Broadcast " + p.path + " started")
		for {
			select {
			case amsg := <-p.producer.audiochan:
				for _, s := range p.consumers {
					err := s.SendAudio(amsg.Clone())
					if err != nil {
						notifyError(s, err)
					}
				}
			case vmsg := <-p.producer.videochan:
				for _, s := range p.consumers {
					err := s.SendVideo(vmsg.Clone())
					if err != nil {
						notifyError(s, err)
					}
				}
			case obj := <-p.control:
				if c, ok := obj.(*RtmpNetStream); ok {
					if c.closed {
						delete(p.consumers, c.conn.remoteAddr)
						log.Debugf("Broadcast %v consumers %v", p.path, len(p.consumers))
					} else {
						p.consumers[c.conn.remoteAddr] = c
						log.Debugf("Broadcast %v consumers %v", p.path, len(p.consumers))
					}
				} else if v, ok := obj.(string); ok && "stop" == v {
					for k, ss := range p.consumers {
						delete(p.consumers, k)
						ss.Close()
					}
					return
				}
			case <-time.After(time.Second * 90):
				log.Warn("Broadcast " + p.path + " Video | Audio Buffer Empty, Timeout 90s")
				p.stop()
				p.producer.Close()
				return
			}
		}
	}(p)
}
func errHndlr(err error, severity int) {
	if err != nil {
		switch severity {
		case WARN:
			log.Warn(err)
		case ERROR:
			log.Error(err)
		case FATAL:
			log.Error(err)
			panic(err)
		}
	}
}
// Output bridges log lines from nsqlib to seelog, mapping the level prefix of
// each line to the corresponding seelog call.
func (lb logBridge) Output(depth int, s string) error {
	switch s[:3] {
	case nsqlib.LogLevelDebugPrefix:
		log.Debug(s[4:])
	case nsqlib.LogLevelInfoPrefix:
		log.Info(s[4:])
	case nsqlib.LogLevelWarningPrefix:
		log.Warn(s[4:])
	case nsqlib.LogLevelErrorPrefix:
		log.Error(s[4:])
	}
	return nil
}
func main() {
	defer log.Flush()

	logger, err := log.LoggerFromConfigAsFile("seelog.xml")
	if err != nil {
		log.Warn("Failed to load config", err)
	}
	log.ReplaceLogger(logger)

	rep.ReporterConfig("ipc:///temp/testSender.ipc", 0)
	r := rep.NewReporter()
	defer r.Close()

	start.Run()
}
func checkErr(i int, err error) {
	if err != nil {
		switch i {
		case 1:
			log.Critical(err)
		case 2:
			log.Warn(err)
		default:
			log.Info(err)
		}
	}
	log.Flush()
}
func main() {
	kingpin.CommandLine.Help = "Docker container EC2 metadata service."
	kingpin.Parse()

	defer log.Flush()
	configureLogging(*verboseOpt)

	auth, err := aws.GetAuth("", "", "", time.Time{})
	if err != nil {
		panic(err)
	}

	containerService := NewContainerService(dockerClient(), *defaultRole, auth)

	// Proxy non-credentials requests to primary metadata service
	http.HandleFunc("/", logHandler(func(w http.ResponseWriter, r *http.Request) {
		match := credsRegex.FindStringSubmatch(r.URL.Path)
		if match != nil {
			handleCredentials(match[1], match[2], containerService, w, r)
			return
		}

		proxyReq, err := http.NewRequest(r.Method, fmt.Sprintf("%s%s", baseUrl, r.URL.Path), r.Body)
		if err != nil {
			log.Error("Error creating proxy http request: ", err)
			http.Error(w, "An unexpected error occurred communicating with Amazon", http.StatusInternalServerError)
			return
		}

		copyHeaders(proxyReq.Header, r.Header)
		resp, err := instanceServiceClient.RoundTrip(proxyReq)
		if err != nil {
			log.Error("Error forwarding request to EC2 metadata service: ", err)
			http.Error(w, "An unexpected error occurred communicating with Amazon", http.StatusInternalServerError)
			return
		}
		defer resp.Body.Close()

		copyHeaders(w.Header(), resp.Header)
		w.WriteHeader(resp.StatusCode)
		if _, err := io.Copy(w, resp.Body); err != nil {
			log.Warn("Error copying response content from EC2 metadata service: ", err)
		}
	}))

	log.Critical(http.ListenAndServe(*serverAddr, nil))
}