// SinkFactory creates a new object with sinks.Sink interface
func SinkFactory() sinks.Sink {
	sinkType := viper.GetString("sink-type")
	filename := viper.GetString("filesystem-filename")
	maxAge := viper.GetInt("filesystem-max-age")
	maxBackups := viper.GetInt("filesystem-max-backups")
	maxSize := viper.GetInt("filesystem-max-size")

	if sinkType == "filesystem" {
		return sinks.NewFilesystemSink(filename, maxAge, maxBackups, maxSize)
	}

	output := viper.GetString("console-output")
	var stdOutput *os.File

	if sinkType == "console" {
		switch output {
		case "stdout":
			stdOutput = os.Stdout
		case "stderr":
			stdOutput = os.Stderr
		default:
			log.Warningf("Unknown console output type '%s'. Falling back to 'stdout'", output)
			stdOutput = os.Stdout // actually fall back, instead of passing a nil *os.File
		}
		return sinks.NewConsoleSink(stdOutput)
	}

	log.Warningf("Unknown sink type '%s'. Falling back to 'filesystem'", sinkType)
	return sinks.NewFilesystemSink(filename, maxAge, maxBackups, maxSize)
}
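// Hypothetical usage sketch for SinkFactory: the viper keys read above are
// normally bound to flags or a config file elsewhere; setting them directly
// here is purely illustrative.
func demoSinkFactory() sinks.Sink {
	viper.Set("sink-type", "console")
	viper.Set("console-output", "stderr")

	return SinkFactory() // a sinks.Sink writing to os.Stderr
}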
func sendToHTTP(pmbConn *Connection, done chan error, id string) {
	logrus.Debugf("Sending to URI %s.", pmbConn.uri)

	done <- nil

	for {
		message := <-pmbConn.Out

		bodies, err := prepareMessage(message, pmbConn.Keys, id)
		if err != nil {
			logrus.Warningf("Error preparing message: %s", err)
			continue
		}

		for _, body := range bodies {
			logrus.Debugf("Sending raw message: %s", string(body))

			resp, err := http.Post(pmbConn.uri, "application/json", bytes.NewReader(body))
			if err != nil {
				logrus.Warningf("Error sending: %s", err)
			} else {
				// Close the response body so the connection can be reused.
				resp.Body.Close()
			}
		}

		if message.Done != nil {
			logrus.Debugf("Done channel present, sending message")
			message.Done <- nil
		}
	}
}
// Watch watches for new vms, or vms being destroyed.
func (self *Container) Watch(eventsChannel *EventChannel) error {
	self.stop = make(chan struct{})

	go func() {
		for {
			select {
			case event := <-eventsChannel.channel:
				switch event.EventAction {
				case alerts.LAUNCHED:
					if err := self.create(); err != nil {
						log.Warningf("Failed to process watch event: %v", err)
					}
				case alerts.DESTROYED:
					if err := self.destroy(); err != nil {
						log.Warningf("Failed to process watch event: %v", err)
					}
				}
			case <-self.stop:
				log.Info("container watcher exiting")
				return
			}
		}
	}()

	return nil
}
func listenToHTTP(pmbConn *Connection, done chan error, id string) {
	listenURI := fmt.Sprintf("%s/%s", pmbConn.uri, id)
	logrus.Debugf("Listening on URI %s.", listenURI)

	done <- nil

	for {
		res, err := http.Get(listenURI)
		if err != nil {
			logrus.Warningf("Error receiving: %s", err)
			time.Sleep(1 * time.Second)
			continue
		}

		body, err := ioutil.ReadAll(res.Body)
		res.Body.Close()
		if err != nil {
			logrus.Warningf("Error reading body: %s", err)
			time.Sleep(1 * time.Second)
			continue
		}

		switch res.StatusCode {
		case http.StatusOK:
			parseMessage(body, pmbConn.Keys, pmbConn.In, id)
		case http.StatusRequestTimeout:
			// Likely a long-poll timeout upstream; poll again immediately.
			continue
		default:
			logrus.Warningf("Bad request: %s", res.Status)
			time.Sleep(1 * time.Second)
		}
	}
}
func (s *server) bundleContainerLogs(res http.ResponseWriter, req *http.Request, f format) {
	defer trace.End(trace.Begin(""))

	readers := defaultReaders

	c, err := s.getSessionFromRequest(req)
	if err != nil {
		log.Errorf("Failed to get vSphere session while bundling container logs due to error: %s", err.Error())
		http.Error(res, genericErrorMessage, http.StatusInternalServerError)
		return
	}

	// Note: we don't want to Logout() until tarEntries() completes below
	defer c.Client.Logout(context.Background())

	logs, err := findDatastoreLogs(c)
	if err != nil {
		log.Warningf("error searching datastore: %s", err)
	} else {
		for key, rdr := range logs {
			readers[key] = rdr
		}
	}

	logs, err = findDiagnosticLogs(c)
	if err != nil {
		log.Warningf("error collecting diagnostic logs: %s", err)
	} else {
		for key, rdr := range logs {
			readers[key] = rdr
		}
	}

	s.bundleLogs(res, req, readers, f)
}
func (server *Server) SendMessage(ctx context.Context, in *pb.Message) (*pb.SendMessageResponse, error) {
	log.Infof("server.go: SendMessage with template='%s' and language='%s' to #%d target(s)",
		in.Template, in.Language, len(in.Targets))

	temp := server.GetTemplateGroup(in.Template)
	results := make([]*pb.MessageTargetResponse, 0)

	if temp == nil {
		return pb.NewCustomMessageResponse(
			pb.ResultTemplateGroupNotFound,
			fmt.Sprintf("template=%s not found", in.Template),
			results,
		), nil
	}

	for _, t := range in.Targets {
		ty := getTargetType(t)
		if len(ty) == 0 {
			log.Warningf("server.go: there is no suitable driver")
			results = append(results, pb.NewMessageTargetResponse(pb.ResultDriverNotFound, "", ""))
			continue
		}

		driver := server.Drivers[ty]
		if driver == nil {
			log.Warningf("server.go: %s driver not found", ty)
			results = append(results, pb.NewMessageTargetResponse(pb.ResultDriverNotFound, ty, ""))
			continue
		}

		if err := driver.Send(in.Language, server.Config.DefaultLanguage, t, temp); err != nil {
			results = append(results, &pb.MessageTargetResponse{Type: pb.ResultInternalDriverError, Data: fmt.Sprint(err)})
		} else {
			results = append(results, pb.NewMessageTargetResponse(pb.ResultSuccess, driver.Type(), driver.Name()))
		}
	}

	return pb.NewMessageResponse(pb.ResultSuccess, results), nil
}
func (d *Dialer) dialFactory(network, address string) (net.Conn, error) {
	tcpAddr, err := d.aPool.GetRoundRobin()
	if err != nil {
		logrus.Warningf("aPool.GetRoundRobin error: %v", err)
		return d.Dialer.Dial(network, address)
	}

	c, err := net.DialTCP(network, nil, tcpAddr)
	if err != nil {
		logrus.Warningf("Failed to setup DialTCP: %s, fallback dialer.Dial", err)
		return d.Dialer.Dial(network, address)
	}

	if d.KeepAlive != 0 {
		if err = c.SetKeepAlive(true); err != nil {
			return nil, err
		}
		if err = c.SetKeepAlivePeriod(d.KeepAlive); err != nil {
			return nil, err
		}
		if err = c.SetLinger(0); err != nil {
			return nil, err
		}
		if err = c.SetNoDelay(true); err != nil {
			return nil, err
		}
	}

	return c, nil
}
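// A minimal sketch of how dialFactory might be wired into an HTTP client
// (assuming Dialer embeds a net.Dialer and aPool is populated elsewhere):
func (d *Dialer) newHTTPClient() *http.Client {
	return &http.Client{
		Transport: &http.Transport{
			// Dial is the legacy (pre-DialContext) hook; dialFactory's
			// signature matches it directly.
			Dial: d.dialFactory,
		},
	}
}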
func (s *server) bundleContainerLogs(res http.ResponseWriter, req *http.Request, f format) {
	defer trace.End(trace.Begin(""))

	readers := defaultReaders

	if config.Service != "" {
		c, err := client()
		if err != nil {
			log.Errorf("failed to connect: %s", err)
		} else {
			// Note: we don't want to Logout() until tarEntries() completes below
			defer c.Client.Logout(context.Background())

			logs, err := findDatastoreLogs(c)
			if err != nil {
				log.Warningf("error searching datastore: %s", err)
			} else {
				for key, rdr := range logs {
					readers[key] = rdr
				}
			}

			logs, err = findDiagnosticLogs(c)
			if err != nil {
				log.Warningf("error collecting diagnostic logs: %s", err)
			} else {
				for key, rdr := range logs {
					readers[key] = rdr
				}
			}
		}
	}

	s.bundleLogs(res, req, readers, f)
}
func (c *fileArchiver) processPaths() {
	for _, path := range c.Paths {
		matches, err := filepath.Glob(path)
		if err != nil {
			logrus.Warningf("%s: %v", path, err)
			continue
		}

		found := 0

		for _, match := range matches {
			err := filepath.Walk(match, func(path string, info os.FileInfo, err error) error {
				if c.process(path) {
					found++
				}
				return nil
			})
			if err != nil {
				logrus.Warningln("Walking", match, err)
			}
		}

		if found == 0 {
			logrus.Warningf("%s: no matching files", path)
		} else {
			logrus.Infof("%s: found %d matching files", path, found)
		}
	}
}
func (p *Probe) report() report.Report {
	reports := make(chan report.Report, len(p.reporters))

	for _, rep := range p.reporters {
		go func(rep Reporter) {
			t := time.Now()
			timer := time.AfterFunc(p.spyInterval, func() {
				log.Warningf("%v reporter took longer than %v", rep.Name(), p.spyInterval)
			})
			newReport, err := rep.Report()
			if !timer.Stop() {
				log.Warningf("%v reporter took %v (longer than %v)", rep.Name(), time.Since(t), p.spyInterval)
			}
			metrics.MeasureSince([]string{rep.Name(), "reporter"}, t)
			if err != nil {
				log.Errorf("error generating report: %v", err)
				newReport = report.MakeReport() // empty is OK to merge
			}
			reports <- newReport
		}(rep)
	}

	result := report.MakeReport()
	for i := 0; i < cap(reports); i++ {
		result = result.Merge(<-reports)
	}
	return result
}
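// A minimal Reporter sketch, consistent with how report() uses the interface
// above (Name() feeds logs/metrics, Report() returns a report plus an error).
// The concrete type below is hypothetical, for illustration only.
type staticReporter struct{ name string }

func (s staticReporter) Name() string { return s.name }

func (s staticReporter) Report() (report.Report, error) {
	return report.MakeReport(), nil // an empty report merges cleanly
}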
func (lay *Layer) loopServerConn(prot *protocol.Protocol) bool {
	// Check if we need to quit:
	select {
	case <-lay.childCtx.Done():
		return false
	default:
	}

	req := wire.Request{}
	if err := prot.Recv(&req); err != nil {
		if err != io.EOF {
			log.Warningf("Server side recv: %v", err)
		}
		return false
	}

	log.Debugf("Got request: %v", req)

	fn, ok := lay.handlers[req.ReqType]
	if !ok {
		log.Warningf("Received packet without registered handler (%d)", req.ReqType)
		log.Warningf("Packet will be dropped.")
		return true
	}

	resp, err := fn(&req)
	if err != nil {
		resp = &wire.Response{
			Error: err.Error(),
		}
	}

	if resp == nil {
		// '0' is the ID for broadcasts. Empty responses are valid there.
		if req.ID != 0 {
			log.Warningf("Handler for `%d` failed to return a response or error", req.ReqType)
		}
		return true
	}

	// Auto-fill the type and ID fields from the request:
	resp.ReqType = req.ReqType
	resp.ID = req.ID
	resp.Nonce = req.Nonce

	log.Debugf("Sending back %v", resp)
	if err := prot.Send(resp); err != nil {
		log.Warningf("Unable to send back response: %v", err)
		return false
	}

	return true
}
func loadConfig() *config.Config {
	// We do not use guessRepoFolder() here, since it might abort.
	folder := repo.GuessFolder()
	cfg, err := repoconfig.LoadConfig(filepath.Join(folder, ".brig", "config"))
	if err != nil {
		log.Warningf("Could not load config: %v", err)
		log.Warningf("Falling back on config defaults...")
		return repoconfig.CreateDefaultConfig()
	}

	return cfg
}
func (s *srv) newTLSConfig() (*tls.Config, error) {
	config, err := s.listener.TLSConfig()
	if err != nil {
		return nil, err
	}

	if config.NextProtos == nil {
		config.NextProtos = []string{"http/1.1"}
	}

	pairs := map[string]tls.Certificate{}
	for _, host := range s.mux.hosts {
		c := host.Settings.KeyPair
		if c == nil {
			continue
		}
		keyPair, err := tls.X509KeyPair(c.Cert, c.Key)
		if err != nil {
			return nil, err
		}
		if host.Settings.OCSP.Enabled {
			log.Infof("%v OCSP is enabled for %v, resolvers: %v", s, host, host.Settings.OCSP.Responders)
			r, err := s.mux.stapler.StapleHost(&host)
			if err != nil {
				log.Warningf("%v failed to staple %v, error %v", s, host, err)
			} else if r.Response.Status == ocsp.Good || r.Response.Status == ocsp.Revoked {
				keyPair.OCSPStaple = r.Staple
			} else {
				log.Warningf("%s got undefined status from OCSP responder: %v", s, r.Response.Status)
			}
		}
		pairs[host.Name] = keyPair
	}

	config.Certificates = make([]tls.Certificate, 0, len(pairs))

	// The default host's certificate must come first; crypto/tls falls back
	// to Certificates[0] when SNI yields no better match.
	if s.defaultHost != "" {
		keyPair, exists := pairs[s.defaultHost]
		if !exists {
			return nil, fmt.Errorf("default host '%s' certificate is not passed", s.defaultHost)
		}
		config.Certificates = append(config.Certificates, keyPair)
	}

	for h, keyPair := range pairs {
		if h != s.defaultHost {
			config.Certificates = append(config.Certificates, keyPair)
		}
	}

	config.BuildNameToCertificate()
	return config, nil
}
func (rcv *TCP) handlePickle(conn net.Conn) {
	atomic.AddInt32(&rcv.active, 1)
	defer atomic.AddInt32(&rcv.active, -1)
	defer conn.Close()

	reader := bufio.NewReader(conn)

	var msgLen uint32
	var err error

	for {
		conn.SetReadDeadline(time.Now().Add(2 * time.Minute))

		// Read prepended length
		if err = binary.Read(reader, binary.BigEndian, &msgLen); err != nil {
			if err == io.EOF {
				return
			}
			atomic.AddUint32(&rcv.errors, 1)
			logrus.Warningf("[pickle] Can't read message length: %s", err.Error())
			return
		}

		// Allocate a byte array of the expected length
		data := make([]byte, msgLen)

		// Read remainder of pickle packet into byte array
		if err = binary.Read(reader, binary.BigEndian, data); err != nil {
			atomic.AddUint32(&rcv.errors, 1)
			logrus.Warningf("[pickle] Can't read message body: %s", err.Error())
			return
		}

		msgs, err := points.ParsePickle(data)
		if err != nil {
			atomic.AddUint32(&rcv.errors, 1)
			logrus.Infof("[pickle] Can't unpickle message: %s", err.Error())
			logrus.Debugf("[pickle] Bad message: %#v", string(data))
			return
		}

		for _, msg := range msgs {
			atomic.AddUint32(&rcv.metricsReceived, uint32(len(msg.Data)))
			rcv.out <- msg
		}
	}
}
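// For reference, a client-side sketch of the framing handlePickle expects: a
// big-endian uint32 length prefix, then that many bytes of pickled payload
// (the payload bytes are assumed to come from a pickle encoder elsewhere).
func sendPickleFrame(conn net.Conn, payload []byte) error {
	if err := binary.Write(conn, binary.BigEndian, uint32(len(payload))); err != nil {
		return err
	}
	_, err := conn.Write(payload)
	return err
}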
func (listener *CarbonlinkListener) handleConnection(conn net.Conn) {
	defer conn.Close()
	reader := bufio.NewReader(conn)

	for {
		conn.SetReadDeadline(time.Now().Add(listener.readTimeout))

		reqData, err := ReadCarbonlinkRequest(reader)
		if err != nil {
			logrus.Debugf("[carbonlink] read carbonlink request from %s: %s", conn.RemoteAddr().String(), err.Error())
			break
		}

		req, err := ParseCarbonlinkRequest(reqData)
		if err != nil {
			logrus.Warningf("[carbonlink] parse carbonlink request from %s: %s", conn.RemoteAddr().String(), err.Error())
			break
		}

		if req == nil {
			continue
		}

		if req.Type != "cache-query" {
			logrus.Warningf("[carbonlink] unknown query type: %#v", req.Type)
			break
		}

		cacheReq := NewQuery(req.Metric)
		listener.queryChan <- cacheReq

		var reply *Reply
		select {
		case reply = <-cacheReq.ReplyChan:
		case <-time.After(listener.queryTimeout):
			logrus.Infof("[carbonlink] Cache no reply (%s timeout)", listener.queryTimeout)
			reply = NewReply()
		}

		packed := listener.packReply(reply)
		if packed == nil {
			break
		}
		if _, err := conn.Write(packed); err != nil {
			logrus.Infof("[carbonlink] reply error: %s", err)
			break
		}
	}
}
// NewConversation returns a conversation that exchanges data over `conn`.
func NewConversation(conn net.Conn, node *ipfsutil.Node, peer id.Peer) (*Conversation, error) {
	proto, err := wrapConnAsProto(conn, node, peer.Hash())
	if err != nil {
		return nil, err
	}

	cnv := &Conversation{
		conn:     conn,
		node:     node,
		peer:     peer,
		proto:    proto,
		notifees: make(map[int64]transfer.AsyncFunc),
	}

	// Dispatch incoming responses to the registered notifees:
	go func() {
		for {
			resp := wire.Response{}
			err := cnv.proto.Recv(&resp)

			if isEOFError(err) {
				break
			}

			if err != nil {
				log.Warningf("Error while receiving data: %v", err)
				continue
			}

			respID := resp.ID
			cnv.Lock()
			fn, ok := cnv.notifees[respID]
			if !ok {
				log.Warningf("No such id: %v", respID)
				cnv.Unlock()
				continue
			}

			// Remove the callback
			delete(cnv.notifees, respID)
			cnv.Unlock()

			fn(&resp)
		}
	}()

	return cnv, nil
}
// Connect will start listening on incoming connections and remember the
// dialer so that calls to Dial() can succeed.
func (lay *Layer) Connect(l net.Listener, d transfer.Dialer) error {
	lay.mu.Lock()
	defer lay.mu.Unlock()

	lay.dialer = d
	lay.listener = l
	lay.childCtx, lay.cancel = context.WithCancel(lay.parentCtx)

	// Listen for incoming connections as long as the listener is open:
	go func() {
		for {
			conn, err := l.Accept()
			if err != nil {
				// *sigh* Again, not my fault.
				if err != transfer.ErrListenerWasClosed && err.Error() != "context canceled" {
					log.Warningf("Listener: %T '%v'", err, err)
				}
				break
			}

			// We currently rely on an ipfs connection here,
			// so testing it without ipfs is not directly possible.
			streamConn, ok := conn.(*ipfsutil.StreamConn)
			if !ok {
				log.Warningf("Denying non-stream conn connection, sorry.")
				return
			}

			// Attempt to establish a fully authenticated connection:
			hash := streamConn.PeerHash()
			proto, err := wrapConnAsProto(conn, lay.node, hash)
			if err != nil {
				log.Warningf(
					"Could not establish incoming connection to %s: %v",
					hash, err,
				)
				return
			}

			// Handle the protocol in server mode:
			go lay.handleServerConn(proto)
		}
	}()

	return nil
}
func needAtLeast(min int) checkFunc {
	return func(ctx *cli.Context) int {
		if ctx.NArg() < min {
			if min == 1 {
				log.Warningf("Need at least %d argument.", min)
			} else {
				log.Warningf("Need at least %d arguments.", min)
			}

			cli.ShowCommandHelp(ctx, ctx.Command.Name)
			return BadArgs
		}

		return Success
	}
}
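// Illustrative only: one way a checkFunc like needAtLeast(1) might be composed
// with a command handler. The withCheck wrapper and the int-returning handler
// signature are assumptions for this sketch, not part of the codebase.
func withCheck(check checkFunc, handler func(*cli.Context) int) func(*cli.Context) int {
	return func(ctx *cli.Context) int {
		if rc := check(ctx); rc != Success {
			return rc
		}
		return handler(ctx)
	}
}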
// Listen binds the port. Receives messages and sends them to the out channel.
func (listener *CarbonlinkListener) Listen(addr *net.TCPAddr) error {
	var err error
	listener.tcpListener, err = net.ListenTCP("tcp", addr)
	if err != nil {
		return err
	}

	go func() {
		<-listener.exit
		listener.tcpListener.Close()
	}()

	go func() {
		defer listener.tcpListener.Close()

		for {
			conn, err := listener.tcpListener.Accept()
			if err != nil {
				if strings.Contains(err.Error(), "use of closed network connection") {
					break
				}
				logrus.Warningf("[carbonlink] Failed to accept connection: %s", err)
				continue
			}

			go listener.handleConnection(conn)
		}
	}()

	return nil
}
// Subscribe watches etcd changes and generates structured events telling vulcand to add or delete frontends, hosts etc.
// It is a blocking function.
func (n *ng) Subscribe(changes chan interface{}, cancelC chan bool) error {
	// This index helps us to get changes in sequence, as they were performed by clients.
	waitIndex := uint64(0)
	for {
		response, err := n.client.Watch(n.etcdKey, waitIndex, true, nil, cancelC)
		if err != nil {
			switch err {
			case etcd.ErrWatchStoppedByUser:
				log.Infof("Stop watching: graceful shutdown")
				return nil
			default:
				log.Errorf("unexpected error: %s, stop watching", err)
				return err
			}
		}

		waitIndex = response.EtcdIndex + 1
		log.Infof("%s", responseToString(response))

		change, err := n.parseChange(response)
		if err != nil {
			log.Warningf("Ignore '%s', error: %s", responseToString(response), err)
			continue
		}

		if change != nil {
			log.Infof("%v", change)
			select {
			case changes <- change:
			case <-cancelC:
				return nil
			}
		}
	}
}
// addLeftovers takes the paths from bob that alice doesn't possess.
func (st *Store) addLeftovers(bob *Store, bobMap pathToHistory) error {
	owner, err := st.Owner()
	if err != nil {
		return err
	}

	for path := range bobMap {
		// TODO: what to do with bob's history? Ignore for now?
		node, err := bob.fs.LookupNode(path)
		if err != nil {
			return err
		}

		if node.GetType() != NodeTypeFile {
			continue
		}

		file, ok := node.(*File)
		if !ok {
			// Log the node: `file` is nil when the assertion fails.
			log.Warningf("Syncing messed up file types; not a file: %v", node)
			continue
		}

		_, err = stageFile(st.fs, path, file.Hash(), file.Key(), file.Size(), owner.ID())
		if err != nil {
			return err
		}
	}

	return nil
}
// Deregister will deregister Skydns2Adapter's interface from RegistryAdapter
func (r *Skydns2Adapter) Deregister(service *bridge.Service) error {
	_, err := r.client.Delete(r.servicePath(service), false)
	if err != nil {
		log.Warningf("skydns2: failed to deregister service: %s", err)
	}
	return err
}
func (rcv *TCP) handleConnection(conn net.Conn) {
	atomic.AddInt32(&rcv.active, 1)
	defer atomic.AddInt32(&rcv.active, -1)
	defer conn.Close()

	reader := bufio.NewReader(conn)

	for {
		conn.SetReadDeadline(time.Now().Add(2 * time.Minute))

		line, err := reader.ReadBytes('\n')
		if err != nil {
			if err == io.EOF {
				if len(line) > 0 {
					logrus.Warningf("[tcp] Unfinished line: %#v", line)
				}
			} else {
				atomic.AddUint32(&rcv.errors, 1)
				logrus.Error(err)
			}
			break
		}

		if len(line) > 0 { // skip empty lines
			if msg, err := points.ParseText(string(line)); err != nil {
				atomic.AddUint32(&rcv.errors, 1)
				logrus.Info(err)
			} else {
				atomic.AddUint32(&rcv.metricsReceived, 1)
				rcv.out <- msg
			}
		}
	}
}
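// An illustrative client sketch for the plaintext protocol handleConnection
// reads: one "metric.path value timestamp\n" line per data point (the exact
// accepted grammar is defined by points.ParseText).
func sendPlaintextMetric(addr, name string, value float64, timestamp int64) error {
	conn, err := net.Dial("tcp", addr)
	if err != nil {
		return err
	}
	defer conn.Close()

	_, err = fmt.Fprintf(conn, "%s %v %d\n", name, value, timestamp)
	return err
}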
// Deregister will deregister Skydns2Adapter's interface from RegistryAdapter
// (this variant uses a context-aware etcd client API, unlike the one above).
func (r *Skydns2Adapter) Deregister(service *bridge.Service) error {
	_, err := r.client.Delete(context.Background(), r.servicePath(service), nil)
	if err != nil {
		log.Warningf("skydns2: failed to deregister service: %s", err)
	}
	return err
}
func listenToAMQP(pmbConn *Connection, done chan error, id string) {
	logrus.Debugf("calling setupListen")
	msgs, err := setupListen(pmbConn.uri, pmbConn.prefix, id)
	if err != nil {
		done <- err
		return
	}
	done <- nil

	for {
		delivery, ok := <-msgs
		if !ok {
			logrus.Warningf("Listen connection failed, reconnecting...")

			// attempt to reconnect forever
			msgs, err = setupListenForever(pmbConn.uri, pmbConn.prefix, id)
			if err != nil {
				logrus.Errorf("Unable to reconnect, exiting... %s", err)
				return
			}

			logrus.Infof("Reconnected.")
			continue
		}

		logrus.Debugf("Raw message received: %s", string(delivery.Body))
		parseMessage(delivery.Body, pmbConn.Keys, pmbConn.In, id)
	}
}
func checkSchema() {
	var count int

	defer writeConstants()

	db.DB.Table("constants").Where("schema_version = ?", schemaVersion.String()).Count(&count)
	if count == 1 {
		return
	}

	currStr := getCurrConstants().SchemaVersion
	if currStr == "" {
		// Write the current schema version
		db.DB.Save(&Constant{
			schemaVersion.String(),
		})
		// Initial database migration
		whitelist_id_string()
		return
	}

	if v, _ := semver.Parse(currStr); v.Major < schemaVersion.Major {
		logrus.Warningf("Incompatible schema change detected (%s), attempting to migrate to (%s).", currStr, schemaVersion.String())
		for i := v.Major + 1; i <= schemaVersion.Major; i++ {
			logrus.Debugf("Calling migration routine for %d.0.0", i)
			f := migrationRoutines[i]
			f()
		}
	}
}
// Rescan the plugins directory, load new plugins, and remove missing plugins
func (r *Registry) scan() error {
	sockets, err := r.sockets(r.rootPath)
	if err != nil {
		return err
	}

	r.lock.Lock()
	plugins := map[string]*Plugin{}

	// add (or keep) plugins which were found
	for _, path := range sockets {
		if plugin, ok := r.pluginsBySocket[path]; ok {
			plugins[path] = plugin
			continue
		}
		tr, err := transport(path, pluginTimeout)
		if err != nil {
			log.Warningf("plugins: error loading plugin %s: %v", path, err)
			continue
		}
		client := &http.Client{Transport: tr, Timeout: pluginTimeout}
		plugins[path] = NewPlugin(r.context, path, client, r.apiVersion, r.handshakeMetadata)
		log.Infof("plugins: added plugin %s", path)
	}

	// remove plugins which weren't found
	for path, plugin := range r.pluginsBySocket {
		if _, ok := plugins[path]; !ok {
			plugin.Close()
			log.Infof("plugins: removed plugin %s", plugin.socket)
		}
	}

	r.pluginsBySocket = plugins
	r.lock.Unlock()
	return nil
}
// sockets recursively finds all unix sockets under the path provided
func (r *Registry) sockets(path string) ([]string, error) {
	var (
		result []string
		statT  syscall.Stat_t
	)
	// TODO: use of fs.Stat (which is syscall.Stat) here makes this linux specific.
	if err := fs.Stat(path, &statT); err != nil {
		return nil, err
	}
	switch statT.Mode & syscall.S_IFMT {
	case syscall.S_IFDIR:
		files, err := fs.ReadDir(path)
		if err != nil {
			return nil, err
		}
		for _, file := range files {
			fpath := filepath.Join(path, file.Name())
			s, err := r.sockets(fpath)
			if err != nil {
				log.Warningf("plugins: error loading path %s: %v", fpath, err)
			}
			result = append(result, s...)
		}
	case syscall.S_IFSOCK:
		result = append(result, path)
	}
	return result, nil
}
func ToggleMaintenanceMode(c *cli.Context) {
	cfg, err := NewAppConfig(c)
	if err != nil {
		log.Errorf("Failed to get client: %v", err)
		return
	}

	action := c.Args().First()
	switch action {
	case "enable":
		if err = cfg.client.Agent().EnableNodeMaintenance(c.String("reason")); err != nil {
			log.Errorf("Could not set maintenance mode: %v", err)
			return
		}
	case "disable":
		if err = cfg.client.Agent().DisableNodeMaintenance(); err != nil {
			log.Errorf("Could not unset maintenance mode: %v", err)
			return
		}
	default:
		log.Warningf("Must choose either enable or disable")
		cli.ShowAppHelp(c)
		return
	}

	log.Println("Success")
}
// send is the synchronous variant of SendAsync
func (acl *APIClient) send(req *wire.Request) (resp *wire.Response, err error) {
	// `0` is reserved for broadcast counters, so increment first.
	acl.idcnt++
	req.ID = acl.idcnt

	// Buffer the channel so the async callback cannot block forever
	// if we time out below and nobody reads from `done` anymore.
	done := make(chan util.Empty, 1)

	err = acl.cnv.SendAsync(req, func(respIn *wire.Response) {
		resp = respIn
		done <- util.Empty{}
	})
	if err != nil {
		// Bail out early if the send itself failed; the callback will
		// never fire, so waiting for the timeout only masks the error.
		return nil, err
	}

	// TODO: Make that configurable?
	timer := time.NewTimer(10 * time.Second)
	defer timer.Stop()

	// Wait until we get a response from SendAsync or until we time out.
	select {
	case <-done:
	case stamp := <-timer.C:
		log.Warningf("APIClient operation timed out at %v", stamp)
		return nil, util.ErrTimeout
	}

	return resp, nil
}