func logToFile(logger *log.Logger, filename string) { f, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) if err != nil { logger.Fatalf("error opening file: %v", err) } logger.SetOutput(f) }
// for every byte sent to us, sends back 2 bytes: original sent and our code func echoServer(l net.Listener, myCode byte, lg *log.Logger) { for { conn, err := l.Accept() if err != nil { lg.Fatalf("Error accepting for code %d : %s", myCode, err) } go func() { for { req := make([]byte, 1) _, err := conn.Read(req) if err != nil { lg.Printf("Error reading bytes from conn for code %d : %s", myCode, err) return } resp := make([]byte, 2) resp[0] = req[0] resp[1] = myCode _, err = conn.Write(resp) if err != nil { lg.Printf("Error writing bytes to conn for code %d : %s", myCode, err) return } } }() } }
// Dials a Bitcoin server over tcp, times out in .5 seconds func setupConn(addr string, logger *log.Logger) net.Conn { logger.Println("Connecting to:", addr) conn, err := net.DialTimeout("tcp", addr, time.Millisecond*500) if err != nil { logger.Fatalf(fmt.Sprintf("%v, could not connect to %s", err, addr)) } return conn }
//serve static content from a given subdir of this modena project func modenaStaticContent(urlPath string, subdir string, logger *log.Logger) { //setup static content truePath, err := modena.ModenaPathFromEnv(subdir, logger) if err != nil { logger.Fatalf("Cannot get path to %s: %s", subdir, err) } //strip the path from requests so that /urlPath/fart = modena/subdir/fart http.Handle(urlPath, http.StripPrefix(urlPath, http.FileServer(http.Dir(truePath)))) }
func initBlog(settings *settings, session *service.Session, sessions *service.SessionPool, logger *log.Logger, renderer *mtemplate.Renderer) error { G := func(in string) string { return in } nodeType := service.NodeType{ Id: "core.Blog", AddableTo: []string{"."}, Name: i18n.GenLanguageMap(G("Blog"), availableLocales), Fields: []*service.FieldConfig{ {Id: "core.Title"}, }, } if err := session.Monsti().RegisterNodeType(&nodeType); err != nil { return fmt.Errorf("Could not register blog node type: %v", err) } nodeType = service.NodeType{ Id: "core.BlogPost", AddableTo: []string{"core.Blog"}, Name: i18n.GenLanguageMap(G("Blog Post"), availableLocales), Fields: []*service.FieldConfig{ {Id: "core.Title"}, {Id: "core.Body"}, }, Hide: true, PathPrefix: "$year/$month", } if err := session.Monsti().RegisterNodeType(&nodeType); err != nil { return fmt.Errorf("Could not register blog post node type: %v", err) } // Add a signal handler handler := service.NewNodeContextHandler(sessions, func(req uint, session *service.Session, nodeType string, embedNode *service.EmbedNode) ( map[string][]byte, *service.CacheMods, error) { switch nodeType { case "core.Blog": ctx, mods, err := getBlogContext(req, embedNode, session, settings, renderer) if err != nil { return nil, nil, fmt.Errorf("Could not get blog context: %v", err) } return ctx, mods, nil default: return nil, nil, nil } }) if err := session.Monsti().AddSignalHandler(handler); err != nil { logger.Fatalf("Could not add signal handler: %v", err) } return nil }
func loadConfigFile(confFile string, logger *log.Logger) { logger.Printf("Loading config from file '%s'\n", confFile) file, err := os.Open(confFile) if err != nil { logger.Fatalf("could not load config file '%s': %v\n", confFile, err) } decoder := json.NewDecoder(file) configuration := &NbadConfig{} err = decoder.Decode(configuration) if err != nil { logger.Fatalf("could not read config file '%s': %v", confFile, err) } nbadConfig = configuration }
// Initialise a JarvisBot. // lg is optional. func InitJarvis(name string, bot *telebot.Bot, lg *log.Logger, config map[string]string) *JarvisBot { // We'll use random numbers throughout JarvisBot rand.Seed(time.Now().UTC().UnixNano()) if lg == nil { lg = log.New(os.Stdout, "[jarvis] ", 0) } j := &JarvisBot{Name: name, bot: bot, log: lg, keys: config} j.fmap = j.getDefaultFuncMap() // Setup database // Get current executing folder pwd, err := osext.ExecutableFolder() if err != nil { lg.Fatalf("cannot retrieve present working directory: %s", err) } db, err := bolt.Open(path.Join(pwd, "jarvis.db"), 0600, nil) if err != nil { lg.Fatal(err) } j.db = db createAllBuckets(db) // Ensure temp directory is created. // This is used to store media temporarily. tmpDirPath := filepath.Join(pwd, TEMPDIR) if _, err := os.Stat(tmpDirPath); os.IsNotExist(err) { j.log.Printf("[%s] creating temporary directory", time.Now().Format(time.RFC3339)) mkErr := os.Mkdir(tmpDirPath, 0775) if mkErr != nil { j.log.Printf("[%s] error creating temporary directory\n%s", time.Now().Format(time.RFC3339), err) } } return j }
// Initialise a MorningBot. // lg is optional. func InitMorningBot(name string, bot *telebot.Bot, lg *log.Logger, config map[string]string) *MorningBot { if lg == nil { lg = log.New(os.Stdout, "[morningbot] ", 0) } m := &MorningBot{Name: name, bot: bot, log: lg, keys: config} m.fmap = m.getDefaultFuncMap() // Setup database // Get current executing folder pwd, err := osext.ExecutableFolder() if err != nil { lg.Fatalf("cannot retrieve present working directory: %s", err) } db, err := bolt.Open(path.Join(pwd, "morningbot.db"), 0600, nil) if err != nil { lg.Fatal(err) } m.db = db createAllBuckets(db) return m }
// NewClient builds a warp Client from cfg: it sets up logging (stdout,
// stderr, or syslog per cfg.LogTo), loads the client TLS key pair and CA
// pool, and starts background goroutines that maintain the TLS
// connection, dispatch inbound packets, and flush outbound packets.
func NewClient(cfg Config) *Client {
	var logger *log.Logger
	var err error
	switch {
	case cfg.LogTo == "stdout":
		logger = log.New(os.Stdout, "warp: ", 0)
	case cfg.LogTo == "stderr":
		logger = log.New(os.Stderr, "warp: ", 0)
	case cfg.LogTo == "syslog":
		logger, err = syslog.NewLogger(syslog.LOG_INFO|syslog.LOG_DAEMON, 0)
		if err != nil {
			fmt.Printf("cannot initialize logger: %v", err)
			os.Exit(1)
		}
	default:
		// Unknown LogTo value: nowhere sensible to log to, so abort.
		fmt.Printf("cannot initialize logger, bad configuration")
		os.Exit(1)
	}
	// Client certificate/key pair presented to the server, plus the CA
	// pool used to verify the server's certificate.
	cert, err := tls.LoadX509KeyPair(cfg.Cert, cfg.PrivKey)
	if err != nil {
		logger.Fatalf("cannot load certificate pair: %v", err)
	}
	data, err := ioutil.ReadFile(cfg.CaCert)
	if err != nil {
		logger.Fatalf("cannot load cacertificate: %v", err)
	}
	capool := x509.NewCertPool()
	capool.AppendCertsFromPEM(data)
	tlscfg := &tls.Config{
		Certificates: []tls.Certificate{cert},
		RootCAs:      capool,
	}
	tlscfg.BuildNameToCertificate()
	input := make(chan *Packet, 100)
	output := make(chan *Packet, 100)
	env := NewEnvironment(cfg.Host)
	client := &Client{Config: cfg, Conn: nil, Logger: logger, Connected: false, Input: input, Output: output}
	go BuildEnv(logger, cfg.Host, env)
	// Reader goroutine: (re)connects as needed and feeds packets to input.
	go func() {
		for {
			client.lock.Lock()
			if client.Connected == false {
				// Drop the lock while dialing so other goroutines are
				// not blocked during a (possibly slow) connect attempt.
				client.lock.Unlock()
				conn, err := tls.Dial("tcp", cfg.Server, tlscfg)
				if err != nil {
					logger.Printf("unabled to connect, will retry in 5 seconds: %v", err)
					time.Sleep(5 * time.Second)
					continue
				}
				client.lock.Lock()
				client.Conn = conn
				client.Connected = true
				logger.Printf("connected")
			}
			// A read that stalls for 2 minutes is treated as a dead link.
			client.Conn.SetReadDeadline(time.Now().Add(2 * time.Minute))
			conn := client.Conn
			client.lock.Unlock()
			p, err := ReadPacket(logger, conn)
			if err != nil {
				// Mark disconnected; the top of the loop will redial.
				logger.Printf("read error: %v", err)
				client.lock.Lock()
				client.Connected = false
				client.Conn.Close()
				client.Conn = nil
				client.lock.Unlock()
			} else {
				input <- p
			}
		}
	}()
	// Dispatcher goroutine: handles each inbound packet in order.
	go func() {
		for {
			p := <-input
			client.HandleRequest(p, env)
		}
	}()
	// Writer goroutine: sends queued packets, dropping them while offline.
	go func() {
		for {
			p := <-output
			p.Host = cfg.Host
			client.lock.Lock()
			if client.Connected == false {
				logger.Printf("not connected, dropping output packet")
				client.lock.Unlock()
				continue
			}
			conn := client.Conn
			client.lock.Unlock()
			// NOTE(review): this assigns the outer err declared at the
			// top of NewClient rather than a goroutine-local one —
			// harmless today (no other reader), but fragile; confirm.
			err = WritePacket(logger, conn, p)
			if err != nil {
				logger.Printf("failed to send packet")
			}
		}
	}()
	return client
}
func testFiles(logger *log.Logger) { var semaphore chan struct{} if isLimited { semaphore = make(chan struct{}, concurrencyFlag) } out := make(chan []byte) wg := &sync.WaitGroup{} walker := func(path string, info os.FileInfo, err error) error { if !info.IsDir() { return nil } rel := strings.Replace(path, projectPath, "", 1) if _, ignore := ignores[rel]; ignore { return filepath.SkipDir } rel = "./" + rel if files, err := filepath.Glob(rel + "/*_test.go"); len(files) == 0 || err != nil { if err != nil { logger.Fatal("Error checking for test files") } if debugFlag { logger.Println("No Go Test files in DIR:", rel, "skipping") } return nil } wg.Add(1) if isLimited { semaphore <- struct{}{} } go processDIR(logger, wg, path, rel, out, semaphore) return nil } if err := filepath.Walk(projectPath, walker); err != nil { logger.Fatalf("\n**could not walk project path '%s'\n%s\n", projectPath, err) } go func() { wg.Wait() close(out) if isLimited { close(semaphore) } }() buff := bytes.NewBufferString("") for cover := range out { buff.Write(cover) } final := buff.String() final = modeRegex.ReplaceAllString(final, "") final = "mode: " + coverFlag + "\n" + final if err := ioutil.WriteFile(outFilename, []byte(final), 0644); err != nil { logger.Fatal("ERROR Writing \""+outFilename+"\"", err) } }
// main wires up the monsti daemon: logging, settings, the RPC service
// provider, core node types, the blog module, external module
// processes, and finally the HTTP frontend.
func main() {
	useSyslog := flag.Bool("syslog", false, "use syslog")
	flag.Parse()

	var logger *log.Logger
	if *useSyslog {
		var err error
		logger, err = syslog.NewLogger(syslog.LOG_INFO|syslog.LOG_DAEMON, 0)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Could not setup syslog logger: %v\n", err)
			os.Exit(1)
		}
	} else {
		logger = log.New(os.Stderr, "monsti ", log.LstdFlags)
	}

	// Load configuration; the single positional argument is the config dir.
	if flag.NArg() != 1 {
		logger.Fatalf("Usage: %v <config_directory>\n", filepath.Base(os.Args[0]))
	}
	cfgPath := msettings.GetConfigPath(flag.Arg(0))
	var settings settings
	if err := msettings.LoadModuleSettings("daemon", cfgPath, &settings); err != nil {
		logger.Fatal("Could not load settings: ", err)
	}

	gettext.DefaultLocales.Domain = "monsti-daemon"
	gettext.DefaultLocales.LocaleDir = settings.Monsti.Directories.Locale

	var waitGroup sync.WaitGroup

	// Start service handler and accept RPC connections in the background.
	monstiPath := settings.Monsti.GetServicePath(service.MonstiService.String())
	monsti := new(MonstiService)
	monsti.Settings = &settings
	monsti.Logger = logger
	provider := service.NewProvider("Monsti", monsti)
	provider.Logger = logger
	if err := provider.Listen(monstiPath); err != nil {
		logger.Fatalf("service: Could not start service: %v", err)
	}
	waitGroup.Add(1)
	go func() {
		defer waitGroup.Done()
		if err := provider.Accept(); err != nil {
			logger.Fatalf("Could not accept at service: %v", err)
		}
	}()

	sessions := service.NewSessionPool(1, monstiPath)
	renderer := template.Renderer{Root: settings.Monsti.GetTemplatesPath()}

	// Init core functionality (node types, blog module).
	session, err := sessions.New()
	if err != nil {
		logger.Fatalf("Could not get session: %v", err)
	}
	if err := initNodeTypes(&settings, session, logger); err != nil {
		logger.Fatalf("Could not init node types: %v", err)
	}
	if err := initBlog(&settings, session, sessions, logger, &renderer); err != nil {
		logger.Fatalf("Could not init blog: %v", err)
	}

	// Wait for signals from the service in a dedicated goroutine.
	go func() {
		for {
			if err := session.Monsti().WaitSignal(); err != nil {
				logger.Fatalf("Could not wait for signal: %v", err)
			}
		}
	}()

	// Start modules: each module is an external "monsti-<name>" process;
	// it reports readiness through its moduleInit channel.
	monsti.moduleInit = make(map[string]chan bool)
	for _, module := range settings.Modules {
		monsti.moduleInit[module] = make(chan bool, 1)
	}
	for _, module := range settings.Modules {
		executable := "monsti-" + module
		cmd := exec.Command(executable, cfgPath)
		cmd.Stderr = moduleLog{module, logger}
		go func(module string) {
			if err := cmd.Run(); err != nil {
				logger.Fatalf("Module %q failed: %v", module, err)
			}
		}(module)
	}
	logger.Println("Waiting for modules to finish initialization...")
	for _, module := range settings.Modules {
		logger.Printf("Waiting for %q...", module)
		<-monsti.moduleInit[module]
	}

	// Setup up httpd: statics are served directly, everything else goes
	// through the node handler.
	handler := nodeHandler{
		Renderer: renderer,
		Settings: &settings,
		Log:      logger,
		Sessions: sessions,
	}
	monsti.Handler = &handler
	monsti.siteMutexes = make(map[string]*sync.RWMutex)
	http.Handle("/static/", http.FileServer(http.Dir(
		filepath.Dir(settings.Monsti.GetStaticsPath()))))
	handler.InitializedSites = make(map[string]bool)
	http.Handle("/", &handler)
	waitGroup.Add(1)
	go func() {
		if err := http.ListenAndServe(settings.Listen, nil); err != nil {
			logger.Fatal("HTTP Listener failed: ", err)
		}
		waitGroup.Done()
	}()
	logger.Printf("Monsti is up and running, listening on %q.", settings.Listen)
	waitGroup.Wait()
	logger.Println("Monsti is shutting down.")
}
// slaveLoop runs the slave side of the elevator network: it reports
// elevator state and order requests to the master, tracks orders
// assigned to this elevator, and — if designated as backup — spawns a
// new master when the current one times out. When a non-backup slave
// loses its master, the last known order list is returned to the caller.
func slaveLoop(
	slaveEvents com.SlaveEvent,
	masterEvents com.MasterEvent,
	elevatorEvents com.ElevatorEvent,
	slaveLogger log.Logger) []order.Order {

	sendTicker := time.NewTicker(sendInterval)
	masterTimeoutTimer := time.NewTimer(masterTimeout)
	slaves := make(map[network.IP]com.Slave)
	orders := make([]order.Order, 0)
	requests := make([]order.Order, 0)
	isBackup := false

	for {
		select {
		case <-masterTimeoutTimer.C:
			slaveLogger.Print("Master timed out")
			if isBackup {
				slaveLogger.Print("Initiating backup")
				// Probe connectivity before promoting ourselves; without
				// a working network a new master would be useless.
				conn, err := net.Dial("tcp", "google.com:80")
				if err != nil {
					slaveLogger.Print("Failed to connect to internet. Master will not be spawned.")
					masterTimeoutTimer.Reset(masterTimeout)
				} else {
					conn.Close()
					slaveLogger.Print("Spawning new master")
					go network.UDPInit(true, masterEvents.ToSlaves, masterEvents.FromSlaves, logger.NewLogger("NETWORK"))
					go master.InitMaster(masterEvents, orders, slaves, logger.NewLogger("MASTER"))
				}
			} else {
				return orders
			}

		case <-slaveEvents.MissedDeadline:
			// Safety: stop the motor before aborting the process.
			driver.SetMotorDirection(driver.DirnStop)
			slaveLogger.Fatalf("Failed to complete order within deadline")

		case <-sendTicker.C:
			// Periodic heartbeat: current elevator state plus any
			// not-yet-acknowledged requests.
			data := com.SlaveData{
				ElevData: elevator.GetElevData(),
				Requests: requests,
			}
			slaveEvents.ToMaster <- network.UDPMessage{
				Data: com.EncodeSlaveData(data),
			}

		case button := <-slaveEvents.ButtonPressed:
			slaveLogger.Print("Button pressed")
			requests = append(requests, order.Order{Button: button})

		case floor := <-slaveEvents.CompletedFloor:
			slaveLogger.Printf("Completed order for floor %d", floor+1)
			for _, o := range orders {
				if o.TakenBy == myIP && o.Button.Floor == floor {
					// NOTE(review): o is a range copy; only the copy
					// appended to requests is marked Done — the entry in
					// orders itself is left untouched. Confirm intended.
					o.Done = true
					requests = append(requests, o)
				}
			}

		case message := <-slaveEvents.FromMaster:
			// Any master message counts as a liveness signal.
			masterTimeoutTimer.Reset(masterTimeout)
			data, err := com.DecodeMasterMessage(message.Data)
			if err != nil {
				break
			}
			slaves = data.Slaves
			orders = data.Orders
			isBackup = (data.AssignedBackup == myIP)
			driver.ClearAllButtonLamps()
			for _, o := range orders {
				// Cab calls are private: light them only on the elevator
				// they were taken by.
				if o.Button.Type == driver.ButtonCallCommand && o.TakenBy != myIP {
					continue
				}
				driver.SetButtonLamp(o.Button.Type, o.Button.Floor, 1)
			}
			priority := order.GetPriority(orders, myIP)
			if priority != nil && !order.OrderDone(*priority, requests) {
				elevatorEvents.NewTargetFloor <- priority.Button.Floor
			}
			// Remove acknowledged orders: a request is dropped once the
			// master's order list reflects it (same order, same Done
			// state), or once a completed request is no longer listed.
			for i := 0; i < len(requests); i++ {
				r := requests[i]
				sentToMaster := false
				acknowledged := false
				for _, o := range orders {
					if order.OrdersEqual(r, o) {
						sentToMaster = true
						if r.Done == o.Done {
							acknowledged = true
						}
					}
				}
				if !sentToMaster && r.Done {
					acknowledged = true
				}
				if acknowledged {
					requests = append(requests[:i], requests[i+1:]...)
					i--
				}
			}
		}
	}
}
// Common example TLS connect logic func CommonTLSConnect(exampid, tag string, l *log.Logger, c *tls.Config) (net.Conn, *stompngo.Connection, error) { l.Printf("%stag:%s consess:%s common_tls_connect_starts\n", exampid, tag, Lcs) // Set up the connection. h, p := senv.HostAndPort() hap := net.JoinHostPort(h, p) n, e := net.Dial("tcp", hap) if e != nil { return nil, nil, e } c.ServerName = h // SNI nc := tls.Client(n, c) // Returns: *tls.Conn : implements net.Conn e = nc.Handshake() if e != nil { if e.Error() == "EOF" { l.Printf("%stag:%s consess:%s common_tls_handshake_EOF_Is_the_broker_port_TLS_enabled? port:%s\n", exampid, tag, Lcs, p) } l.Fatalf("%stag:%s consess:%s common_tls_handshake_failed error:%v\n", exampid, tag, Lcs, e.Error()) } l.Printf("%stag:%s consess:%s common_tls_handshake_complete\n", exampid, tag, Lcs) l.Printf("%stag:%s connsess:%s common_tls_connect_host_and_port:%v\n", exampid, tag, Lcs, hap) // Create connect headers and connect to stompngo ch := ConnectHeaders() l.Printf("%stag:%s connsess:%s common_tls_connect_headers headers:%v\n", exampid, tag, Lcs, ch) conn, e := stompngo.Connect(nc, ch) if e != nil { return nil, nil, e } l.Printf("%stag:%s connsess:%s common_tls_connect_complete host:%s vhost:%s protocol:%s server:%s\n", exampid, tag, conn.Session(), h, senv.Vhost(), conn.Protocol(), ServerIdent(conn)) // Show connect response l.Printf("%stag:%s connsess:%s common_tls_connect_response connresp:%v\n", exampid, tag, conn.Session(), conn.ConnectResponse) // Show heartbeat data (if heart beats are being used) if senv.Heartbeats() != "" { l.Printf("%stag:%s connsess:%s common_tls_connect_heart_beat_send hbsend:%v\n", exampid, tag, conn.Session(), conn.SendTickerInterval()) l.Printf("%stag:%s connsess:%s common_tls_connect_heart_beat_recv hbrecv:%v\n", exampid, tag, conn.Session(), conn.ReceiveTickerInterval()) } l.Printf("%stag:%s connsess:%s common_tls_connect_local_addr:%s\n", exampid, tag, conn.Session(), n.LocalAddr().String()) 
l.Printf("%stag:%s connsess:%s common_tls_connect_remote_addr:%s\n", exampid, tag, conn.Session(), n.RemoteAddr().String()) // return nc, conn, nil }