// createSession allocates a ServerSession for the given cookie session,
// persists it server-side, and writes the session cookie to the response.
// On any storage failure the session is removed and the process exits via
// glog.Fatalln, so a non-nil *ServerSession is always returned on success.
func createSession(w http.ResponseWriter, r *http.Request, session *sessions.Session) *ServerSession {
	// Each session needs a unique ID in order to be saved.
	if session.ID == "" {
		session.ID = tokens.NewSessionID()
	}
	// The CSRF token is derived from the session ID so it is bound to
	// exactly this session.
	ss := &ServerSession{
		CSRFToken: tokens.NewCSRFToken(session.ID),
	}
	// Attempt to store the session. Remove the session if it's not stored
	// correctly.
	if err := ss.StoreSession(session.ID); err != nil {
		RemoveSession(session.ID)
		// NOTE(review): Fatalln exits the process; no HTTP response is
		// written to the client in this path.
		glog.Fatalln(err)
	}
	// Similarly, save it in our FS storage and set the user's cookie.
	if err := session.Save(r, w); err != nil {
		RemoveSession(session.ID)
		glog.Fatalln(err)
	}
	return ss
}
func configFrom(file string) *config.Config { // Find the file... if file != "" { if _, err := os.Stat(file); os.IsNotExist(err) { glog.Fatalln("Cannot find specified configuration file", file, ", aborting.") } } else if _, err := os.Stat(os.Getenv("CAYLEY_CFG")); err == nil { file = os.Getenv("CAYLEY_CFG") } else if _, err := os.Stat("/etc/cayley.cfg"); err == nil { file = "/etc/cayley.cfg" } if file == "" { glog.Infoln("Couldn't find a config file in either $CAYLEY_CFG or /etc/cayley.cfg. Going by flag defaults only.") } cfg, err := config.Load(file) if err != nil { glog.Fatalln(err) } if cfg.DatabasePath == "" { cfg.DatabasePath = *databasePath } if cfg.DatabaseType == "" { cfg.DatabaseType = *databaseBackend } return cfg }
func Listen(m *Manager, port string) { addr, err := net.ResolveTCPAddr("tcp", fmt.Sprintf(":%s", port)) if err != nil { glog.Fatalln("HandleIncoming:", err) } l, err := net.ListenTCP("tcp", addr) if err != nil { glog.Fatalln("HandleIncoming:", err) } defer l.Close() for { conn, err := l.AcceptTCP() if err != nil { glog.Errorln("HandleIncoming:", err) continue } host, port, err := net.SplitHostPort(conn.RemoteAddr().String()) if err != nil { glog.Errorln("HandleIncoming:", err) continue } glog.Infoln("Incoming Host: %s Port: %s", host, port) m.AddPeer(host, port, false, conn) } }
// init builds the reverse template-name lookup table and starts the
// inotify-based hot-reload watcher for the on-disk templates.
func init() {
	// Generate the reverse of our enum so we can work backwards and reload
	// a file with the name instead of the enum.
	//
	// The issue arises because we've decided to use an array instead of a map
	// to describe our in-memory templates. While this is more efficient, it
	// causes issues with our hot reloading because the name of the file
	// given to us from inotify is the string representation of the file's
	// name, and we can't match that up with the enum on the fly (or generate
	// code that does that using //go: generate). So, we run an init func that
	// generates a map of the names to the enum so we can work backwards to
	// reload the file.
	for i := 0; i < len(_TmplName_index)-1; i++ {
		// _TmplName_index holds cumulative offsets of each name within the
		// packed _TmplName_name string (stringer-style layout), so each
		// adjacent pair of offsets brackets one template name.
		key := _TmplName_name[_TmplName_index[i]:_TmplName_index[i+1]]
		TmplMap[key] = TmplName(i)
	}
	// Set up our watcher for hot reloads of modified files.
	watcher, err := inotify.NewWatcher()
	if err != nil {
		glog.Fatalln(err)
	}
	err = watcher.Watch(templatePath)
	if err != nil {
		glog.Fatalln(err)
	}
	Tmpls.Watcher = watcher
	Tmpls.Watch()
	cleanup.Register("reload", watcher.Close) // Close watcher.
}
// RegisterTram functionality // enables trams to be attached to specific routes. func (t *Server) RegisterTram(in *RPCMessage, out *RPCMessage) error { glog.Infoln("RegisterTram received: " + in.CsvData) out.PrepReply(in) tempSplit := strings.Split(in.CsvData, ",") routeID, err := strconv.Atoi(tempSplit[len(tempSplit)-1]) if err != nil { glog.Fatalln("Error splitting out tram route from RPCMessage data.") } stops, err := inDatabase(routeID) if err != nil { glog.Fatalln("Route doesn't exist") } var data Tram data.FromString(in.CsvData) err = t.addClient(&data, routeID) if err != nil { out.Status = 1 } else { // pass current and previous stops to client // these represent the starting (depo) location out.CsvData = fmt.Sprintf("%d,%d", stops[0], stops[1]) } return nil }
func main() { log.SetFlags(0) flag.Set("logtostderr", "true") flag.Parse() if *url == "" { fmt.Println("you need to set the parameter post-url") os.Exit(1) } data, err := json.Marshal(machinedata.HostData{ Serial: fetchDMISerial(), NetDevs: fetchNetDevs(), ConnectedNIC: fetchConnectedNIC(), IPMIAddress: fetchIPMIAddress(), }) if err != nil { glog.Fatalln(err) } resp, err := http.Post(*url, "application/json", bytes.NewBuffer(data)) if err != nil { glog.Fatalln(err) } io.Copy(os.Stdout, resp.Body) }
// parse config file. func ParseConfig(cfg string) { if cfg == "" { glog.Fatalln("use -c to specify configuration file") } if !file.IsExist(cfg) { glog.Fatalln("config file:", cfg, "is not existent. maybe you need `mv cfg.example.json cfg.json`") } ConfigFile = cfg configContent, err := file.ToTrimString(cfg) if err != nil { glog.Fatalln("read config file:", cfg, "fail:", err) } var c GlobalConfig err = json.Unmarshal([]byte(configContent), &c) if err != nil { glog.Fatalln("parse config file:", cfg, "fail:", err) } configLock.Lock() defer configLock.Unlock() config = &c glog.Infoln("g:ParseConfig, ok, ", cfg) }
// main wires up the ShadowCaster media server: parse flags, verify the
// on-disk indexes, register the HTTP handlers, and serve forever.
func main() {
	httpAddr := flag.String("http", "127.0.0.1:5000", "address and port to listen on")
	httpDocroot := flag.String("root", "www", "HTTP document root for static web files")
	dataPath := flag.String("data", "/usr/local/var/lib/shadowcaster", "data directory (for indexes and such)")
	flag.Parse()
	// Publish the parsed flags in the package-level Config for handlers.
	Config = config{
		IndexPath:        *dataPath,
		HTTPAddr:         *httpAddr,
		HTTPDocumentRoot: *httpDocroot}
	// Run consistency checks on the indexes.
	glog.Infoln("Running consistency checks on the indexes")
	if err := CheckIndexes(*dataPath); err != nil {
		glog.Fatalln(err)
	}
	glog.Infoln("Consistency checks passed")
	// Set up the HTTP handling.
	http.HandleFunc("/movies/", HandleMovies)
	http.HandleFunc("/movies/setdir", HandleSetMovieDir)
	http.HandleFunc("/movies/status", HandleMovieStatus)
	http.HandleFunc("/tv/", HandleTV)
	http.HandleFunc("/music/", HandleMusic)
	http.HandleFunc("/pictures/", HandlePictures)
	http.HandleFunc("/settings/", HandleSettings)
	// Everything else is served as a static file from the docroot.
	http.Handle("/", http.FileServer(http.Dir(*httpDocroot)))
	glog.Infof("Listening on %v", *httpAddr)
	if err := http.ListenAndServe(*httpAddr, nil); err != nil {
		glog.Fatalln(err)
	}
	// NOTE(review): unreachable — ListenAndServe only returns on error and
	// Fatalln above exits the process.
	glog.Infof("ShadowCaster offline")
}
// setup parses flags, tunes GOMAXPROCS, selects and initializes the
// database backend named by -useDB, and registers the HTTP handlers.
func setup() {
	flag.Parse()
	numCPU := runtime.NumCPU()
	glog.Infoln("NumCPU", numCPU)
	// Only override GOMAXPROCS when the operator has not set it explicitly
	// via the environment.
	if envMaxProcs := os.Getenv("GOMAXPROCS"); envMaxProcs == "" {
		if numCPU > 1 {
			// Consuming N-1 appears to greatly reduce per-request latency in loaded systems.
			runtime.GOMAXPROCS(numCPU - 1)
		}
	}
	glog.Infoln("GOMAXPROCS", runtime.GOMAXPROCS(0))
	var d db.DB
	switch *useDB {
	case "cassandra":
		d = cassandradb.New()
	default:
		glog.Fatalln("Unknown DB:", *useDB)
	}
	if err := d.Init(); err != nil {
		glog.Fatalln("An error occured Initializing the DB: ", err)
	}
	handlers.InitializeAndRegister(d)
}
// MonitorFeeds loads feed definitions from the registry's JSON feeds
// file, overlays any state persisted in the "feeds" collection, and
// starts one monitor goroutine per feed subscribed to its event stream.
// Returns immediately when no feeds file is configured.
func MonitorFeeds(reg *registry.Registry) {
	if reg.Feeds == "" {
		return
	}
	f, err := os.Open(reg.Feeds)
	if err != nil {
		glog.Fatalln("Reading feeds:", err)
	}
	defer f.Close()
	var feeds []Feed
	if err := json.NewDecoder(f).Decode(&feeds); err != nil {
		glog.Fatalln("Decoding feeds:", err)
	}
	db := reg.DB()
	defer db.Session.Close()
	for i := range feeds {
		// Overlay persisted state for this feed, keyed by DocType. A
		// missing document is fine; the values from the file stand.
		if err := db.C("feeds").FindId(feeds[i].DocType).One(&feeds[i]); err != nil && err != mgo.ErrNotFound {
			glog.Fatalln("Finding existing feeds:", err)
		}
		// Resume from LastEventId where the event source supports it.
		feeds[i].stream, err = eventsource.Subscribe(feeds[i].Url, feeds[i].LastEventId)
		if err == nil {
			glog.Infof("Monitoring: %s", &feeds[i])
			go monitor(reg, &feeds[i])
		} else {
			// NOTE(review): one bad feed aborts the whole process here —
			// confirm that is intended rather than skipping the feed.
			glog.Fatalln("Eventsource:", err)
		}
	}
}
func (mgr *pxeManagerT) WriteIgnitionConfig(host hostmgr.Host, wr io.Writer) error { etcdClusterToken := mgr.cluster.Config.DefaultEtcdClusterToken if host.EtcdClusterToken != "" { etcdClusterToken = host.EtcdClusterToken } mergedTemplatesEnv := mgr.config.TemplatesEnv for k, v := range host.Overrides { mergedTemplatesEnv[k] = v } ctx := struct { Host hostmgr.Host EtcdDiscoveryUrl string ClusterNetwork network MayuHost string MayuPort int MayuURL string PostBootURL string NoTLS bool TemplatesEnv map[string]interface{} }{ Host: host, ClusterNetwork: mgr.config.Network, EtcdDiscoveryUrl: fmt.Sprintf("%s/%s", mgr.etcdDiscoveryUrl, etcdClusterToken), MayuHost: mgr.config.Network.BindAddr, MayuPort: mgr.httpPort, MayuURL: mgr.thisHost(), PostBootURL: mgr.thisHost() + "/admin/host/" + host.Serial + "/boot_complete", NoTLS: mgr.noTLS, TemplatesEnv: mergedTemplatesEnv, } ctx.Host.MayuVersion = mgr.version tmpl, err := getTemplate(mgr.ignitionConfig, mgr.templateSnippets) if err != nil { glog.Fatalln(err) return err } var data bytes.Buffer if err = tmpl.Execute(&data, ctx); err != nil { glog.Fatalln(err) return err } ignitionJSON, e := convertTemplatetoJSON(data.Bytes(), false) if e != nil { glog.Fatalln(e) return e } fmt.Fprintln(wr, string(ignitionJSON[:])) return nil }
func init() { glog.SetToStderr(true) cfg, err := configFrom("cayley_appengine.cfg") if err != nil { glog.Fatalln("Error loading config:", err) } handle, err := db.Open(cfg) if err != nil { glog.Fatalln("Error opening database:", err) } http.SetupRoutes(handle, cfg) }
func zoneproxy(v *viper.Viper, dp *dialer.DialerPool) { var wg sync.WaitGroup zones := v.GetStringMap("zones") dp.AddByZones(zones) tcpproxys := v.GetStringMap("tcpproxys") for name, _ := range tcpproxys { address := v.GetString("tcpproxys." + name + ".address") if address == "" { glog.Fatalln("tcpproxys." + name + ".address must be string") } tp := tcpproxy.NewTcpProxy(name, address, dp, v) wg.Add(1) go func() { tp.Run() wg.Done() }() } httpproxys := v.GetStringMap("httpproxys") for name, _ := range httpproxys { address := v.GetString("httpproxys." + name + ".address") if address == "" { glog.Fatalln("httpproxys." + name + ".address must be string") } hp := httpproxy.NewHttpProxy(name, address, dp, v) wg.Add(1) go func() { hp.Run() wg.Done() }() } httpservers := v.GetStringMap("httpservers") for name, _ := range httpservers { address := v.GetString("httpservers." + name + ".address") if address == "" { glog.Fatalln("httpservers." + name + ".address must be string") } hs := httpserver.NewHttpServer(name, address, dp, v) wg.Add(1) go func() { hs.Run() wg.Done() }() } wg.Wait() glog.Flush() }
// workerSentry supervises one worker subprocess for the given engine,
// restarting it whenever it exits, until a signal is delivered on the
// returned channel. Signals received on the channel are forwarded to the
// running worker, and once a signal has been seen the worker is not
// restarted after it exits. The supervising goroutine calls wg.Done()
// when it stops.
func workerSentry(engine cfg.Engine, index int) chan os.Signal {
	workersigchan := make(chan os.Signal, 1) //channel for signal delivery to worker processes
	engineType := engine.Name
	signalForStop := false
	go func() {
		defer func() {
			glog.Infoln("workerSentry out", engineType, index)
			wg.Done()
		}()
		for {
			glog.Infoln("workerSentry start", engineType, index, *workerExe)
			cmd := exec.Command(*workerExe, "-cfg", cfg.ConfigFile, "-engine-cfg", cfg.EngineConfigFile, "-i", fmt.Sprint(index), "-engine", engineType)
			cmd.Stdout = os.Stdout
			cmd.Stderr = os.Stderr
			err := cmd.Start()
			starttime := time.Now()
			if err != nil {
				glog.Fatalln(err)
				return
			}
			glog.Infoln("Waiting for command to finish", engineType, index)
			// c is closed when the worker process exits.
			c := make(chan string)
			go func() {
				cmd.Wait()
				glog.Infoln("Finished wait", engineType, index)
				close(c)
			}()
		outer:
			for {
				select {
				case res := <-c: //wait for container to finish
					glog.Infoln("finished worker execution", res, engineType, index)
					if signalForStop {
						// A stop signal was already forwarded; don't restart.
						return
					} else {
						// Throttle restarts: if the worker died within 30s of
						// starting, sleep before relaunching to avoid a tight
						// crash loop.
						if time.Since(starttime) < 30*time.Second {
							glog.Infoln("finished before sleep", engineType, index)
							glog.Flush()
							time.Sleep(30 * time.Second)
							glog.Infoln("finished sleep", engineType, index)
						}
						break outer
					}
				case sig := <-workersigchan:
					// Forward the signal to the worker and remember that we
					// should stop (not restart) once it exits.
					glog.Infoln("workersigchan signal ", engineType, index, sig)
					signalForStop = true
					cmd.Process.Signal(sig)
				}
			}
		}
	}()
	return workersigchan
}
// SaveSyncMessage stores a message replicated from the sync peer. The
// message's msgid doubles as its file offset: if the file is shorter, the
// gap is zero-padded first; if it is longer, a warning is logged. The
// message is then appended and applied via ExecMessage.
func (storage *Storage) SaveSyncMessage(emsg *EMessage) error {
	storage.mutex.Lock()
	defer storage.mutex.Unlock()
	filesize, err := storage.file.Seek(0, os.SEEK_END)
	if err != nil {
		log.Fatalln(err)
	}
	if emsg.msgid != filesize {
		log.Warningf("file size:%d, msgid:%d is't equal", filesize, emsg.msgid)
		if emsg.msgid < filesize {
			// NOTE(review): this logs "skip" but the message is still
			// written below at the end of the file — confirm intended.
			log.Warning("skip msg:", emsg.msgid)
		} else {
			// Zero-pad forward so the message lands exactly at offset msgid.
			log.Warning("write padding:", emsg.msgid-filesize)
			padding := make([]byte, emsg.msgid-filesize)
			_, err = storage.file.Write(padding)
			if err != nil {
				log.Fatal("file write:", err)
			}
		}
	}
	storage.WriteMessage(storage.file, emsg.msg)
	storage.ExecMessage(emsg.msg, emsg.msgid)
	log.Info("save sync message:", emsg.msgid)
	return nil
}
// Resolve the IP func get_ip(url string, prefer6 bool) net.IP { glog.Infoln("Looking up", url) var ipv4, ipv6 net.IP ips, err := net.LookupHost(url) if err != nil { glog.Fatalln("Error:", err) } for _, ip := range ips { glog.V(1).Infoln("Got IP:", ip) ip := net.ParseIP(ip) if ip.To4() == nil && ipv6 == nil { ipv6 = ip if prefer6 == true { break } } else { ipv4 = ip if prefer6 != true { break } } } if prefer6 { return ipv6 } return ipv4 }
// startServer exposes the director's RPC API over HTTP, listening on all
// interfaces at the port part of d.nodeName. It blocks until the serving
// goroutine has started before returning; a failed listen is fatal inside
// that goroutine.
func (d *Director) startServer() {
	rpc := rpc.NewServer()
	directorApi := &DirectorApi{
		director: d,
	}
	rpc.Register(directorApi)
	// Reuse only the port from nodeName; bind on every interface.
	_, port, err := net.SplitHostPort(d.nodeName)
	if err != nil {
		panic(err)
	}
	d.server = &http.Server{
		Addr:           ":" + port,
		Handler:        rpc,
		ReadTimeout:    10 * time.Second,
		WriteTimeout:   10 * time.Second,
		MaxHeaderBytes: 1 << 20,
	}
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		glog.Infoln("Director listening at", d.nodeName)
		// Partially ensure the director is up before returning
		// NOTE(review): this only guarantees the goroutine was scheduled,
		// not that the listener is actually accepting connections yet.
		wg.Done()
		glog.Fatalln(d.server.ListenAndServe())
	}()
	wg.Wait()
}
func main() { defer glog.Flush() if flag.NFlag() == 0 { flag.PrintDefaults() return } if pv { printVersion() return } if len(listenArgs) == 0 { glog.Fatalln("no listen addr") } var wg sync.WaitGroup for _, args := range listenArgs { wg.Add(1) go func(arg Args) { defer wg.Done() listenAndServe(arg) }(args) } wg.Wait() }
func (this *Server) Run() error { // run consumer managers for _, mgr := range this.managers { mgr.Work() } glog.V(2).Info("[Pusher]Managers get to work!") // run http service if statPort > 0 { if err := this.httpsvr.ListenAndServe(); err != nil { glog.Fatalln("[Pusher]Start admin http server failed.", err) return err } glog.V(2).Info("[Pusher]Start admin http server success.") } // register signal callback c := make(chan os.Signal, 1) signal.Notify(c, syscall.SIGINT, syscall.SIGUSR1, syscall.SIGUSR2, syscall.SIGTERM, syscall.SIGKILL) select { case <-c: glog.V(2).Info("[Pusher]Catch exit signal") for _, mgr := range this.managers { mgr.Close() } glog.V(2).Info("[Pusher]Exit done") } return nil }
// randNodeId returns a fresh 20-byte node identifier drawn from the
// cryptographic random source. Failure to obtain entropy is fatal.
func randNodeId() []byte {
	id := make([]byte, 20)
	if _, err := rand.Read(id); err != nil {
		log.Fatalln("nodeId rand:", err)
	}
	return id
}
// search runs the document's hashes against this posting shard's table
// and fills results with the matches. Panics raised during the search
// (e.g. from the hasher or table layer) are recovered and returned as
// the error.
func (p *Posting) search(doc *document.Document, results *document.SearchMap) (err error) {
	defer func() {
		// Convert a panic into an error return; assumes panics carry an
		// error value (a non-error panic would re-panic on the assertion).
		if r := recover(); r != nil {
			err = r.(error)
		}
	}()
	stats := &Stats{
		doc:    doc,
		start:  time.Now(),
		length: doc.HashLength(p.hashKey),
	}
	l := NewPostingLine()
	*results = make(document.SearchMap)
	// Invoked once per (position, hash) pair produced by the document's
	// hasher.
	searchFunc := func(i int, hash uint64) {
		// Translate the hash into this shard's table range; hashes outside
		// [offset, offset+size) belong to other shards and are ignored.
		pos := hash - p.offset
		if pos >= p.size {
			return
		}
		stats.count++
		if err := p.table.Get(pos, l); err != nil {
			glog.Fatalln(newPostingError("Search Document: Sparsetable Get:", err))
		}
		stats.ops++
		l.FillMap(results, uint32(i))
	}
	doc.ApplyHasher(p.hashKey, searchFunc)
	glog.Infoln("Searched Document: ", stats.String())
	return nil
}
// SaveSyncMessage stores a message replicated from the sync peer at the
// file offset encoded in its msgid (zero-padding any gap), then applies
// its side effects to the offline-message index.
func (storage *Storage) SaveSyncMessage(emsg *EMessage) error {
	storage.mutex.Lock()
	defer storage.mutex.Unlock()
	filesize, err := storage.file.Seek(0, os.SEEK_END)
	if err != nil {
		log.Fatalln(err)
	}
	// msgid doubles as the file offset the message must be stored at.
	if emsg.msgid != filesize {
		log.Warningf("file size:%d, msgid:%d is't equal", filesize, emsg.msgid)
		if emsg.msgid < filesize {
			// NOTE(review): this logs "skip" but the message is still
			// written below at the end of the file — confirm intended.
			log.Warning("skip msg:", emsg.msgid)
		} else {
			// Zero-pad forward so the message lands exactly at offset msgid.
			log.Warning("write padding:", emsg.msgid-filesize)
			padding := make([]byte, emsg.msgid-filesize)
			_, err = storage.file.Write(padding)
			if err != nil {
				log.Fatal("file write:", err)
			}
		}
	}
	storage.WriteMessage(storage.file, emsg.msg)
	// Keep the offline index consistent with the replicated stream: an
	// offline message is registered, an inbound ack removes it.
	if emsg.msg.cmd == MSG_OFFLINE {
		off := emsg.msg.body.(*OfflineMessage)
		storage.AddOffline(off.msgid, off.appid, off.receiver)
		storage.SetLastMessageID(off.appid, off.receiver, emsg.msgid)
	} else if emsg.msg.cmd == MSG_ACK_IN {
		off := emsg.msg.body.(*OfflineMessage)
		storage.RemoveOffline(off.msgid, off.appid, off.receiver)
	}
	log.Info("save sync message:", emsg.msgid)
	return nil
}
// Internal constructor function func NewMasterDetector(zkurls string, options ...detector.Option) (*MasterDetector, error) { zkHosts, zkPath, err := parseZk(zkurls) if err != nil { log.Fatalln("Failed to parse url", err) return nil, err } detector := &MasterDetector{ minDetectorCyclePeriod: defaultMinDetectorCyclePeriod, done: make(chan struct{}), cancel: func() {}, } detector.bootstrapFunc = func(client ZKInterface, _ <-chan struct{}) (ZKInterface, error) { if client == nil { return connect2(zkHosts, zkPath) } return client, nil } // apply options last so that they can override default behavior for _, opt := range options { opt(detector) } log.V(2).Infoln("Created new detector to watch", zkHosts, zkPath) return detector, nil }
func (storage *Storage) WriteHeader(file *os.File) { var m int32 = MAGIC err := binary.Write(file, binary.BigEndian, m) if err != nil { log.Fatalln(err) } var v int32 = VERSION err = binary.Write(file, binary.BigEndian, v) if err != nil { log.Fatalln(err) } pad := make([]byte, HEADER_SIZE-8) n, err := file.Write(pad) if err != nil || n != (HEADER_SIZE-8) { log.Fatalln(err) } }
// parseArgs converts raw listener/forwarder strings into Args values.
// Each string is a URL of the form
//
//	[protocol[+transport]://][user:pass@]host:port[/encmeth[:encpass]]
//
// A missing scheme defaults to tcp; unrecognized protocols/transports
// fall back to "default"/"tcp". Entries that fail to parse are logged
// and skipped.
func parseArgs(ss []string) (args []Args) {
	for _, s := range ss {
		// Default to a plain TCP URL when no scheme was given.
		if !strings.Contains(s, "://") {
			s = "tcp://" + s
		}
		u, err := url.Parse(s)
		if err != nil {
			if glog.V(LWARNING) {
				glog.Warningln(err)
			}
			continue
		}
		arg := Args{
			Addr: u.Host,
			User: u.User,
		}
		// Scheme is either "protocol" or "protocol+transport".
		schemes := strings.Split(u.Scheme, "+")
		if len(schemes) == 1 {
			arg.Protocol = schemes[0]
			arg.Transport = schemes[0]
		}
		if len(schemes) == 2 {
			arg.Protocol = schemes[0]
			arg.Transport = schemes[1]
		}
		switch arg.Protocol {
		case "http", "socks", "socks5", "ss":
		default:
			arg.Protocol = "default"
		}
		switch arg.Transport {
		case "ws", "tls", "tcp":
		default:
			arg.Transport = "tcp"
		}
		// The URL path optionally carries "encmeth[:encpass]".
		mp := strings.Split(strings.Trim(u.Path, "/"), ":")
		if len(mp) == 1 {
			arg.EncMeth = mp[0]
		}
		if len(mp) == 2 {
			arg.EncMeth = mp[0]
			arg.EncPass = mp[1]
		}
		// NOTE(review): the key pair is loaded for every arg regardless of
		// transport, and a missing cert.pem/key.pem may be fatal even for
		// plain TCP — confirm this is intended.
		if arg.Cert, err = tls.LoadX509KeyPair("cert.pem", "key.pem"); err != nil {
			if glog.V(LFATAL) {
				glog.Fatalln(err)
			}
		}
		args = append(args, arg)
	}
	return
}
// maybeCreateHost returns the host with the given serial, creating and
// initializing it (internal IP, profile-derived settings, etcd token) on
// first sight. Each initialization step is committed separately; any
// failure is fatal. Guarded by mgr.mu for concurrent use.
func (mgr *pxeManagerT) maybeCreateHost(serial string) *hostmgr.Host {
	mgr.mu.Lock()
	defer mgr.mu.Unlock()
	host, exists := mgr.cluster.HostWithSerial(serial)
	if !exists {
		var err error
		host, err = mgr.cluster.CreateNewHost(serial)
		if err != nil {
			glog.Fatalln(err)
		}
		// Allocate the next free internal IP for a brand-new host.
		if host.InternalAddr == nil {
			host.InternalAddr = mgr.getNextInternalIP()
			err = host.Commit("updated host InternalAddr")
			if err != nil {
				glog.Fatalln(err)
			}
		}
		// Assign a profile and derive its fleet/CoreOS/etcd settings.
		if host.Profile == "" {
			host.Profile = mgr.getNextProfile()
			if host.Profile == "" {
				host.Profile = defaultProfileName
			}
			host.FleetDisableEngine = mgr.profileDisableEngine(host.Profile)
			host.FleetMetadata = mgr.profileMetadata(host.Profile)
			host.CoreOSVersion = mgr.profileCoreOSVersion(host.Profile)
			host.EtcdClusterToken = mgr.profileEtcdClusterToken(host.Profile)
			err = host.Commit("updated host profile and metadata")
			if err != nil {
				glog.Fatalln(err)
			}
		}
		// Hosts whose profile supplied no token get the cluster default.
		if host.EtcdClusterToken == "" {
			host.EtcdClusterToken = mgr.cluster.Config.DefaultEtcdClusterToken
			err = host.Commit("set default etcd discovery token")
			if err != nil {
				glog.Fatalln(err)
			}
		}
	}
	return host
}
func (storage *Storage) NextMessageID() int64 { storage.mutex.Lock() defer storage.mutex.Unlock() msgid, err := storage.file.Seek(0, os.SEEK_END) if err != nil { log.Fatalln(err) } return msgid }
func (g *runner) pinValue(pin string) reflect.Value { println("lookupPin: " + pin) v := reflect.ValueOf(g.ins[pinPart(pin)]) if !v.IsValid() { glog.Fatalln("pin not defined:", pin) } println(123) return v.Elem() }
func main() { fmt.Println("Compilation complete.") flag.Parse() // Theoretically we should catch most signals. ch := make(chan os.Signal, 1) signal.Notify(ch, os.Interrupt, syscall.SIGTERM) go func() { select { case s := <-ch: if glog.V(2) { glog.Infof("Caught signal %s.", s.String()) } // This is ran when a signal similar to ctrl+c is caught. It allows // us to do nice things like flush logs, close fds, etc. It's not // 100% necessary, but nice and easy to do. cleanup.RunAndQuit(s) } }() r := controllers.Router // Serve our static files, e.g. CSS, JS. dir, err := os.Getwd() if err != nil { glog.Fatalln(err) } dir = filepath.Join(dir, "/static/") r.ServeFiles("/static/*filepath", http.Dir(dir)) handler := useful.NewUsefulHandler(secureOpts.Handler(r)) go func() { if err := http.ListenAndServeTLS(paths.PQDN+":443", "keys/cert.pem", "keys/server.key", handler); err != nil { glog.Fatalln(err) } }() if err := http.ListenAndServe(paths.PQDN+":80", http.HandlerFunc(redir)); err != nil { glog.Fatalln(err) } }
func newSecureToken() []byte { buf := make([]byte, HashSize) n, err := io.ReadFull(rand.Reader, buf) if err != nil { glog.Fatalln(err) } if n != HashSize { glog.Fatalln(ErrGeneratedBadToken) } tok := make([]byte, hex.EncodedLen(len(buf))) if hex.Encode(tok, buf) != 64 { glog.Fatalln(ErrGeneratedBadToken) } return tok }