// Setup initializes profiling and logging based on the CLI flags. // It should be called as early as possible in the program. func Setup(ctx *cli.Context) error { // logging glog.CopyStandardLogTo("INFO") glog.SetToStderr(true) // profiling, tracing runtime.MemProfileRate = ctx.GlobalInt(memprofilerateFlag.Name) Handler.SetBlockProfileRate(ctx.GlobalInt(blockprofilerateFlag.Name)) if traceFile := ctx.GlobalString(traceFlag.Name); traceFile != "" { if err := Handler.StartTrace(traceFile); err != nil { return err } } if cpuFile := ctx.GlobalString(cpuprofileFlag.Name); cpuFile != "" { if err := Handler.StartCPUProfile(cpuFile); err != nil { return err } } // pprof server if ctx.GlobalBool(pprofFlag.Name) { address := fmt.Sprintf("127.0.0.1:%d", ctx.GlobalInt(pprofPortFlag.Name)) go func() { glog.V(logger.Info).Infof("starting pprof server at http://%s/debug/pprof", address) glog.Errorln(http.ListenAndServe(address, nil)) }() } return nil }
func runSuite(test, file string) { var tests []string if test == defaultTest { tests = allTests } else { tests = []string{test} } for _, curTest := range tests { glog.Infoln("runSuite", curTest, file) var err error var files []string if test == defaultTest { // check if we have an explicit directory mapping for the test if _, ok := testDirMapping[curTest]; ok { files, err = getFiles(filepath.Join(file, testDirMapping[curTest])) } else { // otherwise assume test name files, err = getFiles(filepath.Join(file, curTest)) } } else { files, err = getFiles(file) } if err != nil { glog.Fatalln(err) } if len(files) == 0 { glog.Warningln("No files matched path") } for _, curFile := range files { // Skip blank entries if len(curFile) == 0 { continue } r, err := os.Open(curFile) if err != nil { glog.Fatalln(err) } defer r.Close() err = runTestWithReader(curTest, r) if err != nil { if continueOnError { glog.Errorln(err) } else { glog.Fatalln(err) } } } } }
// ServeCodec reads incoming requests from codec, calls the appropriate callback and writes the
// response back using the given codec. It will block until the codec is closed.
//
// This server will:
// 1. allow for asynchronous and parallel request execution
// 2. supports notifications (pub/sub)
// 3. supports request batches
func (s *Server) ServeCodec(codec ServerCodec) {
	// Recover from panics in the serve loop so a single bad connection cannot
	// take down the process; the codec is always closed on exit.
	defer func() {
		if err := recover(); err != nil {
			const size = 64 << 10
			buf := make([]byte, size)
			buf = buf[:runtime.Stack(buf, false)]
			glog.Errorln(string(buf))
		}
		codec.Close()
	}()

	// ctx is cancelled when this function returns, signalling the request
	// handler goroutines spawned below.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Register the codec with the server under the lock; refuse new codecs
	// once the server has been ordered to stop (s.run != 1).
	s.codecsMu.Lock()
	if atomic.LoadInt32(&s.run) != 1 { // server stopped
		s.codecsMu.Unlock()
		return
	}
	s.codecs.Add(codec)
	s.codecsMu.Unlock()

	// Main read loop: runs until the codec errors out (typically EOF) or the
	// server is stopped.
	for atomic.LoadInt32(&s.run) == 1 {
		reqs, batch, err := s.readRequest(codec)
		if err != nil {
			// Parse/read failure: report it to the client and stop serving
			// this connection.
			glog.V(logger.Debug).Infof("%v\n", err)
			codec.Write(codec.CreateErrorResponse(nil, err))
			break
		}

		// The server may have been stopped while we were blocked in
		// readRequest; answer each pending request with a shutdown error.
		if atomic.LoadInt32(&s.run) != 1 {
			err = &shutdownError{}
			if batch {
				resps := make([]interface{}, len(reqs))
				for i, r := range reqs {
					resps[i] = codec.CreateErrorResponse(&r.id, err)
				}
				codec.Write(resps)
			} else {
				codec.Write(codec.CreateErrorResponse(&reqs[0].id, err))
			}
			break
		}

		// Execute requests concurrently; the exec goroutines write their own
		// responses through the codec.
		if batch {
			go s.execBatch(ctx, codec, reqs)
		} else {
			go s.exec(ctx, codec, reqs[0])
		}
	}
}
func runSuite(test, file string) { var tests []string if test == defaultTest { tests = allTests } else { tests = []string{test} } for _, curTest := range tests { glog.Infoln("runSuite", curTest, file) var err error var files []string if test == defaultTest { files, err = getFiles(filepath.Join(file, curTest)) } else { files, err = getFiles(file) } if err != nil { glog.Fatalln(err) } if len(files) == 0 { glog.Warningln("No files matched path") } for _, curFile := range files { // Skip blank entries if len(curFile) == 0 { continue } r, err := os.Open(curFile) if err != nil { glog.Fatalln(err) } defer r.Close() err = runTestWithReader(curTest, r) if err != nil { if continueOnError { glog.Errorln(err) } else { glog.Fatalln(err) } } } } }
// serveRequest will read requests from the codec, call the RPC callback and
// write the response to the given codec.
//
// If singleShot is true it will process a single request, otherwise it will handle
// requests until the codec returns an error when reading a request (in most cases
// an EOF). It executes requests in parallel when singleShot is false.
func (s *Server) serveRequest(codec ServerCodec, singleShot bool, options CodecOption) error {
	// Recover from panics in request handling so one bad connection cannot
	// take down the process; always deregister the codec on exit.
	defer func() {
		if err := recover(); err != nil {
			const size = 64 << 10
			buf := make([]byte, size)
			buf = buf[:runtime.Stack(buf, false)]
			glog.Errorln(string(buf))
		}
		s.codecsMu.Lock()
		s.codecs.Remove(codec)
		s.codecsMu.Unlock()
		return
	}()

	// ctx is cancelled when this function returns, signalling the handler
	// goroutines spawned below.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// if the codec supports notification include a notifier that callbacks can use
	// to send notification to clients. It is tied to the codec/connection. If the
	// connection is closed the notifier will stop and cancels all active subscriptions.
	if options&OptionSubscriptions == OptionSubscriptions {
		ctx = context.WithValue(ctx, notifierKey{}, newBufferedNotifier(codec, notificationBufferSize))
	}

	// Register the codec under the lock; refuse new codecs once the server
	// has been ordered to stop (s.run != 1).
	s.codecsMu.Lock()
	if atomic.LoadInt32(&s.run) != 1 { // server stopped
		s.codecsMu.Unlock()
		return &shutdownError{}
	}
	s.codecs.Add(codec)
	s.codecsMu.Unlock()

	// test if the server is ordered to stop
	for atomic.LoadInt32(&s.run) == 1 {
		reqs, batch, err := s.readRequest(codec)
		if err != nil {
			// Parse/read failure (typically EOF): report and stop serving.
			glog.V(logger.Debug).Infof("%v\n", err)
			codec.Write(codec.CreateErrorResponse(nil, err))
			return nil
		}

		// check if server is ordered to shutdown and return an error
		// telling the client that his request failed.
		if atomic.LoadInt32(&s.run) != 1 {
			err = &shutdownError{}
			if batch {
				resps := make([]interface{}, len(reqs))
				for i, r := range reqs {
					resps[i] = codec.CreateErrorResponse(&r.id, err)
				}
				codec.Write(resps)
			} else {
				codec.Write(codec.CreateErrorResponse(&reqs[0].id, err))
			}
			return nil
		}

		// singleShot: execute synchronously and return after the first
		// request; otherwise fan out each request to its own goroutine.
		if singleShot && batch {
			s.execBatch(ctx, codec, reqs)
			return nil
		} else if singleShot && !batch {
			s.exec(ctx, codec, reqs[0])
			return nil
		} else if !singleShot && batch {
			go s.execBatch(ctx, codec, reqs)
		} else {
			go s.exec(ctx, codec, reqs[0])
		}
	}

	return nil
}
// serveRequest will read requests from the codec, call the RPC callback and
// write the response to the given codec.
// If singleShot is true it will process a single request, otherwise it will handle
// requests until the codec returns an error when reading a request (in most cases
// an EOF). It executes requests in parallel when singleShot is false.
func (s *Server) serveRequest(codec ServerCodec, singleShot bool) error {
	// Recover from panics in request handling so one bad connection cannot
	// take down the process; always deregister the codec on exit.
	defer func() {
		if err := recover(); err != nil {
			const size = 64 << 10
			buf := make([]byte, size)
			buf = buf[:runtime.Stack(buf, false)]
			glog.Errorln(string(buf))
		}
		s.codecsMu.Lock()
		s.codecs.Remove(codec)
		s.codecsMu.Unlock()
		return
	}()

	// ctx is cancelled when this function returns, signalling the handler
	// goroutines spawned below.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Register the codec under the lock; refuse new codecs once the server
	// has been ordered to stop (s.run != 1).
	s.codecsMu.Lock()
	if atomic.LoadInt32(&s.run) != 1 { // server stopped
		s.codecsMu.Unlock()
		return &shutdownError{}
	}
	s.codecs.Add(codec)
	s.codecsMu.Unlock()

	// test if the server is ordered to stop
	for atomic.LoadInt32(&s.run) == 1 {
		reqs, batch, err := s.readRequest(codec)
		if err != nil {
			// Parse/read failure (typically EOF): report and stop serving.
			glog.V(logger.Debug).Infof("%v\n", err)
			codec.Write(codec.CreateErrorResponse(nil, err))
			return nil
		}

		// check if server is ordered to shutdown and return an error
		// telling the client that his request failed.
		if atomic.LoadInt32(&s.run) != 1 {
			err = &shutdownError{}
			if batch {
				resps := make([]interface{}, len(reqs))
				for i, r := range reqs {
					resps[i] = codec.CreateErrorResponse(&r.id, err)
				}
				codec.Write(resps)
			} else {
				codec.Write(codec.CreateErrorResponse(&reqs[0].id, err))
			}
			return nil
		}

		// singleShot: execute synchronously and return after the first
		// request; otherwise fan out each request to its own goroutine.
		if singleShot && batch {
			s.execBatch(ctx, codec, reqs)
			return nil
		} else if singleShot && !batch {
			s.exec(ctx, codec, reqs[0])
			return nil
		} else if !singleShot && batch {
			go s.execBatch(ctx, codec, reqs)
		} else {
			go s.exec(ctx, codec, reqs[0])
		}
	}

	return nil
}