/* Used to specify the N1QL nodes options using the method NewServer as defined in server/server.go. */ func Start(site, pool, namespace string) *server.Server { datastore, err := resolver.NewDatastore(site + pool) if err != nil { logging.Errorp(err.Error()) os.Exit(1) } configstore, err := config_resolver.NewConfigstore("stub:") if err != nil { logging.Errorp("Could not connect to configstore", logging.Pair{"error", err}, ) } acctstore, err := acct_resolver.NewAcctstore("stub:") if err != nil { logging.Errorp("Could not connect to acctstore", logging.Pair{"error", err}, ) } channel := make(server.RequestChannel, 10) plusChannel := make(server.RequestChannel, 10) server, err := server.NewServer(datastore, configstore, acctstore, namespace, false, channel, plusChannel, 4, 4, 0, 0, false, false, false) if err != nil { logging.Errorp(err.Error()) os.Exit(1) } server.SetKeepAlive(1 << 10) go server.Serve() return server }
func minimalIndexes(sargables map[datastore.Index]*indexEntry, pred expression.Expression) ( map[datastore.Index]*indexEntry, error) { for s, se := range sargables { for t, te := range sargables { if t == s { continue } if narrowerOrEquivalent(se, te) { delete(sargables, t) } } } minimals := make(map[datastore.Index]*indexEntry, len(sargables)) for s, se := range sargables { spans, err := SargFor(pred, se.sargKeys, len(se.keys)) if err != nil || len(spans) == 0 { logging.Errorp("Sargable index not sarged", logging.Pair{"pred", pred}, logging.Pair{"sarg_keys", se.sargKeys}, logging.Pair{"error", err}) return nil, errors.NewPlanError(nil, fmt.Sprintf("Sargable index not sarged; pred=%v, sarg_keys=%v, error=%v", pred.String(), se.sargKeys.String(), err)) return nil, err } se.spans = spans minimals[s] = se } return minimals, nil }
func allIndexes(keyspace datastore.Keyspace) ([]datastore.Index, error) { indexers, err := keyspace.Indexers() if err != nil { return nil, err } indexes := make([]datastore.Index, 0, len(indexers)*16) for _, indexer := range indexers { idxes, err := indexer.Indexes() if err != nil { return nil, err } for _, idx := range idxes { state, _, er := idx.State() if er != nil { logging.Errorp("Index selection", logging.Pair{"error", er.Error()}) } if er != nil || state != datastore.ONLINE { continue } indexes = append(indexes, idx) } } return indexes, nil }
func allHints(keyspace datastore.Keyspace, hints algebra.IndexRefs) ([]datastore.Index, error) { indexes := make([]datastore.Index, 0, len(hints)) for _, hint := range hints { indexer, err := keyspace.Indexer(hint.Using()) if err != nil { return nil, err } index, err := indexer.IndexByName(hint.Name()) if err != nil { return nil, err } state, _, er := index.State() if er != nil { logging.Errorp("Index selection", logging.Pair{"error", er.Error()}) } if er != nil || state != datastore.ONLINE { continue } indexes = append(indexes, index) } return indexes, nil }
// scanPrimary drives a full primary-index scan, forwarding each index
// entry downstream as an annotated value carrying the primary key in
// its "meta" attachment. On index-connection timeout it falls back to
// repeated chunked scans, using the item count seen so far as the chunk
// size and the last entry received as the restart key.
func (this *PrimaryScan) scanPrimary(context *Context, parent value.Value) {
	conn := this.newIndexConnection(context)
	defer notifyConn(conn.StopChannel()) // Notify index that I have stopped

	var duration time.Duration
	timer := time.Now()
	// Charge only scan time to this phase: duration accumulates the time
	// spent sending items downstream and is subtracted at exit.
	defer context.AddPhaseTime("scan", time.Since(timer)-duration)

	go this.scanEntries(context, conn)

	var entry, lastEntry *datastore.IndexEntry

	ok := true
	nitems := 0

	for ok {
		// Non-blocking check first so a pending stop wins even when
		// entries are also ready on the entry channel.
		select {
		case <-this.stopChannel:
			return
		default:
		}

		select {
		case entry, ok = <-conn.EntryChannel():
			t := time.Now()
			if ok {
				cv := value.NewScopeValue(make(map[string]interface{}), parent)
				av := value.NewAnnotatedValue(cv)
				av.SetAttachment("meta", map[string]interface{}{"id": entry.PrimaryKey})
				// sendItem returning false ends the loop.
				ok = this.sendItem(av)
				lastEntry = entry
				nitems++
			}
			// Time spent in sendItem is downstream time, not scan time.
			duration += time.Since(t)
		case <-this.stopChannel:
			return
		}
	}

	if conn.Timeout() {
		logging.Errorp("Primary index scan timeout - resorting to chunked scan",
			logging.Pair{"chunkSize", nitems},
			logging.Pair{"startingEntry", lastEntry})
		if lastEntry == nil {
			// no key for chunked scans (primary scan returned 0 items)
			context.Error(errors.NewCbIndexScanTimeoutError(nil))
		}
		// do chunked scans; nitems gives the chunk size, and lastEntry the starting point
		for lastEntry != nil {
			lastEntry = this.scanPrimaryChunk(context, parent, nitems, lastEntry)
		}
	}
}
func makeMockServer() *server.Server { datastore, err := resolver.NewDatastore("http://localhost:8091") if err != nil { logging.Errorp(err.Error()) os.Exit(1) } channel := make(server.RequestChannel, 10) plusChannel := make(server.RequestChannel, 10) server, err := server.NewServer(datastore, nil, nil, "default", false, channel, plusChannel, 4, 4, 0, 0, false, false, false) if err != nil { logging.Errorp(err.Error()) os.Exit(1) } server.SetKeepAlive(1 << 10) go server.Serve() return server }
func (this *Server) SetLogLevel(level string) { lvl, ok := logging.ParseLevel(level) if !ok { logging.Errorp("SetLogLevel: unrecognized level", logging.Pair{"level", level}) return } if this.datastore != nil { this.datastore.SetLogLevel(lvl) } logging.SetLevel(lvl) }
// signalCatcher blocks until a signal is recieved and then takes appropriate action func signalCatcher(server *server.Server, endpoint *http.HttpEndpoint) { sig_chan := make(chan os.Signal, 4) signal.Notify(sig_chan, os.Interrupt, syscall.SIGTERM) var s os.Signal select { case s = <-sig_chan: } if server.CpuProfile() != "" { logging.Infop("Stopping CPU profile") pprof.StopCPUProfile() } if server.MemProfile() != "" { f, err := os.Create(server.MemProfile()) if err != nil { logging.Errorp("Cannot create memory profile file", logging.Pair{"error", err}) } else { logging.Infop("Writing Memory profile") pprof.WriteHeapProfile(f) f.Close() } } if s == os.Interrupt { // Interrupt (ctrl-C) => Immediate (ungraceful) exit logging.Infop("Shutting down immediately") os.Exit(0) } logging.Infop("Attempting graceful exit") // Stop accepting new requests err := endpoint.Close() if err != nil { logging.Errorp("error closing http listener", logging.Pair{"err", err}) } err = endpoint.CloseTLS() if err != nil { logging.Errorp("error closing https listener", logging.Pair{"err", err}) } }
func (this *Server) SetCpuProfile(cpuprofile string) { this.Lock() defer this.Unlock() this.cpuprofile = cpuprofile if this.cpuprofile == "" { return } f, err := os.Create(this.cpuprofile) if err != nil { logging.Errorp("Cannot start cpu profiler", logging.Pair{"error", err}) this.cpuprofile = "" } else { pprof.StartCPUProfile(f) } }
func TestStub(t *testing.T) { logger := NewLogger(os.Stdout, logging.DEBUG, false) logging.SetLogger(logger) logger.Infof("This is a message from %s", "test") logging.Infof("This is a message from %s", "test") logger.Infop("This is a message from ", logging.Pair{"name", "test"}, logging.Pair{"Queue Size", 10}, logging.Pair{"Debug Mode", false}) logging.Infop("This is a message from ", logging.Pair{"name", "test"}) logger.Infom("This is a message from ", logging.Map{"name": "test", "Queue Size": 10, "Debug Mode": false}) logging.Infom("This is a message from ", logging.Map{"name": "test"}) logger.Requestf(logging.WARN, "This is a Request from %s", "test") logging.Requestf(logging.INFO, "This is a Request from %s", "test") logger.Requestp(logging.DEBUG, "This is a Request from ", logging.Pair{"name", "test"}) logging.Requestp(logging.ERROR, "This is a Request from ", logging.Pair{"name", "test"}) logger.SetLevel(logging.WARN) fmt.Printf("Log level is %s\n", logger.Level()) logger.Requestf(logging.WARN, "This is a Request from %s", "test") logging.Requestf(logging.INFO, "This is a Request from %s", "test") logger.Requestp(logging.DEBUG, "This is a Request from ", logging.Pair{"name", "test"}) logging.Requestp(logging.ERROR, "This is a Request from ", logging.Pair{"name", "test"}) logger.Warnf("This is a message from %s", "test") logging.Infof("This is a message from %s", "test") logger.Debugp("This is a message from ", logging.Pair{"name", "test"}) logging.Errorp("This is a message from ", logging.Pair{"name", "test"}) fmt.Printf("Changing to json formatter\n") logger.entryFormatter = &jsonFormatter{} logger.SetLevel(logging.DEBUG) logger.Infof("This is a message from %s", "test") logging.Infof("This is a message from %s", "test") logger.Infop("This is a message from ", logging.Pair{"name", "test"}, logging.Pair{"Queue Size", 10}, logging.Pair{"Debug Mode", false}) logging.Infop("This is a message from ", logging.Pair{"name", "test"}) logger.Infom("This is a message 
from ", logging.Map{"name": "test", "Queue Size": 10, "Debug Mode": false}) logging.Infom("This is a message from ", logging.Map{"name": "test"}) logger.Requestf(logging.WARN, "This is a Request from %s", "test") logging.Requestf(logging.INFO, "This is a Request from %s", "test") logger.Requestp(logging.DEBUG, "This is a Request from ", logging.Pair{"name", "test"}) logging.Requestp(logging.ERROR, "This is a Request from ", logging.Pair{"name", "test"}) }
func pollStdin() { reader := bufio.NewReader(os.Stdin) logging.Infop("pollEOF: About to start stdin polling") for { ch, err := reader.ReadByte() if err == io.EOF { logging.Infop("Received EOF; Exiting...") os.Exit(0) } if err != nil { logging.Errorp("Unexpected error polling stdin", logging.Pair{"error", err}) os.Exit(1) } if ch == '\n' || ch == '\r' { logging.Infop("Received EOL; Exiting...") // TODO: "graceful" shutdown should be placed here os.Exit(0) } } }
func (this *PrimaryScan) newIndexConnection(context *Context) *datastore.IndexConnection { var conn *datastore.IndexConnection // Use keyspace count to create a sized index connection keyspace := this.plan.Keyspace() size, err := keyspace.Count() if err == nil { if size <= 0 { size = 1 } conn, err = datastore.NewSizedIndexConnection(size, context) conn.SetPrimary() } // Use non-sized API and log error if err != nil { conn = datastore.NewIndexConnection(context) conn.SetPrimary() logging.Errorp("PrimaryScan.newIndexConnection ", logging.Pair{"error", err}) } return conn }
// main is the cbq-engine entry point: it wires together logging, the
// datastore, the config and accounting stores, and the query server,
// starts the HTTP(S) service endpoints, and then blocks in
// signalCatcher until shutdown.
func main() {
	HideConsole(true)
	defer HideConsole(false)

	// useful for getting list of go-routines
	go go_http.ListenAndServe("localhost:6060", nil)

	flag.Parse()

	// Optional pluggable logger; an unresolvable logger name is fatal.
	if *LOGGER != "" {
		logger, _ := log_resolver.NewLogger(*LOGGER)
		if logger == nil {
			fmt.Printf("Invalid logger: %s\n", *LOGGER)
			os.Exit(1)
		}

		logging.SetLogger(logger)
	}

	// -debug overrides any explicit -loglevel; default is INFO.
	if *DEBUG {
		logging.SetLevel(logging.DEBUG)
	} else {
		level := logging.INFO
		if *LOG_LEVEL != "" {
			lvl, ok := logging.ParseLevel(*LOG_LEVEL)
			if ok {
				level = lvl
			}
		}
		logging.SetLevel(level)
	}

	// Datastore is mandatory; failure is fatal.
	datastore, err := resolver.NewDatastore(*DATASTORE)
	if err != nil {
		logging.Errorp(err.Error())
		os.Exit(1)
	}
	datastore_package.SetDatastore(datastore)

	// Config/accounting store failures are logged but non-fatal.
	configstore, err := config_resolver.NewConfigstore(*CONFIGSTORE)
	if err != nil {
		logging.Errorp("Could not connect to configstore",
			logging.Pair{"error", err},
		)
	}
	acctstore, err := acct_resolver.NewAcctstore(*ACCTSTORE)
	if err != nil {
		logging.Errorp("Could not connect to acctstore",
			logging.Pair{"error", err},
		)
	} else {
		// Create the metrics we are interested in
		accounting.RegisterMetrics(acctstore)
		// Make metrics available
		acctstore.MetricReporter().Start(1, 1)
	}

	channel := make(server.RequestChannel, *REQUEST_CAP)
	plusChannel := make(server.RequestChannel, *REQUEST_CAP)

	// System datastore layered over the primary datastore.
	sys, err := system.NewDatastore(datastore)
	if err != nil {
		logging.Errorp(err.Error())
		os.Exit(1)
	}

	server, err := server.NewServer(datastore, sys, configstore, acctstore,
		*NAMESPACE, *READONLY, channel, plusChannel, *SERVICERS,
		*PLUS_SERVICERS, *MAX_PARALLELISM, *TIMEOUT, *SIGNATURE,
		*METRICS, *ENTERPRISE)
	if err != nil {
		logging.Errorp(err.Error())
		os.Exit(1)
	}
	datastore_package.SetSystemstore(server.Systemstore())

	// Apply remaining tunables from command-line flags.
	server.SetCpuProfile(*CPU_PROFILE)
	server.SetKeepAlive(*KEEP_ALIVE_LENGTH)
	server.SetMemProfile(*MEM_PROFILE)
	server.SetPipelineCap(*PIPELINE_CAP)
	server.SetPipelineBatch(*PIPELINE_BATCH)
	server.SetRequestSizeCap(*REQUEST_SIZE_CAP)
	server.SetScanCap(*SCAN_CAP)

	// An explicit GOMAXPROCS env var always wins. Otherwise: all CPUs
	// in enterprise mode, at most 4 in community mode.
	if server.Enterprise() && os.Getenv("GOMAXPROCS") == "" {
		runtime.GOMAXPROCS(runtime.NumCPU())
	}
	if !server.Enterprise() {
		var numCPU int
		if os.Getenv("GOMAXPROCS") == "" {
			numCPU = runtime.NumCPU()
		} else {
			numCPU = runtime.GOMAXPROCS(0)
		}

		// Use at most 4 cpus in non-enterprise mode
		runtime.GOMAXPROCS(util.MinInt(numCPU, 4))
	}

	go server.Serve()
	go server.PlusServe()

	logging.Infop("cbq-engine started",
		logging.Pair{"version", util.VERSION},
		logging.Pair{"datastore", *DATASTORE},
		logging.Pair{"max-concurrency", runtime.GOMAXPROCS(0)},
		logging.Pair{"loglevel", logging.LogLevel().String()},
		logging.Pair{"servicers", server.Servicers()},
		logging.Pair{"plus-servicers", server.PlusServicers()},
		logging.Pair{"pipeline-cap", server.PipelineCap()},
		logging.Pair{"pipeline-batch", *PIPELINE_BATCH},
		logging.Pair{"request-cap", *REQUEST_CAP},
		logging.Pair{"request-size-cap", server.RequestSizeCap()},
		logging.Pair{"timeout", server.Timeout()},
	)

	// Create http endpoint
	endpoint := http.NewServiceEndpoint(server, *STATIC_PATH, *METRICS,
		*HTTP_ADDR, *HTTPS_ADDR, *CERT_FILE, *KEY_FILE)
	er := endpoint.Listen()
	if er != nil {
		logging.Errorp("cbq-engine exiting with error",
			logging.Pair{"error", er},
			logging.Pair{"HTTP_ADDR", *HTTP_ADDR},
		)
		os.Exit(1)
	}
	// TLS endpoint is enterprise-only and requires both cert and key.
	if server.Enterprise() && *CERT_FILE != "" && *KEY_FILE != "" {
		er := endpoint.ListenTLS()
		if er != nil {
			logging.Errorp("cbq-engine exiting with error",
				logging.Pair{"error", er},
				logging.Pair{"HTTPS_ADDR", *HTTPS_ADDR},
			)
			os.Exit(1)
		}
	}

	// Block until a shutdown signal arrives.
	signalCatcher(server, endpoint)
}