func init() { logger, _ := log_resolver.NewLogger("golog") if logger == nil { fmt.Printf("Unable to create logger") os.Exit(1) } logging.SetLogger(logger) test_server = newTestServer() }
// NewLogger creates a logger for the given URI. Only the "golog"
// implementation is supported; any other URI is reported as an error.
func NewLogger(uri string) (logging.Logger, errors.Error) {
	var logger logging.Logger
	if strings.HasPrefix(uri, "golog") {
		logger = logger_golog.NewLogger(os.Stderr, logging.INFO, false)
		logging.SetLogger(logger)
		return logger, nil
	}
	return nil, errors.NewAdminInvalidURL("Logger", uri)
}
func init() { logger, _ := log_resolver.NewLogger("golog") if logger == nil { fmt.Printf("Unable to create logger") os.Exit(1) } logging.SetLogger(logger) query_server = makeMockServer() test_server = httptest.NewServer(testHandler()) }
func TestStub(t *testing.T) {
	logger := NewLogger(os.Stdout, logging.DEBUG, false)
	logging.SetLogger(logger)

	logger.Infof("This is a message from %s", "test")
	logging.Infof("This is a message from %s", "test")

	logger.Infop("This is a message from ",
		logging.Pair{"name", "test"},
		logging.Pair{"Queue Size", 10},
		logging.Pair{"Debug Mode", false})
	logging.Infop("This is a message from ", logging.Pair{"name", "test"})

	logger.Infom("This is a message from ",
		logging.Map{"name": "test", "Queue Size": 10, "Debug Mode": false})
	logging.Infom("This is a message from ", logging.Map{"name": "test"})

	logger.Requestf(logging.WARN, "This is a Request from %s", "test")
	logging.Requestf(logging.INFO, "This is a Request from %s", "test")
	logger.Requestp(logging.DEBUG, "This is a Request from ", logging.Pair{"name", "test"})
	logging.Requestp(logging.ERROR, "This is a Request from ", logging.Pair{"name", "test"})

	// Raise the log level; lower-level messages below should be filtered out.
	logger.SetLevel(logging.WARN)
	fmt.Printf("Log level is %s\n", logger.Level())

	logger.Requestf(logging.WARN, "This is a Request from %s", "test")
	logging.Requestf(logging.INFO, "This is a Request from %s", "test")
	logger.Requestp(logging.DEBUG, "This is a Request from ", logging.Pair{"name", "test"})
	logging.Requestp(logging.ERROR, "This is a Request from ", logging.Pair{"name", "test"})

	logger.Warnf("This is a message from %s", "test")
	logging.Infof("This is a message from %s", "test")
	logger.Debugp("This is a message from ", logging.Pair{"name", "test"})
	logging.Errorp("This is a message from ", logging.Pair{"name", "test"})

	// Switch to the JSON formatter and repeat the calls at DEBUG level.
	fmt.Printf("Changing to json formatter\n")
	logger.entryFormatter = &jsonFormatter{}
	logger.SetLevel(logging.DEBUG)

	logger.Infof("This is a message from %s", "test")
	logging.Infof("This is a message from %s", "test")

	logger.Infop("This is a message from ",
		logging.Pair{"name", "test"},
		logging.Pair{"Queue Size", 10},
		logging.Pair{"Debug Mode", false})
	logging.Infop("This is a message from ", logging.Pair{"name", "test"})

	logger.Infom("This is a message from ",
		logging.Map{"name": "test", "Queue Size": 10, "Debug Mode": false})
	logging.Infom("This is a message from ", logging.Map{"name": "test"})

	logger.Requestf(logging.WARN, "This is a Request from %s", "test")
	logging.Requestf(logging.INFO, "This is a Request from %s", "test")
	logger.Requestp(logging.DEBUG, "This is a Request from ", logging.Pair{"name", "test"})
	logging.Requestp(logging.ERROR, "This is a Request from ", logging.Pair{"name", "test"})
}
func init() { logger, _ := log_resolver.NewLogger("golog") logging.SetLogger(logger) runtime.GOMAXPROCS(1) }
func TestServer(t *testing.T) {
	logger, _ := log_resolver.NewLogger("golog")
	if logger == nil {
		t.Fatalf("Invalid logger")
	}
	logging.SetLogger(logger)

	site, err := NewDatastore(TEST_URL)
	if err != nil {
		t.Skipf("SKIPPING TEST: %v", err)
	}

	namespaceNames, err := site.NamespaceNames()
	if err != nil {
		t.Fatalf("Failed to get namespace names, error %v", err)
	}
	fmt.Printf("Namespaces in this instance %v", namespaceNames)

	namespace, err := site.NamespaceByName("default")
	if err != nil {
		t.Fatalf("Namespace default not found, error %v", err)
	}

	keyspaceNames, err := namespace.KeyspaceNames()
	if err != nil {
		t.Fatalf("Cannot fetch keyspace names, error %v", err)
	}
	fmt.Printf("Keyspaces in this namespace %v", keyspaceNames)

	// connect to beer-sample
	ks, err := namespace.KeyspaceByName("beer-sample")
	if err != nil {
		t.Fatalf("Cannot connect to beer-sample. Error %v", err)
		return
	}

	indexer, err := ks.Indexer(datastore.VIEW)
	if err != nil {
		fmt.Printf("No indexers found")
		return
	}

	// try to create a primary index
	index, err := indexer.CreatePrimaryIndex("", "#primary", nil)
	if err != nil {
		// keep going. maybe the index already exists
		fmt.Printf("Cannot create a primary index on bucket. Error %v", err)
	} else {
		fmt.Printf("primary index created %v", index)
	}

	pair, errs := ks.Fetch([]string{"357", "aass_brewery"})
	if errs != nil {
		t.Fatalf("Cannot fetch keys, errors %v", errs)
	}
	fmt.Printf("Keys fetched %v", pair)

	insertKey := datastore.Pair{Key: "testBeerKey", Value: value.NewValue("This is a random test key-value")}
	_, err = ks.Insert([]datastore.Pair{insertKey})
	if err != nil {
		t.Fatalf("Cannot insert key %v", insertKey)
	}

	// the delete must report exactly the key we inserted
	deleted, err := ks.Delete([]string{insertKey.Key})
	if err != nil || len(deleted) != 1 || deleted[0] != insertKey.Key {
		t.Fatalf("Failed to delete %v", err)
	}

	pi, err := indexer.PrimaryIndexes()
	if err != nil || len(pi) < 1 {
		fmt.Printf("No primary index found")
		return
	}
	// fmt.Printf(" got primary index %s", pi.name)

	// scan the primary index and print every primary key it returns
	conn := datastore.NewIndexConnection(nil)
	go pi[0].ScanEntries("", math.MaxInt64, datastore.UNBOUNDED, nil, conn)

	var entry *datastore.IndexEntry
	ok := true
	for ok {
		select {
		case entry, ok = <-conn.EntryChannel():
			if ok {
				fmt.Printf("\n primary key %v", entry.PrimaryKey)
			}
		}
	}
}
func init() { logger, _ := log_resolver.NewLogger("golog") logging.SetLogger(logger) }
func init() {
	logger := logger_golog.NewLogger(os.Stderr, logging.INFO, false)
	logging.SetLogger(logger)
}
func main() {
	HideConsole(true)
	defer HideConsole(false)

	// useful for getting list of go-routines
	go go_http.ListenAndServe("localhost:6060", nil)

	flag.Parse()

	if *LOGGER != "" {
		logger, _ := log_resolver.NewLogger(*LOGGER)
		if logger == nil {
			fmt.Printf("Invalid logger: %s\n", *LOGGER)
			os.Exit(1)
		}
		logging.SetLogger(logger)
	}

	if *DEBUG {
		logging.SetLevel(logging.DEBUG)
	} else {
		level := logging.INFO
		if *LOG_LEVEL != "" {
			lvl, ok := logging.ParseLevel(*LOG_LEVEL)
			if ok {
				level = lvl
			}
		}
		logging.SetLevel(level)
	}

	datastore, err := resolver.NewDatastore(*DATASTORE)
	if err != nil {
		logging.Errorp(err.Error())
		os.Exit(1)
	}
	datastore_package.SetDatastore(datastore)

	configstore, err := config_resolver.NewConfigstore(*CONFIGSTORE)
	if err != nil {
		logging.Errorp("Could not connect to configstore",
			logging.Pair{"error", err},
		)
	}

	acctstore, err := acct_resolver.NewAcctstore(*ACCTSTORE)
	if err != nil {
		logging.Errorp("Could not connect to acctstore",
			logging.Pair{"error", err},
		)
	} else {
		// Create the metrics we are interested in
		accounting.RegisterMetrics(acctstore)

		// Make metrics available
		acctstore.MetricReporter().Start(1, 1)
	}

	channel := make(server.RequestChannel, *REQUEST_CAP)
	plusChannel := make(server.RequestChannel, *REQUEST_CAP)

	sys, err := system.NewDatastore(datastore)
	if err != nil {
		logging.Errorp(err.Error())
		os.Exit(1)
	}

	server, err := server.NewServer(datastore, sys, configstore, acctstore, *NAMESPACE,
		*READONLY, channel, plusChannel, *SERVICERS, *PLUS_SERVICERS,
		*MAX_PARALLELISM, *TIMEOUT, *SIGNATURE, *METRICS, *ENTERPRISE)
	if err != nil {
		logging.Errorp(err.Error())
		os.Exit(1)
	}
	datastore_package.SetSystemstore(server.Systemstore())
	server.SetCpuProfile(*CPU_PROFILE)
	server.SetKeepAlive(*KEEP_ALIVE_LENGTH)
	server.SetMemProfile(*MEM_PROFILE)
	server.SetPipelineCap(*PIPELINE_CAP)
	server.SetPipelineBatch(*PIPELINE_BATCH)
	server.SetRequestSizeCap(*REQUEST_SIZE_CAP)
	server.SetScanCap(*SCAN_CAP)

	if server.Enterprise() && os.Getenv("GOMAXPROCS") == "" {
		runtime.GOMAXPROCS(runtime.NumCPU())
	}
	if !server.Enterprise() {
		var numCPU int
		if os.Getenv("GOMAXPROCS") == "" {
			numCPU = runtime.NumCPU()
		} else {
			numCPU = runtime.GOMAXPROCS(0)
		}

		// Use at most 4 cpus in non-enterprise mode
		runtime.GOMAXPROCS(util.MinInt(numCPU, 4))
	}

	go server.Serve()
	go server.PlusServe()

	logging.Infop("cbq-engine started",
		logging.Pair{"version", util.VERSION},
		logging.Pair{"datastore", *DATASTORE},
		logging.Pair{"max-concurrency", runtime.GOMAXPROCS(0)},
		logging.Pair{"loglevel", logging.LogLevel().String()},
		logging.Pair{"servicers", server.Servicers()},
		logging.Pair{"plus-servicers", server.PlusServicers()},
		logging.Pair{"pipeline-cap", server.PipelineCap()},
		logging.Pair{"pipeline-batch", *PIPELINE_BATCH},
		logging.Pair{"request-cap", *REQUEST_CAP},
		logging.Pair{"request-size-cap", server.RequestSizeCap()},
		logging.Pair{"timeout", server.Timeout()},
	)

	// Create http endpoint
	endpoint := http.NewServiceEndpoint(server, *STATIC_PATH, *METRICS,
		*HTTP_ADDR, *HTTPS_ADDR, *CERT_FILE, *KEY_FILE)
	er := endpoint.Listen()
	if er != nil {
		logging.Errorp("cbq-engine exiting with error",
			logging.Pair{"error", er},
			logging.Pair{"HTTP_ADDR", *HTTP_ADDR},
		)
		os.Exit(1)
	}
	if server.Enterprise() && *CERT_FILE != "" && *KEY_FILE != "" {
		er := endpoint.ListenTLS()
		if er != nil {
			logging.Errorp("cbq-engine exiting with error",
				logging.Pair{"error", er},
				logging.Pair{"HTTPS_ADDR", *HTTPS_ADDR},
			)
			os.Exit(1)
		}
	}
	signalCatcher(server, endpoint)
}
// FetchDocs scans the primary index of the given bucket and fetches every
// document, returning the documents keyed by primary key.
func FetchDocs(serverURL string, bucketName string) map[string]interface{} {
	logger, _ := log_resolver.NewLogger("golog")
	if logger == nil {
		log.Fatalf("Invalid logger")
	}
	logging.SetLogger(logger)

	site, err := couchbase.NewDatastore(serverURL)
	if err != nil {
		log.Fatalf("Cannot create datastore %v", err)
	}

	namespace, err := site.NamespaceByName("default")
	if err != nil {
		log.Fatalf("Namespace default not found, error %v", err)
	}

	ks, err := namespace.KeyspaceByName(bucketName)
	if err != nil {
		log.Fatalf("Cannot connect to %s. Error %v", bucketName, err)
	}

	indexer, err := ks.Indexer(datastore.VIEW)
	if err != nil {
		log.Fatalf("No view indexer found %v", err)
	}

	// try to create a primary index
	index, err := indexer.CreatePrimaryIndex("", "#primary", nil)
	if err != nil {
		// keep going. maybe the index already exists; fall back to an existing one
		log.Printf("Cannot create a primary index on bucket. Error %v", err)
		pi, err := indexer.PrimaryIndexes()
		if err != nil || len(pi) < 1 {
			log.Fatalf("No primary index found")
		}
		index = pi[0]
	} else {
		log.Printf("primary index created %v", index)
	}

	// collect every primary key returned by the index scan
	conn := datastore.NewIndexConnection(nil)
	go index.ScanEntries("", math.MaxInt64, datastore.UNBOUNDED, nil, conn)

	var entry *datastore.IndexEntry
	var fetchKeys = make([]string, 0, 1000)
	ok := true
	for ok {
		select {
		case entry, ok = <-conn.EntryChannel():
			if ok {
				fetchKeys = append(fetchKeys, entry.PrimaryKey)
			}
		}
	}

	// fetch all the keys
	pairs, errs := ks.Fetch(fetchKeys)
	if errs != nil {
		log.Fatalf("Failed to fetch keys %v", errs)
	}

	var keyMap = make(map[string]interface{})
	for _, value := range pairs {
		keyMap[value.Key] = value.Value.Actual()
	}
	log.Printf("Got %v docs", len(keyMap))

	return keyMap
}