func (e *EventDefinition) UnmarshalJSON(raw []byte) error { //lets start by unmashaling into a basic map datastructure event := make(map[string]interface{}) err := json.Unmarshal(raw, &event) if err != nil { return err } //lets get a list of our required fields. s := reflect.TypeOf(*e) requiredFields := make(map[string]*requiredField) for i := 0; i < s.NumField(); i++ { field := s.Field(i) name := field.Name // look at the field Tags to work out the property named used in the // JSON document. tag := field.Tag.Get("json") if tag != "" && tag != "-" { name = tag } //all fields except 'Extra' and 'Id' are required. if name != "Extra" && name != "id" { requiredFields[name] = &requiredField{ StructName: field.Name, Seen: false, } } } e.Extra = make(map[string]interface{}) for k, v := range event { def, ok := requiredFields[k] // anything that is not a required field gets // stored in our 'Extra' field. if !ok { e.Extra[k] = v } else { //coerce any float64 values to int64 if reflect.ValueOf(v).Type().Name() == "float64" { v = int64(v.(float64)) } value := reflect.ValueOf(v) if value.IsValid() { reflect.ValueOf(e).Elem().FieldByName(def.StructName).Set(value) } else { logger.Warningf("Yikes, in eventdef %s had the zero value! %v", k, v) } def.Seen = true } } //make sure all required fields were present. for _, v := range requiredFields { if !v.Seen { return fmt.Errorf("Required field '%s' missing", v.StructName) } } return nil }
// LatestConstrained returns the latest version of a cookbook that matches the // given constraint. If no constraint is given, returns the latest version. func (c *Cookbook) LatestConstrained(constraint string) *CookbookVersion { if constraint == "" { return c.LatestVersion() } var constraintVersion string var constraintOp string traints := strings.Split(constraint, " ") if len(traints) == 2 { constraintVersion = traints[1] constraintOp = traints[0] } else { logger.Warningf("Constraint '%s' for cookbook %s (in LatestConstrained) was malformed. Bailing.\n", constraint, c.Name) return nil } for _, cv := range c.sortedVersions() { action := verConstraintCheck(cv.Version, constraintVersion, constraintOp) /* We only want the latest that works. */ if action == "ok" { return cv } } /* if nothing satisfied the constraint, we have to return nil */ return nil }
// infoHashBase builds the map used for cookbook info responses: the cookbook
// URL plus a list of version entries (url + version), optionally limited by a
// result count and a version constraint. numResults is an interface{} so the
// caller can pass either "" (one version), "all" (every version), or a
// numeric string. Returns nil if the constraint is malformed.
func (c *Cookbook) infoHashBase(numResults interface{}, constraint string) map[string]interface{} {
	cbHash := make(map[string]interface{})
	cbHash["url"] = util.ObjURL(c)

	// nr counts how many versions have been emitted so far.
	nr := 0

	/* Working to maintain Chef server behavior here. We need to make "all"
	 * give all versions of the cookbook and make no value give one version,
	 * but keep 0 as invalid input that gives zero results back. This might
	 * be an area worth breaking. */
	var numVersions int
	allVersions := false

	if numResults != "" && numResults != "all" {
		// A numeric string; the Atoi error is deliberately ignored so
		// that bad input yields 0, i.e. zero results (see note above).
		numVersions, _ = strconv.Atoi(numResults.(string))
	} else if numResults == "" {
		numVersions = 1
	} else {
		allVersions = true
	}

	cbHash["versions"] = make([]interface{}, 0)

	// If given, split a constraint like ">= 1.2.3" into operator and
	// version parts.
	var constraintVersion string
	var constraintOp string
	if constraint != "" {
		traints := strings.Split(constraint, " ")
		/* If the constraint isn't well formed like ">= 1.2.3", log the
		 * fact and ignore the constraint. */
		if len(traints) == 2 {
			constraintVersion = traints[1]
			constraintOp = traints[0]
		} else {
			logger.Warningf("Constraint '%s' for cookbook %s was badly formed -- bailing.\n", constraint, c.Name)
			return nil
		}
	}

VerLoop:
	for _, cv := range c.sortedVersions() {
		// Stop once we have as many versions as requested (unless
		// "all" was asked for).
		if !allVersions && nr >= numVersions {
			break
		}
		/* Version constraint checking. */
		if constraint != "" {
			conAction := verConstraintCheck(cv.Version, constraintVersion, constraintOp)
			switch conAction {
			case "skip":
				/* Skip this version, keep going. */
				continue VerLoop
			case "break":
				/* Stop processing entirely. */
				break VerLoop
				/* Default action is, of course, to continue on
				 * like nothing happened. Later, we need to
				 * panic over an invalid constraint. */
			}
		}
		cvInfo := make(map[string]string)
		cvInfo["url"] = util.CustomObjURL(c, cv.Version)
		cvInfo["version"] = cv.Version
		cbHash["versions"] = append(cbHash["versions"].([]interface{}), cvInfo)
		nr++
	}
	return cbHash
}
func (m *MetricDefinition) UnmarshalJSON(raw []byte) error { //lets start by unmashaling into a basic map datastructure metric := make(map[string]interface{}) err := json.Unmarshal(raw, &metric) if err != nil { return err } //lets get a list of our required fields. s := reflect.TypeOf(*m) requiredFields := make(map[string]*requiredField) for i := 0; i < s.NumField(); i++ { field := s.Field(i) if field.PkgPath != "" { continue } name := field.Name // look at the field Tags to work out the property named used in the // JSON document. tag := field.Tag.Get("json") if tag != "" && tag != "-" { name = tag } //all fields except 'Extra', 'Id', "KeepAlives", and "state" // are required. if name != "Extra" && name != "id" && name != "keepAlives" && name != "state" { requiredFields[name] = &requiredField{ StructName: field.Name, Seen: false, } } } m.Extra = make(map[string]interface{}) for k, v := range metric { def, ok := requiredFields[k] // anything that is not a required field gets // stored in our 'Extra' field. if !ok { m.Extra[k] = v } else { switch reflect.ValueOf(m).Elem().FieldByName(def.StructName).Kind() { case reflect.Int: v = int(v.(float64)) case reflect.Int8: v = int8(v.(float64)) case reflect.Int64: v = int64(v.(float64)) case reflect.Struct: y := v.(map[string]interface{}) v = struct { WarnMin interface{} `json:"warnMin"` WarnMax interface{} `json:"warnMax"` CritMin interface{} `json:"critMin"` CritMax interface{} `json:"critMax"` }{ y["warnMin"], y["warnMax"], y["critMix"], y["critMax"], } } value := reflect.ValueOf(v) if value.IsValid() { reflect.ValueOf(m).Elem().FieldByName(def.StructName).Set(value) } else { logger.Warningf("Yikes, in metricdef %s had the zero value! %v", k, v) } def.Seen = true } } //make sure all required fields were present. for _, v := range requiredFields { if !v.Seen && !(v.StructName == "State" || v.StructName == "KeepAlives") { return fmt.Errorf("Required field '%s' missing", v.StructName) } } return nil }
// ParseConfigOptions reads and applies arguments from the command line and the // configuration file, merging them together as needed, with command line options // taking precedence over options in the config file. func ParseConfigOptions() error { var opts = &Options{} _, err := flags.Parse(opts) if err != nil { if err.(*flags.Error).Type == flags.ErrHelp { os.Exit(0) } else { log.Println(err) os.Exit(1) } } if opts.Version { fmt.Printf("goiardi version %s (aiming for compatibility with Chef Server version %s).\n", Version, ChefVersion) os.Exit(0) } /* Load the config file. Command-line options have precedence over * config file options. */ if opts.ConfFile != "" { if _, err := toml.DecodeFile(opts.ConfFile, Config); err != nil { log.Println(err) os.Exit(1) } Config.ConfFile = opts.ConfFile Config.FreezeData = false } if opts.Export != "" && opts.Import != "" { log.Println("Cannot use -x/--export and -m/--import flags together.") os.Exit(1) } if opts.Export != "" { Config.DoExport = true Config.ImpExFile = opts.Export } else if opts.Import != "" { Config.DoImport = true Config.ImpExFile = opts.Import } if opts.Hostname != "" { Config.Hostname = opts.Hostname } else { if Config.Hostname == "" { Config.Hostname, err = os.Hostname() if err != nil { log.Println(err) Config.Hostname = "localhost" } } } if opts.DataStoreFile != "" { Config.DataStoreFile = opts.DataStoreFile } if opts.IndexFile != "" { Config.IndexFile = opts.IndexFile } // Use MySQL? if opts.UseMySQL { Config.UseMySQL = opts.UseMySQL } // Use Postgres? 
if opts.UsePostgreSQL { Config.UsePostgreSQL = opts.UsePostgreSQL } if Config.UseMySQL && Config.UsePostgreSQL { err := fmt.Errorf("The MySQL and Postgres options cannot be used together.") log.Println(err) os.Exit(1) } if Config.DataStoreFile != "" && (Config.UseMySQL || Config.UsePostgreSQL) { err := fmt.Errorf("The MySQL or Postgres and data store options may not be specified together.") log.Println(err) os.Exit(1) } if !((Config.DataStoreFile == "" && Config.IndexFile == "") || ((Config.DataStoreFile != "" || (Config.UseMySQL || Config.UsePostgreSQL)) && Config.IndexFile != "")) { err := fmt.Errorf("-i and -D must either both be specified, or not specified") log.Println(err) os.Exit(1) } if (Config.UseMySQL || Config.UsePostgreSQL) && Config.IndexFile == "" { err := fmt.Errorf("An index file must be specified with -i or --index-file (or the 'index-file' config file option) when running with a MySQL or PostgreSQL backend.") log.Println(err) os.Exit(1) } if Config.IndexFile != "" && (Config.DataStoreFile != "" || (Config.UseMySQL || Config.UsePostgreSQL)) { Config.FreezeData = true } if opts.LogFile != "" { Config.LogFile = opts.LogFile } if opts.SysLog { Config.SysLog = opts.SysLog } if Config.LogFile != "" { lfp, lerr := os.Create(Config.LogFile) if lerr != nil { log.Println(err) os.Exit(1) } log.SetOutput(lfp) } if dlev := len(opts.Verbose); dlev != 0 { Config.DebugLevel = dlev } if Config.LogLevel != "" { if lev, ok := LogLevelNames[strings.ToLower(Config.LogLevel)]; ok && Config.DebugLevel == 0 { Config.DebugLevel = lev } } if Config.DebugLevel > 4 { Config.DebugLevel = 4 } Config.DebugLevel = int(logger.LevelCritical) - Config.DebugLevel logger.SetLevel(logger.LogLevel(Config.DebugLevel)) debugLevel := map[int]string{0: "debug", 1: "info", 2: "warning", 3: "error", 4: "critical"} log.Printf("Logging at %s level", debugLevel[Config.DebugLevel]) if Config.SysLog { sl, err := logger.NewSysLogger("goiardi") if err != nil { log.Println(err.Error()) os.Exit(1) } 
logger.SetLogger(sl) } else { logger.SetLogger(logger.NewGoLogger()) } /* Database options */ // Don't bother setting a default mysql port if mysql isn't used if Config.UseMySQL { if Config.MySQL.Port == "" { Config.MySQL.Port = "3306" } } // set default Postgres options if Config.UsePostgreSQL { if Config.PostgreSQL.Port == "" { Config.PostgreSQL.Port = "5432" } } if opts.LocalFstoreDir != "" { Config.LocalFstoreDir = opts.LocalFstoreDir } if Config.LocalFstoreDir == "" && (Config.UseMySQL || Config.UsePostgreSQL) { logger.Criticalf("local-filestore-dir must be set when running goiardi in SQL mode") os.Exit(1) } if !Config.FreezeData && (opts.FreezeInterval != 0 || Config.FreezeInterval != 0) { logger.Warningf("FYI, setting the freeze data interval's not especially useful without setting the index and data files.") } if opts.FreezeInterval != 0 { Config.FreezeInterval = opts.FreezeInterval } if Config.FreezeInterval == 0 { Config.FreezeInterval = 300 } /* Root directory for certs and the like */ if opts.ConfRoot != "" { Config.ConfRoot = opts.ConfRoot } if Config.ConfRoot == "" { if Config.ConfFile != "" { Config.ConfRoot = path.Dir(Config.ConfFile) } else { Config.ConfRoot = "." 
} } Config.Ipaddress = opts.Ipaddress if opts.Port != 0 { Config.Port = opts.Port } if Config.Port == 0 { Config.Port = 4545 } if opts.UseSSL { Config.UseSSL = opts.UseSSL } if opts.SSLCert != "" { Config.SSLCert = opts.SSLCert } if opts.SSLKey != "" { Config.SSLKey = opts.SSLKey } if opts.HTTPSUrls { Config.HTTPSUrls = opts.HTTPSUrls } // SSL setup if Config.Port == 80 { Config.UseSSL = false } else if Config.Port == 443 { Config.UseSSL = true } if Config.UseSSL { if Config.SSLCert == "" || Config.SSLKey == "" { logger.Criticalf("SSL mode requires specifying both a certificate and a key file.") os.Exit(1) } /* If the SSL cert and key are not absolute files, join them * with the conf root */ if !path.IsAbs(Config.SSLCert) { Config.SSLCert = path.Join(Config.ConfRoot, Config.SSLCert) } if !path.IsAbs(Config.SSLKey) { Config.SSLKey = path.Join(Config.ConfRoot, Config.SSLKey) } } if opts.TimeSlew != "" { Config.TimeSlew = opts.TimeSlew } if Config.TimeSlew != "" { d, derr := time.ParseDuration(Config.TimeSlew) if derr != nil { logger.Criticalf("Error parsing time-slew: %s", derr.Error()) os.Exit(1) } Config.TimeSlewDur = d } else { Config.TimeSlewDur, _ = time.ParseDuration("15m") } if opts.UseAuth { Config.UseAuth = opts.UseAuth } if opts.DisableWebUI { Config.DisableWebUI = opts.DisableWebUI } if opts.LogEvents { Config.LogEvents = opts.LogEvents } if opts.LogEventKeep != 0 { Config.LogEventKeep = opts.LogEventKeep } // Set max sizes for objects and json requests. if opts.ObjMaxSize != 0 { Config.ObjMaxSize = opts.ObjMaxSize } if opts.JSONReqMaxSize != 0 { Config.JSONReqMaxSize = opts.JSONReqMaxSize } if Config.ObjMaxSize == 0 { Config.ObjMaxSize = 10485760 } if Config.JSONReqMaxSize == 0 { Config.JSONReqMaxSize = 1000000 } if opts.UseUnsafeMemStore { Config.UseUnsafeMemStore = opts.UseUnsafeMemStore } return nil }
// ServeHTTP intercepts every request before it reaches the default mux:
// it normalizes the URL path, enforces request-size limits, sets goiardi's
// identification headers, validates webui requests, runs the Chef request
// signature check when auth is enabled, and transparently decompresses
// gzipped request bodies. Ordering here is deliberate — the size checks run
// before any headers are written, and the userID substitution for webui must
// happen before CheckHeader.
func (h *interceptHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	/* knife sometimes sends URL paths that start with //. Redirecting
	 * worked for GETs, but since it was breaking POSTs and screwing with
	 * GETs with query params, we just clean up the path and move on. */

	/* log the URL */
	// TODO: set this to verbosity level 4 or so
	logger.Debugf("Serving %s -- %s\n", r.URL.Path, r.Method)

	if r.Method != "CONNECT" {
		if p := cleanPath(r.URL.Path); p != r.URL.Path {
			r.URL.Path = p
		}
	}

	/* Make configurable, I guess, but Chef wants it to be 1000000 */
	// Non-filestore requests are held to the (smaller) JSON request limit;
	// filestore uploads only to the object size limit.
	if !strings.HasPrefix(r.URL.Path, "/file_store") && r.ContentLength > config.Config.JSONReqMaxSize {
		http.Error(w, "Content-length too long!", http.StatusRequestEntityTooLarge)
		return
	} else if r.ContentLength > config.Config.ObjMaxSize {
		http.Error(w, "Content-length waaaaaay too long!", http.StatusRequestEntityTooLarge)
		return
	}

	// Identify this server and its claimed Chef compatibility level.
	w.Header().Set("X-Goiardi", "yes")
	w.Header().Set("X-Goiardi-Version", config.Version)
	w.Header().Set("X-Chef-Version", config.ChefVersion)
	apiInfo := fmt.Sprintf("flavor=osc;version:%s;goiardi=%s", config.ChefVersion, config.Version)
	w.Header().Set("X-Ops-API-Info", apiInfo)

	userID := r.Header.Get("X-OPS-USERID")
	if rs := r.Header.Get("X-Ops-Request-Source"); rs == "web" {
		/* If use-auth is on and disable-webui is on, and this is a
		 * webui connection, it needs to fail. */
		if config.Config.DisableWebUI {
			w.Header().Set("Content-Type", "application/json")
			logger.Warningf("Attempting to log in through webui, but webui is disabled")
			jsonErrorReport(w, r, "invalid action", http.StatusUnauthorized)
			return
		}

		/* Check that the user in question with the web request exists.
		 * If not, fail. */
		if _, uherr := actor.GetReqUser(userID); uherr != nil {
			w.Header().Set("Content-Type", "application/json")
			logger.Warningf("Attempting to use invalid user %s through X-Ops-Request-Source = web", userID)
			jsonErrorReport(w, r, "invalid action", http.StatusUnauthorized)
			return
		}
		// Valid webui requests are authenticated as the built-in
		// chef-webui user from here on.
		userID = "chef-webui"
	}
	/* Only perform the authorization check if that's configured. Bomb with
	 * an error if the check of the headers, timestamps, etc. fails. */
	/* No clue why /principals doesn't require authorization. Hrmph. */
	if config.Config.UseAuth && !strings.HasPrefix(r.URL.Path, "/file_store") && !(strings.HasPrefix(r.URL.Path, "/principals") && r.Method == "GET") {
		herr := authentication.CheckHeader(userID, r)
		if herr != nil {
			w.Header().Set("Content-Type", "application/json")
			logger.Errorf("Authorization failure: %s\n", herr.Error())
			//http.Error(w, herr.Error(), herr.Status())
			jsonErrorReport(w, r, herr.Error(), herr.Status())
			return
		}
	}

	// Experimental: decompress gzipped requests
	// Downstream handlers read plaintext from r.Body without needing to
	// know the request arrived compressed.
	if r.Header.Get("Content-Encoding") == "gzip" {
		reader, err := gzip.NewReader(r.Body)
		if err != nil {
			w.Header().Set("Content-Type", "application/json")
			logger.Errorf("Failure decompressing gzipped request body: %s\n", err.Error())
			jsonErrorReport(w, r, err.Error(), http.StatusBadRequest)
			return
		}
		r.Body = reader
	}

	// Hand the (possibly rewritten) request off to the real handlers.
	http.DefaultServeMux.ServeHTTP(w, r)
}
// ParseConfigOptions reads and applies arguments from the command line and the // configuration file, merging them together as needed, with command line options // taking precedence over options in the config file. func ParseConfigOptions() error { var opts = &Options{} _, err := flags.Parse(opts) if err != nil { if err.(*flags.Error).Type == flags.ErrHelp { os.Exit(0) } else { log.Println(err) os.Exit(1) } } if opts.Version { fmt.Printf("goiardi version %s (aiming for compatibility with Chef Server version %s).\n", Version, ChefVersion) os.Exit(0) } /* Load the config file. Command-line options have precedence over * config file options. */ if opts.ConfFile != "" { if _, err := toml.DecodeFile(opts.ConfFile, Config); err != nil { log.Println(err) os.Exit(1) } Config.ConfFile = opts.ConfFile Config.FreezeData = false } if opts.Export != "" && opts.Import != "" { log.Println("Cannot use -x/--export and -m/--import flags together.") os.Exit(1) } if opts.Export != "" { Config.DoExport = true Config.ImpExFile = opts.Export } else if opts.Import != "" { Config.DoImport = true Config.ImpExFile = opts.Import } if opts.Hostname != "" { Config.Hostname = opts.Hostname } else { if Config.Hostname == "" { Config.Hostname, err = os.Hostname() if err != nil { log.Println(err) Config.Hostname = "localhost" } } } if opts.DataStoreFile != "" { Config.DataStoreFile = opts.DataStoreFile } if opts.IndexFile != "" { Config.IndexFile = opts.IndexFile } // Use MySQL? if opts.UseMySQL { Config.UseMySQL = opts.UseMySQL } // Use Postgres? if opts.UsePostgreSQL { Config.UsePostgreSQL = opts.UsePostgreSQL } if Config.UseMySQL && Config.UsePostgreSQL { err := fmt.Errorf("The MySQL and Postgres options cannot be used together.") log.Println(err) os.Exit(1) } // Use Postgres search? 
if opts.PgSearch { // make sure postgres is enabled if !Config.UsePostgreSQL { err := fmt.Errorf("--pg-search requires --use-postgresql (which makes sense, really).") log.Println(err) os.Exit(1) } Config.PgSearch = opts.PgSearch } if Config.DataStoreFile != "" && (Config.UseMySQL || Config.UsePostgreSQL) { err := fmt.Errorf("The MySQL or Postgres and data store options may not be specified together.") log.Println(err) os.Exit(1) } if !((Config.DataStoreFile == "" && Config.IndexFile == "") || ((Config.DataStoreFile != "" || (Config.UseMySQL || Config.UsePostgreSQL)) && Config.IndexFile != "")) { err := fmt.Errorf("-i and -D must either both be specified, or not specified") log.Println(err) os.Exit(1) } if (Config.UseMySQL || Config.UsePostgreSQL) && (Config.IndexFile == "" && !Config.PgSearch) { err := fmt.Errorf("An index file must be specified with -i or --index-file (or the 'index-file' config file option) when running with a MySQL or PostgreSQL backend.") log.Println(err) os.Exit(1) } if Config.IndexFile != "" && (Config.DataStoreFile != "" || (Config.UseMySQL || Config.UsePostgreSQL)) { Config.FreezeData = true } if opts.LogFile != "" { Config.LogFile = opts.LogFile } if opts.SysLog { Config.SysLog = opts.SysLog } if Config.LogFile != "" { lfp, lerr := os.Create(Config.LogFile) if lerr != nil { log.Println(err) os.Exit(1) } log.SetOutput(lfp) } if dlev := len(opts.Verbose); dlev != 0 { Config.DebugLevel = dlev } if Config.LogLevel != "" { if lev, ok := LogLevelNames[strings.ToLower(Config.LogLevel)]; ok && Config.DebugLevel == 0 { Config.DebugLevel = lev } } if Config.DebugLevel > 4 { Config.DebugLevel = 4 } Config.DebugLevel = int(logger.LevelCritical) - Config.DebugLevel logger.SetLevel(logger.LogLevel(Config.DebugLevel)) debugLevel := map[int]string{0: "debug", 1: "info", 2: "warning", 3: "error", 4: "critical"} log.Printf("Logging at %s level", debugLevel[Config.DebugLevel]) if Config.SysLog { sl, err := logger.NewSysLogger("goiardi") if err != nil { 
log.Println(err.Error()) os.Exit(1) } logger.SetLogger(sl) } else { logger.SetLogger(logger.NewGoLogger()) } /* Database options */ // Don't bother setting a default mysql port if mysql isn't used if Config.UseMySQL { if Config.MySQL.Port == "" { Config.MySQL.Port = "3306" } } // set default Postgres options if Config.UsePostgreSQL { if Config.PostgreSQL.Port == "" { Config.PostgreSQL.Port = "5432" } } if opts.LocalFstoreDir != "" { Config.LocalFstoreDir = opts.LocalFstoreDir } if Config.LocalFstoreDir == "" && (Config.UseMySQL || Config.UsePostgreSQL) { logger.Criticalf("local-filestore-dir must be set when running goiardi in SQL mode") os.Exit(1) } if Config.LocalFstoreDir != "" { finfo, ferr := os.Stat(Config.LocalFstoreDir) if ferr != nil { logger.Criticalf("Error checking local filestore dir: %s", ferr.Error()) os.Exit(1) } if !finfo.IsDir() { logger.Criticalf("Local filestore dir %s is not a directory", Config.LocalFstoreDir) os.Exit(1) } } if !Config.FreezeData && (opts.FreezeInterval != 0 || Config.FreezeInterval != 0) { logger.Warningf("FYI, setting the freeze data interval's not especially useful without setting the index and data files.") } if opts.FreezeInterval != 0 { Config.FreezeInterval = opts.FreezeInterval } if Config.FreezeInterval == 0 { Config.FreezeInterval = 10 } /* Root directory for certs and the like */ if opts.ConfRoot != "" { Config.ConfRoot = opts.ConfRoot } if Config.ConfRoot == "" { if Config.ConfFile != "" { Config.ConfRoot = path.Dir(Config.ConfFile) } else { Config.ConfRoot = "." 
} } Config.Ipaddress = opts.Ipaddress if Config.Ipaddress != "" { ip := net.ParseIP(Config.Ipaddress) if ip == nil { logger.Criticalf("IP address '%s' is not valid", Config.Ipaddress) os.Exit(1) } } if opts.Port != 0 { Config.Port = opts.Port } if Config.Port == 0 { Config.Port = 4545 } if opts.UseSSL { Config.UseSSL = opts.UseSSL } if opts.SSLCert != "" { Config.SSLCert = opts.SSLCert } if opts.SSLKey != "" { Config.SSLKey = opts.SSLKey } if opts.HTTPSUrls { Config.HTTPSUrls = opts.HTTPSUrls } // SSL setup if Config.Port == 80 { Config.UseSSL = false } else if Config.Port == 443 { Config.UseSSL = true } if Config.UseSSL { if Config.SSLCert == "" || Config.SSLKey == "" { logger.Criticalf("SSL mode requires specifying both a certificate and a key file.") os.Exit(1) } /* If the SSL cert and key are not absolute files, join them * with the conf root */ if !path.IsAbs(Config.SSLCert) { Config.SSLCert = path.Join(Config.ConfRoot, Config.SSLCert) } if !path.IsAbs(Config.SSLKey) { Config.SSLKey = path.Join(Config.ConfRoot, Config.SSLKey) } } if opts.TimeSlew != "" { Config.TimeSlew = opts.TimeSlew } if Config.TimeSlew != "" { d, derr := time.ParseDuration(Config.TimeSlew) if derr != nil { logger.Criticalf("Error parsing time-slew: %s", derr.Error()) os.Exit(1) } Config.TimeSlewDur = d } else { Config.TimeSlewDur, _ = time.ParseDuration("15m") } if opts.UseAuth { Config.UseAuth = opts.UseAuth } if opts.DisableWebUI { Config.DisableWebUI = opts.DisableWebUI } if opts.LogEvents { Config.LogEvents = opts.LogEvents } if opts.LogEventKeep != 0 { Config.LogEventKeep = opts.LogEventKeep } // Set max sizes for objects and json requests. 
if opts.ObjMaxSize != 0 { Config.ObjMaxSize = opts.ObjMaxSize } if opts.JSONReqMaxSize != 0 { Config.JSONReqMaxSize = opts.JSONReqMaxSize } if Config.ObjMaxSize == 0 { Config.ObjMaxSize = 10485760 } if Config.JSONReqMaxSize == 0 { Config.JSONReqMaxSize = 1000000 } if opts.UseUnsafeMemStore { Config.UseUnsafeMemStore = opts.UseUnsafeMemStore } if opts.DbPoolSize != 0 { Config.DbPoolSize = opts.DbPoolSize } if opts.MaxConn != 0 { Config.MaxConn = opts.MaxConn } if !UsingDB() { if Config.DbPoolSize != 0 { logger.Infof("db-pool-size is set to %d, which is not particularly useful if you are not using one of the SQL databases.", Config.DbPoolSize) } if Config.MaxConn != 0 { logger.Infof("max-connections is set to %d, which is not particularly useful if you are not using one of the SQL databases.", Config.MaxConn) } } if opts.UseSerf { Config.UseSerf = opts.UseSerf } if Config.UseSerf { if opts.SerfAddr != "" { Config.SerfAddr = opts.SerfAddr } if Config.SerfAddr == "" { Config.SerfAddr = "127.0.0.1:7373" } } if opts.SerfEventAnnounce { Config.SerfEventAnnounce = opts.SerfEventAnnounce } if Config.SerfEventAnnounce && !Config.UseSerf { logger.Criticalf("--serf-event-announce requires --use-serf") os.Exit(1) } if opts.UseShovey { if !Config.UseSerf { logger.Criticalf("--use-shovey requires --use-serf to be enabled") os.Exit(1) } Config.UseShovey = opts.UseShovey } // shovey signing key stuff if opts.SignPrivKey != "" { Config.SignPrivKey = opts.SignPrivKey } // if using shovey, open the existing, or create if absent, signing // keys. if Config.UseShovey { if Config.SignPrivKey == "" { Config.SignPrivKey = path.Join(Config.ConfRoot, "shovey-sign_rsa") } else if !path.IsAbs(Config.SignPrivKey) { Config.SignPrivKey = path.Join(Config.ConfRoot, Config.SignPrivKey) } privfp, err := os.Open(Config.SignPrivKey) if err != nil { logger.Criticalf("Private key %s for signing shovey requests not found. 
Please create a set of RSA keys for this purpose.", Config.SignPrivKey) os.Exit(1) } privPem, err := ioutil.ReadAll(privfp) if err != nil { logger.Criticalf(err.Error()) os.Exit(1) } privBlock, _ := pem.Decode(privPem) if privBlock == nil { logger.Criticalf("Invalid block size for private key for shovey") os.Exit(1) } privKey, err := x509.ParsePKCS1PrivateKey(privBlock.Bytes) if err != nil { logger.Criticalf(err.Error()) os.Exit(1) } Key.Lock() defer Key.Unlock() Key.PrivKey = privKey } if opts.DotSearch { Config.DotSearch = opts.DotSearch } else if Config.PgSearch { Config.DotSearch = true } if Config.DotSearch { if opts.ConvertSearch { Config.ConvertSearch = opts.ConvertSearch } } if Config.IndexFile != "" && Config.PgSearch { logger.Infof("Specifying an index file for search while using the postgres search isn't useful.") } return nil }