Example #1
// TestFiltering tests filtering of the logged messages.
func TestFiltering(t *testing.T) {
	assert := audit.NewTestingAssertion(t, true)

	ownLogger := &testLogger{}
	logger.SetLogger(ownLogger)
	logger.SetLevel(logger.LevelDebug)
	logger.SetFilter(func(level logger.LogLevel, info, msg string) bool {
		return level >= logger.LevelWarning && level <= logger.LevelError
	})

	logger.Debugf("Debug.")
	logger.Infof("Info.")
	logger.Warningf("Warning.")
	logger.Errorf("Error.")
	logger.Criticalf("Critical.")
	assert.Length(ownLogger.logs, 3)

	logger.UnsetFilter()

	ownLogger = &testLogger{}
	logger.SetLogger(ownLogger)
	logger.Debugf("Debug.")
	logger.Infof("Info.")
	logger.Warningf("Warning.")
	logger.Errorf("Error.")
	logger.Criticalf("Critical.")
	assert.Length(ownLogger.logs, 5)
}
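The testLogger helper used in this test is not part of the excerpt. A minimal sketch of what it might look like, assuming logger.SetLogger accepts anything implementing the package's Logger interface and that the interface exposes one method per level (the method names and signatures below are assumptions, not the library's actual API):

// testLogger is a hypothetical capture-only logger: it records every
// message so the test can count the entries that pass the filter.
type testLogger struct {
	logs []string
}

func (tl *testLogger) log(info, msg string) { tl.logs = append(tl.logs, info+" "+msg) }

// Assumed method set of logger.Logger; the real interface may differ.
func (tl *testLogger) Debug(info, msg string)    { tl.log(info, msg) }
func (tl *testLogger) Info(info, msg string)     { tl.log(info, msg) }
func (tl *testLogger) Warning(info, msg string)  { tl.log(info, msg) }
func (tl *testLogger) Error(info, msg string)    { tl.log(info, msg) }
func (tl *testLogger) Critical(info, msg string) { tl.log(info, msg) }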
Example #2
func (dc *DNSChecker) Check() (bool, int64, string) {
	m := new(dns.Msg)
	c := new(dns.Client)
	switch dc.Parameters.Rr_type {
	case "A":
		m.SetQuestion(dc.Parameters.Query, dns.TypeA)
	case "MX":
		m.SetQuestion(dc.Parameters.Query, dns.TypeMX)
	default:
		logger.Warningf("DNSChecker: unsupported query type")
		return false, -1, "DNSChecker: unsupported query type"
	}
	res, rtt, err := c.Exchange(m, dc.Parameters.Server+":53")
	if err != nil {
		logger.Warningf("DNSChecker: %v", err)
		return false, -1, err.Error()
	}
	isUp := res.Rcode == dns.RcodeSuccess
	reason := dns.RcodeToString[res.Rcode]
	// Round the round-trip time to the nearest millisecond.
	return isUp, int64(math.Floor((float64(rtt.Nanoseconds()) / 1000000.0) + 0.5)), reason
}
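The return statement converts the measured round-trip time from nanoseconds to milliseconds, rounding to the nearest whole millisecond. For positive durations the expression is equivalent to this small standard-library sketch (assumes Go 1.13+ for Duration.Milliseconds):

// roundTripMs rounds an RTT to whole milliseconds, matching the
// math.Floor(ns/1e6 + 0.5) expression above for positive durations.
func roundTripMs(rtt time.Duration) int64 {
	return rtt.Round(time.Millisecond).Milliseconds()
}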
Example #3
// Configuration returns the configuration payload
// of the passed event or an empty configuration.
func Configuration(event cells.Event) etc.Etc {
	payload, ok := event.Payload().Get(ConfigurationPayload)
	if !ok {
		logger.Warningf("event does not contain configuration payload")
		cfg, _ := etc.ReadString("{etc}")
		return cfg
	}
	cfg, ok := payload.(etc.Etc)
	if !ok {
		logger.Warningf("configuration payload has illegal type")
		cfg, _ := etc.ReadString("{etc}")
		return cfg
	}
	return cfg
}
Example #4
// Configuration returns the configuration payload
// of the passed event or an empty configuration.
func Configuration(event cells.Event) configuration.Configuration {
	payload, ok := event.Payload().Get(ConfigurationPayload)
	if !ok {
		logger.Warningf("event does not contain configuration payload")
		config, _ := configuration.ReadString("{config}")
		return config
	}
	config, ok := payload.(configuration.Configuration)
	if !ok {
		logger.Warningf("configuration payload has illegal type")
		config, _ := configuration.ReadString("{config}")
		return config
	}
	return config
}
Example #5
// checkRecovering checks if the backend can be recovered.
func (c *Crontab) checkRecovering(rs loop.Recoverings) (loop.Recoverings, error) {
	if rs.Frequency(12, time.Minute) {
		logger.Errorf("crontab cannot be recovered: %v", rs.Last().Reason)
		return nil, errors.New(ErrCrontabCannotBeRecovered, errorMessages, rs.Last().Reason)
	}
	logger.Warningf("crontab recovered: %v", rs.Last().Reason)
	return rs.Trim(12), nil
}
Example #6
// checkRecovering checks if the backend can be recovered.
func (b *stdBackend) checkRecovering(rs loop.Recoverings) (loop.Recoverings, error) {
	if rs.Frequency(12, time.Minute) {
		logger.Errorf("standard monitor cannot be recovered: %v", rs.Last().Reason)
		return nil, errors.New(ErrMonitoringCannotBeRecovered, errorMessages, rs.Last().Reason)
	}
	logger.Warningf("standard monitor recovered: %v", rs.Last().Reason)
	return rs.Trim(12), nil
}
Example #7
func (nc *NTPChecker) Check() (bool, int64, string) {
	reqStart := getMs()
	_, err := ntp.Time(nc.Parameters.Server)
	lag := getMs() - reqStart
	if err != nil {
		logger.Warningf("%v", err)
		return false, lag, err.Error()
	}
	return true, lag, ""
}
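getMs is not shown in this excerpt; judging by how the difference of two calls is returned as a lag in milliseconds, a hypothetical implementation could be as simple as the sketch below (an assumption, not the project's actual helper):

// getMs returns the current wall-clock time in milliseconds since the
// Unix epoch, so subtracting two calls yields an elapsed time in ms.
func getMs() int64 {
	return time.Now().UnixNano() / int64(time.Millisecond)
}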
Example #8
// Test logging with the go logger.
func TestGoLogger(t *testing.T) {
	log.SetOutput(os.Stdout)

	logger.SetLevel(logger.LevelDebug)
	logger.SetLogger(logger.NewGoLogger())

	logger.Debugf("Debug.")
	logger.Infof("Info.")
	logger.Warningf("Warning.")
	logger.Errorf("Error.")
	logger.Criticalf("Critical.")
}
Example #9
// Test log level filtering.
func TestLogLevelFiltering(t *testing.T) {
	assert := audit.NewTestingAssertion(t, true)

	ownLogger := &testLogger{}
	logger.SetLogger(ownLogger)
	logger.SetLevel(logger.LevelDebug)
	logger.Debugf("Debug.")
	logger.Infof("Info.")
	logger.Warningf("Warning.")
	logger.Errorf("Error.")
	logger.Criticalf("Critical.")
	assert.Length(ownLogger.logs, 5)

	ownLogger = &testLogger{}
	logger.SetLogger(ownLogger)
	logger.SetLevel(logger.LevelError)
	logger.Debugf("Debug.")
	logger.Infof("Info.")
	logger.Warningf("Warning.")
	logger.Errorf("Error.")
	logger.Criticalf("Critical.")
	assert.Length(ownLogger.logs, 2)
}
Example #10
// Test logging with the syslogger.
func TestSysLogger(t *testing.T) {
	assert := audit.NewTestingAssertion(t, true)

	logger.SetLevel(logger.LevelDebug)

	sl, err := logger.NewSysLogger("GOAS")
	assert.Nil(err)
	logger.SetLogger(sl)

	logger.Debugf("Debug.")
	logger.Infof("Info.")
	logger.Warningf("Warning.")
	logger.Errorf("Error.")
	logger.Criticalf("Critical.")
}
Example #11
// checkRecovering checks if the cell may recover after a panic. It will
// signal an error and let the cell stop working if there have been 12 recoverings
// during the last minute or the behavior's Recover() signals that it cannot
// handle the error.
func (c *cell) checkRecovering(rs loop.Recoverings) (loop.Recoverings, error) {
	logger.Warningf("recovering cell %q after error: %v", c.id, rs.Last().Reason)
	// Check frequency.
	if rs.Frequency(c.recoveringNumber, c.recoveringDuration) {
		err := errors.New(ErrRecoveredTooOften, errorMessages, rs.Last().Reason)
		logger.Errorf("recovering frequency of cell %q too high", c.id)
		return nil, err
	}
	// Try to recover.
	if err := c.behavior.Recover(rs.Last().Reason); err != nil {
		err := errors.Annotate(err, ErrEventRecovering, errorMessages, rs.Last().Reason)
		logger.Errorf("recovering of cell %q failed: %v", c.id, err)
		return nil, err
	}
	logger.Infof("successfully recovered cell %q", c.id)
	return rs.Trim(c.recoveringNumber), nil
}
Example #12
// LatestConstrained returns the latest version of a cookbook that matches the
// given constraint. If no constraint is given, returns the latest version.
func (c *Cookbook) LatestConstrained(constraint string) *CookbookVersion {
	if constraint == "" {
		return c.LatestVersion()
	}
	var constraintVersion string
	var constraintOp string
	traints := strings.Split(constraint, " ")
	if len(traints) == 2 {
		constraintVersion = traints[1]
		constraintOp = traints[0]
	} else {
		logger.Warningf("Constraint '%s' for cookbook %s (in LatestConstrained) was malformed. Bailing.\n", constraint, c.Name)
		return nil
	}
	for _, cv := range c.sortedVersions() {
		action := verConstraintCheck(cv.Version, constraintVersion, constraintOp)
		/* We only want the latest that works. */
		if action == "ok" {
			return cv
		}
	}
	/* if nothing satisfied the constraint, we have to return nil */
	return nil
}
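The constraint is expected as an operator and a version separated by a single space, e.g. ">= 1.2.3". A brief hypothetical call (the cb variable is made up for illustration):

// Pick the newest cookbook version that still satisfies ">= 1.2.3";
// a malformed constraint such as ">=1.2.3" (no space) yields nil.
if cv := cb.LatestConstrained(">= 1.2.3"); cv != nil {
	fmt.Println("latest matching version:", cv.Version)
}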
Example #13
func (HTTPChecker *HTTPChecker) doRequest() error {
	client := &http.Client{
		Timeout: timeout,
	}
	if !HTTPChecker.Parameters.Strict_tls {
		client.Transport = &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		}
	}

	var err error
	var resp *http.Response
	var req *http.Request
	if HTTPChecker.Parameters.URL != "" {
		resp, err = client.Get(HTTPChecker.Parameters.URL)
	} else {
		ip := net.ParseIP(HTTPChecker.Parameters.IP)
		if ip == nil {
			return fmt.Errorf("Cannot parse IP: %s", HTTPChecker.Parameters.IP)
		}
		if !(HTTPChecker.Parameters.Scheme == "http" || HTTPChecker.Parameters.Scheme == "https") {
			return fmt.Errorf("Scheme must be http or https")
		}
		if HTTPChecker.Parameters.Port > 65535 || HTTPChecker.Parameters.Port < 1 {
			return fmt.Errorf("Port must be 1-65535")
		}
		req, err = http.NewRequest("GET", HTTPChecker.Parameters.Scheme+"://"+HTTPChecker.Parameters.IP+":"+
			strconv.Itoa(int(HTTPChecker.Parameters.Port))+HTTPChecker.Parameters.Path, nil)
		if err != nil {
			return err
		}
		req.Host = HTTPChecker.Parameters.ServerName
		resp, err = client.Do(req)
	}

	if resp != nil {
		defer resp.Body.Close()
	}

	if err != nil {
		return err
	}

	if resp.StatusCode != HTTPChecker.Expect.Status_code {
		reason := "Unexpected response code: " + strconv.Itoa(resp.StatusCode) + ". Expected " + strconv.Itoa(HTTPChecker.Expect.Status_code)
		return errors.New(reason)
	}

	if HTTPChecker.Expect.Contain_keyword != "" {
		bodybuf := new(bytes.Buffer)
		_, err := bodybuf.ReadFrom(resp.Body)
		if err != nil {
			logger.Warningf("HTTPChecker: %v", err)
			return errors.New("HTTPChecker: " + err.Error())
		}
		body_s := bodybuf.String()
		if !strings.Contains(body_s, HTTPChecker.Expect.Contain_keyword) {
			logger.Infof("Response does not contain keyword: " + HTTPChecker.Expect.Contain_keyword)
			return errors.New("Response does not contain keyword: " + HTTPChecker.Expect.Contain_keyword)
		}
	}

	return nil
}
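The Parameters and Expect structs are not included in this excerpt. Reconstructed purely from how their fields are used above, they presumably look roughly like the sketch below; the type names and field declarations (especially Port's integer width) are assumptions:

// Hypothetical shape of the checker's configuration, inferred from usage.
type HTTPCheckerParameters struct {
	URL        string // full URL; when set, IP/Scheme/Port/Path are ignored
	IP         string
	Scheme     string // "http" or "https"
	Port       int64  // 1-65535, formatted with strconv.Itoa(int(...)) above
	Path       string
	ServerName string // sent as the Host header of the request
	Strict_tls bool   // when false, TLS certificate errors are ignored
}

type HTTPCheckerExpect struct {
	Status_code     int    // expected HTTP status code
	Contain_keyword string // optional substring the body must contain
}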
Example #14
func (c *Cookbook) infoHashBase(numResults interface{}, constraint string) map[string]interface{} {
	cbHash := make(map[string]interface{})
	cbHash["url"] = util.ObjURL(c)

	nr := 0

	/* Working to maintain Chef server behavior here. We need to make "all"
	 * give all versions of the cookbook and make no value give one version,
	 * but keep 0 as invalid input that gives zero results back. This might
	 * be an area worth breaking. */
	var numVersions int
	allVersions := false

	if numResults != "" && numResults != "all" {
		numVersions, _ = strconv.Atoi(numResults.(string))
	} else if numResults == "" {
		numVersions = 1
	} else {
		allVersions = true
	}

	cbHash["versions"] = make([]interface{}, 0)

	var constraintVersion string
	var constraintOp string
	if constraint != "" {
		traints := strings.Split(constraint, " ")
		/* If the constraint isn't well formed like ">= 1.2.3", log the
		 * fact and ignore the constraint. */
		if len(traints) == 2 {
			constraintVersion = traints[1]
			constraintOp = traints[0]
		} else {
			logger.Warningf("Constraint '%s' for cookbook %s was badly formed -- bailing.\n", constraint, c.Name)
			return nil
		}
	}

VerLoop:
	for _, cv := range c.sortedVersions() {
		if !allVersions && nr >= numVersions {
			break
		}
		/* Version constraint checking. */
		if constraint != "" {
			conAction := verConstraintCheck(cv.Version, constraintVersion, constraintOp)
			switch conAction {
			case "skip":
				/* Skip this version, keep going. */
				continue VerLoop
			case "break":
				/* Stop processing entirely. */
				break VerLoop
				/* Default action is, of course, to continue on
				 * like nothing happened. Later, we need to
				 * panic over an invalid constraint. */
			}
		}
		cvInfo := make(map[string]string)
		cvInfo["url"] = util.CustomObjURL(c, cv.Version)
		cvInfo["version"] = cv.Version
		cbHash["versions"] = append(cbHash["versions"].([]interface{}), cvInfo)
		nr++
	}
	return cbHash
}
Example #15
func (h *interceptHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	/* knife sometimes sends URL paths that start with //. Redirecting
	 * worked for GETs, but since it was breaking POSTs and screwing with
	 * GETs with query params, we just clean up the path and move on. */

	/* log the URL */
	// TODO: set this to verbosity level 4 or so
	logger.Debugf("Serving %s -- %s", r.URL.Path, r.Method)

	if r.Method != "CONNECT" {
		if p := cleanPath(r.URL.Path); p != r.URL.Path {
			r.URL.Path = p
		}
	}

	/* Make configurable, I guess, but Chef wants it to be 1000000 */
	if !strings.HasPrefix(r.URL.Path, "/file_store") && r.ContentLength > config.Config.JSONReqMaxSize {
		logger.Debugf("Content length was too long for %s", r.URL.Path)
		http.Error(w, "Content-length too long!", http.StatusRequestEntityTooLarge)
		// hmm, with 1.5 it gets a broken pipe now if we don't do
		// anything with the body they're trying to send. Try copying it
		// to /dev/null. This seems crazy, but merely closing the body
		// doesn't actually work.
		io.Copy(ioutil.Discard, r.Body)
		r.Body.Close()
		return
	} else if r.ContentLength > config.Config.ObjMaxSize {
		http.Error(w, "Content-length waaaaaay too long!", http.StatusRequestEntityTooLarge)
		return
	}

	w.Header().Set("X-Goiardi", "yes")
	w.Header().Set("X-Goiardi-Version", config.Version)
	w.Header().Set("X-Chef-Version", config.ChefVersion)
	apiInfo := fmt.Sprintf("flavor=osc;version:%s;goiardi=%s", config.ChefVersion, config.Version)
	w.Header().Set("X-Ops-API-Info", apiInfo)

	userID := r.Header.Get("X-OPS-USERID")
	if rs := r.Header.Get("X-Ops-Request-Source"); rs == "web" {
		/* If use-auth is on and disable-webui is on, and this is a
		 * webui connection, it needs to fail. */
		if config.Config.DisableWebUI {
			w.Header().Set("Content-Type", "application/json")
			logger.Warningf("Attempting to log in through webui, but webui is disabled")
			jsonErrorReport(w, r, "invalid action", http.StatusUnauthorized)
			return
		}

		/* Check that the user in question with the web request exists.
		 * If not, fail. */
		if _, uherr := actor.GetReqUser(userID); uherr != nil {
			w.Header().Set("Content-Type", "application/json")
			logger.Warningf("Attempting to use invalid user %s through X-Ops-Request-Source = web", userID)
			jsonErrorReport(w, r, "invalid action", http.StatusUnauthorized)
			return
		}
		userID = "chef-webui"
	}
	/* Only perform the authorization check if that's configured. Bomb with
	 * an error if the check of the headers, timestamps, etc. fails. */
	/* No clue why /principals doesn't require authorization. Hrmph. */
	if config.Config.UseAuth && !strings.HasPrefix(r.URL.Path, "/file_store") && !(strings.HasPrefix(r.URL.Path, "/principals") && r.Method == "GET") {
		herr := authentication.CheckHeader(userID, r)
		if herr != nil {
			w.Header().Set("Content-Type", "application/json")
			logger.Errorf("Authorization failure: %s\n", herr.Error())
			w.Header().Set("Www-Authenticate", `X-Ops-Sign version="1.0" version="1.1" version="1.2"`)
			//http.Error(w, herr.Error(), herr.Status())
			jsonErrorReport(w, r, herr.Error(), herr.Status())
			return
		}
	}

	// Experimental: decompress gzipped requests
	if r.Header.Get("Content-Encoding") == "gzip" {
		reader, err := gzip.NewReader(r.Body)
		if err != nil {
			w.Header().Set("Content-Type", "application/json")
			logger.Errorf("Failure decompressing gzipped request body: %s\n", err.Error())
			jsonErrorReport(w, r, err.Error(), http.StatusBadRequest)
			return
		}
		r.Body = reader
	}

	http.DefaultServeMux.ServeHTTP(w, r)
}
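Because the handler falls through to http.DefaultServeMux once its checks pass, it is effectively a wrapper installed as the server's root handler. A minimal, hypothetical wiring (goiardi's real startup code is not shown here, and interceptHandler may carry fields that need initializing):

// Handlers registered on http.DefaultServeMux still run; the intercept
// handler only performs its checks first and then delegates to the mux.
srv := &http.Server{
	Addr:    ":4545",
	Handler: &interceptHandler{},
}
log.Fatal(srv.ListenAndServe())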
Example #16
// ParseConfigOptions reads and applies arguments from the command line and the
// configuration file, merging them together as needed, with command line options
// taking precedence over options in the config file.
func ParseConfigOptions() error {
	var opts = &Options{}
	_, err := flags.Parse(opts)

	if err != nil {
		if err.(*flags.Error).Type == flags.ErrHelp {
			os.Exit(0)
		} else {
			log.Println(err)
			os.Exit(1)
		}
	}

	if opts.Version {
		fmt.Printf("goiardi version %s built with %s (aiming for compatibility with Chef Server version %s).\n", Version, runtime.Version(), ChefVersion)
		os.Exit(0)
	}

	/* Load the config file. Command-line options have precedence over
	 * config file options. */
	if opts.ConfFile != "" {
		if _, err := toml.DecodeFile(opts.ConfFile, Config); err != nil {
			log.Println(err)
			os.Exit(1)
		}
		Config.ConfFile = opts.ConfFile
		Config.FreezeData = false
	}

	if opts.Export != "" && opts.Import != "" {
		log.Println("Cannot use -x/--export and -m/--import flags together.")
		os.Exit(1)
	}

	if opts.Export != "" {
		Config.DoExport = true
		Config.ImpExFile = opts.Export
	} else if opts.Import != "" {
		Config.DoImport = true
		Config.ImpExFile = opts.Import
	}

	if opts.Hostname != "" {
		Config.Hostname = opts.Hostname
	} else {
		if Config.Hostname == "" {
			Config.Hostname, err = os.Hostname()
			if err != nil {
				log.Println(err)
				Config.Hostname = "localhost"
			}
		}
	}

	if opts.DataStoreFile != "" {
		Config.DataStoreFile = opts.DataStoreFile
	}

	if opts.IndexFile != "" {
		Config.IndexFile = opts.IndexFile
	}

	// Use MySQL?
	if opts.UseMySQL {
		Config.UseMySQL = opts.UseMySQL
	}

	// Use Postgres?
	if opts.UsePostgreSQL {
		Config.UsePostgreSQL = opts.UsePostgreSQL
	}

	if Config.UseMySQL && Config.UsePostgreSQL {
		err := fmt.Errorf("The MySQL and Postgres options cannot be used together.")
		log.Println(err)
		os.Exit(1)
	}

	// Use Postgres search?
	if opts.PgSearch {
		// make sure postgres is enabled
		if !Config.UsePostgreSQL {
			err := fmt.Errorf("--pg-search requires --use-postgresql (which makes sense, really).")
			log.Println(err)
			os.Exit(1)
		}
		Config.PgSearch = opts.PgSearch
	}

	if Config.DataStoreFile != "" && (Config.UseMySQL || Config.UsePostgreSQL) {
		err := fmt.Errorf("The MySQL or Postgres and data store options may not be specified together.")
		log.Println(err)
		os.Exit(1)
	}

	if !((Config.DataStoreFile == "" && Config.IndexFile == "") || ((Config.DataStoreFile != "" || (Config.UseMySQL || Config.UsePostgreSQL)) && Config.IndexFile != "")) {
		err := fmt.Errorf("-i and -D must either both be specified, or not specified")
		log.Println(err)
		os.Exit(1)
	}

	if (Config.UseMySQL || Config.UsePostgreSQL) && (Config.IndexFile == "" && !Config.PgSearch) {
		err := fmt.Errorf("An index file must be specified with -i or --index-file (or the 'index-file' config file option) when running with a MySQL or PostgreSQL backend.")
		log.Println(err)
		os.Exit(1)
	}

	if Config.IndexFile != "" && (Config.DataStoreFile != "" || (Config.UseMySQL || Config.UsePostgreSQL)) {
		Config.FreezeData = true
	}

	if opts.LogFile != "" {
		Config.LogFile = opts.LogFile
	}
	if opts.SysLog {
		Config.SysLog = opts.SysLog
	}
	if Config.LogFile != "" {
		lfp, lerr := os.Create(Config.LogFile)
		if lerr != nil {
			log.Println(lerr)
			os.Exit(1)
		}
		log.SetOutput(lfp)
	}
	if dlev := len(opts.Verbose); dlev != 0 {
		Config.DebugLevel = dlev
	}
	if Config.LogLevel != "" {
		if lev, ok := LogLevelNames[strings.ToLower(Config.LogLevel)]; ok && Config.DebugLevel == 0 {
			Config.DebugLevel = lev
		}
	}
	if Config.DebugLevel > 4 {
		Config.DebugLevel = 4
	}

	Config.DebugLevel = int(logger.LevelCritical) - Config.DebugLevel
	logger.SetLevel(logger.LogLevel(Config.DebugLevel))
	debugLevel := map[int]string{0: "debug", 1: "info", 2: "warning", 3: "error", 4: "critical"}
	log.Printf("Logging at %s level", debugLevel[Config.DebugLevel])
	if Config.SysLog {
		sl, err := logger.NewSysLogger("goiardi")
		if err != nil {
			log.Println(err.Error())
			os.Exit(1)
		}
		logger.SetLogger(sl)
	} else {
		logger.SetLogger(logger.NewGoLogger())
	}

	/* Database options */

	// Don't bother setting a default mysql port if mysql isn't used
	if Config.UseMySQL {
		if Config.MySQL.Port == "" {
			Config.MySQL.Port = "3306"
		}
	}

	// set default Postgres options
	if Config.UsePostgreSQL {
		if Config.PostgreSQL.Port == "" {
			Config.PostgreSQL.Port = "5432"
		}
	}

	if opts.LocalFstoreDir != "" {
		Config.LocalFstoreDir = opts.LocalFstoreDir
	}
	if Config.LocalFstoreDir == "" && (Config.UseMySQL || Config.UsePostgreSQL) {
		logger.Criticalf("local-filestore-dir must be set when running goiardi in SQL mode")
		os.Exit(1)
	}
	if Config.LocalFstoreDir != "" {
		finfo, ferr := os.Stat(Config.LocalFstoreDir)
		if ferr != nil {
			logger.Criticalf("Error checking local filestore dir: %s", ferr.Error())
			os.Exit(1)
		}
		if !finfo.IsDir() {
			logger.Criticalf("Local filestore dir %s is not a directory", Config.LocalFstoreDir)
			os.Exit(1)
		}
	}

	if !Config.FreezeData && (opts.FreezeInterval != 0 || Config.FreezeInterval != 0) {
		logger.Warningf("FYI, setting the freeze data interval's not especially useful without setting the index and data files.")
	}
	if opts.FreezeInterval != 0 {
		Config.FreezeInterval = opts.FreezeInterval
	}
	if Config.FreezeInterval == 0 {
		Config.FreezeInterval = 10
	}

	/* Root directory for certs and the like */
	if opts.ConfRoot != "" {
		Config.ConfRoot = opts.ConfRoot
	}

	if Config.ConfRoot == "" {
		if Config.ConfFile != "" {
			Config.ConfRoot = path.Dir(Config.ConfFile)
		} else {
			Config.ConfRoot = "."
		}
	}

	if opts.Ipaddress != "" {
		Config.Ipaddress = opts.Ipaddress
	}
	if Config.Ipaddress != "" {
		ip := net.ParseIP(Config.Ipaddress)
		if ip == nil {
			logger.Criticalf("IP address '%s' is not valid", Config.Ipaddress)
			os.Exit(1)
		}
	}

	if opts.Port != 0 {
		Config.Port = opts.Port
	}
	if Config.Port == 0 {
		Config.Port = 4545
	}

	if opts.UseSSL {
		Config.UseSSL = opts.UseSSL
	}
	if opts.SSLCert != "" {
		Config.SSLCert = opts.SSLCert
	}
	if opts.SSLKey != "" {
		Config.SSLKey = opts.SSLKey
	}
	if opts.HTTPSUrls {
		Config.HTTPSUrls = opts.HTTPSUrls
	}
	// SSL setup
	if Config.Port == 80 {
		Config.UseSSL = false
	} else if Config.Port == 443 {
		Config.UseSSL = true
	}
	if Config.UseSSL {
		if Config.SSLCert == "" || Config.SSLKey == "" {
			logger.Criticalf("SSL mode requires specifying both a certificate and a key file.")
			os.Exit(1)
		}
		/* If the SSL cert and key are not absolute files, join them
		 * with the conf root */
		if !path.IsAbs(Config.SSLCert) {
			Config.SSLCert = path.Join(Config.ConfRoot, Config.SSLCert)
		}
		if !path.IsAbs(Config.SSLKey) {
			Config.SSLKey = path.Join(Config.ConfRoot, Config.SSLKey)
		}
	}

	if opts.TimeSlew != "" {
		Config.TimeSlew = opts.TimeSlew
	}
	if Config.TimeSlew != "" {
		d, derr := time.ParseDuration(Config.TimeSlew)
		if derr != nil {
			logger.Criticalf("Error parsing time-slew: %s", derr.Error())
			os.Exit(1)
		}
		Config.TimeSlewDur = d
	} else {
		Config.TimeSlewDur, _ = time.ParseDuration(DefaultTimeSlew)
	}

	if opts.UseAuth {
		Config.UseAuth = opts.UseAuth
	}

	if opts.DisableWebUI {
		Config.DisableWebUI = opts.DisableWebUI
	}

	if opts.LogEvents {
		Config.LogEvents = opts.LogEvents
	}

	if opts.LogEventKeep != 0 {
		Config.LogEventKeep = opts.LogEventKeep
	}

	// Set max sizes for objects and json requests.
	if opts.ObjMaxSize != 0 {
		Config.ObjMaxSize = opts.ObjMaxSize
	}
	if opts.JSONReqMaxSize != 0 {
		Config.JSONReqMaxSize = opts.JSONReqMaxSize
	}
	if Config.ObjMaxSize == 0 {
		Config.ObjMaxSize = 10485760
	}
	if Config.JSONReqMaxSize == 0 {
		Config.JSONReqMaxSize = 1000000
	}

	if opts.UseUnsafeMemStore {
		Config.UseUnsafeMemStore = opts.UseUnsafeMemStore
	}

	if opts.DbPoolSize != 0 {
		Config.DbPoolSize = opts.DbPoolSize
	}
	if opts.MaxConn != 0 {
		Config.MaxConn = opts.MaxConn
	}
	if !UsingDB() {
		if Config.DbPoolSize != 0 {
			logger.Infof("db-pool-size is set to %d, which is not particularly useful if you are not using one of the SQL databases.", Config.DbPoolSize)
		}
		if Config.MaxConn != 0 {
			logger.Infof("max-connections is set to %d, which is not particularly useful if you are not using one of the SQL databases.", Config.MaxConn)
		}
	}
	if opts.UseSerf {
		Config.UseSerf = opts.UseSerf
	}
	if Config.UseSerf {
		if opts.SerfAddr != "" {
			Config.SerfAddr = opts.SerfAddr
		}
		if Config.SerfAddr == "" {
			Config.SerfAddr = "127.0.0.1:7373"
		}
	}
	if opts.SerfEventAnnounce {
		Config.SerfEventAnnounce = opts.SerfEventAnnounce
	}
	if Config.SerfEventAnnounce && !Config.UseSerf {
		logger.Criticalf("--serf-event-announce requires --use-serf")
		os.Exit(1)
	}

	if opts.UseShovey {
		if !Config.UseSerf {
			logger.Criticalf("--use-shovey requires --use-serf to be enabled")
			os.Exit(1)
		}
		Config.UseShovey = opts.UseShovey
	}

	// shovey signing key stuff
	if opts.SignPrivKey != "" {
		Config.SignPrivKey = opts.SignPrivKey
	}

	// if using shovey, open the existing, or create if absent, signing
	// keys.
	if Config.UseShovey {
		if Config.SignPrivKey == "" {
			Config.SignPrivKey = path.Join(Config.ConfRoot, "shovey-sign_rsa")
		} else if !path.IsAbs(Config.SignPrivKey) {
			Config.SignPrivKey = path.Join(Config.ConfRoot, Config.SignPrivKey)
		}
		privfp, err := os.Open(Config.SignPrivKey)
		if err != nil {
			logger.Criticalf("Private key %s for signing shovey requests not found. Please create a set of RSA keys for this purpose.", Config.SignPrivKey)
			os.Exit(1)
		}
		privPem, err := ioutil.ReadAll(privfp)
		if err != nil {
			logger.Criticalf("%s", err.Error())
			os.Exit(1)
		}
		privBlock, _ := pem.Decode(privPem)
		if privBlock == nil {
			logger.Criticalf("Invalid block size for private key for shovey")
			os.Exit(1)
		}
		privKey, err := x509.ParsePKCS1PrivateKey(privBlock.Bytes)
		if err != nil {
			logger.Criticalf("%s", err.Error())
			os.Exit(1)
		}
		Key.Lock()
		defer Key.Unlock()
		Key.PrivKey = privKey
	}

	if opts.DotSearch {
		Config.DotSearch = opts.DotSearch
	} else if Config.PgSearch {
		Config.DotSearch = true
	}
	if Config.DotSearch {
		if opts.ConvertSearch {
			Config.ConvertSearch = opts.ConvertSearch
		}
	}
	if Config.IndexFile != "" && Config.PgSearch {
		logger.Infof("Specifying an index file for search while using the postgres search isn't useful.")
	}

	return nil
}
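A hypothetical call site, assuming the function lives in goiardi's config package as the config.* references in the previous example suggest:

// Parse flags and the optional config file before doing anything else;
// most fatal misconfigurations call os.Exit directly inside the function.
if err := config.ParseConfigOptions(); err != nil {
	log.Fatalln(err)
}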