Example #1
func handleSignals() {
	c := make(chan os.Signal, 1)
	// SIGTERM is not portable, but Go provides a synthetic version of it
	// on Windows, so listening for it here should not break anything when
	// running on Windows.
	signal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGHUP)

	// On SIGINT or SIGTERM, clean up and exit; on SIGHUP, reload the
	// configuration.
	go func() {
		for sig := range c {
			if sig == os.Interrupt || sig == syscall.SIGTERM {
				logger.Infof("cleaning up...")
				if config.Config.FreezeData {
					if config.Config.DataStoreFile != "" {
						ds := datastore.New()
						if err := ds.Save(config.Config.DataStoreFile); err != nil {
							logger.Errorf(err.Error())
						}
					}
					if err := indexer.SaveIndex(config.Config.IndexFile); err != nil {
						logger.Errorf(err.Error())
					}
				}
				if config.UsingDB() {
					datastore.Dbh.Close()
				}
				os.Exit(0)
			} else if sig == syscall.SIGHUP {
				logger.Infof("Reloading configuration...")
				if err := config.ParseConfigOptions(); err != nil {
					logger.Errorf(err.Error())
				}
			}
		}
	}()
}
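The details above are goiardi-specific, but the skeleton is general. Here is a minimal, self-contained sketch of the same signal-handling pattern (no goiardi internals; the printed messages are illustrative):

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGHUP)
	for sig := range c {
		if sig == syscall.SIGHUP {
			// reload configuration here
			fmt.Println("reload requested")
			continue
		}
		// SIGINT or SIGTERM: clean up and exit
		fmt.Println("shutting down")
		os.Exit(0)
	}
}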
Example #2
func (i *FileIndex) Save() error {
	idxFile := i.file
	if idxFile == "" {
		err := fmt.Errorf("Yikes! Cannot save index to disk because no file was specified.")
		return err
	}
	if !i.updated {
		return nil
	}
	logger.Infof("Index has changed, saving to disk")
	fp, err := ioutil.TempFile(path.Dir(idxFile), "idx-build")
	if err != nil {
		return err
	}
	zfp := zlib.NewWriter(fp)
	// Take the write lock: i.updated is mutated below, so a read lock
	// would be a data race.
	i.m.Lock()
	defer i.m.Unlock()
	i.updated = false
	enc := gob.NewEncoder(zfp)
	err = enc.Encode(i)
	zfp.Close()
	if err != nil {
		fp.Close()
		return err
	}
	err = fp.Close()
	if err != nil {
		return err
	}
	return os.Rename(fp.Name(), idxFile)
}
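Save never overwrites the index in place: it writes to a temp file in the same directory and renames it over the old index, so a crash mid-write cannot corrupt the saved copy. The idiom, reduced to a sketch (atomicSave is illustrative; imports of compress/zlib, io/ioutil, os, and path are assumed):

// atomicSave compresses data to a temp file next to dst, then renames it
// into place. On POSIX filesystems the rename is atomic, so readers never
// see a half-written file.
func atomicSave(dst string, data []byte) error {
	fp, err := ioutil.TempFile(path.Dir(dst), "tmp-save")
	if err != nil {
		return err
	}
	zfp := zlib.NewWriter(fp)
	if _, err = zfp.Write(data); err != nil {
		fp.Close()
		return err
	}
	if err = zfp.Close(); err != nil {
		fp.Close()
		return err
	}
	if err = fp.Close(); err != nil {
		return err
	}
	return os.Rename(fp.Name(), dst)
}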
Example #3
// ProcessQueue creates a consumer queue on the given connection using the
// supplied exchange, exchange type, queue name and pattern, and durability
// options, and starts numWorkers goroutines consuming from it. Each delivery
// is handed to qprocessor, a function satisfying the PayloadProcessor type;
// when a worker stops, its error (or nil) is sent on errCh.
func ProcessQueue(conn *amqp.Connection, exchange, exchangeType, queueName, queuePattern string, durable, autoDelete, exclusive bool, errCh chan<- error, qprocessor PayloadProcessor, numWorkers int) error {
	if numWorkers < 1 {
		return errors.New("numWorkers must be at least 1")
	}
	for i := 0; i < numWorkers; i++ {
		devs, err := CreateConsumer(conn, exchange, exchangeType, queueName, queuePattern, durable, autoDelete, exclusive)
		if err != nil {
			return err
		}
		logger.Infof("starting queue %s for %s", exchange, queuePattern)
		go func(i int, exchange string, devs <-chan amqp.Delivery) {
			for d := range devs {
				logger.Debugf("worker %s %d received delivery", exchange, i)
				err := qprocessor(&d)
				if err != nil {
					errCh <- err
					return
				}
			}
			errCh <- nil
		}(i, exchange, devs)
	}
	return nil
}
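The excerpt never shows a PayloadProcessor implementation, but from the call qprocessor(&d) above its type is evidently func(*amqp.Delivery) error. A hypothetical processor, for illustration only:

// echoProcessor logs each delivery and acknowledges it. Returning a
// non-nil error shuts the worker down and reports the error on errCh.
func echoProcessor(d *amqp.Delivery) error {
	logger.Debugf("received %d bytes with routing key %s", len(d.Body), d.RoutingKey)
	return d.Ack(false)
}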
Example #4
func jsonErrorReport(w http.ResponseWriter, r *http.Request, errorStr string, status int) {
	logger.Infof("%s", errorStr)
	jsonError := map[string][]string{"error": {errorStr}}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(status)
	enc := json.NewEncoder(w)
	if err := enc.Encode(&jsonError); err != nil {
		logger.Errorf(err.Error())
	}
}
Example #5
// Save freezes and saves the data store to disk. The error return is named
// so that the deferred recover below can propagate an encoding panic to the
// caller.
func (ds *DataStore) Save(dsFile string) (err error) {
	if !ds.updated {
		return nil
	}
	logger.Infof("Data has changed, saving data store to disk")
	if dsFile == "" {
		err := fmt.Errorf("Yikes! Cannot save data store to disk because no file was specified.")
		return err
	}
	fp, err := ioutil.TempFile(path.Dir(dsFile), "ds-store")
	if err != nil {
		return err
	}
	zfp := zlib.NewWriter(fp)

	fstore := new(dsFileStore)
	dscache := new(bytes.Buffer)
	objList := new(bytes.Buffer)
	// Take the write lock: ds.updated is mutated below, so a read lock
	// would be a data race.
	ds.m.Lock()
	defer ds.m.Unlock()
	ds.updated = false

	err = ds.dsc.Save(dscache)
	if err != nil {
		fp.Close()
		return err
	}
	enc := gob.NewEncoder(objList)
	defer func() {
		// gob panics on unencodable values; recover and report the panic
		// through the named error return instead of crashing.
		if x := recover(); x != nil {
			err = fmt.Errorf("failed to encode the data store with gob: %v", x)
		}
	}()
	err = enc.Encode(ds.objList)
	if err != nil {
		fp.Close()
		return err
	}
	fstore.Cache = dscache.Bytes()
	fstore.ObjList = objList.Bytes()
	enc = gob.NewEncoder(zfp)
	err = enc.Encode(fstore)
	zfp.Close()
	if err != nil {
		fp.Close()
		return err
	}
	err = fp.Close()
	if err != nil {
		return err
	}
	return os.Rename(fp.Name(), dsFile)
}
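The named err return matters: a deferred function can only change a function's result by assigning to a named return value. Stripped down to just that mechanism (safeEncode is a sketch, not part of the codebase):

// safeEncode converts a panic inside enc.Encode into an ordinary error.
// Without the named return, the assignment in the deferred function would
// be lost and the caller would never see the failure.
func safeEncode(enc *gob.Encoder, v interface{}) (err error) {
	defer func() {
		if x := recover(); x != nil {
			err = fmt.Errorf("gob encoding panicked: %v", x)
		}
	}()
	return enc.Encode(v)
}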
Example #6
func setSaveTicker() {
	if config.Config.FreezeData {
		ds := datastore.New()
		ticker := time.NewTicker(time.Second * time.Duration(config.Config.FreezeInterval))
		go func() {
			for range ticker.C {
				if config.Config.DataStoreFile != "" {
					logger.Infof("Automatically saving data store...")
					uerr := ds.Save(config.Config.DataStoreFile)
					if uerr != nil {
						logger.Errorf(uerr.Error())
					}
				}
				logger.Infof("Automatically saving index...")
				ierr := indexer.SaveIndex(config.Config.IndexFile)
				if ierr != nil {
					logger.Errorf(ierr.Error())
				}
			}
		}()
	}
}
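The ticker above is never stopped, which is acceptable for a goroutine that lives as long as the process. For shorter-lived work, a stoppable variant avoids leaking the ticker; a sketch (saveEvery and its parameters are illustrative):

func saveEvery(interval time.Duration, save func() error, stop <-chan struct{}) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			if err := save(); err != nil {
				logger.Errorf(err.Error())
			}
		case <-stop:
			return
		}
	}
}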
Example #7
// New creates a new sandbox, given a map of null values with file checksums as
// keys.
func New(checksumHash map[string]interface{}) (*Sandbox, error) {
	/* For some reason the checksums come in a JSON hash that looks like
	 * this:
	 * { "checksums": {
	 * "385ea5490c86570c7de71070bce9384a":null,
	 * "f6f73175e979bd90af6184ec277f760c":null,
	 * "2e03dd7e5b2e6c8eab1cf41ac61396d5":null
	 * } } --- per the chef server api docs. Not sure why it comes in that
	 * way rather than as an array, since those nulls are apparently never
	 * anything but nulls. */

	/* First generate an id for this sandbox. Collisions are certainly
	 * possible, so we'll give it five tries to make a unique one before
	 * bailing. This may later turn out not to be the ideal sandbox creation
	 * method, but we'll see. */
	var sandboxID string
	var err error
	for i := 0; i < 5; i++ {
		sandboxID, err = generateSandboxID()
		if err != nil {
			/* Something went very wrong. */
			return nil, err
		}
		if s, _ := Get(sandboxID); s != nil {
			logger.Infof("Collision! Somehow %s already existed as a sandbox id on attempt %d. Trying again.", sandboxID, i)
			sandboxID = ""
		}
	}

	if sandboxID == "" {
		err = fmt.Errorf("Somehow every attempt to create a unique sandbox id failed. Bailing.")
		return nil, err
	}
	checksums := make([]string, len(checksumHash))
	j := 0
	for k := range checksumHash {
		checksums[j] = k
		j++
	}

	sbox := &Sandbox{
		ID:           sandboxID,
		CreationTime: time.Now(),
		Completed:    false,
		Checksums:    checksums,
	}
	return sbox, nil
}
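Given the JSON shape described in the comment, a caller would decode the request body and pass the inner map to New. A hypothetical call (newSandboxExample is illustrative; the checksums are the ones from the comment):

func newSandboxExample() {
	raw := []byte(`{"checksums":{"385ea5490c86570c7de71070bce9384a":null,"f6f73175e979bd90af6184ec277f760c":null}}`)
	var body map[string]map[string]interface{}
	if err := json.Unmarshal(raw, &body); err != nil {
		logger.Errorf(err.Error())
		return
	}
	sbox, err := New(body["checksums"])
	if err != nil {
		logger.Errorf(err.Error())
		return
	}
	logger.Infof("created sandbox %s with %d checksums", sbox.ID, len(sbox.Checksums))
}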
Example #8
// GetNodesByStatus returns the nodes that currently have the given status.
func GetNodesByStatus(nodeNames []string, status string) ([]*Node, error) {
	if config.UsingDB() {
		return getNodesByStatusSQL(nodeNames, status)
	}
	var statNodes []*Node
	nodes := make([]*Node, 0, len(nodeNames))
	for _, name := range nodeNames {
		n, _ := Get(name)
		if n != nil {
			nodes = append(nodes, n)
		}
	}
	for _, n := range nodes {
		ns, _ := n.LatestStatus()
		if ns == nil {
			logger.Infof("No status found at all for node %s, skipping", n.Name)
			continue
		}
		if ns.Status == status {
			statNodes = append(statNodes, n)
		}
	}
	return statNodes, nil
}
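A hypothetical caller collecting the nodes that last reported "down" (the function and node names are illustrative):

func logDownNodes() {
	downNodes, err := GetNodesByStatus([]string{"web1", "web2", "db1"}, "down")
	if err != nil {
		logger.Errorf(err.Error())
		return
	}
	for _, n := range downNodes {
		logger.Infof("node %s is down", n.Name)
	}
}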
Example #9
func main() {
	if setting.Config.ExpvarAddr != "" {
		go func() {
			err := http.ListenAndServe(setting.Config.ExpvarAddr, nil)
			if err != nil {
				fmt.Println("Error starting expvar http listener:", err.Error())
				os.Exit(1)
			}
		}()
	}

	// First fire up a queue to consume metric def events
	mdConn, err := amqp.Dial(setting.Config.RabbitMQURL)
	if err != nil {
		logger.Criticalf(err.Error())
		os.Exit(1)
	}
	defer mdConn.Close()
	logger.Debugf("connected")

	done := make(chan error, 1)

	var numCPU int
	if setting.Config.NumWorkers != 0 {
		numCPU = setting.Config.NumWorkers
	} else {
		numCPU = runtime.NumCPU()
	}

	err = qproc.ProcessQueue(mdConn, "metrics", "topic", "", "metrics.*", false, true, true, done, processMetricDefEvent, numCPU)
	if err != nil {
		logger.Criticalf(err.Error())
		os.Exit(1)
	}
	err = qproc.ProcessQueue(mdConn, "metricResults", "x-consistent-hash", "", "10", false, true, true, done, processMetrics, numCPU)
	if err != nil {
		logger.Criticalf(err.Error())
		os.Exit(1)
	}
	err = initEventProcessing(mdConn, numCPU, done)
	if err != nil {
		logger.Criticalf(err.Error())
		os.Exit(1)
	}

	// Signal handling. If SIGQUIT is received, print out the current
	// stack. Otherwise if SIGINT or SIGTERM are received clean up and exit
	// in an orderly fashion.
	go func() {
		sigs := make(chan os.Signal, 1)
		signal.Notify(sigs, syscall.SIGQUIT, os.Interrupt, syscall.SIGTERM)
		buf := make([]byte, 1<<20)
		for sig := range sigs {
			if sig == syscall.SIGQUIT {
				// print out the current stack on SIGQUIT
				n := runtime.Stack(buf, true)
				log.Printf("=== received SIGQUIT ===\n*** goroutine dump...\n%s\n*** end\n", buf[:n])
			} else {
				// finish our existing work, clean up, and exit
				// in an orderly fashion
				logger.Infof("Closing rabbitmq connection")
				cerr := mdConn.Close()
				if cerr != nil {
					logger.Errorf("Received error closing rabbitmq connection: %s", cerr.Error())
				}
				logger.Infof("Closing processing buffer channel")
				close(bufCh)
			}
		}
	}()

	// this channel returns when one of the workers exits.
	err = <-done
	logger.Criticalf("all done!", err)
	if err != nil {
		logger.Criticalf("Had an error, aiiieeee! '%s'", err.Error())
	}
}
Example #10
func importAll(fileName string) error {
	fp, err := os.Open(fileName)
	if err != nil {
		return err
	}
	defer fp.Close()
	exportedData := &ExportData{}
	dec := json.NewDecoder(fp)
	if err := dec.Decode(&exportedData); err != nil {
		return err
	}

	// What versions of the exported data are supported?
	// At the moment, 1.0 and 1.1.

	if exportedData.MajorVersion == 1 && (exportedData.MinorVersion == 0 || exportedData.MinorVersion == 1) {
		logger.Infof("Importing data, version %d.%d created on %s", exportedData.MajorVersion, exportedData.MinorVersion, exportedData.CreatedTime)

		// load clients
		logger.Infof("Loading clients")
		for _, v := range exportedData.Data["client"] {
			c, err := client.NewFromJSON(v.(map[string]interface{}))
			if err != nil {
				return err
			}
			c.SetPublicKey(v.(map[string]interface{})["public_key"])
			gerr := c.Save()
			if gerr != nil {
				return gerr
			}
		}

		// load users
		logger.Infof("Loading users")
		for _, v := range exportedData.Data["user"] {
			pwhash, _ := v.(map[string]interface{})["password"].(string)
			v.(map[string]interface{})["password"] = ""
			u, err := user.NewFromJSON(v.(map[string]interface{}))
			if err != nil {
				return err
			}
			u.SetPasswdHash(pwhash)
			u.SetPublicKey(v.(map[string]interface{})["public_key"])
			gerr := u.Save()
			if gerr != nil {
				return gerr
			}
		}

		// load filestore
		logger.Infof("Loading filestore")
		for _, v := range exportedData.Data["filestore"] {
			fileData, err := base64.StdEncoding.DecodeString(v.(map[string]interface{})["Data"].(string))
			if err != nil {
				return err
			}
			fdBuf := bytes.NewBuffer(fileData)
			fdRc := ioutil.NopCloser(fdBuf)
			fs, err := filestore.New(v.(map[string]interface{})["Chksum"].(string), fdRc, int64(fdBuf.Len()))
			if err != nil {
				return err
			}
			if err = fs.Save(); err != nil {
				return err
			}
		}

		// load cookbooks
		logger.Infof("Loading cookbooks")
		for _, v := range exportedData.Data["cookbook"] {
			cb, err := cookbook.New(v.(map[string]interface{})["Name"].(string))
			if err != nil {
				return err
			}
			gerr := cb.Save()
			if gerr != nil {
				return gerr
			}
			for ver, cbvData := range v.(map[string]interface{})["Versions"].(map[string]interface{}) {
				cbvData, cerr := checkAttrs(cbvData.(map[string]interface{}))
				if cerr != nil {
					return cerr
				}
				_, cbverr := cb.NewVersion(ver, cbvData)
				if cbverr != nil {
					return cbverr
				}
			}
		}

		// load data bags
		logger.Infof("Loading data bags")
		for _, v := range exportedData.Data["data_bag"] {
			dbag, err := databag.New(v.(map[string]interface{})["Name"].(string))
			if err != nil {
				return err
			}
			gerr := dbag.Save()
			if gerr != nil {
				return gerr
			}
			for _, dbagData := range v.(map[string]interface{})["DataBagItems"].(map[string]interface{}) {
				_, dbierr := dbag.NewDBItem(dbagData.(map[string]interface{})["raw_data"].(map[string]interface{}))
				if dbierr != nil {
					return dbierr
				}
			}
			gerr = dbag.Save()
			if gerr != nil {
				return gerr
			}
		}
		// load environments
		logger.Infof("Loading environments")
		for _, v := range exportedData.Data["environment"] {
			envData, cerr := checkAttrs(v.(map[string]interface{}))
			if cerr != nil {
				return cerr
			}
			if envData["name"].(string) != "_default" {
				e, err := environment.NewFromJSON(envData)
				if err != nil {
					return err
				}
				gerr := e.Save()
				if gerr != nil {
					return gerr
				}
			}
		}

		// load nodes
		logger.Infof("Loading nodes")
		for _, v := range exportedData.Data["node"] {
			nodeData, cerr := checkAttrs(v.(map[string]interface{}))
			if cerr != nil {
				return cerr
			}
			n, err := node.NewFromJSON(nodeData)
			if err != nil {
				return err
			}
			gerr := n.Save()
			if gerr != nil {
				return gerr
			}
		}

		// load roles
		logger.Infof("Loading roles")
		for _, v := range exportedData.Data["role"] {
			roleData, cerr := checkAttrs(v.(map[string]interface{}))
			if cerr != nil {
				return cerr
			}
			r, err := role.NewFromJSON(roleData)
			if err != nil {
				return err
			}
			gerr := r.Save()
			if gerr != nil {
				return gerr
			}
		}

		// load sandboxes
		logger.Infof("Loading sandboxes")
		for _, v := range exportedData.Data["sandbox"] {
			sbid, _ := v.(map[string]interface{})["Id"].(string)
			sbts, _ := v.(map[string]interface{})["CreationTime"].(string)
			sbcomplete, _ := v.(map[string]interface{})["Completed"].(bool)
			sbck, _ := v.(map[string]interface{})["Checksums"].([]interface{})
			sbTime, err := time.Parse(time.RFC3339, sbts)
			if err != nil {
				return err
			}
			sbChecksums := make([]string, len(sbck))
			for i, c := range sbck {
				sbChecksums[i] = c.(string)
			}
			sbox := &sandbox.Sandbox{ID: sbid, CreationTime: sbTime, Completed: sbcomplete, Checksums: sbChecksums}
			if err = sbox.Save(); err != nil {
				return err
			}
		}

		// load loginfos
		logger.Infof("Loading loginfo")
		for _, v := range exportedData.Data["loginfo"] {
			if err := loginfo.Import(v.(map[string]interface{})); err != nil {
				return err
			}
		}

		// load reports
		logger.Infof("Loading reports")
		for _, o := range exportedData.Data["report"] {
			// handle data exported from a bugged report export
			var nodeName string
			v := o.(map[string]interface{})
			if n, ok := v["node_name"]; ok {
				nodeName = n.(string)
			} else if n, ok := v["nodeName"]; ok {
				nodeName = n.(string)
			}
			v["action"] = "start"
			if st, ok := v["start_time"].(string); ok {
				t, err := time.Parse(time.RFC3339, st)
				if err != nil {
					return err
				}
				v["start_time"] = t.Format(report.ReportTimeFormat)
			}
			if et, ok := v["end_time"].(string); ok {
				t, err := time.Parse(time.RFC3339, et)
				if err != nil {
					return err
				}
				v["end_time"] = t.Format(report.ReportTimeFormat)
			}
			r, err := report.NewFromJSON(nodeName, v)
			if err != nil {
				return err
			}
			gerr := r.Save()
			if gerr != nil {
				return gerr
			}
			v["action"] = "end"
			if err := r.UpdateFromJSON(v); err != nil {
				return err
			}
			gerr = r.Save()
			if gerr != nil {
				return gerr
			}
		}

		if exportedData.MinorVersion == 1 {
			// import shovey jobs, run, and streams, and node
			// statuses
			logger.Infof("Loading node statuses...")
			for _, v := range exportedData.Data["node_status"] {
				ns := v.(map[string]interface{})
				err := node.ImportStatus(ns)
				if err != nil {
					return err
				}
			}
			logger.Infof("Loading shoveys...")
			for _, v := range exportedData.Data["shovey"] {
				s := v.(map[string]interface{})
				err := shovey.ImportShovey(s)
				if err != nil {
					return err
				}
			}
			logger.Infof("Loading shovey runs...")
			for _, v := range exportedData.Data["shovey_run"] {
				s := v.(map[string]interface{})
				err := shovey.ImportShoveyRun(s)
				if err != nil {
					return err
				}

			}
			logger.Infof("Loading shovey run streams...")
			for _, v := range exportedData.Data["shovey_run_stream"] {
				s := v.(map[string]interface{})
				err := shovey.ImportShoveyRunStream(s)
				if err != nil {
					return err
				}
			}
		}

	} else {
		err := fmt.Errorf("goiardi export data version %d.%d is not supported by this version of goiardi", exportedData.MajorVersion, exportedData.MinorVersion)
		return err
	}
	return nil
}
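Nearly every loop above relies on unchecked type assertions like v.(map[string]interface{}), which panic on malformed export data. A defensive variant would use the comma-ok form; a small helper sketch (asObject is illustrative):

// asObject returns v as a JSON object, or an error instead of a panic if
// the exported data is malformed.
func asObject(v interface{}) (map[string]interface{}, error) {
	m, ok := v.(map[string]interface{})
	if !ok {
		return nil, fmt.Errorf("expected a JSON object, got %T", v)
	}
	return m, nil
}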
Example #11
// CancelRuns cancels the shovey runs given in the slice of strings with the
// node names to cancel jobs on.
func (s *Shovey) CancelRuns(nodeNames []string) util.Gerror {
	if config.UsingDB() {
		err := s.cancelRunsSQL()
		if err != nil {
			return err
		}
	} else {
		for _, n := range nodeNames {
			sr, err := s.GetRun(n)
			if err != nil {
				return err
			}
			if sr.Status != "invalid" && sr.Status != "succeeded" && sr.Status != "failed" && sr.Status != "down" && sr.Status != "nacked" {
				sr.EndTime = time.Now()
				sr.Status = "cancelled"
				err = sr.save()
				if err != nil {
					return err
				}
			}
		}
	}
	if len(nodeNames) == len(s.NodeNames) {
		sort.Strings(nodeNames)
		sort.Strings(s.NodeNames)
		if reflect.DeepEqual(nodeNames, s.NodeNames) {
			s.Status = "cancelled"
			s.save()
		}
	} else {
		s.checkCompleted()
	}

	payload := make(map[string]string)
	payload["action"] = "cancel"
	payload["run_id"] = s.RunID
	payload["time"] = time.Now().Format(time.RFC3339)
	sig, serr := s.signRequest(payload)
	if serr != nil {
		return util.CastErr(serr)
	}
	payload["signature"] = sig
	jsonPayload, _ := json.Marshal(payload)
	ackCh := make(chan string, len(nodeNames))
	q := &serfclient.QueryParam{Name: "shovey", Payload: jsonPayload, FilterNodes: nodeNames, RequestAck: true, AckCh: ackCh}
	err := serfin.Serfer.Query(q)
	if err != nil {
		return util.CastErr(err)
	}
	doneCh := make(chan struct{})
	go func() {
		for c := range ackCh {
			logger.Debugf("Received acknowledgement from %s", c)
		}
		doneCh <- struct{}{}
	}()
	select {
	case <-doneCh:
		logger.Infof("All nodes acknowledged cancellation")
		// probably do a report here?
	case <-time.After(time.Duration(60) * time.Second):
		logger.Errorf("Didn't get all acknowledgements within 60 seconds")
	}

	return nil
}
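A hypothetical call site, assuming util.Gerror satisfies the error interface (cancelExample and the node names are illustrative):

func cancelExample(sj *Shovey) {
	if gerr := sj.CancelRuns([]string{"node1.local", "node2.local"}); gerr != nil {
		logger.Errorf(gerr.Error())
	}
}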
Example #12
// ParseConfigOptions reads and applies arguments from the command line and the
// configuration file, merging them together as needed, with command line options
// taking precedence over options in the config file.
func ParseConfigOptions() error {
	var opts = &Options{}
	_, err := flags.Parse(opts)

	if err != nil {
		if flagsErr, ok := err.(*flags.Error); ok && flagsErr.Type == flags.ErrHelp {
			os.Exit(0)
		}
		log.Println(err)
		os.Exit(1)
	}

	if opts.Version {
		fmt.Printf("goiardi version %s (aiming for compatibility with Chef Server version %s).\n", Version, ChefVersion)
		os.Exit(0)
	}

	/* Load the config file. Command-line options have precedence over
	 * config file options. */
	if opts.ConfFile != "" {
		if _, err := toml.DecodeFile(opts.ConfFile, Config); err != nil {
			log.Println(err)
			os.Exit(1)
		}
		Config.ConfFile = opts.ConfFile
		Config.FreezeData = false
	}

	if opts.Export != "" && opts.Import != "" {
		log.Println("Cannot use -x/--export and -m/--import flags together.")
		os.Exit(1)
	}

	if opts.Export != "" {
		Config.DoExport = true
		Config.ImpExFile = opts.Export
	} else if opts.Import != "" {
		Config.DoImport = true
		Config.ImpExFile = opts.Import
	}

	if opts.Hostname != "" {
		Config.Hostname = opts.Hostname
	} else {
		if Config.Hostname == "" {
			Config.Hostname, err = os.Hostname()
			if err != nil {
				log.Println(err)
				Config.Hostname = "localhost"
			}
		}
	}

	if opts.DataStoreFile != "" {
		Config.DataStoreFile = opts.DataStoreFile
	}

	if opts.IndexFile != "" {
		Config.IndexFile = opts.IndexFile
	}

	// Use MySQL?
	if opts.UseMySQL {
		Config.UseMySQL = opts.UseMySQL
	}

	// Use Postgres?
	if opts.UsePostgreSQL {
		Config.UsePostgreSQL = opts.UsePostgreSQL
	}

	if Config.UseMySQL && Config.UsePostgreSQL {
		err := fmt.Errorf("The MySQL and Postgres options cannot be used together.")
		log.Println(err)
		os.Exit(1)
	}

	// Use Postgres search?
	if opts.PgSearch {
		// make sure postgres is enabled
		if !Config.UsePostgreSQL {
			err := fmt.Errorf("--pg-search requires --use-postgresql (which makes sense, really).")
			log.Println(err)
			os.Exit(1)
		}
		Config.PgSearch = opts.PgSearch
	}

	if Config.DataStoreFile != "" && (Config.UseMySQL || Config.UsePostgreSQL) {
		err := fmt.Errorf("The MySQL or Postgres and data store options may not be specified together.")
		log.Println(err)
		os.Exit(1)
	}

	// Either neither the index nor the data store is configured, or the
	// index file is set together with a data store file or a SQL backend.
	if !((Config.DataStoreFile == "" && Config.IndexFile == "") || ((Config.DataStoreFile != "" || (Config.UseMySQL || Config.UsePostgreSQL)) && Config.IndexFile != "")) {
		err := fmt.Errorf("the index (-i) and data store (-D) files must either both be specified, or neither")
		log.Println(err)
		os.Exit(1)
	}

	if (Config.UseMySQL || Config.UsePostgreSQL) && (Config.IndexFile == "" && !Config.PgSearch) {
		err := fmt.Errorf("An index file must be specified with -i or --index-file (or the 'index-file' config file option) when running with a MySQL or PostgreSQL backend.")
		log.Println(err)
		os.Exit(1)
	}

	if Config.IndexFile != "" && (Config.DataStoreFile != "" || (Config.UseMySQL || Config.UsePostgreSQL)) {
		Config.FreezeData = true
	}

	if opts.LogFile != "" {
		Config.LogFile = opts.LogFile
	}
	if opts.SysLog {
		Config.SysLog = opts.SysLog
	}
	if Config.LogFile != "" {
		lfp, lerr := os.Create(Config.LogFile)
		if lerr != nil {
			log.Println(lerr)
			os.Exit(1)
		}
		log.SetOutput(lfp)
	}
	if dlev := len(opts.Verbose); dlev != 0 {
		Config.DebugLevel = dlev
	}
	if Config.LogLevel != "" {
		if lev, ok := LogLevelNames[strings.ToLower(Config.LogLevel)]; ok && Config.DebugLevel == 0 {
			Config.DebugLevel = lev
		}
	}
	if Config.DebugLevel > 4 {
		Config.DebugLevel = 4
	}

	Config.DebugLevel = int(logger.LevelCritical) - Config.DebugLevel
	logger.SetLevel(logger.LogLevel(Config.DebugLevel))
	debugLevel := map[int]string{0: "debug", 1: "info", 2: "warning", 3: "error", 4: "critical"}
	log.Printf("Logging at %s level", debugLevel[Config.DebugLevel])
	if Config.SysLog {
		sl, err := logger.NewSysLogger("goiardi")
		if err != nil {
			log.Println(err.Error())
			os.Exit(1)
		}
		logger.SetLogger(sl)
	} else {
		logger.SetLogger(logger.NewGoLogger())
	}

	/* Database options */

	// Don't bother setting a default mysql port if mysql isn't used
	if Config.UseMySQL {
		if Config.MySQL.Port == "" {
			Config.MySQL.Port = "3306"
		}
	}

	// set default Postgres options
	if Config.UsePostgreSQL {
		if Config.PostgreSQL.Port == "" {
			Config.PostgreSQL.Port = "5432"
		}
	}

	if opts.LocalFstoreDir != "" {
		Config.LocalFstoreDir = opts.LocalFstoreDir
	}
	if Config.LocalFstoreDir == "" && (Config.UseMySQL || Config.UsePostgreSQL) {
		logger.Criticalf("local-filestore-dir must be set when running goiardi in SQL mode")
		os.Exit(1)
	}
	if Config.LocalFstoreDir != "" {
		finfo, ferr := os.Stat(Config.LocalFstoreDir)
		if ferr != nil {
			logger.Criticalf("Error checking local filestore dir: %s", ferr.Error())
			os.Exit(1)
		}
		if !finfo.IsDir() {
			logger.Criticalf("Local filestore dir %s is not a directory", Config.LocalFstoreDir)
			os.Exit(1)
		}
	}

	if !Config.FreezeData && (opts.FreezeInterval != 0 || Config.FreezeInterval != 0) {
		logger.Warningf("FYI, setting the freeze data interval's not especially useful without setting the index and data files.")
	}
	if opts.FreezeInterval != 0 {
		Config.FreezeInterval = opts.FreezeInterval
	}
	if Config.FreezeInterval == 0 {
		Config.FreezeInterval = 10
	}

	/* Root directory for certs and the like */
	if opts.ConfRoot != "" {
		Config.ConfRoot = opts.ConfRoot
	}

	if Config.ConfRoot == "" {
		if Config.ConfFile != "" {
			Config.ConfRoot = path.Dir(Config.ConfFile)
		} else {
			Config.ConfRoot = "."
		}
	}

	Config.Ipaddress = opts.Ipaddress
	if Config.Ipaddress != "" {
		ip := net.ParseIP(Config.Ipaddress)
		if ip == nil {
			logger.Criticalf("IP address '%s' is not valid", Config.Ipaddress)
			os.Exit(1)
		}
	}

	if opts.Port != 0 {
		Config.Port = opts.Port
	}
	if Config.Port == 0 {
		Config.Port = 4545
	}

	if opts.UseSSL {
		Config.UseSSL = opts.UseSSL
	}
	if opts.SSLCert != "" {
		Config.SSLCert = opts.SSLCert
	}
	if opts.SSLKey != "" {
		Config.SSLKey = opts.SSLKey
	}
	if opts.HTTPSUrls {
		Config.HTTPSUrls = opts.HTTPSUrls
	}
	// SSL setup
	if Config.Port == 80 {
		Config.UseSSL = false
	} else if Config.Port == 443 {
		Config.UseSSL = true
	}
	if Config.UseSSL {
		if Config.SSLCert == "" || Config.SSLKey == "" {
			logger.Criticalf("SSL mode requires specifying both a certificate and a key file.")
			os.Exit(1)
		}
		/* If the SSL cert and key are not absolute files, join them
		 * with the conf root */
		if !path.IsAbs(Config.SSLCert) {
			Config.SSLCert = path.Join(Config.ConfRoot, Config.SSLCert)
		}
		if !path.IsAbs(Config.SSLKey) {
			Config.SSLKey = path.Join(Config.ConfRoot, Config.SSLKey)
		}
	}

	if opts.TimeSlew != "" {
		Config.TimeSlew = opts.TimeSlew
	}
	if Config.TimeSlew != "" {
		d, derr := time.ParseDuration(Config.TimeSlew)
		if derr != nil {
			logger.Criticalf("Error parsing time-slew: %s", derr.Error())
			os.Exit(1)
		}
		Config.TimeSlewDur = d
	} else {
		Config.TimeSlewDur, _ = time.ParseDuration("15m")
	}

	if opts.UseAuth {
		Config.UseAuth = opts.UseAuth
	}

	if opts.DisableWebUI {
		Config.DisableWebUI = opts.DisableWebUI
	}

	if opts.LogEvents {
		Config.LogEvents = opts.LogEvents
	}

	if opts.LogEventKeep != 0 {
		Config.LogEventKeep = opts.LogEventKeep
	}

	// Set max sizes for objects and json requests.
	if opts.ObjMaxSize != 0 {
		Config.ObjMaxSize = opts.ObjMaxSize
	}
	if opts.JSONReqMaxSize != 0 {
		Config.JSONReqMaxSize = opts.JSONReqMaxSize
	}
	if Config.ObjMaxSize == 0 {
		Config.ObjMaxSize = 10485760
	}
	if Config.JSONReqMaxSize == 0 {
		Config.JSONReqMaxSize = 1000000
	}

	if opts.UseUnsafeMemStore {
		Config.UseUnsafeMemStore = opts.UseUnsafeMemStore
	}

	if opts.DbPoolSize != 0 {
		Config.DbPoolSize = opts.DbPoolSize
	}
	if opts.MaxConn != 0 {
		Config.MaxConn = opts.MaxConn
	}
	if !UsingDB() {
		if Config.DbPoolSize != 0 {
			logger.Infof("db-pool-size is set to %d, which is not particularly useful if you are not using one of the SQL databases.", Config.DbPoolSize)
		}
		if Config.MaxConn != 0 {
			logger.Infof("max-connections is set to %d, which is not particularly useful if you are not using one of the SQL databases.", Config.MaxConn)
		}
	}
	if opts.UseSerf {
		Config.UseSerf = opts.UseSerf
	}
	if Config.UseSerf {
		if opts.SerfAddr != "" {
			Config.SerfAddr = opts.SerfAddr
		}
		if Config.SerfAddr == "" {
			Config.SerfAddr = "127.0.0.1:7373"
		}
	}
	if opts.SerfEventAnnounce {
		Config.SerfEventAnnounce = opts.SerfEventAnnounce
	}
	if Config.SerfEventAnnounce && !Config.UseSerf {
		logger.Criticalf("--serf-event-announce requires --use-serf")
		os.Exit(1)
	}

	if opts.UseShovey {
		if !Config.UseSerf {
			logger.Criticalf("--use-shovey requires --use-serf to be enabled")
			os.Exit(1)
		}
		Config.UseShovey = opts.UseShovey
	}

	// shovey signing key stuff
	if opts.SignPrivKey != "" {
		Config.SignPrivKey = opts.SignPrivKey
	}

	// if using shovey, open the existing, or create if absent, signing
	// keys.
	if Config.UseShovey {
		if Config.SignPrivKey == "" {
			Config.SignPrivKey = path.Join(Config.ConfRoot, "shovey-sign_rsa")
		} else if !path.IsAbs(Config.SignPrivKey) {
			Config.SignPrivKey = path.Join(Config.ConfRoot, Config.SignPrivKey)
		}
		privfp, err := os.Open(Config.SignPrivKey)
		if err != nil {
			logger.Criticalf("Private key %s for signing shovey requests not found. Please create a set of RSA keys for this purpose.", Config.SignPrivKey)
			os.Exit(1)
		}
		privPem, err := ioutil.ReadAll(privfp)
		if err != nil {
			logger.Criticalf(err.Error())
			os.Exit(1)
		}
		privBlock, _ := pem.Decode(privPem)
		if privBlock == nil {
			logger.Criticalf("Invalid block size for private key for shovey")
			os.Exit(1)
		}
		privKey, err := x509.ParsePKCS1PrivateKey(privBlock.Bytes)
		if err != nil {
			logger.Criticalf(err.Error())
			os.Exit(1)
		}
		Key.Lock()
		defer Key.Unlock()
		Key.PrivKey = privKey
	}

	if opts.DotSearch {
		Config.DotSearch = opts.DotSearch
	} else if Config.PgSearch {
		Config.DotSearch = true
	}
	if Config.DotSearch {
		if opts.ConvertSearch {
			Config.ConvertSearch = opts.ConvertSearch
		}
	}
	if Config.IndexFile != "" && Config.PgSearch {
		logger.Infof("Specifying an index file for search while using the postgres search isn't useful.")
	}

	return nil
}
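The merging rule applied throughout is the same: a command-line flag that was set wins over the config-file value, which wins over the built-in default. Condensed into a skeleton (pick is illustrative, not part of goiardi):

// pick mirrors the precedence used above: command-line flag first, then
// the config-file value, then the default.
func pick(flag, conf, def string) string {
	if flag != "" {
		return flag
	}
	if conf != "" {
		return conf
	}
	return def
}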