Example #1
// TestFiltering tests the filtering of log output.
func TestFiltering(t *testing.T) {
	assert := audit.NewTestingAssertion(t, true)

	ownLogger := &testLogger{}
	logger.SetLogger(ownLogger)
	logger.SetLevel(logger.LevelDebug)
	logger.SetFilter(func(level logger.LogLevel, info, msg string) bool {
		return level >= logger.LevelWarning && level <= logger.LevelError
	})

	logger.Debugf("Debug.")
	logger.Infof("Info.")
	logger.Warningf("Warning.")
	logger.Errorf("Error.")
	logger.Criticalf("Critical.")
	assert.Length(ownLogger.logs, 3)

	logger.UnsetFilter()

	ownLogger = &testLogger{}
	logger.SetLogger(ownLogger)
	logger.Debugf("Debug.")
	logger.Infof("Info.")
	logger.Warningf("Warning.")
	logger.Errorf("Error.")
	logger.Criticalf("Critical.")
	assert.Length(ownLogger.logs, 5)
}
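
The testLogger helper used above (and again in the log level filtering test further down) is not part of the listing. A minimal sketch of such a capture-only backend is shown below; the method set and the (info, msg) signatures are assumptions inferred from the SetFilter callback and the leveled calls in the examples, not a verified copy of the library's backend interface.

// testLogger is a hypothetical in-memory backend that records every log
// line so tests can assert on the number of entries.
type testLogger struct {
	logs []string
}

func (tl *testLogger) record(prefix, info, msg string) {
	tl.logs = append(tl.logs, prefix+" "+info+" "+msg)
}

func (tl *testLogger) Debug(info, msg string)    { tl.record("[DEBUG]", info, msg) }
func (tl *testLogger) Info(info, msg string)     { tl.record("[INFO]", info, msg) }
func (tl *testLogger) Warning(info, msg string)  { tl.record("[WARNING]", info, msg) }
func (tl *testLogger) Error(info, msg string)    { tl.record("[ERROR]", info, msg) }
func (tl *testLogger) Critical(info, msg string) { tl.record("[CRITICAL]", info, msg) }
func (tl *testLogger) Fatal(info, msg string)    { tl.record("[FATAL]", info, msg) }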
Example #2
func main() {
	config := cachet.Config
	// TODO support log path
	logger.SetLogger(logger.NewTimeformatLogger(os.Stderr, "2006-01-02 15:04:05"))
	logger.SetLevel(logger.LevelDebug)

	logger.Infof("System: %s, API: %s", config.SystemName, config.APIUrl)
	logger.Infof("Starting %d monitors", len(config.MonitorConfigs))

	// initialize monitors
	var allMonitors []*cachet.Monitor
	for _, monconf := range config.MonitorConfigs {
		monconf := monconf // copy the loop variable before taking its address
		err, mon := cachet.NewMonitor(&monconf)
		if err == nil {
			err = cachet.SyncMonitor(mon)
			if err != nil {
				logger.Errorf("%v", err)
			}
			allMonitors = append(allMonitors, mon)
		} else {
			logger.Errorf("Parsing monitor error, skipping: %v", err)
		}
	}

	ticker := time.NewTicker(time.Second * time.Duration(config.CheckInterval))
	for range ticker.C {
		for _, m := range allMonitors {
			go m.Check()
		}
	}
}
Example #3
// logError checks whether logging is enabled and adds the defaulter ID, if set, before logging an error.
func (d *defaulter) logError(format string, err error) {
	if !d.log {
		return
	}
	format += ": %v"
	if len(d.id) > 0 {
		logger.Infof("(%s) "+format, d.id, err)
	} else {
		logger.Infof(format, err)
	}
}
Example #4
func (th *TestHandler) Get(ctx web.Context) (bool, error) {
	data := TestRequestData{ctx.Domain(), ctx.Resource(), ctx.ResourceID()}
	switch {
	case ctx.AcceptsContentType(web.ContentTypeXML):
		logger.Infof("get XML")
		ctx.WriteXML(data)
	case ctx.AcceptsContentType(web.ContentTypeJSON):
		logger.Infof("get JSON")
		ctx.WriteJSON(data, true)
	default:
		logger.Infof("get HTML")
		ctx.RenderTemplate("test:context:html", data)
	}
	return true, nil
}
Example #5
func (i *FileIndex) Save() error {
	i.m.RLock()
	defer i.m.RUnlock()
	idxFile := i.file
	if idxFile == "" {
		err := fmt.Errorf("Yikes! Cannot save index to disk because no file was specified.")
		return err
	}
	if !i.updated {
		return nil
	}
	logger.Infof("Index has changed, saving to disk")
	fp, err := ioutil.TempFile(path.Dir(idxFile), "idx-build")
	if err != nil {
		return err
	}
	zfp := zlib.NewWriter(fp)

	i.updated = false
	enc := gob.NewEncoder(zfp)
	err = enc.Encode(i)
	zfp.Close()
	if err != nil {
		fp.Close()
		return err
	}
	err = fp.Close()
	if err != nil {
		return err
	}
	return os.Rename(fp.Name(), idxFile)
}
Example #6
func startNodeMonitor() {
	// Never do this if serf isn't set up
	if !config.Config.UseSerf {
		return
	}
	go func() {
		// wait 1 minute before starting to check for nodes being up
		time.Sleep(1 * time.Minute)
		ticker := time.NewTicker(time.Minute)
		for range ticker.C {
			unseen, err := node.UnseenNodes()
			if err != nil {
				logger.Errorf(err.Error())
				continue
			}
			for _, n := range unseen {
				logger.Infof("Haven't seen %s for a while, marking as down", n.Name)
				err = n.UpdateStatus("down")
				if err != nil {
					logger.Errorf(err.Error())
					continue
				}
			}
		}
	}()
}
Example #7
// logCommand logs a command and its execution status.
func logCommand(cmd string, args []interface{}, err error, log bool) {
	// Format the command for the log entry.
	formatArgs := func() string {
		if len(args) == 0 {
			return "(none)"
		}
		output := make([]string, len(args))
		for i, arg := range args {
			output[i] = string(valueToBytes(arg))
		}
		return strings.Join(output, " / ")
	}
	logOutput := func() string {
		format := "CMD %s ARGS %s %s"
		if err == nil {
			return fmt.Sprintf(format, cmd, formatArgs(), "OK")
		}
		return fmt.Sprintf(format, cmd, formatArgs(), "ERROR "+err.Error())
	}
	// Log positive commands only if wanted, errors always.
	if err != nil {
		if errors.IsError(err, ErrServerResponse) || errors.IsError(err, ErrTimeout) {
			return
		}
		logger.Errorf(logOutput())
	} else if log {
		logger.Infof(logOutput())
	}
}
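
A hypothetical call site for logCommand, invoked right after a command round trip; executeCommand, the command name, and the arguments below are placeholders, not part of the real package API.

// Hypothetical usage: report the outcome of a command that has just been
// executed, with info-level logging of successful commands enabled.
func doSet(key string, value interface{}) error {
	args := []interface{}{key, value}
	err := executeCommand("SET", args) // executeCommand is a placeholder
	logCommand("SET", args, err, true)
	return err
}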
Example #8
func (ah *AuthHandler) Get(ctx web.Context) (bool, error) {
	logger.Infof("scene ID: %s", ctx.Scene().ID())
	password, err := ctx.Scene().Fetch("password")
	if err == nil {
		logger.Infof("scene is logged in")
		return true, nil
	}
	password = ctx.Request().Header.Get("password")
	if password != ah.password {
		ctx.Redirect("authentication", "login", "")
		return false, nil
	}
	logger.Infof("logging scene in")
	ctx.Scene().Store("password", password)
	return true, nil
}
Example #9
// ProcessEvent reads, validates and emits a configuration.
func (b *configuratorBehavior) ProcessEvent(event cells.Event) error {
	switch event.Topic() {
	case ReadConfigurationTopic:
		// Read configuration
		filename, ok := event.Payload().GetString(ConfigurationFilenamePayload)
		if !ok {
			logger.Errorf("cannot read configuration without filename payload")
			return nil
		}
		logger.Infof("reading configuration from %q", filename)
		cfg, err := etc.ReadFile(filename)
		if err != nil {
			return errors.Annotate(err, ErrCannotReadConfiguration, errorMessages)
		}
		// If wanted then validate it.
		if b.validate != nil {
			err = b.validate(cfg)
			if err != nil {
				return errors.Annotate(err, ErrCannotValidateConfiguration, errorMessages)
			}
		}
		// All done, emit it.
		pvs := cells.PayloadValues{
			ConfigurationPayload: cfg,
		}
		b.cell.EmitNewContext(ConfigurationTopic, pvs, event.Context())
	}
	return nil
}
Example #10
// Stop implements the Environment interface.
func (env *environment) Stop() error {
	runtime.SetFinalizer(env, nil)
	if err := env.cells.stop(); err != nil {
		return err
	}
	logger.Infof("cells environment %q terminated", env.ID())
	return nil
}
Example #11
// stop terminates the cell.
func (c *cell) stop() error {
	c.emitTimeoutTicker.Stop()
	err := c.loop.Stop()
	if err != nil {
		logger.Errorf("cell %q terminated with error: %v", c.id, err)
	} else {
		logger.Infof("cell %q terminated", c.id)
	}
	return err
}
Example #12
func jsonErrorReport(w http.ResponseWriter, r *http.Request, errorStr string, status int) {
	logger.Infof(errorStr)
	jsonError := map[string][]string{"error": []string{errorStr}}
	w.WriteHeader(status)
	enc := json.NewEncoder(w)
	if err := enc.Encode(&jsonError); err != nil {
		logger.Errorf(err.Error())
	}
}
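
A hypothetical caller for jsonErrorReport; the handler name, routing, and error text are illustrative only.

// Hypothetical usage: reject an unsupported method with a JSON error body.
func nodeListHandler(w http.ResponseWriter, r *http.Request) {
	if r.Method != "GET" {
		jsonErrorReport(w, r, "method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// ... handle the GET request ...
}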
Example #13
// newCell creates a new cell around a behavior.
func newCell(env *environment, id string, behavior Behavior) (*cell, error) {
	logger.Infof("cell '%s' starts", id)
	// Init cell runtime.
	c := &cell{
		env:               env,
		id:                id,
		measuringID:       identifier.Identifier("cells", env.id, "cell", id),
		behavior:          behavior,
		emitters:          newConnections(),
		subscribers:       newConnections(),
		emitTimeoutTicker: time.NewTicker(5 * time.Second),
	}
	// Set configuration.
	if bebs, ok := behavior.(BehaviorEventBufferSize); ok {
		size := bebs.EventBufferSize()
		if size < minEventBufferSize {
			size = minEventBufferSize
		}
		c.eventc = make(chan Event, size)
	} else {
		c.eventc = make(chan Event, minEventBufferSize)
	}
	if brf, ok := behavior.(BehaviorRecoveringFrequency); ok {
		number, duration := brf.RecoveringFrequency()
		if duration.Seconds()/float64(number) < 0.1 {
			number = minRecoveringNumber
			duration = minRecoveringDuration
		}
		c.recoveringNumber = number
		c.recoveringDuration = duration
	} else {
		c.recoveringNumber = minRecoveringNumber
		c.recoveringDuration = minRecoveringDuration
	}
	if bet, ok := behavior.(BehaviorEmitTimeout); ok {
		timeout := bet.EmitTimeout()
		switch {
		case timeout < minEmitTimeout:
			timeout = minEmitTimeout
		case timeout > maxEmitTimeout:
			timeout = maxEmitTimeout
		}
		c.emitTimeout = int(timeout.Seconds() / 5)
	} else {
		c.emitTimeout = int(maxEmitTimeout.Seconds() / 5)
	}
	// Init behavior.
	if err := behavior.Init(c); err != nil {
		return nil, errors.Annotate(err, ErrCellInit, errorMessages, id)
	}
	// Start backend.
	c.loop = loop.GoRecoverable(c.backendLoop, c.checkRecovering, id)
	return c, nil
}
Example #14
// Test logging with the Go logger.
func TestGoLogger(t *testing.T) {
	log.SetOutput(os.Stdout)

	logger.SetLevel(logger.LevelDebug)
	logger.SetLogger(logger.NewGoLogger())

	logger.Debugf("Debug.")
	logger.Infof("Info.")
	logger.Warningf("Warning.")
	logger.Errorf("Error.")
	logger.Criticalf("Critical.")
}
Example #15
// Save freezes and saves the data store to disk.
func (ds *DataStore) Save(dsFile string) error {
	if !ds.updated {
		return nil
	}
	logger.Infof("Data has changed, saving data store to disk")
	if dsFile == "" {
		err := fmt.Errorf("Yikes! Cannot save data store to disk because no file was specified.")
		return err
	}
	fp, err := ioutil.TempFile(path.Dir(dsFile), "ds-store")
	if err != nil {
		return err
	}
	zfp := zlib.NewWriter(fp)

	fstore := new(dsFileStore)
	dscache := new(bytes.Buffer)
	objList := new(bytes.Buffer)
	ds.m.RLock()
	defer ds.m.RUnlock()
	ds.updated = false

	err = ds.dsc.Save(dscache)
	if err != nil {
		fp.Close()
		return err
	}
	enc := gob.NewEncoder(objList)
	defer func() {
		if x := recover(); x != nil {
			err = fmt.Errorf("Something went wrong encoding the data store with Gob")
		}
	}()
	err = enc.Encode(ds.objList)
	if err != nil {
		fp.Close()
		return err
	}
	fstore.Cache = dscache.Bytes()
	fstore.ObjList = objList.Bytes()
	enc = gob.NewEncoder(zfp)
	err = enc.Encode(fstore)
	zfp.Close()
	if err != nil {
		fp.Close()
		return err
	}
	err = fp.Close()
	if err != nil {
		return err
	}
	return os.Rename(fp.Name(), dsFile)
}
Example #16
// Test log level filtering.
func TestLogLevelFiltering(t *testing.T) {
	assert := audit.NewTestingAssertion(t, true)

	ownLogger := &testLogger{}
	logger.SetLogger(ownLogger)
	logger.SetLevel(logger.LevelDebug)
	logger.Debugf("Debug.")
	logger.Infof("Info.")
	logger.Warningf("Warning.")
	logger.Errorf("Error.")
	logger.Criticalf("Critical.")
	assert.Length(ownLogger.logs, 5)

	ownLogger = &testLogger{}
	logger.SetLogger(ownLogger)
	logger.SetLevel(logger.LevelError)
	logger.Debugf("Debug.")
	logger.Infof("Info.")
	logger.Warningf("Warning.")
	logger.Errorf("Error.")
	logger.Criticalf("Critical.")
	assert.Length(ownLogger.logs, 2)
}
Example #17
func handleSignals() {
	c := make(chan os.Signal, 1)
	// SIGTERM is not exactly portable, but Go provides a synthetic version
	// of it on Windows, so listening for it here should not break Windows
	// builds.
	signal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGHUP)

	// if we receive a SIGINT or SIGTERM, do cleanup here.
	go func() {
		for sig := range c {
			if sig == os.Interrupt || sig == syscall.SIGTERM {
				logger.Infof("cleaning up...")
				if config.Config.FreezeData {
					if config.Config.DataStoreFile != "" {
						ds := datastore.New()
						if err := ds.Save(config.Config.DataStoreFile); err != nil {
							logger.Errorf(err.Error())
						}
					}
					if err := indexer.SaveIndex(); err != nil {
						logger.Errorf(err.Error())
					}
				}
				if config.UsingDB() {
					datastore.Dbh.Close()
				}
				if config.Config.UseSerf {
					serfin.Serfer.Close()
				}
				os.Exit(0)
			} else if sig == syscall.SIGHUP {
				logger.Infof("Reloading configuration...")
				config.ParseConfigOptions()
			}
		}
	}()
}
Example #18
// NewEnvironment creates a new environment.
func NewEnvironment(idParts ...interface{}) Environment {
	var id string
	if len(idParts) == 0 {
		id = identifier.NewUUID().String()
	} else {
		id = identifier.Identifier(idParts...)
	}
	env := &environment{
		id:    id,
		cells: newRegistry(),
	}
	runtime.SetFinalizer(env, (*environment).Stop)
	logger.Infof("cells environment %q started", env.ID())
	return env
}
Example #19
// Test logging with the syslogger.
func TestSysLogger(t *testing.T) {
	assert := audit.NewTestingAssertion(t, true)

	logger.SetLevel(logger.LevelDebug)

	sl, err := logger.NewSysLogger("GOAS")
	assert.Nil(err)
	logger.SetLogger(sl)

	logger.Debugf("Debug.")
	logger.Infof("Info.")
	logger.Warningf("Warning.")
	logger.Errorf("Error.")
	logger.Criticalf("Critical.")
}
Example #20
// New creates a new sandbox, given a map of null values with file checksums as
// keys.
func New(checksumHash map[string]interface{}) (*Sandbox, error) {
	/* For some reason the checksums come in a JSON hash that looks like
	 * this:
	 * { "checksums": {
	 * "385ea5490c86570c7de71070bce9384a":null,
	 * "f6f73175e979bd90af6184ec277f760c":null,
	 * "2e03dd7e5b2e6c8eab1cf41ac61396d5":null
	 * } } --- per the chef server api docs. Not sure why it comes in that
	 * way rather than as an array, since those nulls are apparently never
	 * anything but nulls. */

	/* First generate an id for this sandbox. Collisions are certainly
	 * possible, so we'll give it five tries to make a unique one before
	 * bailing. This may later turn out not to be the ideal sandbox creation
	 * method, but we'll see. */
	var sandboxID string
	var err error
	for i := 0; i < 5; i++ {
		sandboxID, err = generateSandboxID()
		if err != nil {
			/* Something went very wrong. */
			return nil, err
		}
		if s, _ := Get(sandboxID); s != nil {
			logger.Infof("Collision! Somehow %s already existed as a sandbox id on attempt %d. Trying again.", sandboxID, i)
			sandboxID = ""
		}
	}

	if sandboxID == "" {
		err = fmt.Errorf("Somehow every attempt to create a unique sandbox id failed. Bailing.")
		return nil, err
	}
	checksums := make([]string, len(checksumHash))
	j := 0
	for k := range checksumHash {
		checksums[j] = k
		j++
	}

	sbox := &Sandbox{
		ID:           sandboxID,
		CreationTime: time.Now(),
		Completed:    false,
		Checksums:    checksums,
	}
	return sbox, nil
}
Example #21
// handle handles a request.
func (m *mapping) handle(ctx *context) error {
	// Find handler.
	location := m.location(ctx.Domain(), ctx.Resource())
	hl, ok := m.handlers[location]
	if !ok {
		defaultLocation := m.location(ctx.DefaultDomain(), ctx.DefaultResource())
		hl, ok = m.handlers[defaultLocation]
		if !ok {
			return errors.New(ErrNoHandler, errorMessages, location, defaultLocation)
		}
		location = defaultLocation
	}
	// Dispatch by method.
	logger.Infof("handling %s", ctx)
	return hl.handle(ctx)
}
Example #22
// checkRecovering checks if the cell may recover after a panic. It will
// signal an error and let the cell stop working if there have been 12
// recoverings during the last minute, or if the behavior's Recover() signals
// that it cannot handle the error.
func (c *cell) checkRecovering(rs loop.Recoverings) (loop.Recoverings, error) {
	logger.Warningf("recovering cell %q after error: %v", c.id, rs.Last().Reason)
	// Check frequency.
	if rs.Frequency(c.recoveringNumber, c.recoveringDuration) {
		err := errors.New(ErrRecoveredTooOften, errorMessages, rs.Last().Reason)
		logger.Errorf("recovering frequency of cell %q too high", c.id)
		return nil, err
	}
	// Try to recover.
	if err := c.behavior.Recover(rs.Last().Reason); err != nil {
		err := errors.Annotate(err, ErrEventRecovering, errorMessages, rs.Last().Reason)
		logger.Errorf("recovering of cell %q failed: %v", c.id, err)
		return nil, err
	}
	logger.Infof("successfully recovered cell %q", c.id)
	return rs.Trim(c.recoveringNumber), nil
}
Example #23
// Post is specified on the PostResourceHandler interface.
func (h *FileUploadHandler) Post(ctx Context) (bool, error) {
	if err := ctx.Request().ParseMultipartForm(defaultMaxMemory); err != nil {
		return false, errors.Annotate(err, ErrUploadingFile, errorMessages)
	}
	for _, headers := range ctx.Request().MultipartForm.File {
		for _, header := range headers {
			logger.Infof("receiving file %q", header.Filename)
			// Open file and process it.
			if infile, err := header.Open(); err != nil {
				return false, errors.Annotate(err, ErrUploadingFile, errorMessages)
			} else if err := h.processor(ctx, header, infile); err != nil {
				return false, errors.Annotate(err, ErrUploadingFile, errorMessages)
			}
		}
	}
	return true, nil
}
Example #24
// stop terminates the cell.
func (c *cell) stop() error {
	// Terminate connections to emitters and subscribers.
	c.emitters.do(func(ec *cell) error {
		ec.subscribers.remove(c.id)
		return nil
	})
	c.subscribers.do(func(sc *cell) error {
		sc.emitters.remove(c.id)
		return nil
	})
	// Stop own backend.
	c.emitTimeoutTicker.Stop()
	err := c.loop.Stop()
	if err != nil {
		logger.Errorf("cell '%s' stopped with error: %v", c.id, err)
	} else {
		logger.Infof("cell '%s' stopped", c.id)
	}
	return err
}
Example #25
// GetNodesByStatus returns the nodes that currently have the given status.
func GetNodesByStatus(nodeNames []string, status string) ([]*Node, error) {
	if config.UsingDB() {
		return getNodesByStatusSQL(nodeNames, status)
	}
	var statNodes []*Node
	nodes := make([]*Node, 0, len(nodeNames))
	for _, name := range nodeNames {
		n, _ := Get(name)
		if n != nil {
			nodes = append(nodes, n)
		}
	}
	for _, n := range nodes {
		ns, _ := n.LatestStatus()
		if ns == nil {
			logger.Infof("No status found at all for node %s, skipping", n.Name)
			continue
		}
		if ns.Status == status {
			statNodes = append(statNodes, n)
		}
	}
	return statNodes, nil
}
Example #26
func (HTTPChecker *HTTPChecker) doRequest() error {
	client := &http.Client{
		Timeout: timeout,
	}
	if !HTTPChecker.Parameters.Strict_tls {
		client.Transport = &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		}
	}

	var err error
	var resp *http.Response
	var req *http.Request
	if HTTPChecker.Parameters.URL != "" {
		resp, err = client.Get(HTTPChecker.Parameters.URL)
	} else {
		ip := net.ParseIP(HTTPChecker.Parameters.IP)
		if ip == nil {
			return fmt.Errorf("Cannot parse IP: %s", HTTPChecker.Parameters.IP)
		}
		if !(HTTPChecker.Parameters.Scheme == "http" || HTTPChecker.Parameters.Scheme == "https") {
			return fmt.Errorf("Scheme must be http or https")
		}
		if HTTPChecker.Parameters.Port > 65535 || HTTPChecker.Parameters.Port < 1 {
			return fmt.Errorf("Port must be 1-65535")
		}
		req, err = http.NewRequest("GET", HTTPChecker.Parameters.Scheme+"://"+HTTPChecker.Parameters.IP+":"+
			strconv.Itoa(int(HTTPChecker.Parameters.Port))+HTTPChecker.Parameters.Path, nil)
		if err != nil {
			return err
		}
		req.Host = HTTPChecker.Parameters.ServerName
		resp, err = client.Do(req)
	}

	if resp != nil {
		defer resp.Body.Close()
	}

	if err != nil {
		return err
	}

	if resp.StatusCode != HTTPChecker.Expect.Status_code {
		reason := "Unexpected response code: " + strconv.Itoa(resp.StatusCode) + ". Expected " + strconv.Itoa(HTTPChecker.Expect.Status_code)
		return errors.New(reason)
	}

	if HTTPChecker.Expect.Contain_keyword != "" {
		bodybuf := new(bytes.Buffer)
		_, err := bodybuf.ReadFrom(resp.Body)
		body_s := bodybuf.String()
		if err != nil {
			logger.Warningf("HTTPChecker: " + err.Error())
			return errors.New("HTTPChecker: " + err.Error())
		}
		if !strings.Contains(body_s, HTTPChecker.Expect.Contain_keyword) {
			logger.Infof("Response does not contain keyword: " + HTTPChecker.Expect.Contain_keyword)
			return errors.New("Response does not contain keyword: " + HTTPChecker.Expect.Contain_keyword)
		}
	}

	return nil
}
Example #27
// stop terminates the cell.
func (c *cell) stop() error {
	defer logger.Infof("cell %q terminated", c.id)
	return c.loop.Stop()
}
Example #28
// Get is specified on the GetResourceHandler interface.
func (h *FileServeHandler) Get(ctx Context) (bool, error) {
	filename := h.dir + ctx.ResourceID()
	logger.Infof("serving file %q", filename)
	http.ServeFile(ctx.ResponseWriter(), ctx.Request(), filename)
	return true, nil
}
Example #29
// ProcessEvent logs the event at info level.
func (b *loggerBehavior) ProcessEvent(event cells.Event) error {
	logger.Infof("(%s) processing event %v", b.cell.ID(), event)
	return nil
}
Example #30
func importAll(fileName string) error {
	fp, err := os.Open(fileName)
	if err != nil {
		return err
	}
	exportedData := &ExportData{}
	dec := json.NewDecoder(fp)
	if err := dec.Decode(&exportedData); err != nil {
		return err
	}

	// What versions of the exported data are supported?
	// At the moment it's only 1.0.

	if exportedData.MajorVersion == 1 && (exportedData.MinorVersion == 0 || exportedData.MinorVersion == 1) {
		logger.Infof("Importing data, version %d.%d created on %s", exportedData.MajorVersion, exportedData.MinorVersion, exportedData.CreatedTime)

		// load clients
		logger.Infof("Loading clients")
		for _, v := range exportedData.Data["client"] {
			c, err := client.NewFromJSON(v.(map[string]interface{}))
			if err != nil {
				return err
			}
			pkerr := c.SetPublicKey(v.(map[string]interface{})["public_key"])
			if pkerr != nil {
				return pkerr
			}
			gerr := c.Save()
			if gerr != nil {
				return gerr
			}
		}

		// load users
		logger.Infof("Loading users")
		for _, v := range exportedData.Data["user"] {
			pwhash, _ := v.(map[string]interface{})["password"].(string)
			v.(map[string]interface{})["password"] = ""
			u, err := user.NewFromJSON(v.(map[string]interface{}))
			if err != nil {
				return err
			}
			u.SetPasswdHash(pwhash)
			pkerr := u.SetPublicKey(v.(map[string]interface{})["public_key"])
			if pkerr != nil {
				return pkerr
			}
			gerr := u.Save()
			if gerr != nil {
				return gerr
			}
		}

		// load filestore
		logger.Infof("Loading filestore")
		for _, v := range exportedData.Data["filestore"] {
			fileData, err := base64.StdEncoding.DecodeString(v.(map[string]interface{})["Data"].(string))
			if err != nil {
				return err
			}
			fdBuf := bytes.NewBuffer(fileData)
			fdRc := ioutil.NopCloser(fdBuf)
			fs, err := filestore.New(v.(map[string]interface{})["Chksum"].(string), fdRc, int64(fdBuf.Len()))
			if err != nil {
				return err
			}
			if err = fs.Save(); err != nil {
				return err
			}
		}

		// load cookbooks
		logger.Infof("Loading cookbooks")
		for _, v := range exportedData.Data["cookbook"] {
			cb, err := cookbook.New(v.(map[string]interface{})["Name"].(string))
			if err != nil {
				return err
			}
			gerr := cb.Save()
			if gerr != nil {
				return gerr
			}
			for ver, cbvData := range v.(map[string]interface{})["Versions"].(map[string]interface{}) {
				cbvData, cerr := checkAttrs(cbvData.(map[string]interface{}))
				if cerr != nil {
					return cerr
				}
				_, cbverr := cb.NewVersion(ver, cbvData)
				if cbverr != nil {
					return cbverr
				}
			}
		}

		// load data bags
		logger.Infof("Loading data bags")
		for _, v := range exportedData.Data["data_bag"] {
			dbag, err := databag.New(v.(map[string]interface{})["Name"].(string))
			if err != nil {
				return err
			}
			gerr := dbag.Save()
			if gerr != nil {
				return gerr
			}
			for _, dbagData := range v.(map[string]interface{})["DataBagItems"].(map[string]interface{}) {
				_, dbierr := dbag.NewDBItem(dbagData.(map[string]interface{})["raw_data"].(map[string]interface{}))
				if dbierr != nil {
					return dbierr
				}
			}
			gerr = dbag.Save()
			if gerr != nil {
				return gerr
			}
		}
		// load environments
		logger.Infof("Loading environments")
		for _, v := range exportedData.Data["environment"] {
			envData, cerr := checkAttrs(v.(map[string]interface{}))
			if cerr != nil {
				return cerr
			}
			if envData["name"].(string) != "_default" {
				e, err := environment.NewFromJSON(envData)
				if err != nil {
					return err
				}
				gerr := e.Save()
				if gerr != nil {
					return gerr
				}
			}
		}

		// load nodes
		logger.Infof("Loading nodes")
		for _, v := range exportedData.Data["node"] {
			nodeData, cerr := checkAttrs(v.(map[string]interface{}))
			if cerr != nil {
				return cerr
			}
			n, err := node.NewFromJSON(nodeData)
			if err != nil {
				return err
			}
			gerr := n.Save()
			if gerr != nil {
				return gerr
			}
		}

		// load roles
		logger.Infof("Loading roles")
		for _, v := range exportedData.Data["role"] {
			roleData, cerr := checkAttrs(v.(map[string]interface{}))
			if cerr != nil {
				return cerr
			}
			r, err := role.NewFromJSON(roleData)
			if err != nil {
				return err
			}
			gerr := r.Save()
			if gerr != nil {
				return gerr
			}
		}

		// load sandboxes
		logger.Infof("Loading sandboxes")
		for _, v := range exportedData.Data["sandbox"] {
			sbid, _ := v.(map[string]interface{})["Id"].(string)
			sbts, _ := v.(map[string]interface{})["CreationTime"].(string)
			sbcomplete, _ := v.(map[string]interface{})["Completed"].(bool)
			sbck, _ := v.(map[string]interface{})["Checksums"].([]interface{})
			sbTime, err := time.Parse(time.RFC3339, sbts)
			if err != nil {
				return err
			}
			sbChecksums := make([]string, len(sbck))
			for i, c := range sbck {
				sbChecksums[i] = c.(string)
			}
			sbox := &sandbox.Sandbox{ID: sbid, CreationTime: sbTime, Completed: sbcomplete, Checksums: sbChecksums}
			if err = sbox.Save(); err != nil {
				return err
			}
		}

		// load loginfos
		logger.Infof("Loading loginfo")
		for _, v := range exportedData.Data["loginfo"] {
			if err := loginfo.Import(v.(map[string]interface{})); err != nil {
				return err
			}
		}

		// load reports
		logger.Infof("Loading reports")
		for _, o := range exportedData.Data["report"] {
			// handle data exported by a buggy earlier report export
			var nodeName string
			v := o.(map[string]interface{})
			if n, ok := v["node_name"]; ok {
				nodeName = n.(string)
			} else if n, ok := v["nodeName"]; ok {
				nodeName = n.(string)
			}
			v["action"] = "start"
			if st, ok := v["start_time"].(string); ok {
				t, err := time.Parse(time.RFC3339, st)
				if err != nil {
					return err
				}
				v["start_time"] = t.Format(report.ReportTimeFormat)
			}
			if et, ok := v["end_time"].(string); ok {
				t, err := time.Parse(time.RFC3339, et)
				if err != nil {
					return err
				}
				v["end_time"] = t.Format(report.ReportTimeFormat)
			}
			r, err := report.NewFromJSON(nodeName, v)
			if err != nil {
				return err
			}
			gerr := r.Save()
			if gerr != nil {
				return gerr
			}
			v["action"] = "end"
			if err := r.UpdateFromJSON(v); err != nil {
				return err
			}
			gerr = r.Save()
			if gerr != nil {
				return gerr
			}
		}

		if exportedData.MinorVersion == 1 {
			// import shovey jobs, run, and streams, and node
			// statuses
			logger.Infof("Loading node statuses...")
			for _, v := range exportedData.Data["node_status"] {
				ns := v.(map[string]interface{})
				err := node.ImportStatus(ns)
				if err != nil {
					return err
				}
			}
			logger.Infof("Loading shoveys...")
			for _, v := range exportedData.Data["shovey"] {
				s := v.(map[string]interface{})
				err := shovey.ImportShovey(s)
				if err != nil {
					return err
				}
			}
			logger.Infof("Loading shovey runs...")
			for _, v := range exportedData.Data["shovey_run"] {
				s := v.(map[string]interface{})
				err := shovey.ImportShoveyRun(s)
				if err != nil {
					return err
				}

			}
			logger.Infof("Loading shovey run streams...")
			for _, v := range exportedData.Data["shovey_run_stream"] {
				s := v.(map[string]interface{})
				err := shovey.ImportShoveyRunStream(s)
				if err != nil {
					return err
				}
			}
		}

	} else {
		err := fmt.Errorf("goiardi export data version %d.%d is not supported by this version of goiardi", exportedData.MajorVersion, exportedData.MinorVersion)
		return err
	}
	return nil
}