// loadChildren lazily loads this directory's children (subdirectories and files) from the database.
func (dir *Directory) loadChildren() {
	util.P_out("Loading children")
	for key, vid := range dir.ChildVids {
		util.P_out(key)
		if dir.IsDir[key] {
			child, err := filesystem.GetDirectory(vid, dir.Source)
			if err != nil {
				util.P_err("Error loading directory from db: ", err)
			} else {
				child.parent = dir
				child.archive = dir.isArchive()
				dir.setChild(child)
			}
		} else {
			child, err := filesystem.GetFile(vid, dir.Source)
			if err != nil {
				util.P_err("Error loading file from db: ", err)
			} else {
				child.parent = dir
				child.archive = dir.isArchive()
				dir.setChild(child)
			}
		}
	}
	dir.childrenInMemory = true
}
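// A minimal sketch (not part of the original source) of how callers might
// guard the lazy load above: check childrenInMemory and only hit the database
// the first time a directory's children are actually needed. The name
// ensureChildrenLoaded is hypothetical.
func (dir *Directory) ensureChildrenLoaded() {
	if !dir.childrenInMemory {
		dir.loadChildren()
	}
}
// flushNode recursively writes dirty directories and files back to the
// database, recording each updated node's serialized JSON in the changes map.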
func flushNode(flushTime time.Time, node NamedNode, changes map[string][]byte) {
	if dir, ok := node.(*Directory); ok {
		util.P_out("Visiting node: "+dir.Name+" at version %d", dir.Vid)
		for _, node := range dir.children {
			if node != nil {
				if !node.isArchive() {
					flushNode(flushTime, node, changes)
				}
			}
		}
		// TODO: make this a dir method or another function here
		if dir.dirty {
			util.P_out("Putting " + dir.Name)
			dir.LastVid = dir.Vid
			//dir.Vid = filesystem.getNextVid()
			dir.Vid = dir.ComputeVid()
			dir.Vtime = flushTime
			dir.Source = filesystem.replInfo.Pid
			if dir.parent != nil {
				dir.parent.setChild(dir)
				if err := filesystem.database.PutDirectory(dir); err != nil {
					util.P_err("Error putting directory in db: ", err)
				}
				dir.parent.dirty = true
			} else {
				if err := filesystem.database.SetRoot(dir); err != nil {
					util.P_err("Error setting root in db: ", err)
				}
				changes[HEAD] = dir.Vid
			}
			changes[hex.EncodeToString(dir.Vid)], _ = json.Marshal(dir)
		}
	}
	if file, ok := node.(*File); ok {
		util.P_out("Visiting " + file.Name)
		// TODO: make this a file method or another function here
		if file.dirty {
			util.P_out("Putting " + file.Name)
			file.LastVid = file.Vid
			//file.Vid = filesystem.getNextVid()
			file.Vid = file.ComputeVid()
			file.Vtime = flushTime
			file.Source = filesystem.replInfo.Pid
			file.commitChunks()
			if err := filesystem.database.PutFile(file); err != nil {
				util.P_err("Error putting file in db: ", err)
			}
			if file.parent != nil {
				file.parent.setChild(file)
				file.parent.dirty = true
			}
			changes[hex.EncodeToString(file.Vid)], _ = json.Marshal(file)
		}
	}
}
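// Hypothetical driver (not in the source) showing how a flush pass might be
// kicked off: walk the tree from the root at a single timestamp and collect
// every dirty node's serialized form into one changes map for replication.
// The name flushTree is an assumption.
func flushTree(root *Directory) map[string][]byte {
	changes := make(map[string][]byte)
	flushNode(time.Now(), root, changes)
	return changes
}
// PutDirectory serializes a directory to JSON and writes it to LevelDB under
// its version id, clearing the dirty flag on success.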
func (fsdb *LeveldbFsDatabase) PutDirectory(dir *Directory) error {
	//key := []byte(strconv.FormatUint(dir.Vid, 10))
	//dir.updateChildVids()
	if val, jsonErr := json.Marshal(dir); jsonErr == nil {
		if dbErr := fsdb.database.Put(dir.Vid, val, nil); dbErr != nil {
			util.P_err("Error writing to the db: ", dbErr)
			return dbErr
		} else {
			dir.dirty = false
			return nil
		}
	} else {
		util.P_err("Error jsonifying direcotory: ", jsonErr)
		return jsonErr
	}
}
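// Sketch of the matching read path, assuming LeveldbFsDatabase wraps a
// goleveldb handle and stores JSON values keyed by Vid as PutDirectory does;
// the method name here is hypothetical and the project's real lookup may
// look different.
func (fsdb *LeveldbFsDatabase) GetDirectoryByVid(vid []byte) (*Directory, error) {
	val, dbErr := fsdb.database.Get(vid, nil)
	if dbErr != nil {
		util.P_err("Error reading from the db: ", dbErr)
		return nil, dbErr
	}
	dir := &Directory{}
	if jsonErr := json.Unmarshal(val, dir); jsonErr != nil {
		util.P_err("Error parsing directory json: ", jsonErr)
		return nil, jsonErr
	}
	return dir, nil
}
// commitChunks splits the file's in-memory data into chunks and writes each
// chunk to the database keyed by its SHA-1 hash, skipping chunks that are
// already stored (content-addressed deduplication).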
func (file *File) commitChunks() {
	chunker := util.DefaultChunker()
	chunks := chunker.Chunks(file.data)
	file.DataBlocks = make([][]byte, len(chunks))
	for i, chunk := range chunks {
		hasher := sha1.New()
		hasher.Write(chunk)
		dataHash := hasher.Sum(nil)
		// Academic assumption: SHA-1 collisions never happen, so a chunk already
		// stored under this hash is identical to ours; skipping the write saves
		// db traffic without risking corruption.
		util.P_out(hex.EncodeToString(dataHash))
		file.DataBlocks[i] = dataHash
		if !filesystem.DbContains(dataHash) {
			if dbErr := filesystem.PutChunk(dataHash, chunk); dbErr != nil {
				util.P_err("Failed to write chunk to db: ", dbErr)
			}
		}
	}
}
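// Minimal illustration (assumed helper, not in the source) of the
// content-addressed scheme above: a chunk's database key is just the SHA-1 of
// its bytes, so identical chunks across files share a single stored copy.
func chunkKey(chunk []byte) []byte {
	sum := sha1.Sum(chunk)
	return sum[:]
}
// loadChunks fetches each of the file's data blocks from the database by hash
// and reassembles them into one contiguous buffer.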
func (file *File) loadChunks() {
	tmp := make([][]byte, len(file.DataBlocks))
	size := 0
	for i, chunkSha := range file.DataBlocks {
		if data, err := filesystem.GetChunk(chunkSha, file.Source); err != nil {
			util.P_err("Unable to load block", err)
			tmp[i] = []byte{} // TODO: probably shouldn't fail this silently
		} else {
			tmp[i] = data
		}
		size += len(tmp[i])
	}
	file.data = make([]byte, size)
	offset := 0
	for _, chunk := range tmp {
		copy(file.data[offset:], chunk)
		offset += len(chunk)
	}
	file.loaded = true
}
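// Note (illustrative only): the reassembly loop in loadChunks is equivalent to
// file.data = bytes.Join(tmp, nil); the explicit copy loop simply avoids
// another package import for a single call.

// main parses flags, resolves this replica's configuration, opens (or wipes)
// its LevelDB store, and serves the filesystem over FUSE at the mount point.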
func main() {
	flag.Usage = Usage

	debugPtr := flag.Bool("debug", false, "print lots of stuff")
	newfsPtr := flag.Bool("newfs", false, "start with an empty file system")
	mtimePtr := flag.Bool("mtimeArchives", false, "use modify timestamp instead of version timestamp for archives")
	name := flag.String("name", "auto", "replica name")
	configFile := flag.String("config", "config.txt", "path to config file")
	flag.Parse()
	util.SetDebug(*debugPtr)
	myfs.UseMtime = *mtimePtr

	util.P_out("main\n")

	pid := myfs.GetOurPid(*configFile, *name)
	replicas := myfs.ReadReplicaInfo(*configFile)
	thisReplica := replicas[pid]

	if thisReplica == nil {
		util.P_err("No applicable replica")
		os.Exit(1)
	}

	if *newfsPtr {
		os.RemoveAll(thisReplica.DbPath)
	}

	if _, err := os.Stat(thisReplica.MntPoint); os.IsNotExist(err) {
		os.MkdirAll(thisReplica.MntPoint, 0755)
	}

	db, err := myfs.NewLeveldbFsDatabase(thisReplica.DbPath)
	//db := &myfs.DummyFsDb{}
	//err := error(nil)
	if err != nil {
		util.P_err("Problem loading the database: ", err)
		os.Exit(-1)
	}
	filesystem := myfs.NewFs(db, thisReplica, replicas)
	go filesystem.PeriodicFlush()

	//mountpoint := flag.Arg(0)
	mountpoint := thisReplica.MntPoint

	fuse.Unmount(mountpoint) // best-effort cleanup of any stale mount from a previous run
	c, err := fuse.Mount(mountpoint)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	err = fs.Serve(c, filesystem)
	if err != nil {
		log.Fatal(err)
	}

	// check if the mount process has an error to report
	<-c.Ready
	if err := c.MountError; err != nil {
		log.Fatal(err)
	}
}
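// The Usage function wired into flag.Usage above is not shown in this excerpt;
// a minimal stand-in (hypothetical, standard library only) could be:
func Usage() {
	fmt.Fprintf(os.Stderr, "usage: %s [flags]\n", os.Args[0])
	flag.PrintDefaults()
}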