// addAutoFile hands a file to the auto manager, which decides if and when the file should
// be sent to the bundle manager for upload.
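// A typical call site (see matcher.found later in this file) looks like:
//
//	am.addAutoFile(*filestate, newfilename, groups)
//
// where groups is a slice of [type, name] metadata pairs.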
func (self *autoManager) addAutoFile(fs fileState, newFileName string, groups [][2]string) {
	common.Dprintln("Entering addAutoFile")
	defer common.Dprintln("Leaving addAutoFile")

	self.mutex.Lock()
	defer self.mutex.Unlock()

	self.refreshWorkingBundlesNoLock(false)

	common.Dprintf("addAutoFile working with %+v", fs)

	//Skip this file if it has been seen before.
	if fs.passOff != notSeenBefore {
		common.Dprintf("file %s, %s, %s, has been seen before...skipping.", fs.userName, fs.ruleName, fs.fullPath)
		return
	}

	af := &autoFile{fs: &fs, newFileName: newFileName, groups: groups}

	//Get the corresponding WatchRule for this fileState.
	wr := getWatchRule(fs.userName, fs.ruleName)
	if wr == nil {
		log.Printf("getWatchRule returned no watchRule for user %s and rule %s, addAutoFile will be re-attempted later.",
			fs.userName, fs.ruleName)
		return
	}

	//Bundle the file and log any error.
	err := self.bundleAutoFileNoLock(af, fs.userName, fs.ruleName, wr)
	if err != nil {
		log.Printf("bundleAutoFileNoLock failed with error %v, addAutoFile will be re-attempted later.", err)
		return
	}
}
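// scheduleSubmit walks the working bundles and submits each auto-submit bundle whose
// activity has slowed past bundleModThreshold or that has reached maxNumFilesBundle files.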
func (self *autoManager) scheduleSubmit() {
	common.Dprintln("Entering scheduleSubmit")
	defer common.Dprintln("Leaving scheduleSubmit")

	self.mutex.Lock()
	defer self.mutex.Unlock()

	self.refreshWorkingBundlesNoLock(false)

	for _, v := range self.workingBundles {
		if v.autoSubmitBid == nil {
			continue
		}

		common.Dprintf("scheduleSubmit workingBundle %+v", *v)

		//Get the BundleMD for autoSubmitBid
		b, err := autoBm.BundleGet(v.user, *v.autoSubmitBid)
		if err != nil {
			log.Printf("BundleGet(%s, %d) failed %v", v.user, *v.autoSubmitBid, err)
			continue
		}

		//Get the amount of time since the last modification to the BundleMD
		modTime := time.Now().UnixNano() - v.autoSubmitLastTouched

		//Get the number of files in this BundleMD
		count, err := b.BundleFileCountGet()
		if err != nil {
			log.Printf("Failed to get file count for bundle id %d, error %v", v.autoSubmitBid, err)
			continue
		}

		common.Dprintf("modTime == %d, bundleModThreshold == %d", modTime, bundleModThreshold)

		//Submit only the autoSubmitBid if its activity has slowed or it has met the maximum number
		//of files threshold.
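		//Illustrative example (the values below are assumptions, not the configured
		//constants): with bundleModThreshold of 5 minutes and maxNumFilesBundle of 100,
		//a bundle untouched for 6 minutes, or one holding 100 files, is submitted here.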
		if time.Duration(modTime) >= bundleModThreshold ||
			count >= maxNumFilesBundle {
			//Submit...
			err := b.Submit()
			if err != nil {
				log.Printf("Failed to submit bundle, error %v", err)
				continue
			}

			common.Dprintf("Bundle %d submitted!!!", v.autoSubmitBid)

			//Set the userBundles auto bundle to nil so a new one will get created on the next file add.
			self.workingBundlesNeedsRefresh = true
			v.autoSubmitBid = nil
			err = self.stateManager.db.setUserBundles(v)
			if err != nil {
				log.Printf("Failed to set userBundles %+v for, error %v", v, err)
				continue
			}
		}
	}
}
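// refreshWorkingBundlesNoLock reloads workingBundles from the state database when forced
// or when a previous change marked them stale. Callers must hold self.mutex.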
func (self *autoManager) refreshWorkingBundlesNoLock(force bool) {
	if force || self.workingBundlesNeedsRefresh {
		common.Dprintf("refreshing workingBundles")
		self.workingBundles = self.stateManager.db.getWorkingBundles()
		self.workingBundlesNeedsRefresh = false
	}
}
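// runAuto starts a background goroutine that wakes every autoTimerFreq to submit idle
// bundles and monitor already-submitted ones.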
func (self *autoManager) runAuto() {
	go func() {
		for {
			common.Dprintf("runAuto sleeping for %v", autoTimerFreq)
			t := time.NewTimer(autoTimerFreq)
			<-t.C
			self.scheduleSubmit()
			self.monitorSubmitted()
		}
	}()
}
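// processPaths snapshots the watched paths under pathMutex and walks each one, reporting
// every non-directory entry it finds through callFound.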
func (self *watcher) processPaths() {
	self.pathMutex.Lock()
	tmpPaths := self.paths
	self.pathMutex.Unlock()
	for _, path := range tmpPaths {
		pathFi, err := os.Stat(path)
		if err != nil {
			common.Dprintf("Could not stat %s, %v", path, err)
			continue
		}

		if !pathFi.IsDir() {
			self.callFound(path, path)
		} else {
			w := func(newPath string, info os.FileInfo, err error) error {
				//If Walk could not stat this entry, info may be nil; log and skip
				//rather than dereferencing it.
				if err != nil {
					common.Dprintf("%s skipped, error %v\n", newPath, err)
					if info != nil && info.IsDir() {
						return filepath.SkipDir
					}
					return nil
				}
				if info.IsDir() {
					//FIXME work around broken Go!
					//See issue http://code.google.com/p/go/issues/detail?id=3486
					f, err := os.Open(newPath)
					if err != nil {
						return filepath.SkipDir
					}
					f.Close()
					return nil
				}
				self.callFound(path, newPath)
				return nil
			}
			err = filepath.Walk(path, w)
			if err != nil {
				common.Dprintf("Walk failed %v with %v\n", path, err)
			}
		}
	}
}
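// setFileState inserts or updates the file_states row for fs, using getFileState on
// (userName, ruleName, fullPath) to decide between INSERT and UPDATE.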
func (self *stateDatabase) setFileState(fs *fileState) error {
	if fs == nil {
		return errors.New("fs is nil")
	}

	tmp := self.getFileState(fs.userName, fs.ruleName, fs.fullPath)

	self.mutex.Lock()
	defer self.mutex.Unlock()

	var sql string

	if tmp == nil { //fs is a new fileState in the database.
		//The insert statement relies on AUTOINCREMENT
		if fs.bundleId == nil || fs.bundleFileId == nil {
			sql = fmt.Sprintf(
				"INSERT INTO file_states"+
					"(id, last_modified, digest, full_path, last_seen, bundle_id, bundle_file_id, pass_off, user_name, rule_name, auto_added_to_bundle_time) "+
					"VALUES "+
					"(NULL, %d, \"%s\", \"%s\", %d, NULL, NULL, %d, \"%s\", \"%s\", %d);",
				fs.lastModified, fs.digest, fs.fullPath, fs.lastSeen, fs.passOff, fs.userName, fs.ruleName, fs.autoAddedToBundleTime)
		} else {
			sql = fmt.Sprintf(
				"INSERT INTO file_states"+
					"(id, last_modified, digest, full_path, last_seen, bundle_id, bundle_file_id, pass_off, user_name, rule_name, auto_added_to_bundle_time) "+
					"VALUES "+
					"(NULL, %d, \"%s\", \"%s\", %d, %d, %d, %d, \"%s\", \"%s\", %d);",
				fs.lastModified, fs.digest, fs.fullPath, fs.lastSeen, *fs.bundleId, *fs.bundleFileId, fs.passOff, fs.userName, fs.ruleName, fs.autoAddedToBundleTime)
		}
	} else { //fs is in the database, updating.
		if fs.bundleId == nil || fs.bundleFileId == nil {
			sql = fmt.Sprintf("UPDATE file_states "+
				"SET last_modified=%d, digest=\"%s\", full_path=\"%s\", last_seen=%d, pass_off=%d, user_name=\"%s\", rule_name=\"%s\", auto_added_to_bundle_time=%d "+
				"WHERE id=%d;",
				fs.lastModified, fs.digest, fs.fullPath, fs.lastSeen, fs.passOff, fs.userName, fs.ruleName, fs.autoAddedToBundleTime, fs.id)
		} else {
			sql = fmt.Sprintf("UPDATE file_states "+
				"SET last_modified=%d, digest=\"%s\", full_path=\"%s\", last_seen=%d, bundle_id=%d, bundle_file_id=%d, pass_off=%d, user_name=\"%s\", rule_name=\"%s\", auto_added_to_bundle_time=%d "+
				"WHERE id=%d;",
				fs.lastModified, fs.digest, fs.fullPath, fs.lastSeen, *fs.bundleId, *fs.bundleFileId, fs.passOff, fs.userName, fs.ruleName, fs.autoAddedToBundleTime, fs.id)
		}
	}

	common.Dprintf("%s", sql)

	return self.conn.Exec(sql)
}
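// setUserBundles inserts or replaces the user_bundles row for ub and resets
// autoSubmitLastTouched: to the current time when an auto-submit bundle is set,
// to zero otherwise.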
func (self *stateDatabase) setUserBundles(ub *userBundles) error {
	common.Dprintln("Entering setUserBundles")
	defer common.Dprintln("Leaving setUserBundles")

	if ub == nil {
		return errors.New("ub must not be nil")
	}

	self.mutex.Lock()
	defer self.mutex.Unlock()

	var autoSql string
	if ub.autoSubmitBid == nil {
		autoSql = "NULL"
		ub.autoSubmitLastTouched = 0
	} else {
		autoSql = fmt.Sprintf("%d", *ub.autoSubmitBid)
		ub.autoSubmitLastTouched = time.Now().UnixNano()
	}

	var noAutoSql string
	if ub.noAutoSubmitBid == nil {
		noAutoSql = "NULL"
	} else {
		noAutoSql = fmt.Sprintf("%d", *ub.noAutoSubmitBid)
	}

	sql := fmt.Sprintf("INSERT OR REPLACE INTO user_bundles "+
		"(user, auto_submit_bid, no_auto_submit_bid, auto_submit_last_touched) "+
		"VALUES "+
		"(\"%s\", %s, %s, %d);",
		ub.user, autoSql, noAutoSql, ub.autoSubmitLastTouched)

	common.Dprintf("%s", sql)

	return self.conn.Exec(sql)
}
//TODO - break this function up into more digestible bits, it's deeply nested and painful to read.
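// found is the watcher callback. path is the configured watch root and fullpath is the
// file discovered beneath it; the file is checked against exclude patterns and change
// detection, metadata and rename rules are applied, and the result is handed to the
// auto manager.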
func (self *matcher) found(path string, fullpath string) {
	//TODO - remove...
	//common.Dprintf("Found path: %s fullpath: %s\n", path, fullpath)
	fileinfo, err := os.Lstat(fullpath)
	if err != nil {
		log.Printf("Failed to stat %s\n", fullpath)
		return
	}
	reversePaths := *self.reversePaths
	rlea := reversePaths[path]
	if len(rlea) == 0 {
		common.Dprintf("No reverse lookup entries found for %s", path)
		return
	}

	//Process each ReverseLookupEntry set for path
	for _, rle := range rlea {
		if rle.Rule != nil && (rle.Prefix == "" || strings.HasPrefix(fullpath, rle.Prefix+string(os.PathSeparator)) || fullpath == rle.Prefix) {
			//Determine if the fullpath matches the exclude pattern for the corresponding WatchRule.
			excluded := false
			for i := 0; i < len(rle.Rule.ExcludePatterns); i++ {
				//FIXME This can perform better by precompiling patterns and putting them in rle. Consider doing this later.
				matched, err := regexp.Match(rle.Rule.ExcludePatterns[i], []byte(fullpath))
				if err != nil || matched == false {
					continue
				}
				excluded = true
			}
			if excluded {
				common.Dprintf("Excluding %s %s %s", rle.User, rle.Rule.Name, fullpath)
				continue
			}

			//Get the fileState for this path
			filestate, err := fsm.getFileState(rle.User, rle.Rule.Name, fullpath)
			if err != nil {
				log.Printf("Failed to get a fileState for %s, %s, %s, error %v",
					rle.User, rle.Rule.Name, fullpath, err)
				continue
			}

			//The file has changed, it should be processed (a.k.a matched)
			if checkFileChanged(fullpath, fileinfo, filestate) {
				ok, err := common.UserAccess(rle.User, fullpath)
				if err != nil {
					log.Printf("Failed to check user access for %s to %s, error %v", rle.User, fullpath, err)
				}
				if ok {
					groups := make([][2]string, 0)
					for _, sg := range rle.Rule.StaticMetadata {
						groups = append(groups, [2]string{sg.Type, sg.Name})
					}

					//TODO - NGT 4/25/12 This is difficult to read.  It looks like it is both determining if the file matches
					//and generating groups.  A great candidate for refactoring into one or two functions...
					//Aha, after reading more closely it appears the groups list is being "appended" (or potentially replaced) with additional
					//type/name pairs based on groups that were passed through regular expressions in the WatchRules.
					//If so, this needs to be clearly explained in comments.  Also, we need to document in the UI somewhere the
					//regular expressions format allowed by the regex engine (even if it's just a link) or a couple canned examples...
					//This code is nothing short of magic :)
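					//Illustrative example (the pattern and names below are made up, not from a
					//real WatchRule): a MetadataPattern with Pattern
					//  `.*/(?P<proj>[^/]+)/data/.*`
					//and a Group entry {Pattern: "proj", Value: "project"} would, for the path
					//"/home/u/alpha/data/run1.dat", append the pair {"project", "alpha"} to
					//groups, because the named subexpression "proj" captured "alpha".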
					for _, rule := range rle.Rule.MetadataPattern {
						common.Dprintf("Got pattern %v\n", rule.Pattern)
						//FIXME This can perform better by precompiling patterns and putting them in rle. Consider doing this later.
						re, err := regexp.Compile(rule.Pattern)
						if err != nil {
							break
						}
						groupMap := make(map[string]string)
						for _, x := range rule.Group {
							groupMap[x.Pattern] = x.Value
						}
						subexpnames := re.SubexpNames()
						patternMapList := []patternMapEntry{}
						for id, x := range subexpnames {
							if x != "" && groupMap[x] != "" {
								common.Dprintf("Subexp: %v %v\n", x, groupMap[x])
								patternMapList = append(patternMapList, patternMapEntry{id, groupMap[x]})
							}
						}
						match := re.FindStringSubmatch(fullpath)
						if match != nil {
							common.Dprintf("%s matched.\n", fullpath)
							for _, x := range patternMapList {
								common.Dprintf("Map: %v %v %v\n", x.value, match[x.id], fullpath)
								groups = append(groups, [2]string{x.value, match[x.id]})
							}
						}
					}

					//TODO - another block that is a good candidate for refactor into a function
					//Generate the new file name based on the rename patterns
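					//Illustrative example (pattern and value are made up): a RenamePattern with
					//Pattern `.*/run(?P<n>[0-9]+)\.dat` and Value "results/run${n}.dat" would
					//rewrite "/data/run7.dat" to "results/run7.dat".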
					newfilename := fullpath
					for _, rule := range rle.Rule.RenamePatterns {
						re, err := regexp.Compile(rule.Pattern)
						if err != nil {
							break
						}
						match := re.FindStringSubmatchIndex(newfilename)
						if match != nil {
							n := re.ExpandString(nil, rule.Value, newfilename, match)
							if n != nil {
								common.Dprintf("Renaming %s %s\n", newfilename, string(n))
								newfilename = string(n)
							}
						}
					}
					var tmpprefix string
					if rle.Prefix == "" {
						tmpprefix = path
					} else {
						tmpprefix = rle.Prefix
					}
					tmpprefix += string(os.PathSeparator)
					if strings.HasPrefix(newfilename, tmpprefix) {
						newfilename = newfilename[len(tmpprefix):]
					}
					newfilename = strings.Replace(newfilename, string(os.PathSeparator), "/", -1)

					am.addAutoFile(*filestate, newfilename, groups)
				}
			} else {
				common.Dprintf("checkFileChanged(%s, %+v, %+v) returned false skipping...", fullpath, fileinfo, filestate)
			}
		} else {
			common.Dprintf("reverse lookup entry %+v was skipped for processing", rle)
		}
	}
}
//TODO - break this function up into more digestible bits, it's deeply nested and painful to read.
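// updatePaths rebuilds the reverse-lookup table from every user's rules. Watched paths
// that sit beneath another watched path are collapsed into the parent walk root, and the
// collapsed entries keep their original path in Prefix so found can still scope matches
// to it. For example (paths are illustrative), watching both "/data" and "/data/sub"
// yields the single walk root "/data", with the "/data/sub" entries carrying
// Prefix "/data/sub".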
func (self *matcher) updatePaths(allConfigs *map[string]*UserConfig) {
	reversePaths := map[string][]ReverseLookupEntry{}
	paths := map[string]bool{}
	for user, uc := range *allConfigs {
		for _, rule := range uc.Rules {
			for _, path := range rule.Paths {
				paths[path] = true
				if reversePaths[path] == nil {
					reversePaths[path] = make([]ReverseLookupEntry, 0)
				}
				reversePaths[path] = append(reversePaths[path], ReverseLookupEntry{User: user, Rule: rule})
			}
		}
	}
	var newpaths filePathSlice
	for key := range paths {
		newpaths = append(newpaths, key)
	}
	newpaths.Sort()
	var valid []bool = make([]bool, len(newpaths))
	for i := 0; i < len(newpaths); i++ {
		valid[i] = true
	}
	for i := 0; i < len(newpaths); i++ {
		path := newpaths[i]
		common.Dprintf("newpath - %d %s\n", i, path)
		for j := i + 1; j < len(newpaths); j++ {
			if valid[j] && strings.HasPrefix(newpaths[j], path+string(os.PathSeparator)) {
				common.Dprintf("Collapsable - %s\n", newpaths[j])
				rlea := reversePaths[newpaths[j]]
				if rlea != nil {
					for _, rle := range rlea {
						rle.Prefix = newpaths[j]
						reversePaths[path] = append(reversePaths[path], rle)
					}
				}
				valid[j] = false
			}
			if valid[j] {
				common.Dprintf("cmpnewpath - %s %s %t\n", path, newpaths[j], strings.HasPrefix(newpaths[j], path+string(os.PathSeparator)))
			}
		}
	}
	var tmpnewpaths filePathSlice
	tmpReversePaths := map[string][]ReverseLookupEntry{}
	for i := 0; i < len(newpaths); i++ {
		if valid[i] {
			tmpnewpaths = append(tmpnewpaths, newpaths[i])
			tmpReversePaths[newpaths[i]] = reversePaths[newpaths[i]]
		}
	}
	newpaths = tmpnewpaths
	reversePaths = tmpReversePaths
	for path, rlea := range reversePaths {
		if rlea != nil {
			for _, rle := range rlea {
				if rle.Rule != nil {
					common.Dprintf("rle - %s %s %s %s\n", path, rle.User, rle.Rule.Name, rle.Prefix)
				}
			}
		}
	}
	if w != nil {
		w.updatePaths([]string(newpaths))
	}
	common.Dprintf("Unique paths: %v\n", newpaths)
	self.reversePaths = &reversePaths
}