// Example 1
// ProcessLocalEvent_Upper runs only the first ("upper") half of local event
// processing: event.Path is rewritten relative to the sync directory, the
// path is locked, and the upper-half work is delegated to
// processLocalEvent_Upper. The NOSAVE flag is always set on the way out so
// the event is not persisted until the lower half runs later.
func ProcessLocalEvent_Upper(globals *AsinkGlobals, event *asink.Event) (err error) {
	StatStartLocalUpdate()
	defer StatStopLocalUpdate()

	//remember the absolute path, then store the path relative to the sync
	//directory before it is saved or sent anywhere
	absolutePath := event.Path
	if event.Path, err = filepath.Rel(globals.syncDir, absolutePath); err != nil {
		return ProcessingError{TEMPORARY, err}
	}

	latestLocal := LockPath(event.Path, true)

	defer func() {
		if err != nil {
			event.LocalStatus |= asink.DISCARDED
		}
		event.LocalStatus |= asink.NOSAVE //make sure event doesn't get saved back until lower half
		UnlockPath(event)
	}()

	return processLocalEvent_Upper(globals, event, latestLocal, absolutePath)
}
// Example 2
// DatabaseAddEvent inserts e into the events table inside a transaction and,
// on success, records the assigned row id on the event and marks it as
// present in the database. The receiver's lock serializes all access to the
// underlying database handle.
func (adb *AsinkDB) DatabaseAddEvent(e *asink.Event) (err error) {
	adb.lock.Lock()
	tx, err := adb.db.Begin()
	if err != nil {
		//Begin failed before the deferred unlock below was registered, so
		//release the lock here or every future caller would deadlock
		adb.lock.Unlock()
		return err
	}
	//make sure the transaction gets rolled back on error, and the database gets unlocked
	defer func() {
		if err != nil {
			tx.Rollback()
		}
		adb.lock.Unlock()
	}()

	result, err := tx.Exec("INSERT INTO events (id, type, localstatus, path, hash, predecessor, timestamp, permissions) VALUES (?,?,?,?,?,?,?,?);", e.Id, e.Type, e.LocalStatus, e.Path, e.Hash, e.Predecessor, e.Timestamp, e.Permissions)
	if err != nil {
		return err
	}
	id, err := result.LastInsertId()
	if err != nil {
		return err
	}
	err = tx.Commit()
	if err != nil {
		return err
	}

	//only mutate the event once the transaction has committed
	e.LocalId = id
	e.InDB = true
	return nil
}
// Example 3
// ProcessLocalEvent fully processes a locally-detected event: the upper half
// stages the file contents into the local cache, and — unless the upper half
// discarded the event — the lower half uploads the contents and notifies the
// server. Errors mark the event DISCARDED before the path lock is released.
func ProcessLocalEvent(globals *AsinkGlobals, event *asink.Event) (err error) {
	StatStartLocalUpdate()
	defer StatStopLocalUpdate()

	//remember the absolute path, then make event.Path relative to the sync
	//directory before it is saved or sent anywhere
	absolutePath := event.Path
	if event.Path, err = filepath.Rel(globals.syncDir, absolutePath); err != nil {
		return ProcessingError{TEMPORARY, err}
	}

	latestLocal := LockPath(event.Path, true)
	defer func() {
		if err != nil {
			event.LocalStatus |= asink.DISCARDED
		}
		UnlockPath(event)
	}()

	if err = processLocalEvent_Upper(globals, event, latestLocal, absolutePath); err != nil {
		return err
	}
	//the lower half is skipped when the upper half discarded the event
	if event.LocalStatus&asink.DISCARDED != 0 {
		return nil
	}
	return processLocalEvent_Lower(globals, event, latestLocal)
}
// Example 4
// ProcessLocalEvent_Lower finishes processing of a local event whose upper
// half already ran: it re-locks the path, performs the lower-half work
// (upload and server notification), and clears the NOSAVE flag the upper
// half set so the event can finally be persisted.
func ProcessLocalEvent_Lower(globals *AsinkGlobals, event *asink.Event) (err error) {
	StatStartLocalUpdate()
	defer StatStopLocalUpdate()

	latestLocal := LockPath(event.Path, true)
	defer func() {
		if err != nil {
			event.LocalStatus |= asink.DISCARDED
		}
		event.LocalStatus &= ^asink.NOSAVE //clear NOSAVE set in upper half
		UnlockPath(event)
	}()

	return processLocalEvent_Lower(globals, event, latestLocal)
}
// Example 5
//handleConflict preserves the losing side of an update conflict by copying
//the loser's file contents (read from copyFrom) to a timestamped
//"_conflicted_copy_" file next to the original path. Delete events need no
//preservation, so they are a no-op.
func handleConflict(globals *AsinkGlobals, loser *asink.Event, copyFrom string) error {
	if !loser.IsUpdate() {
		return nil
	}

	//come up with new file name
	conflictedPath := path.Join(globals.syncDir, loser.Path) + "_conflicted_copy_" + time.Now().Format("2006-01-02_15:04:05.000000")

	//copy file to new filename
	src, err := os.Open(copyFrom)
	if err != nil {
		return err
	}
	defer src.Close()

	sink, err := os.Create(conflictedPath)
	if err != nil {
		return err
	}

	_, err = io.Copy(sink, src)
	//Close can report write errors deferred by the OS; don't lose them
	//(the original deferred the Close and dropped its error)
	if closeErr := sink.Close(); err == nil {
		err = closeErr
	}
	return err
}
// Example 6
func (sc *StartupContext) Run() error {
	//process top halves of local updates so the files are saved off at least locally
	localEvents := []*asink.Event{}
	initialWalkIncomplete := true
	for initialWalkIncomplete {
		select {
		case event := <-sc.localUpdatesChan:
			//process top half of local event
			err := ProcessLocalEvent_Upper(sc.globals, event)
			if err != nil {
				if e, ok := err.(ProcessingError); !ok || e.ErrorType != TEMPORARY {
					return err
				} else {
					//if error was temporary, retry once
					event.LocalStatus = 0
					err := ProcessLocalEvent_Upper(sc.globals, event)
					if err != nil {
						return err
					}
				}
			}
			if event.LocalStatus&asink.DISCARDED == 0 {
				localEvents = append(localEvents, event)
			}
		case <-sc.initialWalkComplete:
			initialWalkIncomplete = false
		case <-sc.exitChan:
			return ProcessingError{EXITED, nil}
		}
	}

	//find any files that have been deleted since the last time we ran
	deletedFiles := []*asink.Event{}
	resultChan := make(chan *asink.Event)
	errorChan := make(chan error)
	go sc.globals.db.DatabaseGetAllFiles(resultChan, errorChan)
	deletionWalkIncomplete := true
	for deletionWalkIncomplete {
		select {
		case oldEvent := <-resultChan:
			if oldEvent == nil {
				deletionWalkIncomplete = false
				break
			}

			//if the file still exists, disregard this event
			absolutePath := path.Join(sc.globals.syncDir, oldEvent.Path)
			if _, err := os.Stat(absolutePath); err == nil {
				break
			}

			event := new(asink.Event)
			event.Path = absolutePath
			event.Type = asink.DELETE
			event.Timestamp = time.Now().UnixNano()
			deletedFiles = append(deletedFiles, event)
		case err := <-errorChan:
			return ProcessingError{PERMANENT, err}
		}
	}

	for _, event := range deletedFiles {
		//make sure we don't need to exit
		select {
		case <-sc.exitChan:
			return ProcessingError{EXITED, nil}
		default:
		}
		//process top half of local event
		err := ProcessLocalEvent_Upper(sc.globals, event)
		if err != nil {
			if e, ok := err.(ProcessingError); !ok || e.ErrorType != TEMPORARY {
				return err
			} else {
				//if error was temporary, retry once
				event.LocalStatus = 0
				err := ProcessLocalEvent_Upper(sc.globals, event)
				if err != nil {
					return err
				}
			}
		}
		if event.LocalStatus&asink.DISCARDED == 0 {
			localEvents = append(localEvents, event)
		}
	}

	//then process remote events (possibly taking a break whenever a local one comes in to process it)
	timeout := time.NewTimer(1 * time.Second)
	timedOut := false
	for !timedOut {
		select {
		case event := <-sc.localUpdatesChan:
			//process top half of local event
			err := ProcessLocalEvent_Upper(sc.globals, event)
			if err != nil {
				if e, ok := err.(ProcessingError); !ok || e.ErrorType != TEMPORARY {
					return err
				} else {
					//if error was temporary, retry once
					event.LocalStatus = 0
					err := ProcessLocalEvent_Upper(sc.globals, event)
					if err != nil {
						return err
					}
				}
			}
			if event.LocalStatus&asink.DISCARDED == 0 {
				localEvents = append(localEvents, event)
			}
			timeout.Reset(1 * time.Second)
		case event := <-sc.remoteUpdatesChan:
			err := ProcessRemoteEvent(sc.globals, event)
			if err != nil {
				if e, ok := err.(ProcessingError); !ok || e.ErrorType != TEMPORARY {
					return err
				} else {
					//if error was temporary, retry once
					event.LocalStatus = 0
					err := ProcessRemoteEvent(sc.globals, event)
					if err != nil {
						return err
					}
				}
			}
			timeout.Reset(1 * time.Second)
		case <-timeout.C:
			timedOut = true
		case <-sc.exitChan:
			return ProcessingError{EXITED, nil}
		}
	}

	for _, event := range localEvents {
		//make sure we don't need to exit
		select {
		case <-sc.exitChan:
			return ProcessingError{EXITED, nil}
		default:
		}
		err := ProcessLocalEvent_Lower(sc.globals, event)
		if err != nil {
			if e, ok := err.(ProcessingError); !ok || e.ErrorType != TEMPORARY {
				return err
			} else {
				//if error was temporary, retry once
				event.LocalStatus = 0
				err := ProcessLocalEvent_Lower(sc.globals, event)
				if err != nil {
					return err
				}
			}
		}
	}

	//finally, process the bottom halves of local updates
	return nil
}
// Example 7
//ProcessRemoteEvent applies an event received from the server to the local
//machine: for updates it downloads the contents into the local cache
//(decrypting when configured) and installs them at the event's path; for
//deletes it removes the file and any now-empty parent directories. Conflicts
//with newer local state are preserved via handleConflict, and any error
//marks the event DISCARDED before the path lock is released.
func ProcessRemoteEvent(globals *AsinkGlobals, event *asink.Event) error {
	var err error

	StatStartRemoteUpdate()
	defer StatStopRemoteUpdate()
	latestLocal := LockPath(event.Path, false)
	defer func() {
		if err != nil {
			event.LocalStatus |= asink.DISCARDED
		}
		UnlockPath(event)
	}()

	//get the absolute path because we may need it later
	absolutePath := path.Join(globals.syncDir, event.Path)

	//if we already have this event, or if it is older than our most recent event, bail out
	if latestLocal != nil {
		if event.Timestamp < latestLocal.Timestamp {
			event.LocalStatus |= asink.DISCARDED
			return nil
		}
		if event.IsSameEvent(latestLocal) {
			return nil
		}

		//local state diverged from this event's lineage: save the local
		//version off as a conflicted copy before overwriting it
		if latestLocal.Hash != event.Predecessor && latestLocal.Hash != event.Hash {
			err = handleConflict(globals, latestLocal, path.Join(globals.cacheDir, latestLocal.Hash))
			if err != nil {
				return ProcessingError{PERMANENT, err}
			}
		}
	}

	//Download event
	if event.IsUpdate() {
		if latestLocal == nil || event.Hash != latestLocal.Hash {
			//assign to the function-scoped err (no ':=') so the deferred
			//cleanup above sees failures and marks the event DISCARDED;
			//the original shadowed err here and skipped that marking
			var outfile *os.File
			outfile, err = ioutil.TempFile(globals.tmpDir, "asink")
			if err != nil {
				return ProcessingError{CONFIG, err}
			}
			tmpfilename := outfile.Name()

			StatStartDownload()
			var downloadReadCloser io.ReadCloser
			downloadReadCloser, err = globals.storage.Get(event.Hash)
			if err != nil {
				StatStopDownload()
				return ProcessingError{STORAGE, err}
			}
			defer downloadReadCloser.Close()

			if globals.encrypted {
				//keep using the outer err: the original ':=' here shadowed
				//it, so decryption/copy failures were silently dropped by
				//the check below
				var decrypter io.Reader
				decrypter, err = NewDecrypter(downloadReadCloser, globals.key)
				if err != nil {
					StatStopDownload()
					return ProcessingError{STORAGE, err}
				}
				_, err = io.Copy(outfile, decrypter)
			} else {
				_, err = io.Copy(outfile, downloadReadCloser)
			}

			outfile.Close()
			StatStopDownload()
			if err != nil {
				return ProcessingError{STORAGE, err}
			}

			//rename to local hashed filename
			hashedFilename := path.Join(globals.cacheDir, event.Hash)
			err = os.Rename(tmpfilename, hashedFilename)
			if err != nil {
				//report the cleanup failure if removing the temp file also
				//fails, otherwise report the original rename error (the
				//original clobbered it with the nil result of os.Remove)
				if rmErr := os.Remove(tmpfilename); rmErr != nil {
					return ProcessingError{PERMANENT, rmErr}
				}
				return ProcessingError{PERMANENT, err}
			}

			//copy hashed file to another tmp, then rename it to the actual file.
			tmpfilename, err = util.CopyToTmp(hashedFilename, globals.tmpDir)
			if err != nil {
				return ProcessingError{PERMANENT, err}
			}

			//make sure containing directory exists
			err = util.EnsureDirExists(path.Dir(absolutePath))
			if err != nil {
				return ProcessingError{PERMANENT, err}
			}

			err = os.Rename(tmpfilename, absolutePath)
			if err != nil {
				err2 := os.Remove(tmpfilename)
				if err2 != nil {
					return ProcessingError{PERMANENT, err2}
				}
				return ProcessingError{PERMANENT, err}
			}
		}
		if latestLocal == nil || event.Hash != latestLocal.Hash || event.Permissions != latestLocal.Permissions {
			err = os.Chmod(absolutePath, event.Permissions)
			if err != nil && !util.ErrorFileNotFound(err) {
				return ProcessingError{PERMANENT, err}
			}
		}
	} else {
		//intentionally ignore errors in case this file has been deleted out from under us
		os.Remove(absolutePath)
		//delete the directory previously containing this file if its the last file
		util.RecursiveRemoveEmptyDirs(path.Dir(absolutePath))
	}

	return nil
}
// Example 8
//uploadEventData uploads the locally-cached contents for event to remote
//storage, encrypting on the fly when configured, and waits until the storage
//backend signals on its completion channel so the upload is observable by
//other clients before we proceed.
func uploadEventData(globals *AsinkGlobals, event *asink.Event) error {
	StatStartUpload()
	defer StatStopUpload() //also runs on error paths, unlike the original

	done := make(chan error, 1)
	uploadWriteCloser, err := globals.storage.Put(event.Hash, done)
	if err != nil {
		return err
	}

	cachedFilename := path.Join(globals.cacheDir, event.Hash)
	uploadFile, err := os.Open(cachedFilename)
	if err != nil {
		uploadWriteCloser.Close()
		return err
	}

	if globals.encrypted {
		encrypter, encErr := NewEncrypter(uploadWriteCloser, globals.key)
		if encErr != nil {
			uploadWriteCloser.Close()
			uploadFile.Close()
			return encErr
		}
		_, err = io.Copy(encrypter, uploadFile)
		//Close flushes the final cipher block; don't lose its error
		if closeErr := encrypter.Close(); err == nil {
			err = closeErr
		}
	} else {
		_, err = io.Copy(uploadWriteCloser, uploadFile)
	}
	uploadFile.Close()
	uploadWriteCloser.Close()
	//the original never checked the copy error: the encrypted branch
	//shadowed err and the plain branch overwrote it with <-done
	if err != nil {
		return err
	}

	//ensure the upload is observable by other clients before proceeding
	return <-done
}

//processLocalEvent_Lower performs the second half of local event processing:
//it re-validates the event against the latest known local state (discarding
//or conflicting as needed), uploads updated contents to remote storage, and
//sends the event to the server.
func processLocalEvent_Lower(globals *AsinkGlobals, event *asink.Event, latestLocal *asink.Event) error {
	var err error

	//if we already have this event, or if it is older than our most recent event, bail out
	if latestLocal != nil {
		if event.Timestamp < latestLocal.Timestamp {
			event.LocalStatus |= asink.DISCARDED
			return nil
		}

		//if the remote side snuck in an event that has the same hash
		//as we do, disregard our event
		if event.Hash == latestLocal.Hash {
			event.LocalStatus |= asink.DISCARDED
			return nil
		}

		//if our predecessor has changed, it means we have received a
		//remote event for this file since the top half of processing
		//this local event. If this is true, we have a conflict we
		//can't resolve without user intervention.
		if latestLocal.Hash != event.Predecessor {
			err = handleConflict(globals, event, path.Join(globals.cacheDir, event.Hash))
			event.LocalStatus |= asink.DISCARDED
			if err != nil {
				return ProcessingError{PERMANENT, err}
			}
			return nil
		}
	}

	if event.IsUpdate() {
		//upload file to remote storage
		if err = uploadEventData(globals, event); err != nil {
			return ProcessingError{STORAGE, err}
		}
	}

	//finally, send it off to the server
	StatStartSending()
	err = SendEvent(globals, event)
	StatStopSending()
	if err != nil {
		return ProcessingError{NETWORK, err}
	}
	return nil
}
// Example 9
//processLocalEvent_Upper performs the first half of local event processing:
//for updates it snapshots the file into a temp copy, records its permissions
//and content hash, and moves the copy into the local cache keyed by hash;
//events that turn out to be no-ops (unchanged hash+permissions, deleting an
//already-deleted file, or a file removed out from under us) are DISCARDED.
func processLocalEvent_Upper(globals *AsinkGlobals, event *asink.Event, latestLocal *asink.Event, absolutePath string) error {
	if latestLocal != nil {
		event.Predecessor = latestLocal.Hash

		if event.Timestamp < latestLocal.Timestamp {
			fmt.Printf("trying to send event older than latestLocal:\n")
			fmt.Printf("OLD %+v\n", latestLocal)
			fmt.Printf("NEW %+v\n", event)
		}
	}

	if event.IsUpdate() {
		//copy to tmp
		//TODO upload in chunks and check modification times to make sure it hasn't been changed instead of copying the whole thing off
		tmpfilename, err := util.CopyToTmp(absolutePath, globals.tmpDir)
		if err != nil {
			//bail out if the file we are trying to upload already got deleted
			if util.ErrorFileNotFound(err) {
				event.LocalStatus |= asink.DISCARDED
				return nil
			}
			return err
		}

		//try to collect the file's permissions
		fileinfo, err := os.Stat(absolutePath)
		if err != nil {
			//the temp copy is no longer needed; the original leaked it here
			os.Remove(tmpfilename)
			//bail out if the file we are trying to upload already got deleted
			if util.ErrorFileNotFound(err) {
				event.LocalStatus |= asink.DISCARDED
				return nil
			}
			return ProcessingError{PERMANENT, err}
		}
		event.Permissions = fileinfo.Mode()

		//get the file's hash
		hash, err := HashFile(tmpfilename)
		if err != nil {
			//best-effort cleanup of the temp copy (leaked in the original)
			os.Remove(tmpfilename)
			return ProcessingError{TEMPORARY, err}
		}
		event.Hash = hash

		//If the hash is the same, don't try to upload the event again
		if latestLocal != nil && event.Hash == latestLocal.Hash {
			os.Remove(tmpfilename)
			//If neither the file contents nor permissions changed, squash this event completely
			if event.Permissions == latestLocal.Permissions {
				event.LocalStatus |= asink.DISCARDED
				return nil
			}
		} else {
			//rename to local cache w/ filename=hash
			cachedFilename := path.Join(globals.cacheDir, event.Hash)
			if err = os.Rename(tmpfilename, cachedFilename); err != nil {
				//report the cleanup failure if removing the temp file also
				//fails, otherwise report the original rename error (the
				//original clobbered it with the nil result of os.Remove)
				if rmErr := os.Remove(tmpfilename); rmErr != nil {
					return ProcessingError{PERMANENT, rmErr}
				}
				return ProcessingError{PERMANENT, err}
			}
		}
	} else if latestLocal != nil && latestLocal.IsDelete() {
		//if we're trying to delete a file that we thought was already deleted, there's no need to delete it again
		event.LocalStatus |= asink.DISCARDED
		return nil
	}
	return nil
}
// Example 10
//StartWatching recursively watches watchDir with inotify, translating
//filesystem notifications into asink events sent on fileUpdates. A value is
//sent on initialWalkComplete once the initial recursive walk has finished.
func StartWatching(watchDir string, fileUpdates chan *asink.Event, initialWalkComplete chan int) {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		panic("Failed to create fsnotify watcher")
	}

	//begin watching a directory, panicking with a helpful message when the
	//kernel's inotify watch limit has been exhausted (shared by the walk
	//callback and the event loop below)
	watchOrDie := func(dir string) {
		err := watcher.Watch(dir)
		if err == nil {
			return
		}
		if e, ok := err.(syscall.Errno); ok && e == syscall.ENOSPC {
			//If we reach here, it means we've received ENOSPC from the Linux kernel:
			//ENOSPC The user limit on the total number of inotify watches was reached or the kernel failed to allocate a needed resource.
			panic("Exhausted the allowed number of inotify watches, please increase /proc/sys/fs/inotify/max_user_watches")
		}
		panic("Failed to watch " + dir)
	}

	//function called by filepath.Walk to start watching a directory and all subdirectories
	watchDirFn := func(path string, info os.FileInfo, err error) error {
		if err != nil {
			//the walk couldn't stat this entry, so info may be nil; skip it
			//instead of dereferencing nil (the original ignored err)
			return nil
		}
		if info.IsDir() {
			watchOrDie(path)
		} else if info.Mode().IsRegular() {
			event := new(asink.Event)
			event.Path = path
			event.Type = asink.UPDATE
			event.Timestamp = time.Now().UnixNano()
			fileUpdates <- event
		}
		return nil
	}

	//processes all the fsnotify events into asink events
	go func() {
		for {
			select {
			case ev := <-watcher.Event:
				//if a directory was created, begin recursively watching all its subdirectories
				if fi, err := os.Stat(ev.Name); err == nil && fi.IsDir() {
					if ev.IsCreate() {
						//Note: even though filepath.Walk will visit root, we must watch root first so we catch files/directories created after the walk begins but before this directory begins being watched
						watchOrDie(ev.Name)
						//scan this directory to ensure any file events we missed before starting to watch this directory are caught
						filepath.Walk(ev.Name, watchDirFn)
					}
					continue
				}

				event := new(asink.Event)
				if ev.IsCreate() || ev.IsModify() {
					event.Type = asink.UPDATE
				} else if ev.IsDelete() || ev.IsRename() {
					event.Type = asink.DELETE
				} else {
					panic("Unknown fsnotify event type")
				}

				event.Path = ev.Name
				event.Timestamp = time.Now().UnixNano()

				fileUpdates <- event

			case err := <-watcher.Error:
				panic(err)
			}
		}
	}()

	//start watching the directory passed in
	filepath.Walk(watchDir, watchDirFn)
	initialWalkComplete <- 0
}