Example #1
func (d *Downloader) download(localId int64, remoteId string, checksum string) {
	// TODO: handle all error cases, make sure the queue is not blocked
	// by erroneous files (a sketch follows this example)
	logger.V("Downloading", remoteId, checksum)
	var (
		resp *http.Response
		err  error
	)
	if resp, err = d.client.Get(baseUrlDownloadHost + "/" + remoteId); err != nil {
		logger.V("error downloading", remoteId, err)
		return
	}
	// Close the body on all paths; the early returns below would
	// otherwise leak the response body.
	defer resp.Body.Close()

	if resp.StatusCode == 404 {
		d.metaService.SetOp(localId, metadata.OpNone)
		logger.V("error downloading [not found]", remoteId)
		return
	}

	if resp.StatusCode < 200 || resp.StatusCode > 299 {
		logger.V("error downloading [not ok]", remoteId, resp.StatusCode)
		return
	}

	err = d.blobMngr.Save(localId, checksum, resp.Body)
	if err != nil {
		logger.V(err)
		return
	}

	d.metaService.SetOp(localId, metadata.OpNone)
}
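
The TODO above is about failed downloads wedging the queue. A minimal sketch of one way to unblock it, assuming a hypothetical metadata.OpError state that a later retry pass could pick up; the real codebase may track failures differently:

func (d *Downloader) fail(localId int64, remoteId string, err error) {
	// Sketch only: record the failure instead of leaving the file queued.
	// metadata.OpError is an assumption, not a state from the original code.
	logger.V("error downloading", remoteId, err)
	d.metaService.SetOp(localId, metadata.OpError)
}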
Example #2
func (d *CachedSyncer) Sync(isForce bool) (err error) {
	d.mu.Lock()
	defer d.mu.Unlock()

	logger.V("Started syncer...")
	err = d.syncInbound(isForce)
	if err != nil {
		logger.V("error during sync", err)
		return
	}
	logger.V("Done syncing...")
	return
}
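
Start is called elsewhere but not shown; a minimal sketch of a periodic driver that could sit behind it, with the 30-second interval as a placeholder assumption:

func (d *CachedSyncer) Start() {
	// Sketch only: re-sync on a fixed interval. The real Start may be
	// event-driven instead; the interval here is an assumption.
	go func() {
		for range time.Tick(30 * time.Second) {
			if err := d.Sync(false); err != nil {
				logger.V("background sync failed", err)
			}
		}
	}()
}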
Example #3
func main() {
	flag.Parse()
	// add a lock to the config dir, no two instances should
	// run at the same time (a lockfile sketch follows this example)
	cfg := config.NewConfig(*flagDataDir)
	err := cfg.Setup()
	if err != nil {
		logger.F("Error initializing configuration.", err)
	}

	if *flagRunAuthWizard {
		cmd.RunAuthWizard(cfg)
		os.Exit(0)
	}

	err = cfg.Load()
	if err != nil {
		logger.F("Did you mean --wizard? Error reading configuration.", err)
	}

	transport := auth.NewTransport(cfg.FirstAccount())
	metaService, err = metadata.New(cfg.MetadataPath())
	if err != nil {
		logger.F("Error initializing metadata service.", err)
	}
	blobManager = blob.New(cfg.BlobPath())

	syncManager := syncer.NewCachedSyncer(
		transport,
		metaService,
		blobManager)

	if *flagBlockSync {
		if err = syncManager.Sync(true); err != nil {
			logger.V("error during initial sync", err)
		}
	}
	syncManager.Start()

	logger.V("mounting...")
	mountpoint := cfg.FirstAccount().LocalPath
	// TODO: Better error checking here. All sorts of things like stale
	// mounts will surface at this moment.
	err = os.MkdirAll(mountpoint, 0774)
	if err != nil {
		logger.V(err)
	}
	shutdownChan := make(chan io.Closer, 1)
	go gracefulShutDown(shutdownChan, mountpoint)
	if err = mount.MountAndServe(mountpoint, metaService, blobManager); err != nil {
		logger.F(err)
	}
}
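
For the config-dir lock mentioned in the TODO above, a minimal sketch using an exclusive lockfile; the path and error text are assumptions, and flock(2) would survive crashes (stale lockfiles) better:

func lockConfigDir(dir string) (release func(), err error) {
	// Sketch only: O_EXCL makes creation fail if the lockfile already
	// exists, i.e. if another instance holds the lock.
	lockPath := filepath.Join(dir, ".lock")
	f, err := os.OpenFile(lockPath, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0644)
	if err != nil {
		return nil, fmt.Errorf("another instance may be running: %v", err)
	}
	f.Close()
	return func() { os.Remove(lockPath) }, nil
}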
Example #4
func gracefulShutDown(shutdownc <-chan io.Closer, mountpoint string) {
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGTERM, syscall.SIGINT)

	<-c
	logger.V("Gracefully shutting down...")
	mount.Umount(mountpoint)
	// TODO(burcud): Handle Umount errors (a retry sketch follows this example)
	go func() {
		<-time.After(3 * time.Second)
		logger.V("Couldn't unmount; do it manually. Shutting down now...")
		os.Exit(1)
	}()
}
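
For the Umount TODO, a minimal retry sketch; it assumes mount.Umount returns an error, which the original call site does not confirm:

func umountWithRetry(mountpoint string) error {
	// Sketch only: retry a few times before telling the user to unmount
	// manually. Assumes mount.Umount returns an error.
	var err error
	for i := 0; i < 3; i++ {
		if err = mount.Umount(mountpoint); err == nil {
			return nil
		}
		logger.V("umount failed, retrying...", err)
		time.Sleep(time.Second)
	}
	return err
}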
Example #5
func (d *CachedSyncer) mergeChanges(isInitialSync bool, rootId string, startChangeId int64, pageToken string) (nextPageToken string, err error) {
	logger.V("merging changes starting with pageToken:", pageToken, "and startChangeId", startChangeId)

	req := d.remoteService.Changes.List()
	req.IncludeSubscribed(false)
	if pageToken != "" {
		req.PageToken(pageToken)
	} else if startChangeId > 0 { // pageToken and startChangeId are mutually exclusive
		req.StartChangeId(startChangeId)
	}
	if isInitialSync {
		req.IncludeDeleted(false)
	}

	var changes *client.ChangeList
	if changes, err = req.Do(); err != nil {
		return
	}

	var largestId int64
	nextPageToken = changes.NextPageToken
	for _, item := range changes.Items {
		if err = d.mergeChange(rootId, item); err != nil {
			return
		}
		largestId = item.Id
	}
	if largestId > 0 {
		// persist the largest change id seen in this page
		if saveErr := d.metaService.SaveLargestChangeId(largestId); saveErr != nil {
			logger.V("error saving largest change id", saveErr)
		}
	}
	return
}
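
mergeChanges handles one page and hands back the next page token; a minimal sketch of the calling loop that drains all pages, with the method name assumed:

func (d *CachedSyncer) mergeAllChanges(isInitialSync bool, rootId string, startChangeId int64) error {
	// Sketch only: keep fetching until the API stops returning a token.
	pageToken := ""
	for {
		nextPageToken, err := d.mergeChanges(isInitialSync, rootId, startChangeId, pageToken)
		if err != nil {
			return err
		}
		if nextPageToken == "" {
			return nil
		}
		pageToken = nextPageToken
	}
}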
Example #6
func (f *Manager) cleanup(id int64, checksum string) (err error) {
	var blobs []os.FileInfo
	if blobs, err = ioutil.ReadDir(f.getBlobDir(id)); err != nil {
		return
	}
	for _, file := range blobs {
		if file.Name() != f.getBlobName(id, checksum) && strings.Contains(file.Name(), f.getBlobName(id, "")) {
			logger.V("Deleting blob", file.Name())
			// Errors are not show-stoppers here; they only cost extra disk
			// space, which we can reclaim on the next removal attempt.
			if rmErr := os.Remove(path.Join(f.getBlobDir(id), file.Name())); rmErr != nil {
				logger.V(rmErr)
			}
		}
	}
	return nil
}
Example #7
// SaveLargestChangeId persists the largest change id synchronized so far.
func (m *MetaService) SaveLargestChangeId(id int64) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	logger.V("Saving largest change Id", id)
	e := &KeyValueEntry{Key: keyLargestChangeId, Value: fmt.Sprintf("%d", id)}
	val, err := m.getKey(keyLargestChangeId)
	if err != nil {
		return err
	}
	if val == "" {
		return m.dbmap.Insert(e)
	}
	_, err = m.dbmap.Update(e)
	return err
}
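
The matching read path is implied by getKey; a minimal sketch, assuming the stored value is the base-10 string written by fmt.Sprintf above:

func (m *MetaService) GetLargestChangeId() (int64, error) {
	// Sketch only: returns 0 when nothing has been persisted yet.
	m.mu.Lock()
	defer m.mu.Unlock()
	val, err := m.getKey(keyLargestChangeId)
	if err != nil || val == "" {
		return 0, err
	}
	return strconv.ParseInt(val, 10, 64)
}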
Example #8
func (m *MetaService) RemoteRm(remoteId string) (err error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	// TODO: Handle directories recursively (a sketch follows this example)
	logger.V("Deleting metadata for", remoteId)
	var file *CachedDriveFile
	if file, err = m.getByRemoteId(remoteId); err != nil {
		return err
	}
	if file == nil {
		return
	}
	file.Op = OpDelete
	_, err = m.dbmap.Update(file)
	return err
}
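
For the recursion TODO, a minimal sketch; getChildrenByParentId is hypothetical, and the real schema may expose children differently:

func (m *MetaService) rmRecursive(file *CachedDriveFile) error {
	// Sketch only: mark children for deletion first, then the parent.
	// getChildrenByParentId is an assumed helper, not original code.
	children, err := m.getChildrenByParentId(file.LocalId)
	if err != nil {
		return err
	}
	for _, child := range children {
		if err := m.rmRecursive(child); err != nil {
			return err
		}
	}
	file.Op = OpDelete
	_, err = m.dbmap.Update(file)
	return err
}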
Example #9
// RemoteMod permanently saves a file's or folder's metadata.
func (m *MetaService) RemoteMod(remoteId string, newParentRemoteId string, data *CachedDriveFile) (err error) {
	m.mu.Lock()
	defer m.mu.Unlock()

	logger.V("Caching metadata for", remoteId)
	var parentFile *CachedDriveFile
	if newParentRemoteId != "" {
		if parentFile, err = m.getByRemoteId(newParentRemoteId); err != nil {
			return err
		}
	}

	var file *CachedDriveFile
	if file, err = m.getByRemoteId(remoteId); err != nil {
		return err
	}
	if file == nil {
		file = &CachedDriveFile{Id: remoteId}
	}
	if data.Md5Checksum != file.Md5Checksum && !data.IsDir {
		file.Op = OpDownload
	}
	file.Id = remoteId

	file.Name = data.Name
	file.LastMod = data.LastMod
	file.Md5Checksum = data.Md5Checksum
	file.LastEtag = data.LastEtag
	file.FileSize = data.FileSize
	file.IsDir = data.IsDir
	file.LocalParentId = 0
	if parentFile != nil {
		file.LocalParentId = parentFile.LocalId
	}
	if file.LocalId > 0 {
		_, err = m.dbmap.Update(file)
		return
	}
	return m.dbmap.Insert(file)
}