Example #1
0
// main wires together a tus upload server: it ensures the storage
// directory exists, builds a (optionally size-limited) file store,
// creates the tusd handler, logs completed uploads, and serves HTTP
// on httpHost:httpPort with read/write timeouts.
func main() {

	stdout.Printf("Using '%s' as directory storage.\n", dir)
	if err := os.MkdirAll(dir, os.FileMode(0775)); err != nil {
		stderr.Fatalf("Unable to ensure directory exists: %s", err)
	}

	// Plain disk-backed store; may be wrapped by limitedstore below.
	var store tusd.DataStore
	store = filestore.FileStore{
		Path: dir,
	}

	if storeSize > 0 {
		store = limitedstore.New(storeSize, store)
		stdout.Printf("Using %.2fMB as storage size.\n", float64(storeSize)/1024/1024)

		// We need to ensure that a single upload can fit into the storage size
		if maxSize > storeSize || maxSize == 0 {
			maxSize = storeSize
		}
	}

	stdout.Printf("Using %.2fMB as maximum size.\n", float64(maxSize)/1024/1024)

	handler, err := tusd.NewHandler(tusd.Config{
		MaxSize: maxSize,
		// Use the same path the handler is mounted at below, so the
		// URLs the handler generates match the route. (Was hard-coded
		// to "files/", which breaks whenever basepath differs.)
		BasePath:              basepath,
		DataStore:             store,
		NotifyCompleteUploads: true,
	})
	if err != nil {
		stderr.Fatalf("Unable to create handler: %s", err)
	}

	address := httpHost + ":" + httpPort
	stdout.Printf("Using %s as address to listen.\n", address)

	// Log every finished upload. Ranging over the channel replaces the
	// redundant single-case select; the goroutine ends when the channel
	// is closed (it lives for the whole process here).
	go func() {
		for info := range handler.CompleteUploads {
			stdout.Printf("Upload %s (%d bytes) finished\n", info.ID, info.Size)
		}
	}()

	http.Handle(basepath, http.StripPrefix(basepath, handler))

	timeoutDuration := time.Duration(timeout) * time.Millisecond
	listener, err := NewListener(address, timeoutDuration, timeoutDuration)
	if err != nil {
		stderr.Fatalf("Unable to create listener: %s", err)
	}

	if err = http.Serve(listener, nil); err != nil {
		stderr.Fatalf("Unable to serve: %s", err)
	}
}
Example #2
0
// ExampleNewStoreComposer shows how to stack storage extensions on a
// single composer: a disk-backed core store, Consul-based locking, and
// a 1 GiB storage cap, then hands the composer to a handler config.
func ExampleNewStoreComposer() {
	composer := tusd.NewStoreComposer()

	// Core storage: keep uploads on the local disk.
	filestore.New("./data").UseIn(composer)

	// Exclusive access via a Consul-backed locker.
	consullocker.New(nil).UseIn(composer)

	// Cap total storage at 1 GiB by wrapping the registered core store.
	limitedstore.New(1024*1024*1024, composer.Core, composer.Terminater).UseIn(composer)

	// Error deliberately discarded: this example only demonstrates wiring.
	_, _ = tusd.NewHandler(tusd.Config{
		StoreComposer: composer,
	})
}
Example #3
0
// CreateComposer assembles the package-level Composer used by the
// handler. S3 is used as the storage backend when -s3-bucket is
// supplied; otherwise uploads are stored on local disk. When
// -store-size is positive the core store is wrapped with a size
// limit, and Flags.MaxSize is clamped so one upload always fits.
func CreateComposer() {
	// Attempt to use S3 as a backend if the -s3-bucket option has been supplied.
	// If not, we default to storing them locally on disk.
	Composer = tusd.NewStoreComposer()
	if Flags.S3Bucket == "" {
		dir := Flags.UploadDir

		stdout.Printf("Using '%s' as directory storage.\n", dir)
		// 0775 for consistency with the identical code path elsewhere in
		// this file (was 0774, which dropped group execute/search).
		if err := os.MkdirAll(dir, os.FileMode(0775)); err != nil {
			stderr.Fatalf("Unable to ensure directory exists: %s", err)
		}

		store := filestore.New(dir)
		store.UseIn(Composer)
	} else {
		stdout.Printf("Using 's3://%s' as S3 bucket for storage.\n", Flags.S3Bucket)

		// Derive credentials from AWS_SECRET_ACCESS_KEY, AWS_ACCESS_KEY_ID and
		// AWS_REGION environment variables.
		// NOTE(review): session.New is deprecated in aws-sdk-go in favor
		// of session.NewSession (which returns an error); left as-is to
		// avoid changing error behavior — confirm against the vendored SDK.
		credentials := aws.NewConfig().WithCredentials(credentials.NewEnvCredentials())
		store := s3store.New(Flags.S3Bucket, s3.New(session.New(), credentials))
		store.UseIn(Composer)

		// Pair the S3 store with an in-memory locker (per-process only).
		locker := memorylocker.New()
		locker.UseIn(Composer)
	}

	storeSize := Flags.StoreSize
	maxSize := Flags.MaxSize

	if storeSize > 0 {
		limitedstore.New(storeSize, Composer.Core, Composer.Terminater).UseIn(Composer)
		stdout.Printf("Using %.2fMB as storage size.\n", float64(storeSize)/1024/1024)

		// We need to ensure that a single upload can fit into the storage size
		if maxSize > storeSize || maxSize == 0 {
			Flags.MaxSize = storeSize
		}
	}

	stdout.Printf("Using %.2fMB as maximum size.\n", float64(Flags.MaxSize)/1024/1024)
}