func init() { AccessKeyId := os.Getenv("AccessKeyId") AccessKeySecret := os.Getenv("AccessKeySecret") if len(AccessKeyId) != 0 && len(AccessKeySecret) != 0 { client = oss.NewOSSClient(TestRegion, false, AccessKeyId, AccessKeySecret, false) } else { client = oss.NewOSSClient(TestRegion, false, TestAccessKeyId, TestAccessKeySecret, false) } }
// New constructs a new Driver with the given Aliyun credentials, region, encryption flag, and // bucketName func New(params DriverParameters) (*Driver, error) { client := oss.NewOSSClient(params.Region, params.Internal, params.AccessKeyID, params.AccessKeySecret, params.Secure) client.SetEndpoint(params.Endpoint) bucket := client.Bucket(params.Bucket) client.SetDebug(false) // Validate that the given credentials have at least read permissions in the // given bucket scope. if _, err := bucket.List(strings.TrimRight(params.RootDirectory, "/"), "", "", 1); err != nil { return nil, err } // TODO(tg123): Currently multipart uploads have no timestamps, so this would be unwise // if you initiated a new OSS client while another one is running on the same bucket. d := &driver{ Client: client, Bucket: bucket, ChunkSize: params.ChunkSize, Encrypt: params.Encrypt, RootDirectory: params.RootDirectory, } return &Driver{ baseEmbed: baseEmbed{ Base: base.Base{ StorageDriver: d, }, }, }, nil }
"io"
	"io/ioutil"
	"math/rand"
	"net/http"
	"strconv"
	"sync"
	//"net/http"
	"testing"
	"time"

	"github.com/denverdino/aliyungo/oss"
)

var (
	// Package-level OSS client shared by the tests below; init() may swap
	// it for one built from environment-provided credentials.
	//If you test on ECS, you can set the internal param to true
	client = oss.NewOSSClient(TestRegion, false, TestAccessKeyId, TestAccessKeySecret, false)
)

// TestCreateBucket creates TestBucket with a private ACL, then sleeps to
// let the (eventually consistent) bucket creation take effect before the
// remaining tests run against it.
func TestCreateBucket(t *testing.T) {
	b := client.Bucket(TestBucket)
	err := b.PutBucket(oss.Private)
	if err != nil {
		// NOTE(review): Errorf does not stop the test, so the 10s sleep
		// below still runs after a failed PutBucket — presumably t.Fatalf
		// was intended here; confirm before changing.
		t.Errorf("Failed for PutBucket: %v", err)
	}
	t.Log("Wait a while for bucket creation ...")
	time.Sleep(10 * time.Second)
}

func TestHead(t *testing.T) {
func NewOssvfs(bucket string, flags *FlagStorage) *Ossvfs { // Set up the basic struct. fs := &Ossvfs{ bucketName: bucket, flags: flags, umask: 0122, } fs.client = oss.NewOSSClient(flags.Region, flags.Internal, flags.AccessKeyId, flags.AccessKeySecret, true) fs.bucket = fs.client.Bucket(bucket) if flags.DebugOSS { fs.client.SetDebug(flags.DebugOSS) ossLog.Level = logrus.DebugLevel } location, err := fs.bucket.Location() if err != nil { if mapOssError(err) == fuse.ENOENT { log.Errorf("bucket %v does not exist", bucket) return nil } } if oss.Region(location) != fs.flags.Region { log.Errorf("the location of bucket %v is wrong") return nil } now := time.Now() fs.rootAttrs = fuseops.InodeAttributes{ Size: 4096, Nlink: 2, Mode: flags.DirMode | os.ModeDir, Atime: now, Mtime: now, Ctime: now, Crtime: now, Uid: fs.flags.Uid, Gid: fs.flags.Gid, } fs.bufferPool = NewBufferPool(1000*1024*1024, 200*1024*1024) fs.nextInodeID = fuseops.RootInodeID + 1 fs.inodes = make(map[fuseops.InodeID]*Inode) root := NewInode(getStringPointer(""), getStringPointer(""), flags) root.Id = fuseops.RootInodeID root.Attributes = &fs.rootAttrs fs.inodes[fuseops.RootInodeID] = root fs.inodesCache = make(map[string]*Inode) fs.nextHandleID = 1 fs.dirHandles = make(map[fuseops.HandleID]*DirHandle) fs.fileHandles = make(map[fuseops.HandleID]*FileHandle) return fs }
// SetUpSuite creates the OSS client shared by the whole test suite.
// NOTE(review): NewOSSClient is called here with no arguments, while other
// call sites pass region/credential parameters — confirm this matches the
// signature of the oss package version this file builds against.
func (s *OssvfsTest) SetUpSuite(t *C) { s.client = oss.NewOSSClient() }