// getBucketMetadataReaders - readers are returned in a map rather than a slice
func (donut API) getBucketMetadataReaders() (map[int]io.ReadCloser, error) {
	readers := make(map[int]io.ReadCloser)
	disks := make(map[int]disk.Disk)
	var err error
	for _, node := range donut.nodes {
		nDisks, err := node.ListDisks()
		if err != nil {
			return nil, iodine.New(err, nil)
		}
		for k, v := range nDisks {
			disks[k] = v
		}
	}
	var bucketMetaDataReader io.ReadCloser
	for order, disk := range disks {
		bucketMetaDataReader, err = disk.OpenFile(filepath.Join(donut.config.DonutName, bucketMetadataConfig))
		if err != nil {
			continue
		}
		readers[order] = bucketMetaDataReader
	}
	// individual disk failures are tolerated above; fail only if no disk
	// could provide a metadata reader
	if len(readers) == 0 {
		return nil, iodine.New(err, nil)
	}
	return readers, nil
}
// writeEncodedData - erasure encode the object stream and write the encoded blocks to all writers
func (b bucket) writeEncodedData(k, m uint8, writers []io.WriteCloser, objectData io.Reader, sumMD5, sum512 hash.Hash) (int, int, error) {
	chunks := split.Stream(objectData, 10*1024*1024)
	encoder, err := newEncoder(k, m, "Cauchy")
	if err != nil {
		return 0, 0, iodine.New(err, nil)
	}
	chunkCount := 0
	totalLength := 0
	for chunk := range chunks {
		if chunk.Err != nil {
			return 0, 0, iodine.New(chunk.Err, nil)
		}
		totalLength = totalLength + len(chunk.Data)
		encodedBlocks, err := encoder.Encode(chunk.Data)
		if err != nil {
			return 0, 0, iodine.New(err, nil)
		}
		// checksums are computed over the plain (pre-encode) stream
		sumMD5.Write(chunk.Data)
		sum512.Write(chunk.Data)
		for blockIndex, block := range encodedBlocks {
			_, err := io.Copy(writers[blockIndex], bytes.NewBuffer(block))
			if err != nil {
				return 0, 0, iodine.New(err, nil)
			}
		}
		chunkCount = chunkCount + 1
	}
	return chunkCount, totalLength, nil
}
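// exampleWriteEncodedData is a minimal sketch (not part of the original
// source) of driving writeEncodedData. It assumes a bucket b and k+m
// pre-opened writers, one per erasure-coded block; with k=8 data and m=8
// parity blocks, len(writers) must be 16 and the storage overhead is
// (k+m)/k = 2x. Hashes come from stdlib crypto/md5 and crypto/sha512.
func exampleWriteEncodedData(b bucket, writers []io.WriteCloser, objectData io.Reader) error {
	sumMD5 := md5.New()
	sum512 := sha512.New()
	chunkCount, totalLength, err := b.writeEncodedData(8, 8, writers, objectData, sumMD5, sum512)
	if err != nil {
		return iodine.New(err, nil)
	}
	fmt.Printf("wrote %d chunks, %d bytes, md5 %x\n", chunkCount, totalLength, sumMD5.Sum(nil))
	return nil
}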
// GetObjectMetadata - get object metadata from cache
func (donut API) GetObjectMetadata(bucket, key string) (ObjectMetadata, error) {
	donut.lock.Lock()
	defer donut.lock.Unlock()
	// check if bucket exists
	if !IsValidBucket(bucket) {
		return ObjectMetadata{}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
	}
	if !IsValidObjectName(key) {
		return ObjectMetadata{}, iodine.New(ObjectNameInvalid{Object: key}, nil)
	}
	if !donut.storedBuckets.Exists(bucket) {
		return ObjectMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil)
	}
	storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
	objectKey := bucket + "/" + key
	if objMetadata, ok := storedBucket.objectMetadata[objectKey]; ok {
		return objMetadata, nil
	}
	if len(donut.config.NodeDiskMap) > 0 {
		objMetadata, err := donut.getObjectMetadata(bucket, key)
		if err != nil {
			return ObjectMetadata{}, iodine.New(err, nil)
		}
		// update the in-memory metadata cache
		storedBucket.objectMetadata[objectKey] = objMetadata
		donut.storedBuckets.Set(bucket, storedBucket)
		return objMetadata, nil
	}
	return ObjectMetadata{}, iodine.New(ObjectNotFound{Object: key}, nil)
}
// listDonutBuckets - list buckets found on every node and disk
func (donut API) listDonutBuckets() error {
	for _, node := range donut.nodes {
		disks, err := node.ListDisks()
		if err != nil {
			return iodine.New(err, nil)
		}
		for _, disk := range disks {
			dirs, err := disk.ListDir(donut.config.DonutName)
			if err != nil {
				return iodine.New(err, nil)
			}
			for _, dir := range dirs {
				splitDir := strings.Split(dir.Name(), "$")
				if len(splitDir) < 3 {
					return iodine.New(CorruptedBackend{Backend: dir.Name()}, nil)
				}
				bucketName := splitDir[0]
				// we don't need this once we cache from makeDonutBucket()
				bucket, _, err := newBucket(bucketName, "private", donut.config.DonutName, donut.nodes)
				if err != nil {
					return iodine.New(err, nil)
				}
				donut.buckets[bucketName] = bucket
			}
		}
	}
	return nil
}
// trapSignal - waits on listed signals for pre-defined behaviors
func (a *app) trapSignal(wg *sync.WaitGroup) {
	ch := make(chan os.Signal, 10)
	signal.Notify(ch, syscall.SIGTERM, syscall.SIGUSR2, syscall.SIGHUP)
	for {
		sig := <-ch
		switch sig {
		case syscall.SIGTERM:
			// this ensures a subsequent TERM will trigger standard go behaviour of terminating
			signal.Stop(ch)
			// roll through all initialized http servers and stop them
			for _, s := range a.sds {
				go func(s httpdown.Server) {
					defer wg.Done()
					if err := s.Stop(); err != nil {
						a.errors <- iodine.New(err, nil)
					}
				}(s)
			}
			return
		case syscall.SIGUSR2:
			fallthrough
		case syscall.SIGHUP:
			// we only return here if there's an error, otherwise the new process
			// will send us a TERM when it's ready to trigger the actual shutdown.
			if _, err := a.net.StartProcess(); err != nil {
				a.errors <- iodine.New(err, nil)
			}
		}
	}
}
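// The graceful-restart protocol above: SIGUSR2/SIGHUP fork a new process
// that inherits the listeners; once ready, the child TERMs the parent,
// which then stops its servers. exampleTrapSignal is a minimal standalone
// sketch of the same signal-trap pattern (stdlib os/signal, syscall and
// log only; "cleanup" is a hypothetical stand-in for stopping servers):
func exampleTrapSignal(cleanup func() error) {
	ch := make(chan os.Signal, 10)
	signal.Notify(ch, syscall.SIGTERM, syscall.SIGHUP)
	for sig := range ch {
		switch sig {
		case syscall.SIGTERM:
			signal.Stop(ch) // a second TERM now terminates the process normally
			if err := cleanup(); err != nil {
				log.Println(err)
			}
			return
		case syscall.SIGHUP:
			log.Println("re-exec requested") // where StartProcess() would run
		}
	}
}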
// writeObjectData - erasure encode the object stream and write the encoded blocks to all writers
func (b bucket) writeObjectData(k, m uint8, writers []io.WriteCloser, objectData io.Reader, sumMD5, sum256, sum512 hash.Hash) (int, int, error) {
	encoder, err := newEncoder(k, m, "Cauchy")
	if err != nil {
		return 0, 0, iodine.New(err, nil)
	}
	chunkCount := 0
	totalLength := 0
	for chunk := range split.Stream(objectData, 10*1024*1024) {
		if chunk.Err != nil {
			return 0, 0, iodine.New(chunk.Err, nil)
		}
		totalLength = totalLength + len(chunk.Data)
		encodedBlocks, err := encoder.Encode(chunk.Data)
		if err != nil {
			return 0, 0, iodine.New(err, nil)
		}
		sumMD5.Write(chunk.Data)
		sum256.Write(chunk.Data)
		sum512.Write(chunk.Data)
		for blockIndex, block := range encodedBlocks {
			// write each encoded block; the goroutine is awaited immediately
			errCh := make(chan error, 1)
			go func(writer io.Writer, reader io.Reader) {
				defer close(errCh)
				_, err := io.Copy(writer, reader)
				errCh <- err
			}(writers[blockIndex], bytes.NewReader(block))
			if err := <-errCh; err != nil {
				return 0, 0, iodine.New(err, nil)
			}
		}
		chunkCount = chunkCount + 1
	}
	return chunkCount, totalLength, nil
}
// ListenUnix announces on the local network address laddr. The network net
// must be "unix" or "unixpacket". It returns an inherited net.Listener for
// the matching network and address, or creates a new one using net.ListenUnix.
func (n *nimbleNet) ListenUnix(nett string, laddr *net.UnixAddr) (*net.UnixListener, error) {
	if err := n.getInheritedListeners(); err != nil {
		return nil, iodine.New(err, nil)
	}
	n.mutex.Lock()
	defer n.mutex.Unlock()
	// look for an inherited listener
	for i, l := range n.inheritedListeners {
		if l == nil { // used inherited listeners are set to nil
			continue
		}
		equal := nimbleAddr{l.Addr()}.IsEqual(laddr)
		if equal {
			n.inheritedListeners[i] = nil
			n.activeListeners = append(n.activeListeners, l)
			return l.(*net.UnixListener), nil
		}
	}
	// make a fresh listener
	l, err := net.ListenUnix(nett, laddr)
	if err != nil {
		return nil, iodine.New(err, nil)
	}
	n.activeListeners = append(n.activeListeners, l)
	return l, nil
}
// SINGLE SOURCE - Type B: copy(f, d) -> copy(f, d/f) -> A
// prepareCopyURLsTypeB - prepares target and source URLs for copying.
func prepareCopyURLsTypeB(sourceURL string, targetURL string) copyURLs {
	_, sourceContent, err := url2Stat(sourceURL)
	if err != nil {
		// Source does not exist or insufficient privileges.
		return copyURLs{Error: NewIodine(iodine.New(err, nil))}
	}
	if !sourceContent.Type.IsRegular() {
		// Source is not a regular file.
		return copyURLs{Error: NewIodine(iodine.New(errInvalidSource{URL: sourceURL}, nil))}
	}
	// All OK.. We can proceed. Type B: source is a file, target is a folder and exists.
	sourceURLParse, err := client.Parse(sourceURL)
	if err != nil {
		return copyURLs{Error: NewIodine(iodine.New(errInvalidSource{URL: sourceURL}, nil))}
	}
	targetURLParse, err := client.Parse(targetURL)
	if err != nil {
		return copyURLs{Error: NewIodine(iodine.New(errInvalidTarget{URL: targetURL}, nil))}
	}
	targetURLParse.Path = filepath.Join(targetURLParse.Path, filepath.Base(sourceURLParse.Path))
	return prepareCopyURLsTypeA(sourceURL, targetURLParse.String())
}
// GetAuthKeys - get access key id and secret access key
func GetAuthKeys(url string) ([]byte, error) {
	op := RPCOps{
		Method:  "Auth.Get",
		Request: rpc.Args{Request: ""},
	}
	req, err := NewRequest(url, op, http.DefaultTransport)
	if err != nil {
		return nil, iodine.New(err, nil)
	}
	resp, err := req.Do()
	defer closeResp(resp)
	if err != nil {
		return nil, iodine.New(err, nil)
	}
	var reply rpc.AuthReply
	if err := jsonrpc.DecodeClientResponse(resp.Body, &reply); err != nil {
		return nil, iodine.New(err, nil)
	}
	authConfig := &auth.Config{}
	authConfig.Version = "0.0.1"
	authConfig.Users = make(map[string]*auth.User)
	user := &auth.User{}
	user.Name = "testuser"
	user.AccessKeyID = reply.AccessKeyID
	user.SecretAccessKey = reply.SecretAccessKey
	authConfig.Users[reply.AccessKeyID] = user
	if err := auth.SaveConfig(authConfig); err != nil {
		return nil, iodine.New(err, nil)
	}
	return json.MarshalIndent(reply, "", "\t")
}
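// exampleGetAuthKeys is a minimal sketch (not part of the original source)
// of calling GetAuthKeys; the endpoint URL is a hypothetical local RPC
// server address.
func exampleGetAuthKeys() {
	keys, err := GetAuthKeys("http://localhost:9001/rpc")
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println(string(keys)) // indented JSON with AccessKeyID/SecretAccessKey
}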
// checkCopySyntaxTypeB verifies if the source is a valid file and the target is a valid dir.
func checkCopySyntaxTypeB(srcURLs []string, tgtURL string) {
	if len(srcURLs) != 1 {
		console.Fatalf("Invalid number of source arguments to copy command. %s\n", NewIodine(iodine.New(errInvalidArgument{}, nil)))
	}
	srcURL := srcURLs[0]
	_, srcContent, err := url2Stat(srcURL) // does source exist?
	if err != nil {
		console.Fatalf("Unable to stat source ‘%s’. %s\n", srcURL, NewIodine(iodine.New(err, nil)))
	}
	if srcContent.Type.IsDir() {
		console.Fatalf("Source ‘%s’ is a folder. Use ‘%s...’ argument to copy this folder and its contents recursively. %s\n", srcURL, srcURL, NewIodine(iodine.New(errInvalidArgument{}, nil)))
	}
	if !srcContent.Type.IsRegular() {
		console.Fatalf("Source ‘%s’ is not a file. %s\n", srcURL, NewIodine(iodine.New(errInvalidArgument{}, nil)))
	}
	_, tgtContent, err := url2Stat(tgtURL) // does target exist?
	if err == nil {
		if !tgtContent.Type.IsDir() {
			console.Fatalf("Target ‘%s’ is not a folder. %s\n", tgtURL, NewIodine(iodine.New(errInvalidArgument{}, nil)))
		}
	}
}
// checkCopySyntaxTypeD verifies if the source is a valid list of files or a valid recursive dir and the target is a valid dir.
func checkCopySyntaxTypeD(srcURLs []string, tgtURL string) {
	for _, srcURL := range srcURLs {
		if isURLRecursive(srcURL) {
			srcURL = stripRecursiveURL(srcURL)
			_, srcContent, err := url2Stat(srcURL) // does source exist?
			if err != nil {
				console.Fatalf("Unable to stat source ‘%s’. %s\n", srcURL, NewIodine(iodine.New(err, nil)))
			}
			if !srcContent.Type.IsDir() { // ellipsis is supported only for folders
				console.Fatalf("Source ‘%s’ is not a folder. %s\n", stripRecursiveURL(srcURL), NewIodine(iodine.New(errInvalidArgument{}, nil)))
			}
		} else { // regular URL
			_, srcContent, err := url2Stat(srcURL) // does source exist?
			if err != nil {
				console.Fatalf("Unable to stat source ‘%s’. %s\n", srcURL, NewIodine(iodine.New(err, nil)))
			}
			if srcContent.Type.IsDir() {
				console.Fatalf("Source ‘%s’ is a folder. Use ‘%s...’ argument to copy this folder and its contents recursively. %s\n", srcURL, srcURL, NewIodine(iodine.New(errInvalidArgument{}, nil)))
			}
			if !srcContent.Type.IsRegular() {
				console.Fatalf("Source ‘%s’ is not a file. %s\n", srcURL, NewIodine(iodine.New(errInvalidArgument{}, nil)))
			}
		}
	}
	_, tgtContent, err := url2Stat(tgtURL) // does target exist?
	if err != nil {
		console.Fatalf("Unable to stat target ‘%s’. %s\n", tgtURL, NewIodine(iodine.New(err, nil)))
	}
	if !tgtContent.Type.IsDir() {
		console.Fatalf("Target ‘%s’ is not a folder. %s\n", tgtURL, NewIodine(iodine.New(errInvalidArgument{}, nil)))
	}
}
// ListObjects - list all objects
func (b bucket) ListObjects() (map[string]Object, error) {
	nodeSlice := 0
	for _, node := range b.nodes {
		disks, err := node.ListDisks()
		if err != nil {
			return nil, iodine.New(err, nil)
		}
		for _, disk := range disks {
			bucketSlice := fmt.Sprintf("%s$%d$%d", b.name, nodeSlice, disk.GetOrder())
			bucketPath := filepath.Join(b.donutName, bucketSlice)
			objects, err := disk.ListDir(bucketPath)
			if err != nil {
				return nil, iodine.New(err, nil)
			}
			for _, object := range objects {
				newObject, err := NewObject(object.Name(), filepath.Join(disk.GetPath(), bucketPath))
				if err != nil {
					return nil, iodine.New(err, nil)
				}
				newObjectMetadata, err := newObject.GetObjectMetadata()
				if err != nil {
					return nil, iodine.New(err, nil)
				}
				objectName, ok := newObjectMetadata["object"]
				if !ok {
					return nil, iodine.New(ObjectCorrupted{Object: object.Name()}, nil)
				}
				b.objects[objectName] = newObject
			}
		}
		nodeSlice = nodeSlice + 1
	}
	return b.objects, nil
}
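// On disk, each bucket is sharded into per-disk directories named
// "<bucket>$<node>$<diskOrder>" — the same "$"-separated scheme that
// listDonutBuckets and Rebalance split on. exampleBucketSliceName is a
// small sketch of that naming, using hypothetical values:
func exampleBucketSliceName() {
	bucketName, nodeSlice, diskOrder := "mybucket", 0, 3
	bucketSlice := fmt.Sprintf("%s$%d$%d", bucketName, nodeSlice, diskOrder)
	fmt.Println(bucketSlice) // mybucket$0$3
	parts := strings.Split(bucketSlice, "$")
	fmt.Println(parts[0]) // bucket name: mybucket
}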
// Rebalance - rebalance data across disks
func (d donut) Rebalance() error {
	var totalOffSetLength int
	var newDisks []disk.Disk
	var existingDirs []os.FileInfo
	for _, node := range d.nodes {
		disks, err := node.ListDisks()
		if err != nil {
			return iodine.New(err, nil)
		}
		totalOffSetLength = len(disks)
		fmt.Println(totalOffSetLength)
		for _, disk := range disks {
			dirs, err := disk.ListDir(d.name)
			if err != nil {
				return iodine.New(err, nil)
			}
			if len(dirs) == 0 {
				newDisks = append(newDisks, disk)
			}
			existingDirs = append(existingDirs, dirs...)
		}
	}
	for _, dir := range existingDirs {
		splits := strings.Split(dir.Name(), "$")
		bucketName, segment, offset := splits[0], splits[1], splits[2]
		fmt.Println(bucketName, segment, offset)
	}
	return nil
}
// listDonutBuckets - list buckets from available disks, tolerating missing disks
func (donut API) listDonutBuckets() error {
	var disks map[int]disk.Disk
	var err error
	for _, node := range donut.nodes {
		disks, err = node.ListDisks()
		if err != nil {
			return iodine.New(err, nil)
		}
	}
	var dirs []os.FileInfo
	for _, disk := range disks {
		dirs, err = disk.ListDir(donut.config.DonutName)
		if err == nil {
			break
		}
	}
	// if all disks are missing then return error
	if err != nil {
		return iodine.New(err, nil)
	}
	for _, dir := range dirs {
		splitDir := strings.Split(dir.Name(), "$")
		if len(splitDir) < 3 {
			return iodine.New(CorruptedBackend{Backend: dir.Name()}, nil)
		}
		bucketName := splitDir[0]
		// we don't need this once we cache from makeDonutBucket()
		bucket, _, err := newBucket(bucketName, "private", donut.config.DonutName, donut.nodes)
		if err != nil {
			return iodine.New(err, nil)
		}
		donut.buckets[bucketName] = bucket
	}
	return nil
}
// Start a single disk subsystem
func Start(paths []string) (chan<- string, <-chan error, drivers.Driver) {
	ctrlChannel := make(chan string)
	errorChannel := make(chan error)
	// Soon to be user configurable, when Management API is available
	// we should replace "default" with something which is passed down
	// from configuration parameters
	var d donut.Donut
	var err error
	if len(paths) == 1 {
		d, err = donut.NewDonut("default", createNodeDiskMap(paths[0]))
		if err != nil {
			err = iodine.New(err, nil)
			log.Error.Println(err)
		}
	} else {
		d, err = donut.NewDonut("default", createNodeDiskMapFromSlice(paths))
		if err != nil {
			err = iodine.New(err, nil)
			log.Error.Println(err)
		}
	}
	s := new(donutDriver)
	s.donut = d
	s.paths = paths
	go start(ctrlChannel, errorChannel, s)
	return ctrlChannel, errorChannel, s
}
// SetDonut - set donut config
func SetDonut(url, hostname string, disks []string) error {
	op := RPCOps{
		Method: "Donut.Set",
		Request: rpc.DonutArgs{
			Hostname: hostname,
			Disks:    disks,
			Name:     "default",
			MaxSize:  512000000,
		},
	}
	req, err := NewRequest(url, op, http.DefaultTransport)
	if err != nil {
		return iodine.New(err, nil)
	}
	resp, err := req.Do()
	defer closeResp(resp)
	if err != nil {
		return iodine.New(err, nil)
	}
	var reply rpc.Reply
	if err := jsonrpc.DecodeClientResponse(resp.Body, &reply); err != nil {
		return iodine.New(err, nil)
	}
	return reply.Error
}
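// exampleSetDonut is a minimal sketch (not part of the original source) of
// pushing a donut config over RPC; the endpoint, hostname and disk paths
// are hypothetical.
func exampleSetDonut() {
	disks := []string{"/mnt/disk1", "/mnt/disk2"}
	if err := SetDonut("http://localhost:9001/rpc", "localhost", disks); err != nil {
		log.Fatalln(err)
	}
}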
// IsUsable provides a comprehensive way of knowing if the provided mountPath is mounted and writable
func IsUsable(mountPath string) (bool, error) {
	mntpoint, err := os.Stat(mountPath)
	if err != nil {
		return false, iodine.New(err, nil)
	}
	parent, err := os.Stat(filepath.Join(mountPath, ".."))
	if err != nil {
		return false, iodine.New(err, nil)
	}
	mntpointSt := mntpoint.Sys().(*syscall.Stat_t)
	parentSt := parent.Sys().(*syscall.Stat_t)
	if mntpointSt.Dev == parentSt.Dev {
		return false, iodine.New(errors.New("not mounted"), nil)
	}
	testFile, err := ioutil.TempFile(mountPath, "writetest-")
	if err != nil {
		return false, iodine.New(err, nil)
	}
	testFileName := testFile.Name()
	// close the file, to avoid leaky fd's
	testFile.Close()
	if err := os.Remove(testFileName); err != nil {
		return false, iodine.New(err, nil)
	}
	return true, nil
}
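// The mount check above relies on a POSIX property: a mount point and its
// parent directory live on different devices (st_dev differs), while an
// ordinary directory shares its parent's device. exampleIsUsable is a
// minimal usage sketch (not part of the original source) with a
// hypothetical path, using stdlib log and fmt:
func exampleIsUsable() {
	usable, err := IsUsable("/mnt/disk1")
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println("mounted and writable:", usable)
}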
// getMcConfig - reads configuration file and returns config
func getMcConfig() (*configV1, error) {
	if !isMcConfigExists() {
		return nil, iodine.New(errInvalidArgument{}, nil)
	}
	configFile, err := getMcConfigPath()
	if err != nil {
		return nil, iodine.New(err, nil)
	}
	// Cached in private global variable.
	if v := cache.Get(); v != nil {
		// Use previously cached config.
		return v.(quick.Config).Data().(*configV1), nil
	}
	conf := newConfigV1()
	qconf, err := quick.New(conf)
	if err != nil {
		return nil, iodine.New(err, nil)
	}
	err = qconf.Load(configFile)
	if err != nil {
		return nil, iodine.New(err, nil)
	}
	cache.Put(qconf)
	return qconf.Data().(*configV1), nil
}
// Save writes config data in JSON format to a file.
func (d config) Save(filename string) (err error) {
	d.lock.Lock()
	defer d.lock.Unlock()
	jsonData, err := json.MarshalIndent(d.data, "", "\t")
	if err != nil {
		return iodine.New(err, nil)
	}
	file, err := atomic.FileCreate(filename)
	if err != nil {
		return iodine.New(err, nil)
	}
	if runtime.GOOS == "windows" {
		jsonData = []byte(strings.Replace(string(jsonData), "\n", "\r\n", -1))
	}
	_, err = file.Write(jsonData)
	if err != nil {
		return iodine.New(err, nil)
	}
	if err := file.Close(); err != nil {
		return iodine.New(err, nil)
	}
	return nil
}
func doCatCmd(sourceURL string) (string, error) {
	sourceClnt, err := source2Client(sourceURL)
	if err != nil {
		return "Unable to create client: " + sourceURL, NewIodine(iodine.New(err, nil))
	}
	// ignore size, since os.Stat() would not return proper size all the time
	// for local filesystem, for example /proc files.
	reader, _, err := sourceClnt.GetObject(0, 0)
	if err != nil {
		return "Unable to retrieve file: " + sourceURL, NewIodine(iodine.New(err, nil))
	}
	defer reader.Close()
	// read till EOF
	_, err = io.Copy(os.Stdout, reader)
	if err != nil {
		switch e := iodine.ToError(err).(type) {
		case *os.PathError:
			if e.Err == syscall.EPIPE {
				// stdout closed by the user. Gracefully exit.
				return "", nil
			}
			return "Writing data to stdout failed, unexpected problem.. please report this error", NewIodine(iodine.New(err, nil))
		default:
			return "Reading data from source failed: " + sourceURL, NewIodine(iodine.New(err, nil))
		}
	}
	return "", nil
}
// New - instantiate new disk
func New(diskPath string) (Disk, error) {
	if diskPath == "" {
		return Disk{}, iodine.New(InvalidArgument{}, nil)
	}
	st, err := os.Stat(diskPath)
	if err != nil {
		return Disk{}, iodine.New(err, nil)
	}
	if !st.IsDir() {
		return Disk{}, iodine.New(syscall.ENOTDIR, nil)
	}
	s := syscall.Statfs_t{}
	err = syscall.Statfs(diskPath, &s)
	if err != nil {
		return Disk{}, iodine.New(err, nil)
	}
	disk := Disk{
		lock:   &sync.Mutex{},
		path:   diskPath,
		fsInfo: make(map[string]string),
	}
	if fsType := getFSType(s.Type); fsType != "UNKNOWN" {
		disk.fsInfo["FSType"] = fsType
		disk.fsInfo["MountPoint"] = disk.path
		return disk, nil
	}
	return Disk{}, iodine.New(UnsupportedFilesystem{Type: strconv.FormatInt(int64(s.Type), 10)},
		map[string]string{"Type": strconv.FormatInt(int64(s.Type), 10)})
}
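// exampleNewDisk is a minimal sketch (not part of the original source) of
// instantiating a Disk from within the same package; the path is
// hypothetical and must be a directory on a recognized filesystem.
func exampleNewDisk() {
	d, err := New("/mnt/disk1")
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println(d.fsInfo["FSType"], d.fsInfo["MountPoint"])
}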
// doDiffDirs - diff two dir URLs
func doDiffDirs(firstURL, secondURL string, recursive bool, ch chan diff) {
	firstClnt, firstContent, err := url2Stat(firstURL)
	if err != nil {
		ch <- diff{
			message: "Failed to stat ‘" + firstURL + "’",
			err:     iodine.New(err, nil),
		}
		return
	}
	_, secondContent, err := url2Stat(secondURL)
	if err != nil {
		ch <- diff{
			message: "Failed to stat ‘" + secondURL + "’",
			err:     iodine.New(err, nil),
		}
		return
	}
	switch {
	case firstContent.Type.IsDir():
		if !secondContent.Type.IsDir() {
			ch <- diff{
				message: firstURL + " and " + secondURL + " differ in type.",
				err:     nil,
			}
		}
	default:
		ch <- diff{
			message: "‘" + firstURL + "’ is not an object. Please report this bug with ‘--debug’ option.",
			err:     iodine.New(errNotAnObject{url: firstURL}, nil),
		}
		return
	}
	dodiffdirs(firstClnt, firstURL, secondURL, recursive, ch)
}
// AbortMultipartUpload - abort an incomplete multipart session
func (donut API) AbortMultipartUpload(bucket, key, uploadID string, signature *Signature) error {
	donut.lock.Lock()
	defer donut.lock.Unlock()
	if signature != nil {
		// the abort request has an empty body; verify against the SHA-256 of the empty string
		ok, err := signature.DoesSignatureMatch("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
		if err != nil {
			return iodine.New(err, nil)
		}
		if !ok {
			return iodine.New(SignatureDoesNotMatch{}, nil)
		}
	}
	if !IsValidBucket(bucket) {
		return iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
	}
	if !donut.storedBuckets.Exists(bucket) {
		return iodine.New(BucketNotFound{Bucket: bucket}, nil)
	}
	storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
	if storedBucket.multiPartSession[key].uploadID != uploadID {
		return iodine.New(InvalidUploadID{UploadID: uploadID}, nil)
	}
	donut.cleanupMultipartSession(bucket, key, uploadID)
	return nil
}
// getNewClient gives a new client interface
func getNewClient(urlStr string, auth *hostConfig) (clnt client.Client, err error) {
	url, err := client.Parse(urlStr)
	if err != nil {
		return nil, iodine.New(errInvalidURL{URL: urlStr}, map[string]string{"URL": urlStr})
	}
	switch url.Type {
	case client.Object: // Minio and S3 compatible object storage
		if auth == nil {
			return nil, iodine.New(errInvalidArgument{}, nil)
		}
		s3Config := new(s3.Config)
		s3Config.AccessKeyID = func() string {
			if auth.AccessKeyID == globalAccessKeyID {
				return ""
			}
			return auth.AccessKeyID
		}()
		s3Config.SecretAccessKey = func() string {
			if auth.SecretAccessKey == globalSecretAccessKey {
				return ""
			}
			return auth.SecretAccessKey
		}()
		s3Config.AppName = "Minio"
		s3Config.AppVersion = getVersion()
		s3Config.AppComments = []string{os.Args[0], runtime.GOOS, runtime.GOARCH}
		s3Config.HostURL = urlStr
		s3Config.Debug = globalDebugFlag
		return s3.New(s3Config)
	case client.Filesystem:
		return fs.New(urlStr)
	}
	return nil, iodine.New(errInvalidURL{URL: urlStr}, nil)
}
// ReadObject - open an object to read
func (b bucket) ReadObject(objectName string) (reader io.ReadCloser, size int64, err error) {
	b.lock.RLock()
	defer b.lock.RUnlock()
	reader, writer := io.Pipe()
	// get list of objects
	bucketMetadata, err := b.getBucketMetadata()
	if err != nil {
		return nil, 0, iodine.New(err, nil)
	}
	// check if object exists
	if _, ok := bucketMetadata.Buckets[b.getBucketName()].BucketObjects[objectName]; !ok {
		return nil, 0, iodine.New(ObjectNotFound{Object: objectName}, nil)
	}
	objMetadata := ObjectMetadata{}
	metadataReaders, err := b.getDiskReaders(normalizeObjectName(objectName), objectMetadataConfig)
	if err != nil {
		return nil, 0, iodine.New(err, nil)
	}
	for _, metadataReader := range metadataReaders {
		defer metadataReader.Close()
	}
	// decode object metadata from the first reader
	for _, metadataReader := range metadataReaders {
		jdec := json.NewDecoder(metadataReader)
		if err := jdec.Decode(&objMetadata); err != nil {
			return nil, 0, iodine.New(err, nil)
		}
		break
	}
	// read and reply back to GetObject() request in a go-routine
	go b.readEncodedData(normalizeObjectName(objectName), writer, objMetadata)
	return reader, objMetadata.Size, nil
}
// GetBucketMetadata retrieves a bucket's metadata
func (d donutDriver) GetBucketMetadata(bucketName string) (drivers.BucketMetadata, error) {
	if d.donut == nil {
		return drivers.BucketMetadata{}, iodine.New(drivers.InternalError{}, nil)
	}
	if !drivers.IsValidBucket(bucketName) || strings.Contains(bucketName, ".") {
		return drivers.BucketMetadata{}, drivers.BucketNameInvalid{Bucket: bucketName}
	}
	metadata, err := d.donut.GetBucketMetadata(bucketName)
	if err != nil {
		return drivers.BucketMetadata{}, iodine.New(drivers.BucketNotFound{Bucket: bucketName}, nil)
	}
	created, err := time.Parse(time.RFC3339Nano, metadata["created"])
	if err != nil {
		return drivers.BucketMetadata{}, iodine.New(err, nil)
	}
	acl, ok := metadata["acl"]
	if !ok {
		return drivers.BucketMetadata{}, iodine.New(drivers.BackendCorrupted{}, nil)
	}
	bucketMetadata := drivers.BucketMetadata{
		Name:    bucketName,
		Created: created,
		ACL:     drivers.BucketACL(acl),
	}
	return bucketMetadata, nil
}
// decodeEncodedData - read one encoded chunk from every reader and decode them into a single data block
func (b bucket) decodeEncodedData(totalLeft, blockSize int64, readers []io.ReadCloser, encoder encoder, writer *io.PipeWriter) ([]byte, error) {
	var curBlockSize int64
	if blockSize < totalLeft {
		curBlockSize = blockSize
	} else {
		curBlockSize = totalLeft // int cast below is safe: curBlockSize is bounded by blockSize here
	}
	curChunkSize, err := encoder.GetEncodedBlockLen(int(curBlockSize))
	if err != nil {
		return nil, iodine.New(err, nil)
	}
	encodedBytes := make([][]byte, len(readers))
	for i, reader := range readers {
		var bytesBuffer bytes.Buffer
		_, err := io.CopyN(&bytesBuffer, reader, int64(curChunkSize))
		if err != nil {
			return nil, iodine.New(err, nil)
		}
		encodedBytes[i] = bytesBuffer.Bytes()
	}
	decodedData, err := encoder.Decode(encodedBytes, int(curBlockSize))
	if err != nil {
		return nil, iodine.New(err, nil)
	}
	return decodedData, nil
}
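// The per-disk chunk length comes from the encoder: for k data blocks an
// input of length n is padded so that each of the k+m chunks holds about
// ceil(n/k) bytes. exampleEncodedBlockLen is a sketch of that arithmetic
// (a hypothetical helper; the encoder's actual GetEncodedBlockLen may
// round differently):
func exampleEncodedBlockLen(dataLen, k int) int {
	// e.g. dataLen = 10MB, k = 8 -> each chunk is 1.25MB; all k+m chunks
	// together occupy (k+m)/k times the original size
	return (dataLen + k - 1) / k
}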
// CreateObject creates a new object
func (d donutDriver) CreateObject(bucketName, objectName, contentType, expectedMD5Sum string, size int64, reader io.Reader) (string, error) {
	errParams := map[string]string{
		"bucketName":  bucketName,
		"objectName":  objectName,
		"contentType": contentType,
	}
	if d.donut == nil {
		return "", iodine.New(drivers.InternalError{}, errParams)
	}
	if !drivers.IsValidBucket(bucketName) || strings.Contains(bucketName, ".") {
		return "", iodine.New(drivers.BucketNameInvalid{Bucket: bucketName}, nil)
	}
	if !drivers.IsValidObjectName(objectName) || strings.TrimSpace(objectName) == "" {
		return "", iodine.New(drivers.ObjectNameInvalid{Object: objectName}, nil)
	}
	if strings.TrimSpace(contentType) == "" {
		contentType = "application/octet-stream"
	}
	metadata := make(map[string]string)
	metadata["contentType"] = strings.TrimSpace(contentType)
	metadata["contentLength"] = strconv.FormatInt(size, 10)
	if strings.TrimSpace(expectedMD5Sum) != "" {
		// the Content-MD5 header is base64 encoded; donut expects hex
		expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
		if err != nil {
			return "", iodine.New(err, nil)
		}
		expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
	}
	calculatedMD5Sum, err := d.donut.PutObject(bucketName, objectName, expectedMD5Sum, ioutil.NopCloser(reader), metadata)
	if err != nil {
		return "", iodine.New(err, errParams)
	}
	return calculatedMD5Sum, nil
}
// aliasExpand expands aliased (name:/path) to full URL, used by url-parser
func aliasExpand(aliasedURL string, aliases map[string]string) (newURL string, err error) {
	u, err := client.Parse(aliasedURL)
	if err != nil {
		return aliasedURL, iodine.New(errInvalidURL{URL: aliasedURL}, nil)
	}
	// already a proper URL, nothing to expand
	if u.Host != "" {
		return aliasedURL, nil
	}
	for aliasName, expandedURL := range aliases {
		if strings.HasPrefix(aliasedURL, aliasName+":") {
			// Match found. Expand it.
			splits := strings.Split(aliasedURL, ":")
			// if expandedURL is missing, return aliasedURL and treat it like fs
			if expandedURL == "" {
				return aliasedURL, nil
			}
			if len(splits) == 2 {
				// remove any prefixed slashes
				trimmedURL := expandedURL + "/" + strings.TrimPrefix(strings.TrimPrefix(splits[1], "/"), "\\")
				u, err := client.Parse(trimmedURL)
				if err != nil {
					return aliasedURL, iodine.New(errInvalidURL{URL: aliasedURL}, nil)
				}
				return u.String(), nil
			}
			// more than one ":" in the URL, return it unchanged
			return aliasedURL, nil
		}
	}
	return aliasedURL, nil
}
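// exampleAliasExpand is a minimal sketch (not part of the original source)
// of alias expansion; the alias name and endpoint are hypothetical.
func exampleAliasExpand() {
	aliases := map[string]string{"play": "https://play.minio.io:9000"}
	url, err := aliasExpand("play:bucket/object", aliases)
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println(url) // https://play.minio.io:9000/bucket/object
}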
// SINGLE SOURCE - Type A: copy(f, f) -> copy(f, f)
// prepareCopyURLsTypeA - prepares target and source URLs for copying.
func prepareCopyURLsTypeA(sourceURL string, targetURL string) <-chan cpURLs {
	cpURLsCh := make(chan cpURLs, 10000)
	go func(sourceURL, targetURL string, cpURLsCh chan cpURLs) {
		defer close(cpURLsCh)
		_, sourceContent, err := url2Stat(sourceURL)
		if err != nil {
			// Source does not exist or insufficient privileges.
			cpURLsCh <- cpURLs{Error: iodine.New(err, nil)}
			return
		}
		if !sourceContent.Type.IsRegular() {
			// Source is not a regular file.
			cpURLsCh <- cpURLs{Error: iodine.New(errInvalidSource{URL: sourceURL}, nil)}
			return
		}
		targetClient, err := target2Client(targetURL)
		if err != nil {
			cpURLsCh <- cpURLs{Error: iodine.New(err, nil)}
			return
		}
		// does target exist?
		targetContent, err := targetClient.Stat()
		if err == nil {
			// Target exists.
			if !targetContent.Type.IsRegular() {
				// Target is not a regular file.
				cpURLsCh <- cpURLs{Error: iodine.New(errInvalidTarget{URL: targetURL}, nil)}
				return
			}
		}
		// All OK.. We can proceed. Type A
		sourceContent.Name = sourceURL
		cpURLsCh <- cpURLs{SourceContent: sourceContent, TargetContent: &client.Content{Name: targetURL}}
	}(sourceURL, targetURL, cpURLsCh)
	return cpURLsCh
}