// Load - loads json config from filename for the a given struct data func Load(filename string, data interface{}) (Config, *probe.Error) { _, err := os.Stat(filename) if err != nil { return nil, probe.NewError(err) } fileData, err := ioutil.ReadFile(filename) if err != nil { return nil, probe.NewError(err) } if runtime.GOOS == "windows" { fileData = []byte(strings.Replace(string(fileData), "\r\n", "\n", -1)) } err = json.Unmarshal(fileData, &data) if err != nil { switch err := err.(type) { case *json.SyntaxError: return nil, probe.NewError(FormatJSONSyntaxError(bytes.NewReader(fileData), err)) default: return nil, probe.NewError(err) } } config, perr := New(data) if perr != nil { return nil, perr.Trace() } return config, nil }
func extractHTTPFormValues(reader *multipart.Reader) (io.Reader, map[string]string, *probe.Error) { /// HTML Form values formValues := make(map[string]string) filePart := new(bytes.Buffer) var err error for err == nil { var part *multipart.Part part, err = reader.NextPart() if part != nil { if part.FileName() == "" { buffer, err := ioutil.ReadAll(part) if err != nil { return nil, nil, probe.NewError(err) } formValues[http.CanonicalHeaderKey(part.FormName())] = string(buffer) } else { _, err := io.Copy(filePart, part) if err != nil { return nil, nil, probe.NewError(err) } } } } return filePart, formValues, nil }
// initPresignedSignatureV4 initializing presigned signature verification func initPresignedSignatureV4(req *http.Request) (*signv4.Signature, *probe.Error) { credentialElements := strings.Split(strings.TrimSpace(req.URL.Query().Get("X-Amz-Credential")), "/") if len(credentialElements) != 5 { return nil, probe.NewError(errCredentialTagMalformed) } accessKeyID := credentialElements[0] if !IsValidAccessKey(accessKeyID) { return nil, probe.NewError(errAccessKeyIDInvalid) } authConfig, err := LoadConfig() if err != nil { return nil, err.Trace() } signedHeaders := strings.Split(strings.TrimSpace(req.URL.Query().Get("X-Amz-SignedHeaders")), ";") signature := strings.TrimSpace(req.URL.Query().Get("X-Amz-Signature")) for _, user := range authConfig.Users { if user.AccessKeyID == accessKeyID { signature := &signv4.Signature{ AccessKeyID: user.AccessKeyID, SecretAccessKey: user.SecretAccessKey, Signature: signature, SignedHeaders: signedHeaders, Presigned: true, Request: req, } return signature, nil } } return nil, probe.NewError(errAccessKeyIDInvalid) }
// Match reports whether source has a counterpart in the sorted listing
// being decoded from sl.dec. The decoder cursor (sl.current) advances
// monotonically, so callers must present sources in the same sorted
// order. A match requires equal names; for regular files the sizes must
// also be equal.
//
// NOTE(review): a decode error inside the scan loop — including io.EOF
// when the listing is exhausted — is returned as an error, unlike the
// first read which treats io.EOF as "no match". Confirm this asymmetry
// is intended.
func (sl *sortedList) Match(source *client.Content) (bool, *probe.Error) {
	if len(sl.current.Name) == 0 {
		// for the first time read
		if err := sl.dec.Decode(&sl.current); err != nil {
			if err != io.EOF {
				return false, probe.NewError(err)
			}
			return false, nil
		}
	}
	for {
		// compare == 0: same entry; compare < 0: source sorts before the
		// cursor so it cannot appear later; compare > 0: advance cursor.
		compare := strings.Compare(source.Name, sl.current.Name)
		if compare == 0 {
			if source.Type.IsRegular() && sl.current.Type.IsRegular() && source.Size == sl.current.Size {
				return true, nil
			}
			return false, nil
		}
		if compare < 0 {
			return false, nil
		}
		// assign zero values to fields because if s.current's previous decode had non zero value
		// fields it will not be over written if this loop's decode does not contain those fields
		sl.current = client.Content{}
		if err := sl.dec.Decode(&sl.current); err != nil {
			return false, probe.NewError(err)
		}
	}
}
func (c *s3Client) ShareUpload(recursive bool, expires time.Duration, contentType string) (map[string]string, *probe.Error) { bucket, object := c.url2BucketAndObject() p := minio.NewPostPolicy() if err := p.SetExpires(time.Now().UTC().Add(expires)); err != nil { return nil, probe.NewError(err) } if strings.TrimSpace(contentType) != "" || contentType != "" { // No need to check for errors here, we have trimmed off the spaces p.SetContentType(contentType) } if err := p.SetBucket(bucket); err != nil { return nil, probe.NewError(err) } if recursive { if err := p.SetKeyStartsWith(object); err != nil { return nil, probe.NewError(err) } } else { if err := p.SetKey(object); err != nil { return nil, probe.NewError(err) } } m, err := c.api.PresignedPostPolicy(p) return m, probe.NewError(err) }
func printServerMsg(serverConf *http.Server) { host, port, e := net.SplitHostPort(serverConf.Addr) fatalIf(probe.NewError(e), "Unable to split host port.", nil) var hosts []string switch { case host != "": hosts = append(hosts, host) default: addrs, e := net.InterfaceAddrs() fatalIf(probe.NewError(e), "Unable to get interface address.", nil) for _, addr := range addrs { if addr.Network() == "ip+net" { host := strings.Split(addr.String(), "/")[0] if ip := net.ParseIP(host); ip.To4() != nil { hosts = append(hosts, host) } } } } for _, host := range hosts { if serverConf.TLSConfig != nil { Printf(" https://%s:%s\n", host, port) } else { Printf(" http://%s:%s\n", host, port) } } }
// MakeBucket - PUT Bucket func (fs API) MakeBucket(bucket, acl string) *probe.Error { fs.lock.Lock() defer fs.lock.Unlock() // verify bucket path legal if !IsValidBucket(bucket) { return probe.NewError(BucketNameInvalid{Bucket: bucket}) } // get bucket path bucketDir := filepath.Join(fs.path, bucket) // check if bucket exists if _, err := os.Stat(bucketDir); err == nil { return probe.NewError(BucketExists{ Bucket: bucket, }) } // make bucket err := os.Mkdir(bucketDir, 0700) if err != nil { return probe.NewError(err) } return nil }
// GetObjectMetadata - get object metadata. func (fs Filesystem) GetObjectMetadata(bucket, object string) (ObjectMetadata, *probe.Error) { // Input validation. if !IsValidBucketName(bucket) { return ObjectMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket}) } if !IsValidObjectName(object) { return ObjectMetadata{}, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: bucket}) } // Normalize buckets. bucket = fs.denormalizeBucket(bucket) bucketPath := filepath.Join(fs.path, bucket) if _, e := os.Stat(bucketPath); e != nil { if os.IsNotExist(e) { return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket}) } return ObjectMetadata{}, probe.NewError(e) } metadata, err := getMetadata(fs.path, bucket, object) if err != nil { return ObjectMetadata{}, err.Trace(bucket, object) } if metadata.Mode.IsDir() { return ObjectMetadata{}, probe.NewError(ObjectNotFound{Bucket: bucket, Object: object}) } return metadata, nil }
// deleteObjectPath - delete object path if its empty. func deleteObjectPath(basePath, deletePath, bucket, object string) *probe.Error { if basePath == deletePath { return nil } // Verify if the path exists. pathSt, e := os.Stat(deletePath) if e != nil { if os.IsNotExist(e) { return probe.NewError(ObjectNotFound{Bucket: bucket, Object: object}) } return probe.NewError(e) } if pathSt.IsDir() { // Verify if directory is empty. empty, e := ioutils.IsDirEmpty(deletePath) if e != nil { return probe.NewError(e) } if !empty { return nil } } // Attempt to remove path. if e := os.Remove(deletePath); e != nil { return probe.NewError(e) } // Recursively go down the next path and delete again. if err := deleteObjectPath(basePath, filepath.Dir(deletePath), bucket, object); err != nil { return err.Trace(basePath, deletePath, bucket, object) } return nil }
// Delete removes all the session files. func (s *sessionV8) Delete() *probe.Error { s.mutex.Lock() defer s.mutex.Unlock() if s.DataFP != nil { name := s.DataFP.Name() // close file pro-actively before deleting // ignore any error, it could be possibly that // the file is closed already s.DataFP.Close() // Remove the data file. if e := os.Remove(name); e != nil { return probe.NewError(e) } } // Fetch the session file. sessionFile, err := getSessionFile(s.SessionID) if err != nil { return err.Trace(s.SessionID) } // Remove session file if e := os.Remove(sessionFile); e != nil { return probe.NewError(e) } // Remove session backup file if any, ignore any error. os.Remove(sessionFile + ".old") return nil }
// GetAuthKeys get access key id and secret access key func GetAuthKeys(url string) ([]byte, *probe.Error) { op := RPCOps{ Method: "Auth.Get", Request: rpc.Args{Request: ""}, } req, perr := NewRequest(url, op, http.DefaultTransport) if perr != nil { return nil, perr.Trace() } resp, perr := req.Do() defer closeResp(resp) if perr != nil { return nil, perr.Trace() } var reply rpc.AuthReply if err := jsonrpc.DecodeClientResponse(resp.Body, &reply); err != nil { return nil, probe.NewError(err) } authConfig := &auth.Config{} authConfig.Version = "0.0.1" authConfig.Users = make(map[string]*auth.User) user := &auth.User{} user.Name = "testuser" user.AccessKeyID = reply.AccessKeyID user.SecretAccessKey = reply.SecretAccessKey authConfig.Users[reply.AccessKeyID] = user if err := auth.SaveConfig(authConfig); err != nil { return nil, err.Trace() } jsonRespBytes, err := json.MarshalIndent(reply, "", "\t") if err != nil { return nil, probe.NewError(err) } return jsonRespBytes, nil }
// save - wrapper for quick.Save and saves only if sessionHeader is // modified. func (s *sessionV8) save() *probe.Error { sessionFile, err := getSessionFile(s.SessionID) if err != nil { return err.Trace(s.SessionID) } // Verify if sessionFile is modified. modified, err := s.isModified(sessionFile) if err != nil { return err.Trace(s.SessionID) } // Header is modified, we save it. if modified { qs, e := quick.New(s.Header) if e != nil { return probe.NewError(e).Trace(s.SessionID) } // Save an return. e = qs.Save(sessionFile) if e != nil { return probe.NewError(e).Trace(sessionFile) } } return nil }
// IsModified - returns if in memory session header has changed from // its on disk value. func (s *sessionV8) isModified(sessionFile string) (bool, *probe.Error) { qs, e := quick.New(s.Header) if e != nil { return false, probe.NewError(e).Trace(s.SessionID) } var currentHeader = &sessionV8Header{} currentQS, e := quick.Load(sessionFile, currentHeader) if e != nil { // If session does not exist for the first, return modified to // be true. if os.IsNotExist(e) { return true, nil } // For all other errors return. return false, probe.NewError(e).Trace(s.SessionID) } changedFields, e := qs.DeepDiff(currentQS) if e != nil { return false, probe.NewError(e).Trace(s.SessionID) } // Returns true if there are changed entries. return len(changedFields) > 0, nil }
// Save this session. func (s *sessionV8) Save() *probe.Error { s.mutex.Lock() defer s.mutex.Unlock() if s.DataFP.dirty { if err := s.DataFP.Sync(); err != nil { return probe.NewError(err) } s.DataFP.dirty = false } qs, e := quick.New(s.Header) if e != nil { return probe.NewError(e).Trace(s.SessionID) } sessionFile, err := getSessionFile(s.SessionID) if err != nil { return err.Trace(s.SessionID) } e = qs.Save(sessionFile) if e != nil { return probe.NewError(e).Trace(sessionFile) } return nil }
// SetBucketMetadata - func (donut API) SetBucketMetadata(bucket string, metadata map[string]string, signature *Signature) *probe.Error { donut.lock.Lock() defer donut.lock.Unlock() if signature != nil { ok, err := signature.DoesSignatureMatch("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855") if err != nil { return err.Trace() } if !ok { return probe.NewError(SignatureDoesNotMatch{}) } } if !IsValidBucket(bucket) { return probe.NewError(BucketNameInvalid{Bucket: bucket}) } if !donut.storedBuckets.Exists(bucket) { return probe.NewError(BucketNotFound{Bucket: bucket}) } if len(donut.config.NodeDiskMap) > 0 { if err := donut.setBucketMetadata(bucket, metadata); err != nil { return err.Trace() } } storedBucket := donut.storedBuckets.Get(bucket).(storedBucket) storedBucket.bucketMetadata.ACL = BucketACL(metadata["acl"]) donut.storedBuckets.Set(bucket, storedBucket) return nil }
// GetObjectMetadata - get object metadata from cache func (donut API) GetObjectMetadata(bucket, key string) (ObjectMetadata, *probe.Error) { donut.lock.Lock() defer donut.lock.Unlock() // check if bucket exists if !IsValidBucket(bucket) { return ObjectMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket}) } if !IsValidObjectName(key) { return ObjectMetadata{}, probe.NewError(ObjectNameInvalid{Object: key}) } if !donut.storedBuckets.Exists(bucket) { return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket}) } storedBucket := donut.storedBuckets.Get(bucket).(storedBucket) objectKey := bucket + "/" + key if objMetadata, ok := storedBucket.objectMetadata[objectKey]; ok == true { return objMetadata, nil } if len(donut.config.NodeDiskMap) > 0 { objMetadata, err := donut.getObjectMetadata(bucket, key) if err != nil { return ObjectMetadata{}, err.Trace() } // update storedBucket.objectMetadata[objectKey] = objMetadata donut.storedBuckets.Set(bucket, storedBucket) return objMetadata, nil } return ObjectMetadata{}, probe.NewError(ObjectNotFound{Object: key}) }
// Load - loads json config func Load(filename string, data interface{}) (Config, *probe.Error) { _, err := os.Stat(filename) if err != nil { return nil, probe.NewError(err) } fileData, err := ioutil.ReadFile(filename) if err != nil { return nil, probe.NewError(err) } if runtime.GOOS == "windows" { fileData = []byte(strings.Replace(string(fileData), "\r\n", "\n", -1)) } err = json.Unmarshal(fileData, &data) if err != nil { return nil, probe.NewError(err) } config, perr := New(data) if perr != nil { return nil, perr.Trace() } return config, nil }
// putObject - put object func (xl API) putObject(bucket, object, expectedMD5Sum string, reader io.Reader, size int64, metadata map[string]string, signature *signV4.Signature) (ObjectMetadata, *probe.Error) { if bucket == "" || strings.TrimSpace(bucket) == "" { return ObjectMetadata{}, probe.NewError(InvalidArgument{}) } if object == "" || strings.TrimSpace(object) == "" { return ObjectMetadata{}, probe.NewError(InvalidArgument{}) } if err := xl.listXLBuckets(); err != nil { return ObjectMetadata{}, err.Trace() } if _, ok := xl.buckets[bucket]; !ok { return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket}) } bucketMeta, err := xl.getXLBucketMetadata() if err != nil { return ObjectMetadata{}, err.Trace() } if _, ok := bucketMeta.Buckets[bucket].BucketObjects[object]; ok { return ObjectMetadata{}, probe.NewError(ObjectExists{Object: object}) } objMetadata, err := xl.buckets[bucket].WriteObject(object, reader, size, expectedMD5Sum, metadata, signature) if err != nil { return ObjectMetadata{}, err.Trace() } bucketMeta.Buckets[bucket].BucketObjects[object] = struct{}{} if err := xl.setXLBucketMetadata(bucketMeta); err != nil { return ObjectMetadata{}, err.Trace() } return objMetadata, nil }
func configureWebServer(conf cloudServerConfig) (*http.Server, *probe.Error) { // Split the api address into host and port. host, port, e := net.SplitHostPort(conf.Address) if e != nil { return nil, probe.NewError(e) } webPort, e := strconv.Atoi(port) if e != nil { return nil, probe.NewError(e) } // Always choose the next port, based on the API address port. webPort = webPort + 1 webAddress := net.JoinHostPort(host, strconv.Itoa(webPort)) // Minio server config webServer := &http.Server{ Addr: webAddress, Handler: getWebAPIHandler(getNewWebAPI(conf)), MaxHeaderBytes: 1 << 20, } if conf.TLS { var err error webServer.TLSConfig = &tls.Config{} webServer.TLSConfig.Certificates = make([]tls.Certificate, 1) webServer.TLSConfig.Certificates[0], err = tls.LoadX509KeyPair(conf.CertFile, conf.KeyFile) if err != nil { return nil, probe.NewError(err) } } return webServer, nil }
// abortMultipartUpload - abort a incomplete multipart upload func (xl API) abortMultipartUpload(bucket, object, uploadID string) *probe.Error { if err := xl.listXLBuckets(); err != nil { return err.Trace() } if _, ok := xl.buckets[bucket]; !ok { return probe.NewError(BucketNotFound{Bucket: bucket}) } allbuckets, err := xl.getXLBucketMetadata() if err != nil { return err.Trace() } bucketMetadata := allbuckets.Buckets[bucket] if _, ok := bucketMetadata.Multiparts[object]; !ok { return probe.NewError(InvalidUploadID{UploadID: uploadID}) } if bucketMetadata.Multiparts[object].UploadID != uploadID { return probe.NewError(InvalidUploadID{UploadID: uploadID}) } delete(bucketMetadata.Multiparts, object) allbuckets.Buckets[bucket] = bucketMetadata if err := xl.setXLBucketMetadata(allbuckets); err != nil { return err.Trace() } return nil }
// DeleteBucket - delete bucket func (fs API) DeleteBucket(bucket string) *probe.Error { fs.lock.Lock() defer fs.lock.Unlock() // verify bucket path legal if !IsValidBucket(bucket) { return probe.NewError(BucketNameInvalid{Bucket: bucket}) } bucketDir := filepath.Join(fs.path, bucket) // check bucket exists if _, err := os.Stat(bucketDir); os.IsNotExist(err) { return probe.NewError(BucketNotFound{Bucket: bucket}) } files, err := ioutil.ReadDir(bucketDir) if err != nil { return probe.NewError(err) } if len(files) > 0 { return probe.NewError(BucketNotEmpty{Bucket: bucket}) } if err := os.Remove(bucketDir); err != nil { return probe.NewError(err) } return nil }
// PutObject - create a new file func (f *fsClient) PutObject(size int64, data io.Reader) *probe.Error { objectDir, _ := filepath.Split(f.Path) objectPath := f.Path if objectDir != "" { if err := os.MkdirAll(objectDir, 0700); err != nil { return probe.NewError(err) } } fs, err := os.Create(objectPath) if err != nil { return probe.NewError(err) } defer fs.Close() // even if size is zero try to read from source if size > 0 { _, err = io.CopyN(fs, data, int64(size)) if err != nil { return probe.NewError(err) } } else { // size could be 0 for virtual files on certain filesystems // for example /proc, so read till EOF for such files _, err = io.Copy(fs, data) if err != nil { return probe.NewError(err) } } return nil }
// GetBucketMetadata - get bucket metadata. func (fs Filesystem) GetBucketMetadata(bucket string) (BucketMetadata, *probe.Error) { if !IsValidBucketName(bucket) { return BucketMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket}) } bucket = fs.denormalizeBucket(bucket) // Get bucket path. bucketDir := filepath.Join(fs.path, bucket) fi, e := os.Stat(bucketDir) if e != nil { // Check if bucket exists. if os.IsNotExist(e) { return BucketMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket}) } return BucketMetadata{}, probe.NewError(e) } fs.rwLock.RLock() bucketMetadata, ok := fs.buckets.Metadata[bucket] fs.rwLock.RUnlock() // If metadata value is not found, get it from disk. if !ok { bucketMetadata = &BucketMetadata{} bucketMetadata.Name = fi.Name() bucketMetadata.Created = fi.ModTime() bucketMetadata.ACL = BucketACL("private") } return *bucketMetadata, nil }
// GetObject download an full or part object from bucket // getobject returns a reader, length and nil for no errors // with errors getobject will return nil reader, length and typed errors func (f *fsClient) GetObject(offset, length int64) (io.ReadCloser, int64, *probe.Error) { if offset < 0 || length < 0 { return nil, 0, probe.NewError(client.InvalidRange{Offset: offset}) } tmppath := f.Path // Golang strips trailing / if you clean(..) or // EvalSymlinks(..). Adding '.' prevents it from doing so. if strings.HasSuffix(tmppath, string(f.URL().Separator)) { tmppath = tmppath + "." } // Resolve symlinks _, err := filepath.EvalSymlinks(tmppath) if os.IsNotExist(err) { return nil, length, probe.NewError(err) } if err != nil { return nil, length, probe.NewError(err) } if offset == 0 && length == 0 { return f.get() } body, err := os.Open(f.Path) if err != nil { return nil, length, probe.NewError(err) } _, err = io.CopyN(ioutil.Discard, body, int64(offset)) if err != nil { return nil, length, probe.NewError(err) } return body, length, nil }
// AbortMultipartUpload - abort an incomplete multipart session func (donut API) AbortMultipartUpload(bucket, key, uploadID string, signature *Signature) *probe.Error { donut.lock.Lock() defer donut.lock.Unlock() if !IsValidBucket(bucket) { return probe.NewError(BucketNameInvalid{Bucket: bucket}) } if !IsValidObjectName(key) { return probe.NewError(ObjectNameInvalid{Object: key}) } if signature != nil { ok, err := signature.DoesSignatureMatch("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855") if err != nil { return err.Trace() } if !ok { return probe.NewError(SignatureDoesNotMatch{}) } } if len(donut.config.NodeDiskMap) > 0 { return donut.abortMultipartUpload(bucket, key, uploadID) } if !donut.storedBuckets.Exists(bucket) { return probe.NewError(BucketNotFound{Bucket: bucket}) } storedBucket := donut.storedBuckets.Get(bucket).(storedBucket) if storedBucket.multiPartSession[key].UploadID != uploadID { return probe.NewError(InvalidUploadID{UploadID: uploadID}) } donut.cleanupMultipartSession(bucket, key, uploadID) return nil }
// New - instantiate new disk func New(diskPath string) (Disk, *probe.Error) { if diskPath == "" { return Disk{}, probe.NewError(InvalidArgument{}) } st, err := os.Stat(diskPath) if err != nil { return Disk{}, probe.NewError(err) } if !st.IsDir() { return Disk{}, probe.NewError(syscall.ENOTDIR) } s := syscall.Statfs_t{} err = syscall.Statfs(diskPath, &s) if err != nil { return Disk{}, probe.NewError(err) } disk := Disk{ lock: &sync.Mutex{}, path: diskPath, fsInfo: make(map[string]string), } if fsType := getFSType(s.Type); fsType != "UNKNOWN" { disk.fsInfo["FSType"] = fsType disk.fsInfo["MountPoint"] = disk.path return disk, nil } return Disk{}, probe.NewError(UnsupportedFilesystem{Type: strconv.FormatInt(int64(s.Type), 10)}) }
// initPostPresignedPolicyV4 initializing post policy signature verification func initPostPresignedPolicyV4(formValues map[string]string) (*signv4.Signature, *probe.Error) { credentialElements := strings.Split(strings.TrimSpace(formValues["X-Amz-Credential"]), "/") if len(credentialElements) != 5 { return nil, probe.NewError(errCredentialTagMalformed) } accessKeyID := credentialElements[0] if !IsValidAccessKey(accessKeyID) { return nil, probe.NewError(errAccessKeyIDInvalid) } authConfig, perr := LoadConfig() if perr != nil { return nil, perr.Trace() } for _, user := range authConfig.Users { if user.AccessKeyID == accessKeyID { signature := &signv4.Signature{ AccessKeyID: user.AccessKeyID, SecretAccessKey: user.SecretAccessKey, Signature: formValues["X-Amz-Signature"], PresignedPolicy: formValues["Policy"], } return signature, nil } } return nil, probe.NewError(errAccessKeyIDInvalid) }
// GetBucketMetadata - func (donut API) GetBucketMetadata(bucket string, signature *Signature) (BucketMetadata, *probe.Error) { donut.lock.Lock() defer donut.lock.Unlock() if signature != nil { ok, err := signature.DoesSignatureMatch("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855") if err != nil { return BucketMetadata{}, err.Trace() } if !ok { return BucketMetadata{}, probe.NewError(SignatureDoesNotMatch{}) } } if !IsValidBucket(bucket) { return BucketMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket}) } if !donut.storedBuckets.Exists(bucket) { if len(donut.config.NodeDiskMap) > 0 { bucketMetadata, err := donut.getBucketMetadata(bucket) if err != nil { return BucketMetadata{}, err.Trace() } storedBucket := donut.storedBuckets.Get(bucket).(storedBucket) storedBucket.bucketMetadata = bucketMetadata donut.storedBuckets.Set(bucket, storedBucket) } return BucketMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket}) } return donut.storedBuckets.Get(bucket).(storedBucket).bucketMetadata, nil }
func (r rpcSignature) DoesSignatureMatch(hashedPayload string) (bool, *probe.Error) { // set new calulated payload r.Request.Header.Set("X-Minio-Content-Sha256", hashedPayload) // Add date if not present throw error var date string if date = r.Request.Header.Get(http.CanonicalHeaderKey("x-minio-date")); date == "" { if date = r.Request.Header.Get("Date"); date == "" { return false, probe.NewError(errMissingDateHeader) } } t, err := time.Parse(iso8601Format, date) if err != nil { return false, probe.NewError(err) } canonicalRequest := r.getCanonicalRequest() stringToSign := r.getStringToSign(canonicalRequest, t) signingKey := r.getSigningKey(t) newSignature := r.getSignature(signingKey, stringToSign) if newSignature != r.Signature { return false, nil } return true, nil }
// Delete removes all the session files. func (s *sessionV2) Delete() *probe.Error { s.mutex.Lock() defer s.mutex.Unlock() if s.DataFP != nil { name := s.DataFP.Name() // close file pro-actively before deleting // ignore any error, it could be possibly that // the file is closed already s.DataFP.Close() err := os.Remove(name) if err != nil { return probe.NewError(err) } } sessionFile, err := getSessionFile(s.SessionID) if err != nil { return err.Trace(s.SessionID) } if err := os.Remove(sessionFile); err != nil { return probe.NewError(err) } return nil }