// Removes bucket actions for given policy in given statement.
//
// readOnlyInUse / writeOnlyInUse indicate whether other statements still
// rely on the read-only / write-only bucket actions; when true, the
// corresponding actions are preserved here.
func removeBucketActions(statement Statement, prefix string, bucketResource string, readOnlyInUse, writeOnlyInUse bool) Statement {
	// Drops the read-only bucket actions from the statement, honoring
	// any "StringEquals" / "s3:prefix" condition tied to them.
	removeReadOnly := func() {
		// Nothing to do unless the statement carries the full
		// read-only action set.
		if !statement.Actions.Intersection(readOnlyBucketActions).Equals(readOnlyBucketActions) {
			return
		}
		// Unconditional statement: simply strip the actions.
		if statement.Conditions == nil {
			statement.Actions = statement.Actions.Difference(readOnlyBucketActions)
			return
		}
		if prefix != "" {
			// Remove this prefix from the StringEquals/s3:prefix
			// condition values, if present.
			stringEqualsValue := statement.Conditions["StringEquals"]
			values := set.NewStringSet()
			if stringEqualsValue != nil {
				values = stringEqualsValue["s3:prefix"]
				if values == nil {
					values = set.NewStringSet()
				}
			}
			values.Remove(prefix)
			if stringEqualsValue != nil {
				// Prune now-empty condition entries.
				if values.IsEmpty() {
					delete(stringEqualsValue, "s3:prefix")
				}
				if len(stringEqualsValue) == 0 {
					delete(statement.Conditions, "StringEquals")
				}
			}
			// Once no conditions remain, the read-only actions
			// themselves can go too.
			if len(statement.Conditions) == 0 {
				statement.Conditions = nil
				statement.Actions = statement.Actions.Difference(readOnlyBucketActions)
			}
		}
	}
	// Drops the write-only bucket actions; only unconditional
	// statements are touched.
	removeWriteOnly := func() {
		if statement.Conditions == nil {
			statement.Actions = statement.Actions.Difference(writeOnlyBucketActions)
		}
	}
	if len(statement.Resources) > 1 {
		// Statement covers other resources as well: detach only this
		// bucket resource and leave the actions intact.
		statement.Resources.Remove(bucketResource)
	} else {
		// Statement covers just this bucket: strip actions not still
		// in use elsewhere.
		if !readOnlyInUse {
			removeReadOnly()
		}
		if !writeOnlyInUse {
			removeWriteOnly()
		}
	}
	return statement
}
// GetPolicies returns a map of policies rules of given bucket name, prefix in given statements. func GetPolicies(statements []Statement, bucketName string) map[string]BucketPolicy { policyRules := map[string]BucketPolicy{} objResources := set.NewStringSet() // Search all resources related to objects policy for _, s := range statements { for r := range s.Resources { if strings.HasPrefix(r, awsResourcePrefix+bucketName+"/") { objResources.Add(r) } } } // Pretend that policy resource as an actual object and fetch its policy for r := range objResources { // Put trailing * if exists in asterisk asterisk := "" if strings.HasSuffix(r, "*") { r = r[:len(r)-1] asterisk = "*" } objectPath := r[len(awsResourcePrefix+bucketName)+1 : len(r)] p := GetPolicy(statements, bucketName, objectPath) policyRules[bucketName+"/"+objectPath+asterisk] = p } return policyRules }
// Returns policy of given bucket name, prefix in given statements. func GetPolicy(statements []Statement, bucketName string, prefix string) BucketPolicy { bucketResource := awsResourcePrefix + bucketName objectResource := awsResourcePrefix + bucketName + "/" + prefix + "*" bucketCommonFound := false bucketReadOnly := false bucketWriteOnly := false matchedResource := "" objReadOnly := false objWriteOnly := false for _, s := range statements { matchedObjResources := set.NewStringSet() if s.Resources.Contains(objectResource) { matchedObjResources.Add(objectResource) } else { matchedObjResources = s.Resources.FuncMatch(resourceMatch, objectResource) } if !matchedObjResources.IsEmpty() { readOnly, writeOnly := getObjectPolicy(s) for resource := range matchedObjResources { if len(matchedResource) < len(resource) { objReadOnly = readOnly objWriteOnly = writeOnly matchedResource = resource } else if len(matchedResource) == len(resource) { objReadOnly = objReadOnly || readOnly objWriteOnly = objWriteOnly || writeOnly matchedResource = resource } } } else if s.Resources.Contains(bucketResource) { commonFound, readOnly, writeOnly := getBucketPolicy(s, prefix) bucketCommonFound = bucketCommonFound || commonFound bucketReadOnly = bucketReadOnly || readOnly bucketWriteOnly = bucketWriteOnly || writeOnly } } policy := BucketPolicyNone if bucketCommonFound { if bucketReadOnly && bucketWriteOnly && objReadOnly && objWriteOnly { policy = BucketPolicyReadWrite } else if bucketReadOnly && objReadOnly { policy = BucketPolicyReadOnly } else if bucketWriteOnly && objWriteOnly { policy = BucketPolicyWriteOnly } } return policy }
// checkBucketPolicyResources validates Resources in unmarshalled bucket policy structure. // First valation of Resources done for given set of Actions. // Later its validated for recursive Resources. func checkBucketPolicyResources(bucket string, bucketPolicy *bucketPolicy) APIErrorCode { // Validate statements for special actions and collect resources // for others to validate nesting. var resourceMap = set.NewStringSet() for _, statement := range bucketPolicy.Statements { for action := range statement.Actions { for resource := range statement.Resources { resourcePrefix := strings.SplitAfter(resource, AWSResourcePrefix)[1] if _, ok := invalidPrefixActions[action]; ok { // Resource prefix is not equal to bucket for // prefix invalid actions, reject them. if resourcePrefix != bucket { return ErrMalformedPolicy } } else { // For all other actions validate if resourcePrefix begins // with bucket name, if not reject them. if strings.Split(resourcePrefix, "/")[0] != bucket { return ErrMalformedPolicy } // All valid resources collect them separately to verify nesting. resourceMap.Add(resourcePrefix) } } } } var resources []string for resource := range resourceMap { resources = append(resources, resourcePrefix(resource)) } // Sort strings as shorter first. sort.Strings(resources) for len(resources) > 1 { var resource string resource, resources = resources[0], resources[1:] // Loop through all resources, if one of them matches with // previous shorter one, it means we have detected // nesting. Reject such rules. for _, otherResource := range resources { // Common prefix reject such rules. if strings.HasPrefix(otherResource, resource) { return ErrPolicyNesting } } } // No errors found. return ErrNone }
func waitForFormattingDisks(disks, ignoredDisks []string) ([]StorageAPI, error) { // FS Setup if len(disks) == 1 { storage, err := newStorageAPI(disks[0]) if err != nil && err != errDiskNotFound { return nil, err } return []StorageAPI{storage}, nil } // XL Setup if err := checkSufficientDisks(disks); err != nil { return nil, err } disksSet := set.NewStringSet() if len(ignoredDisks) > 0 { disksSet = set.CreateStringSet(ignoredDisks...) } // Bootstrap disks. storageDisks := make([]StorageAPI, len(disks)) for index, disk := range disks { // Check if disk is ignored. if disksSet.Contains(disk) { // Set this situation as disk not found. storageDisks[index] = nil continue } // Intentionally ignore disk not found errors. XL is designed // to handle these errors internally. storage, err := newStorageAPI(disk) if err != nil && err != errDiskNotFound { return nil, err } storageDisks[index] = storage } // Start wait loop retrying formatting disks. return retryFormattingDisks(disks, storageDisks) }
// Returns statements containing removed actions/statements for given
// policy, bucket name and prefix.
func removeStatements(statements []Statement, bucketName string, prefix string) []Statement {
	bucketResource := awsResourcePrefix + bucketName
	objectResource := awsResourcePrefix + bucketName + "/" + prefix + "*"
	// Whether other prefixes still rely on read-only/write-only
	// bucket actions.
	readOnlyInUse, writeOnlyInUse := getInUsePolicy(statements, bucketName, prefix)
	out := []Statement{}
	// Read-only bucket statements deferred for the skip decision below.
	readOnlyBucketStatements := []Statement{}
	// Object-resource patterns built from StringEquals/s3:prefix values.
	s3PrefixValues := set.NewStringSet()
	for _, statement := range statements {
		// Statements not relevant to this bucket pass through untouched.
		if !isValidStatement(statement, bucketName) {
			out = append(out, statement)
			continue
		}
		if statement.Resources.Contains(bucketResource) {
			if statement.Conditions != nil {
				// Conditional statements prune per-prefix
				// conditions; in-use flags do not apply.
				statement = removeBucketActions(statement, prefix, bucketResource, false, false)
			} else {
				statement = removeBucketActions(statement, prefix, bucketResource, readOnlyInUse, writeOnlyInUse)
			}
		} else if statement.Resources.Contains(objectResource) {
			statement = removeObjectActions(statement, objectResource)
		}
		// Keep the statement only if any actions survived removal.
		if !statement.Actions.IsEmpty() {
			if statement.Resources.Contains(bucketResource) &&
				statement.Actions.Intersection(readOnlyBucketActions).Equals(readOnlyBucketActions) &&
				statement.Effect == "Allow" &&
				statement.Principal.AWS.Contains("*") {
				if statement.Conditions != nil {
					// Record each conditioned s3:prefix as an
					// object-resource pattern for matching later.
					stringEqualsValue := statement.Conditions["StringEquals"]
					values := set.NewStringSet()
					if stringEqualsValue != nil {
						values = stringEqualsValue["s3:prefix"]
						if values == nil {
							values = set.NewStringSet()
						}
					}
					s3PrefixValues = s3PrefixValues.Union(values.ApplyFunc(func(v string) string {
						return bucketResource + "/" + v + "*"
					}))
				} else if !s3PrefixValues.IsEmpty() {
					// Unconditional read-only bucket statement:
					// defer — it may be dropped below.
					readOnlyBucketStatements = append(readOnlyBucketStatements, statement)
					continue
				}
			}
			out = append(out, statement)
		}
	}
	// Decide whether the deferred read-only bucket statements can be
	// skipped: they are kept only if some surviving object statement
	// under this bucket is not already covered by s3PrefixValues.
	skipBucketStatement := true
	resourcePrefix := awsResourcePrefix + bucketName + "/"
	for _, statement := range out {
		if !statement.Resources.FuncMatch(startsWithFunc, resourcePrefix).IsEmpty() &&
			s3PrefixValues.Intersection(statement.Resources).IsEmpty() {
			skipBucketStatement = false
			break
		}
	}
	for _, statement := range readOnlyBucketStatements {
		if skipBucketStatement &&
			statement.Resources.Contains(bucketResource) &&
			statement.Effect == "Allow" &&
			statement.Principal.AWS.Contains("*") &&
			statement.Conditions == nil {
			continue
		}
		out = append(out, statement)
	}
	// If only the common bucket statement is left, the whole policy
	// is effectively empty — drop it.
	if len(out) == 1 {
		statement := out[0]
		if statement.Resources.Contains(bucketResource) &&
			statement.Actions.Intersection(commonBucketActions).Equals(commonBucketActions) &&
			statement.Effect == "Allow" &&
			statement.Principal.AWS.Contains("*") &&
			statement.Conditions == nil {
			out = []Statement{}
		}
	}
	return out
}
// newXLObjects - initialize new xl object layer from the given disks,
// skipping any disk listed in ignoredDisks.
func newXLObjects(disks, ignoredDisks []string) (ObjectLayer, error) {
	if disks == nil {
		return nil, errInvalidArgument
	}
	// Lookup set of disks to be skipped.
	disksSet := set.NewStringSet()
	if len(ignoredDisks) > 0 {
		disksSet = set.CreateStringSet(ignoredDisks...)
	}
	// Bootstrap disks.
	storageDisks := make([]StorageAPI, len(disks))
	for index, disk := range disks {
		// Check if disk is ignored.
		if disksSet.Contains(disk) {
			// Leave slot nil so it reads as a missing disk.
			storageDisks[index] = nil
			continue
		}
		var err error
		// Intentionally ignore disk not found errors. XL is designed
		// to handle these errors internally.
		storageDisks[index], err = newStorageAPI(disk)
		if err != nil && err != errDiskNotFound {
			// Close the RPC client of network-backed storage
			// before bailing out.
			switch diskType := storageDisks[index].(type) {
			case networkStorage:
				diskType.rpcClient.Close()
			}
			return nil, err
		}
	}
	// Fix format files in case of fresh or corrupted disks.
	repairDiskMetadata(storageDisks)
	// Runs house keeping code, like cleaning up tmp files etc.
	if err := xlHouseKeeping(storageDisks); err != nil {
		return nil, err
	}
	// Load saved XL format.json and validate.
	newPosixDisks, err := loadFormatXL(storageDisks)
	if err != nil {
		// errCorruptedDisk - healing failed
		return nil, fmt.Errorf("Unable to recognize backend format, %s", err)
	}
	// Calculate data and parity blocks: half the disks each.
	dataBlocks, parityBlocks := len(newPosixDisks)/2, len(newPosixDisks)/2
	// Initialize object cache.
	objCache := objcache.New(globalMaxCacheSize, globalCacheExpiry)
	// Initialize list pool.
	listPool := newTreeWalkPool(globalLookupTimeout)
	// Initialize xl objects.
	xl := xlObjects{
		storageDisks:    newPosixDisks,
		dataBlocks:      dataBlocks,
		parityBlocks:    parityBlocks,
		listPool:        listPool,
		objCache:        objCache,
		objCacheEnabled: globalMaxCacheSize > 0,
	}
	// Figure out read and write quorum based on number of storage disks.
	// READ and WRITE quorum is always set to (N/2) number of disks.
	xl.readQuorum = len(xl.storageDisks) / 2
	xl.writeQuorum = len(xl.storageDisks)/2 + 1
	// Return successfully initialized object layer.
	return xl, nil
}