Example #1
// GetPolicy is used to retrieve a compiled policy object with a TTL. Does not
// support a blocking query.
func (a *ACL) GetPolicy(args *structs.ACLPolicyRequest, reply *structs.ACLPolicy) error {
	if done, err := a.srv.forward("ACL.GetPolicy", args, args, reply); done {
		return err
	}

	// Verify we are allowed to serve this request
	if a.srv.config.ACLDatacenter != a.srv.config.Datacenter {
		return fmt.Errorf(aclDisabled)
	}

	// Get the policy via the cache
	parent, policy, err := a.srv.aclAuthCache.GetACLPolicy(args.ACL)
	if err != nil {
		return err
	}

	// Generate an ETag
	conf := a.srv.config
	etag := fmt.Sprintf("%s:%s", parent, policy.ID)

	// Setup the response
	reply.ETag = etag
	reply.TTL = conf.ACLTTL
	a.srv.setQueryMeta(&reply.QueryMeta)

	// Only send the policy on an ETag mismatch
	if args.ETag != etag {
		reply.Parent = parent
		reply.Policy = policy
	}
	return nil
}
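
The ETag here is a cheap change detector: the server derives it from the parent policy name and the compiled policy ID, and omits the policy body whenever the caller already holds a matching tag. Below is a minimal caller-side sketch; the fetchPolicy wrapper, datacenter name, and token ID are illustrative assumptions, with the rpc parameter mirroring the c.rpc call seen in Example #3.

// fetchPolicy is a hypothetical caller-side sketch, not from the Consul
// source. It returns nil when the cached policy is still current.
func fetchPolicy(rpc func(method string, args interface{}, reply interface{}) error,
	lastETag string) (*structs.ACLPolicy, error) {
	args := structs.ACLPolicyRequest{
		Datacenter: "dc1",      // assumed ACL datacenter
		ACL:        "my-token", // assumed token ID
		ETag:       lastETag,   // tag saved from the previous reply
	}
	var reply structs.ACLPolicy
	if err := rpc("ACL.GetPolicy", &args, &reply); err != nil {
		return nil, err
	}
	if reply.ETag == args.ETag {
		// Match: the server skipped Parent/Policy, so keep using the
		// cached copy for another reply.TTL.
		return nil, nil
	}
	// Mismatch: reply.Parent and reply.Policy carry the fresh policy.
	return &reply, nil
}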
Example #2
// catalogPolicy supplies some standard policies to help with testing the
// catalog-related vet and filter functions.
func catalogPolicy(req *structs.ACLPolicyRequest, reply *structs.ACLPolicy) error {
	reply.Policy = &rawacl.Policy{}

	switch req.ACL {

	case "node-ro":
		reply.Policy.Nodes = append(reply.Policy.Nodes,
			&rawacl.NodePolicy{Name: "Node", Policy: "read"})

	case "node-rw":
		reply.Policy.Nodes = append(reply.Policy.Nodes,
			&rawacl.NodePolicy{Name: "Node", Policy: "write"})

	case "service-ro":
		reply.Policy.Services = append(reply.Policy.Services,
			&rawacl.ServicePolicy{Name: "service", Policy: "read"})

	case "service-rw":
		reply.Policy.Services = append(reply.Policy.Services,
			&rawacl.ServicePolicy{Name: "service", Policy: "write"})

	case "other-rw":
		reply.Policy.Services = append(reply.Policy.Services,
			&rawacl.ServicePolicy{Name: "other", Policy: "write"})

	default:
		return fmt.Errorf("unknown token %q", req.ACL)
	}

	return nil
}
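
Since catalogPolicy is an ordinary function, a test can exercise the canned policies directly before wiring it into the vet/filter plumbing. A minimal sketch follows; the test name and assertions are illustrative, not from the Consul test suite.

func TestCatalogPolicy_Canned(t *testing.T) {
	req := structs.ACLPolicyRequest{ACL: "node-ro"}
	var reply structs.ACLPolicy
	if err := catalogPolicy(&req, &reply); err != nil {
		t.Fatalf("err: %v", err)
	}
	if n := reply.Policy.Nodes; len(n) != 1 || n[0].Policy != "read" {
		t.Fatalf("bad: %#v", reply.Policy)
	}

	// Unknown tokens fall through to the default case and error out.
	req.ACL = "nope"
	if err := catalogPolicy(&req, &reply); err == nil {
		t.Fatalf("expected error for unknown token %q", req.ACL)
	}
}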
Example #3
File: acl.go Project: pulcy/vault-monkey
// lookupACL is used when we are non-authoritative and need to resolve an ACL.
func (c *aclCache) lookupACL(id, authDC string) (acl.ACL, error) {
	// Check the cache for the ACL.
	var cached *aclCacheEntry
	raw, ok := c.acls.Get(id)
	if ok {
		cached = raw.(*aclCacheEntry)
	}

	// Check for a live cache entry.
	if cached != nil && time.Now().Before(cached.Expires) {
		metrics.IncrCounter([]string{"consul", "acl", "cache_hit"}, 1)
		return cached.ACL, nil
	}
	metrics.IncrCounter([]string{"consul", "acl", "cache_miss"}, 1)

	// Attempt to refresh the policy from the ACL datacenter via an RPC.
	args := structs.ACLPolicyRequest{
		Datacenter: authDC,
		ACL:        id,
	}
	if cached != nil {
		args.ETag = cached.ETag
	}
	var reply structs.ACLPolicy
	err := c.rpc("ACL.GetPolicy", &args, &reply)
	if err == nil {
		return c.useACLPolicy(id, authDC, cached, &reply)
	}

	// Check for not-found, which will cause us to bail immediately. For any
	// other error we report it in the logs but can continue.
	if strings.Contains(err.Error(), aclNotFound) {
		return nil, errors.New(aclNotFound)
	}
	c.logger.Printf("[ERR] consul.acl: Failed to get policy from ACL datacenter: %v", err)

	// TODO (slackpad) - We could do a similar thing *within* the ACL
	// datacenter if the leader isn't available. We have a local state
	// store of the ACLs, so by populating the local member in this cache,
	// it would fall back to the state store if there was a leader loss and
	// the extend-cache policy was true. This feels subtle to explain and
	// configure, and leader blips should be paved over by cache already, so
	// we won't do this for now but should consider for the future. This is
	// a lot different than the replication story where you might be cut off
	// from the ACL datacenter for an extended period of time and need to
	// carry on operating with the full set of ACLs as they were known
	// before the partition.

	// At this point we might have an expired cache entry and we know that
	// there was a problem getting the ACL from the ACL datacenter. If a
	// local ACL fault function is registered to query replicated ACL data,
	// and the user's policy allows it, we will try locally before we give
	// up.
	if c.local != nil && c.config.ACLDownPolicy == "extend-cache" {
		parent, rules, err := c.local(id)
		if err != nil {
			// We don't make an exception here for ACLs that aren't
			// found locally. It seems more robust to use an expired
			// cached entry (if we have one) rather than ignore it
			// for the case that replication was a bit behind and
			// didn't have the ACL yet.
			c.logger.Printf("[DEBUG] consul.acl: Failed to get policy from replicated ACLs: %v", err)
			goto ACL_DOWN
		}

		policy, err := acl.Parse(rules)
		if err != nil {
			c.logger.Printf("[DEBUG] consul.acl: Failed to parse policy for replicated ACL: %v", err)
			goto ACL_DOWN
		}
		policy.ID = acl.RuleID(rules)

		// Fake up an ACL datacenter reply and inject it into the cache.
		// Note we use the local TTL here, so this'll be used for that
		// amount of time even once the ACL datacenter becomes available.
		metrics.IncrCounter([]string{"consul", "acl", "replication_hit"}, 1)
		reply.ETag = makeACLETag(parent, policy)
		reply.TTL = c.config.ACLTTL
		reply.Parent = parent
		reply.Policy = policy
		return c.useACLPolicy(id, authDC, cached, &reply)
	}

ACL_DOWN:
	// Unable to refresh, apply the down policy.
	switch c.config.ACLDownPolicy {
	case "allow":
		return acl.AllowAll(), nil
	case "extend-cache":
		if cached != nil {
			return cached.ACL, nil
		}
		fallthrough
	default:
		return acl.DenyAll(), nil
	}
}
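
The closing switch implements the ACL "down policy": with the ACL datacenter unreachable and no usable replicated data, "allow" fails open, "extend-cache" serves a stale cache entry if one exists, and any other value (conventionally "deny") fails closed. A minimal configuration sketch follows, assuming only the config fields this function actually reads; the Config literal itself is illustrative.

conf := Config{
	ACLDownPolicy: "extend-cache",   // reuse expired cache entries during an outage
	ACLTTL:        30 * time.Second, // lifetime of a freshly fetched policy
}
// This config would be the c.config consulted by lookupACL above.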