Example #1
// kvsPreApply does all the verification of a KVS update that is performed
// BEFORE we submit it as a Raft log entry. This includes enforcing the lock
// delay, which must only be done on the leader.
func kvsPreApply(srv *Server, acl acl.ACL, op structs.KVSOp, dirEnt *structs.DirEntry) (bool, error) {
	// Verify the entry.
	if dirEnt.Key == "" && op != structs.KVSDeleteTree {
		return false, fmt.Errorf("Must provide key")
	}

	// Apply the ACL policy if any.
	if acl != nil {
		switch op {
		case structs.KVSDeleteTree:
			if !acl.KeyWritePrefix(dirEnt.Key) {
				return false, permissionDeniedErr
			}

		case structs.KVSGet, structs.KVSGetTree:
			// Filtering for GETs is done on the output side.

		case structs.KVSCheckSession, structs.KVSCheckIndex:
			// These could reveal information based on the outcome
			// of the transaction, and they operate on individual
			// keys so we check them here.
			if !acl.KeyRead(dirEnt.Key) {
				return false, permissionDeniedErr
			}

		default:
			if !acl.KeyWrite(dirEnt.Key) {
				return false, permissionDeniedErr
			}
		}
	}

	// If this is a lock, we must check for a lock-delay. Since lock-delay
	// is based on wall-time, each peer would expire the lock-delay at a slightly
	// different time. This means the enforcement of lock-delay cannot be done
	// after the raft log is committed as it would lead to inconsistent FSMs.
	// Instead, the lock-delay must be enforced before commit. This means that
	// only the wall-time of the leader node is used, preventing any inconsistencies.
	if op == structs.KVSLock {
		state := srv.fsm.State()
		expires := state.KVSLockDelay(dirEnt.Key)
		if expires.After(time.Now()) {
			srv.logger.Printf("[WARN] consul.kvs: Rejecting lock of %s due to lock-delay until %v",
				dirEnt.Key, expires)
			return false, nil
		}
	}

	return true, nil
}
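
For context, below is a minimal sketch of how a caller might use kvsPreApply: resolve the ACL token for the request, run the pre-commit checks on the leader, and only then submit the update as a Raft log entry. The identifiers resolveToken, raftApply, the KVS endpoint type, and structs.KVSRequestType follow the surrounding codebase's conventions but are assumptions here; they are not part of the excerpt above.

// Sketch only: assumes k.srv.resolveToken, k.srv.raftApply, and
// structs.KVSRequestType exist with roughly these shapes; none of them
// appear in the excerpt above.
func (k *KVS) Apply(args *structs.KVSRequest, reply *bool) error {
	// Resolve the ACL token attached to the request (assumed helper).
	acl, err := k.srv.resolveToken(args.Token)
	if err != nil {
		return err
	}

	// Run all pre-commit checks on the leader, including lock-delay.
	ok, err := kvsPreApply(k.srv, acl, args.Op, &args.DirEnt)
	if err != nil {
		return err
	}
	if !ok {
		// The operation was rejected, e.g. the lock-delay is still active.
		*reply = false
		return nil
	}

	// Only after the checks pass is the update submitted to Raft.
	resp, err := k.srv.raftApply(structs.KVSRequestType, args)
	if err != nil {
		return fmt.Errorf("raft apply failed: %v", err)
	}
	if respBool, isBool := resp.(bool); isBool {
		*reply = respBool
	}
	return nil
}

Because the lock-delay check happens in this pre-apply step rather than in the FSM apply path, only the leader's wall clock is consulted, which is what keeps the replicated state machines consistent.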