> diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
> index 093600363564..0d7581786a14 100644
> --- a/kernel/bpf/helpers.c
> +++ b/kernel/bpf/helpers.c

[ ... ]

> +__bpf_kfunc int bpf_list_add_impl(struct bpf_list_node *prev,
> +                               struct bpf_list_node *node,
> +                               void *meta__ign, u64 off)

> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> index 7a54cd749aec..9558b7020b89 100644
> --- a/kernel/bpf/verifier.c
> +++ b/kernel/bpf/verifier.c

[ ... ]

> @@ -14124,6 +14128,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
>
>       if (meta.func_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
>           meta.func_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
> +         meta.func_id == special_kfunc_list[KF_bpf_list_add_impl] ||
>           meta.func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
>               release_ref_obj_id = regs[BPF_REG_2].ref_obj_id;
>               insn_aux->insert_off = regs[BPF_REG_2].off;

Is the verifier enforcing that the bpf_spin_lock is held when
bpf_list_add_impl is called?

The existing bpf_list_push_front_impl and bpf_list_push_back_impl get
lock enforcement because they take a bpf_list_head* as their first
argument. That argument type triggers __process_kf_arg_ptr_to_graph_root
in the verifier, which calls check_reg_allocation_locked():

kernel/bpf/verifier.c:__process_kf_arg_ptr_to_graph_root() {
    ...
    if (check_reg_allocation_locked(env, reg)) {
        verbose(env, "bpf_spin_lock at off=%d must be held for %s\n",
            rec->spin_lock_off, head_type_name);
        return -EINVAL;
    }
    ...
}

bpf_list_add_impl takes two bpf_list_node* arguments and no
bpf_list_head*. The verifier processes both arguments through
__process_kf_arg_ptr_to_graph_node, which does not call
check_reg_allocation_locked. This means a BPF program could call
bpf_list_add_impl without holding the spin lock.

With bpf_refcount, a program can hold an owning reference to an
in-list node that survives lock release:

    bpf_spin_lock(&lock);
    bpf_list_push_front(&head, &node->node);
    extra = bpf_refcount_acquire(node);
    bpf_spin_unlock(&lock);
    /* extra is still an owning ref, call without lock */
    bpf_list_add_impl(&extra->node, &new_node->node, ...);

This would execute list_add() without synchronization, which could
corrupt the list.


---
AI reviewed your patch. Please fix the bug, or reply by email explaining why it's not a bug.
See: https://github.com/kernel-patches/vmtest/blob/master/ci/claude/README.md

CI run summary: https://github.com/kernel-patches/bpf/actions/runs/22391130139

AI-authorship-score: low
AI-authorship-explanation: Code closely follows existing 
bpf_list_push_front/back patterns with natural kernel commit style and precise 
BPF-specific terminology.
issues-found: 1
issue-severity-score: high
issue-severity-explanation: Missing verifier spin lock enforcement for 
bpf_list_add_impl allows unsynchronized list_add() calls that can corrupt the 
linked list.

Reply via email to