Commit 0888460c9050 ("kprobes: Annotate structs with __counted_by()")
added a __counted_by annotation without adjusting the code for the
__counted_by requirements, resulting in a panic when UBSAN_BOUNDS and
FORTIFY_SOURCE are enabled:

  | memset: detected buffer overflow: 512 byte write of buffer size 0
  | WARNING: CPU: 0 PID: 1 at lib/string_helpers.c:1032 __fortify_report+0x64/0x80
  | Call Trace:
  |  __fortify_report+0x60/0x80 (unreliable)
  |  __fortify_panic+0x18/0x1c
  |  __get_insn_slot+0x33c/0x340

__counted_by requires that the counter be set before the flexible array
is accessed, but ->nused is not set until after ->slot_used is accessed
via memset(). Even if the existing '->nused = 1' assignment were moved
above the memset(), the value of 1 would still be wrong because the
memset() writes the entire array, not just one element.
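
For reference, the annotated structure from the Fixes commit looks
roughly like this (abridged from kernel/kprobes.c):

  struct kprobe_insn_page {
          struct list_head list;
          kprobe_opcode_t *insns;         /* Page of instruction slots */
          struct kprobe_insn_cache *cache;
          int nused;
          int ngarbage;
          /* FORTIFY/UBSAN bound accesses to slot_used[] by nused */
          char slot_used[] __counted_by(nused);
  };

With the annotation, FORTIFY_SOURCE derives the size of ->slot_used from
the value of ->nused at each access. The page is allocated with
kmalloc(), which leaves ->nused uninitialized at the time of the
memset(), so the bounds check fails (the report above happened to read a
size of 0).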

Resolve the panic by setting ->nused to the full number of slots from
slots_per_page() before calling memset(). Additionally, move the
existing '->nused = 1' assignment above the ->slot_used[0] access; this
is not strictly necessary given the new assignment, but it keeps the
counter update visually ahead of the array access.

The value of slots_per_page() cannot change during __get_insn_slot()
because ->insn_size is never modified after its initial assignment
(which must have happened by this point, otherwise the existing uses
would be incorrect) and the other factors are compile-time constants, so
cache the value in a new variable and use it throughout the function.
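
For context, slots_per_page() is roughly the following (paraphrased from
kernel/kprobes.c):

  static int slots_per_page(struct kprobe_insn_cache *c)
  {
          /*
           * PAGE_SIZE and sizeof(kprobe_opcode_t) are compile-time
           * constants; ->insn_size is the only variable input.
           */
          return PAGE_SIZE / (c->insn_size * sizeof(kprobe_opcode_t));
  }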

Fixes: 0888460c9050 ("kprobes: Annotate structs with __counted_by()")
Signed-off-by: Nathan Chancellor <nat...@kernel.org>
---
 kernel/kprobes.c | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 98d71a5acb723ddfff3efcc44cc6754ee36ec1de..2cf4628bc97ce2ae18547b513cd75b6350e9cc9c 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -145,16 +145,18 @@ kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
 {
        struct kprobe_insn_page *kip;
        kprobe_opcode_t *slot = NULL;
+       int num_slots;
 
        /* Since the slot array is not protected by rcu, we need a mutex */
        mutex_lock(&c->mutex);
+       num_slots = slots_per_page(c);
  retry:
        rcu_read_lock();
        list_for_each_entry_rcu(kip, &c->pages, list) {
-               if (kip->nused < slots_per_page(c)) {
+               if (kip->nused < num_slots) {
                        int i;
 
-                       for (i = 0; i < slots_per_page(c); i++) {
+                       for (i = 0; i < num_slots; i++) {
                                if (kip->slot_used[i] == SLOT_CLEAN) {
                                        kip->slot_used[i] = SLOT_USED;
                                        kip->nused++;
@@ -164,7 +166,7 @@ kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
                                }
                        }
                        /* kip->nused is broken. Fix it. */
-                       kip->nused = slots_per_page(c);
+                       kip->nused = num_slots;
                        WARN_ON(1);
                }
        }
@@ -175,7 +177,7 @@ kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
                goto retry;
 
        /* All out of space.  Need to allocate a new page. */
-       kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
+       kip = kmalloc(KPROBE_INSN_PAGE_SIZE(num_slots), GFP_KERNEL);
        if (!kip)
                goto out;
 
@@ -185,9 +187,11 @@ kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
                goto out;
        }
        INIT_LIST_HEAD(&kip->list);
-       memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
-       kip->slot_used[0] = SLOT_USED;
+       /* nused must be set before accessing slot_used */
+       kip->nused = num_slots;
+       memset(kip->slot_used, SLOT_CLEAN, num_slots);
        kip->nused = 1;
+       kip->slot_used[0] = SLOT_USED;
        kip->ngarbage = 0;
        kip->cache = c;
        list_add_rcu(&kip->list, &c->pages);

-- 
2.47.0

