From: Lad Prabhakar <[email protected]>

Fix logic issues introduced by the kzalloc_flex() conversion in
mmc_test_alloc_mem() due to interaction with the __counted_by
annotation on the flexible array.

Bounds-checking sanitizers rely on the counter field reflecting the
allocated array size before any array access occurs. However, the
converted code used mem->cnt both as the allocation size and as the
runtime insertion index, causing incorrect indexing and potentially
invalid bounds tracking.

Initialize mem->cnt to the maximum allocated number of segments
immediately after kzalloc_flex(), then use a separate local index
variable to track successfully allocated entries. Update mem->cnt to
the actual number of initialized elements before returning or entering
the cleanup path.

Also rewrite mmc_test_free_mem() to use a forward for-loop, improving
readability and ensuring only initialized entries are freed.

Fixes: c3126dccfd7b ("mmc: mmc_test: use kzalloc_flex")
Signed-off-by: Lad Prabhakar <[email protected]>
---
 drivers/mmc/core/mmc_test.c | 21 ++++++++++++++-------
 1 file changed, 14 insertions(+), 7 deletions(-)

diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c
index ab38e4c45a8d..e0e1b5df76dc 100644
--- a/drivers/mmc/core/mmc_test.c
+++ b/drivers/mmc/core/mmc_test.c
@@ -316,11 +316,13 @@ static int mmc_test_buffer_transfer(struct mmc_test_card 
*test,
 
 static void mmc_test_free_mem(struct mmc_test_mem *mem)
 {
+       unsigned int idx;
+
        if (!mem)
                return;
-       while (mem->cnt--)
-               __free_pages(mem->arr[mem->cnt].page,
-                            mem->arr[mem->cnt].order);
+       for (idx = 0; idx < mem->cnt; idx++)
+               __free_pages(mem->arr[idx].page,
+                            mem->arr[idx].order);
        kfree(mem);
 }
 
@@ -341,6 +343,7 @@ static struct mmc_test_mem *mmc_test_alloc_mem(unsigned 
long min_sz,
        unsigned long page_cnt = 0;
        unsigned long limit = nr_free_buffer_pages() >> 4;
        struct mmc_test_mem *mem;
+       unsigned int idx = 0;
 
        if (max_page_cnt > limit)
                max_page_cnt = limit;
@@ -356,6 +359,7 @@ static struct mmc_test_mem *mmc_test_alloc_mem(unsigned 
long min_sz,
        mem = kzalloc_flex(*mem, arr, max_segs);
        if (!mem)
                return NULL;
+       mem->cnt = max_segs;
 
        while (max_page_cnt) {
                struct page *page;
@@ -375,23 +379,26 @@ static struct mmc_test_mem *mmc_test_alloc_mem(unsigned 
long min_sz,
                                goto out_free;
                        break;
                }
-               mem->arr[mem->cnt].page = page;
-               mem->arr[mem->cnt].order = order;
-               mem->cnt += 1;
+               mem->arr[idx].page = page;
+               mem->arr[idx].order = order;
+               idx += 1;
                if (max_page_cnt <= (1UL << order))
                        break;
                max_page_cnt -= 1UL << order;
                page_cnt += 1UL << order;
-               if (mem->cnt >= max_segs) {
+               if (idx >= mem->cnt) {
                        if (page_cnt < min_page_cnt)
                                goto out_free;
                        break;
                }
        }
 
+       mem->cnt = idx;
+
        return mem;
 
 out_free:
+       mem->cnt = idx;
        mmc_test_free_mem(mem);
        return NULL;
 }
-- 
2.54.0


Reply via email to