From: Masami Hiramatsu (Google) <[email protected]>

Since the MSBs of rb_data_page::commit are used for storing
RB_MISSED_EVENTS and RB_MISSED_STORED, we need to mask out those bits
when it is used for finding the size of data pages.

Fixes: 5f3b6e839f3c ("ring-buffer: Validate boot range memory events")
Fixes: 5b7be9c709e1 ("ring-buffer: Add test to validate the time stamp deltas")
Cc: [email protected]
Signed-off-by: Masami Hiramatsu (Google) <[email protected]>
---
 Changes in v5:
   - Do not move rb_commit_index().
   - Fix verify_event() and rb_cpu_meta_valid() too.
 Changes in v4:
   - Fix to move rb_commit_index() after ring_buffer_per_cpu definition.
---
 kernel/trace/ring_buffer.c |   27 +++++++++++++++------------
 1 file changed, 15 insertions(+), 12 deletions(-)

diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 156ed19fb569..87d6958e9656 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -396,6 +396,12 @@ static __always_inline unsigned int rb_page_commit(struct buffer_page *bpage)
        return local_read(&bpage->page->commit);
 }
 
+/* Size is determined by what has been committed */
+static __always_inline unsigned int rb_page_size(struct buffer_page *bpage)
+{
+       return rb_page_commit(bpage) & ~RB_MISSED_MASK;
+}
+
 static void free_buffer_page(struct buffer_page *bpage)
 {
        /* Range pages are not to be freed */
@@ -677,7 +683,7 @@ static void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
        do {
                if (page == tail_page || WARN_ON_ONCE(stop++ > 100))
                        done = true;
-               commit = local_read(&page->page->commit);
+               commit = rb_page_size(page);
                write = local_read(&page->write);
                if (addr >= (unsigned long)&page->page->data[commit] &&
                    addr < (unsigned long)&page->page->data[write])
@@ -1821,13 +1827,16 @@ static bool rb_cpu_meta_valid(struct ring_buffer_cpu_meta *meta, int cpu,
 
        /* Is the meta buffers and the subbufs themselves have correct data? */
        for (i = 0; i < meta->nr_subbufs; i++) {
+               unsigned long commit;
+
                if (meta->buffers[i] < 0 ||
                    meta->buffers[i] >= meta->nr_subbufs) {
                        pr_info("Ring buffer boot meta [%d] array out of range\n", cpu);
                        return false;
                }
 
-               if ((unsigned)local_read(&subbuf->commit) > subbuf_size) {
+               commit = local_read(&subbuf->commit) & ~RB_MISSED_MASK;
+               if (commit > subbuf_size) {
                        pr_info("Ring buffer boot meta [%d] buffer invalid commit\n", cpu);
                        return false;
                }
@@ -1908,7 +1917,7 @@ static int rb_validate_buffer(struct buffer_data_page *dpage, int cpu)
        u64 delta;
        int tail;
 
-       tail = local_read(&dpage->commit);
+       tail = local_read(&dpage->commit) & ~RB_MISSED_MASK;
        return rb_read_data_buffer(dpage, tail, cpu, &ts, &delta);
 }
 
@@ -1935,7 +1944,7 @@ static void rb_meta_validate_events(struct ring_buffer_per_cpu *cpu_buffer)
                goto invalid;
        }
        entries += ret;
-       entry_bytes += local_read(&cpu_buffer->reader_page->page->commit);
+       entry_bytes += rb_page_size(cpu_buffer->reader_page);
        local_set(&cpu_buffer->reader_page->entries, ret);
 
        ts = head_page->page->time_stamp;
@@ -2055,7 +2064,7 @@ static void rb_meta_validate_events(struct ring_buffer_per_cpu *cpu_buffer)
                        local_inc(&cpu_buffer->pages_touched);
 
                entries += ret;
-               entry_bytes += local_read(&head_page->page->commit);
+               entry_bytes += rb_page_size(head_page);
                local_set(&cpu_buffer->head_page->entries, ret);
 
                if (head_page == cpu_buffer->commit_page)
@@ -3258,12 +3267,6 @@ rb_iter_head_event(struct ring_buffer_iter *iter)
        return NULL;
 }
 
-/* Size is determined by what has been committed */
-static __always_inline unsigned rb_page_size(struct buffer_page *bpage)
-{
-       return rb_page_commit(bpage) & ~RB_MISSED_MASK;
-}
-
 static __always_inline unsigned
 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
 {
@@ -4434,7 +4437,7 @@ static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
 
        if (tail == CHECK_FULL_PAGE) {
                full = true;
-               tail = local_read(&bpage->commit);
+               tail = local_read(&bpage->commit) & ~RB_MISSED_MASK;
        } else if (info->add_timestamp &
                   (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)) {
                /* Ignore events with absolute time stamps */


Reply via email to