On Thu, 21 Aug 2025 09:13:50 +0100 Vincent Donnefort <vdonnef...@google.com> wrote:
> +int ring_buffer_poll_remote(struct trace_buffer *buffer, int cpu);
> +
> +struct trace_buffer *
> +__ring_buffer_alloc_remote(struct ring_buffer_remote *remote,
> +			   struct lock_class_key *key);
> +
> +#define ring_buffer_remote(remote)			\
> +({							\
> +	static struct lock_class_key __key;		\
> +	__ring_buffer_alloc_remote(remote, &__key);	\

If this allocates, then it should have "alloc" in its name too. Why isn't
this:

	#define ring_buffer_alloc_remote(remote)	\

?

> +})
>  #endif /* _LINUX_RING_BUFFER_H */
> diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
> index 064005c9347e..8044cb23ef88 100644
> --- a/kernel/trace/ring_buffer.c
> +++ b/kernel/trace/ring_buffer.c
> @@ -523,6 +523,8 @@ struct ring_buffer_per_cpu {
>  	struct trace_buffer_meta	*meta_page;
>  	struct ring_buffer_cpu_meta	*ring_meta;
>
> +	struct ring_buffer_remote	*remote;
> +
>  	/* ring buffer pages to update, > 0 to add, < 0 to remove */
>  	long				nr_pages_to_update;
>  	struct list_head		new_pages; /* new pages to add */
> @@ -545,6 +547,8 @@ struct trace_buffer {
>
>  	struct ring_buffer_per_cpu	**buffers;
>
> +	struct ring_buffer_remote	*remote;
> +
>  	struct hlist_node		node;
>  	u64				(*clock)(void);
>
> @@ -2296,6 +2300,40 @@ static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
>  	return -ENOMEM;
>  }
>
> +int ring_buffer_poll_remote(struct trace_buffer *buffer, int cpu)
> +{
> +	struct ring_buffer_per_cpu *cpu_buffer;
> +	unsigned long flags;
> +
> +	if (cpu != RING_BUFFER_ALL_CPUS) {
> +		if (!cpumask_test_cpu(cpu, buffer->cpumask))
> +			return -EINVAL;
> +
> +		cpu_buffer = buffer->buffers[cpu];
> +
> +		raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

The above can be replaced by:

		guard(raw_spinlock)(&cpu_buffer->reader_lock);

> +		if (rb_read_remote_meta_page(cpu_buffer))
> +			rb_wakeups(buffer, cpu_buffer);
> +		raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

Then we don't need the unlock.

> +
> +		return 0;
> +	}
> +
> +	/*
> +	 * Make sure all the ring buffers are up to date before we start reading
> +	 * them.
> +	 */
> +	for_each_buffer_cpu(buffer, cpu) {
> +		cpu_buffer = buffer->buffers[cpu];
> +
> +		raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

Ditto.

> +		rb_read_remote_meta_page(buffer->buffers[cpu]);
> +		raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
> +	}
> +
> +	for_each_buffer_cpu(buffer, cpu) {
> +		cpu_buffer = buffer->buffers[cpu];
> +
> +		raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

Ditto.
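
Roughly, the single-CPU branch would then become something like this (just
an untested sketch; guard() releases the lock when it goes out of scope, so
the explicit unlock and the flags variable are no longer needed, and
guard(raw_spinlock_irqsave) is the drop-in if the irq state really must be
saved here):

	if (cpu != RING_BUFFER_ALL_CPUS) {
		if (!cpumask_test_cpu(cpu, buffer->cpumask))
			return -EINVAL;

		cpu_buffer = buffer->buffers[cpu];

		/* Released automatically when the scope is left */
		guard(raw_spinlock)(&cpu_buffer->reader_lock);

		if (rb_read_remote_meta_page(cpu_buffer))
			rb_wakeups(buffer, cpu_buffer);

		return 0;
	}

Same for the two loops below.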
-- Steve

> +		if (rb_num_of_entries(cpu_buffer))
> +			rb_wakeups(buffer, buffer->buffers[cpu]);
> +		raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
> +	}
> +
> +	return 0;
> +}
> +
>  #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
>  /**
>   * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
> @@ -6600,6 +6801,7 @@ int ring_buffer_read_page(struct trace_buffer *buffer,
>  	unsigned int commit;
>  	unsigned int read;
>  	u64 save_timestamp;
> +	bool force_memcpy;
>
>  	if (!cpumask_test_cpu(cpu, buffer->cpumask))
>  		return -1;
> @@ -6637,6 +6839,8 @@ int ring_buffer_read_page(struct trace_buffer *buffer,
>  	/* Check if any events were dropped */
>  	missed_events = cpu_buffer->lost_events;
>
> +	force_memcpy = cpu_buffer->mapped || cpu_buffer->remote;
> +
>  	/*
>  	 * If this page has been partially read or
>  	 * if len is not big enough to read the rest of the page or
> @@ -6646,7 +6850,7 @@ int ring_buffer_read_page(struct trace_buffer *buffer,
>  	 */
>  	if (read || (len < (commit - read)) ||
>  	    cpu_buffer->reader_page == cpu_buffer->commit_page ||
> -	    cpu_buffer->mapped) {
> +	    force_memcpy) {
>  		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
>  		unsigned int rpos = read;
>  		unsigned int pos = 0;
> @@ -7222,7 +7426,7 @@ int ring_buffer_map(struct trace_buffer *buffer, int cpu,
>  	unsigned long flags, *subbuf_ids;
>  	int err;
>
> -	if (!cpumask_test_cpu(cpu, buffer->cpumask))
> +	if (!cpumask_test_cpu(cpu, buffer->cpumask) || buffer->remote)
>  		return -EINVAL;
>
>  	cpu_buffer = buffer->buffers[cpu];