On Fri,  9 Feb 2024 16:34:46 +0000
Vincent Donnefort <vdonnef...@google.com> wrote:

> +static void tracing_buffers_mmap_close(struct vm_area_struct *vma)
> +{
> +     struct ftrace_buffer_info *info = vma->vm_file->private_data;
> +     struct trace_iterator *iter = &info->iter;
> +     struct trace_array __maybe_unused *tr = iter->tr;
> +
> +     ring_buffer_unmap(iter->array_buffer->buffer, iter->cpu_file);
> +
> +#ifdef CONFIG_TRACER_MAX_TRACE
> +     spin_lock(&tr->snapshot_trigger_lock);
> +     if (!WARN_ON(!tr->mapped))
> +             tr->mapped--;
> +     spin_unlock(&tr->snapshot_trigger_lock);
> +#endif

You can add a section with the #ifdef *MAX_TRACE and use static inline
for these (or use an existing section):

#ifdef CONFIG_TRACER_MAX_TRACE
static void put_snapshot_map(struct trace_array *tr)
{
        spin_lock(&tr->snapshot_trigger_lock);
        if (!WARN_ON(!tr->mapped))
                tr->mapped--;
        spin_unlock(&tr->snapshot_trigger_lock);        
}
[..]
#else
static inline void put_snapshot_map(struct trace_array *tr) { }
[..]
#endif

> +}
> +
> +static void tracing_buffers_mmap_open(struct vm_area_struct *vma)
> +{
> +     struct ftrace_buffer_info *info = vma->vm_file->private_data;
> +     struct trace_iterator *iter = &info->iter;
> +
> +     WARN_ON(ring_buffer_map(iter->array_buffer->buffer, iter->cpu_file));
> +}
> +
> +static const struct vm_operations_struct tracing_buffers_vmops = {
> +     .open           = tracing_buffers_mmap_open,
> +     .close          = tracing_buffers_mmap_close,
> +     .fault          = tracing_buffers_mmap_fault,
> +};
> +
> +static int tracing_buffers_mmap(struct file *filp, struct vm_area_struct 
> *vma)
> +{
> +     struct ftrace_buffer_info *info = filp->private_data;
> +     struct trace_iterator *iter = &info->iter;
> +     struct trace_array __maybe_unused *tr = iter->tr;
> +     int ret = 0;
> +
> +     if (vma->vm_flags & VM_WRITE || vma->vm_flags & VM_EXEC)
> +             return -EPERM;
> +
> +     vm_flags_mod(vma, VM_DONTCOPY | VM_DONTDUMP, VM_MAYWRITE);
> +     vma->vm_ops = &tracing_buffers_vmops;
> +
> +#ifdef CONFIG_TRACER_MAX_TRACE
> +     /*
> +      * We hold mmap_lock here. lockdep would be unhappy if we would now take
> +      * trace_types_lock. Instead use the specific snapshot_trigger_lock.
> +      */
> +     spin_lock(&tr->snapshot_trigger_lock);
> +     if (tr->snapshot || tr->mapped == UINT_MAX) {
> +             spin_unlock(&tr->snapshot_trigger_lock);
> +             return -EBUSY;
> +     }
> +     tr->mapped++;
> +     spin_unlock(&tr->snapshot_trigger_lock);
> +
> +     /* Wait for update_max_tr() to observe iter->tr->mapped */
> +     if (tr->mapped == 1)
> +             synchronize_rcu();
> +#endif

The above could be:

static int get_snapshot_map(struct trace_array *tr)
{
        /*
         * We hold mmap_lock here. lockdep would be unhappy if we would now take
         * trace_types_lock. Instead use the specific snapshot_trigger_lock.
         */
        spin_lock(&tr->snapshot_trigger_lock);
        if (tr->snapshot || tr->mapped == UINT_MAX) {
                spin_unlock(&tr->snapshot_trigger_lock);
                return -EBUSY;
        }
        tr->mapped++;
        spin_unlock(&tr->snapshot_trigger_lock);

        /* Wait for update_max_tr() to observe iter->tr->mapped */
        if (tr->mapped == 1)
                synchronize_rcu();

        return 0;
}
#else
static inline int get_snapshot_map(struct trace_array *tr)
{
        return 0;
}
[..]

Then here just have:

        ret = get_snapshot_map(tr);
        if (ret < 0)
                return ret;


> +     ret = ring_buffer_map(iter->array_buffer->buffer, iter->cpu_file);
> +#ifdef CONFIG_TRACER_MAX_TRACE
> +     if (ret) {
> +             spin_lock(&tr->snapshot_trigger_lock);
> +             iter->tr->mapped--;
> +             spin_unlock(&tr->snapshot_trigger_lock);
> +     }
> +#endif

and the above is just:

        if (ret)
                put_snapshot_map(tr);

-- Steve

> +     return ret;
> +}
> +
>  static const struct file_operations tracing_buffers_fops = {
>       .open           = tracing_buffers_open,
>       .read           = tracing_buffers_read,
> @@ -8681,6 +8794,7 @@ static const struct file_operations 
> tracing_buffers_fops = {
>       .splice_read    = tracing_buffers_splice_read,
>       .unlocked_ioctl = tracing_buffers_ioctl,
>       .llseek         = no_llseek,
> +     .mmap           = tracing_buffers_mmap,
>  };
>  
>  static ssize_t
> diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
> index bd312e9afe25..8a96e7a89e6b 100644
> --- a/kernel/trace/trace.h
> +++ b/kernel/trace/trace.h
> @@ -336,6 +336,7 @@ struct trace_array {
>       bool                    allocated_snapshot;
>       spinlock_t              snapshot_trigger_lock;
>       unsigned int            snapshot;
> +     unsigned int            mapped;
>       unsigned long           max_latency;
>  #ifdef CONFIG_FSNOTIFY
>       struct dentry           *d_max_latency;


Reply via email to