From: Robert Richter <robert.rich...@linaro.org>

Factor out a helper, ring_buffer_detach_all(), that detaches all events
from a ring buffer. No functional changes.

Signed-off-by: Robert Richter <robert.rich...@linaro.org>
Signed-off-by: Robert Richter <r...@kernel.org>
---
 kernel/events/core.c | 82 ++++++++++++++++++++++++++++------------------------
 1 file changed, 44 insertions(+), 38 deletions(-)

diff --git a/kernel/events/core.c b/kernel/events/core.c
index 928fae7c..5dcc5fe 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3775,6 +3775,49 @@ static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb)
        spin_unlock_irqrestore(&rb->event_lock, flags);
 }
 
+static void ring_buffer_detach_all(struct ring_buffer *rb)
+{
+       struct perf_event *event;
+again:
+       rcu_read_lock();
+       list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
+               if (!atomic_long_inc_not_zero(&event->refcount)) {
+                       /*
+                        * This event is en-route to free_event() which will
+                        * detach it and remove it from the list.
+                        */
+                       continue;
+               }
+               rcu_read_unlock();
+
+               mutex_lock(&event->mmap_mutex);
+               /*
+                * Check we didn't race with perf_event_set_output() which can
+                * swizzle the rb from under us while we were waiting to
+                * acquire mmap_mutex.
+                *
+                * If we find a different rb; ignore this event, a next
+                * iteration will no longer find it on the list. We have to
+                * still restart the iteration to make sure we're not now
+                * iterating the wrong list.
+                */
+               if (event->rb == rb) {
+                       rcu_assign_pointer(event->rb, NULL);
+                       ring_buffer_detach(event, rb);
+                       ring_buffer_put(rb); /* can't be last, we still have one */
+               }
+               mutex_unlock(&event->mmap_mutex);
+               put_event(event);
+
+               /*
+                * Restart the iteration; either we're on the wrong list or
+                * destroyed its integrity by doing a deletion.
+                */
+               goto again;
+       }
+       rcu_read_unlock();
+}
+
 static void ring_buffer_wakeup(struct perf_event *event)
 {
        struct ring_buffer *rb;
@@ -3867,44 +3910,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
         * into the now unreachable buffer. Somewhat complicated by the
         * fact that rb::event_lock otherwise nests inside mmap_mutex.
         */
-again:
-       rcu_read_lock();
-       list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
-               if (!atomic_long_inc_not_zero(&event->refcount)) {
-                       /*
-                        * This event is en-route to free_event() which will
-                        * detach it and remove it from the list.
-                        */
-                       continue;
-               }
-               rcu_read_unlock();
-
-               mutex_lock(&event->mmap_mutex);
-               /*
-                * Check we didn't race with perf_event_set_output() which can
-                * swizzle the rb from under us while we were waiting to
-                * acquire mmap_mutex.
-                *
-                * If we find a different rb; ignore this event, a next
-                * iteration will no longer find it on the list. We have to
-                * still restart the iteration to make sure we're not now
-                * iterating the wrong list.
-                */
-               if (event->rb == rb) {
-                       rcu_assign_pointer(event->rb, NULL);
-                       ring_buffer_detach(event, rb);
-                       ring_buffer_put(rb); /* can't be last, we still have one */
-               }
-               mutex_unlock(&event->mmap_mutex);
-               put_event(event);
-
-               /*
-                * Restart the iteration; either we're on the wrong list or
-                * destroyed its integrity by doing a deletion.
-                */
-               goto again;
-       }
-       rcu_read_unlock();
+       ring_buffer_detach_all(rb);
 
        /*
         * It could be there's still a few 0-ref events on the list; they'll
-- 
1.8.3.2
