Add a new option 'signal_on_wakeup' to request that a signal be
delivered on every ring buffer wakeup. Wakeups are controlled through
the 'watermark' bit and {wakeup_events, wakeup_watermark}.
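
A minimal usage sketch (illustrative only, not part of this patch): the
software event, sample period, buffer size and the fasync plumbing below
are the existing F_SETOWN/O_ASYNC signal mechanism; only the
'signal_on_wakeup' bit is new.

  #define _GNU_SOURCE
  #include <fcntl.h>
  #include <linux/perf_event.h>
  #include <signal.h>
  #include <stdio.h>
  #include <string.h>
  #include <sys/mman.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  static volatile sig_atomic_t wakeups;

  static void handler(int sig)
  {
          wakeups++;      /* one signal per ring buffer wakeup */
  }

  int main(void)
  {
          struct perf_event_attr attr;
          long page_size = sysconf(_SC_PAGESIZE);
          void *buf;
          int fd;

          memset(&attr, 0, sizeof(attr));
          attr.size = sizeof(attr);
          attr.type = PERF_TYPE_SOFTWARE;
          attr.config = PERF_COUNT_SW_CPU_CLOCK;
          attr.sample_period = 100000;
          attr.sample_type = PERF_SAMPLE_IP;
          attr.wakeup_events = 10;        /* wake up every 10 samples */
          attr.signal_on_wakeup = 1;      /* new bit added by this patch */

          fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
          if (fd < 0) {
                  perror("perf_event_open");
                  return 1;
          }

          /* Samples are only written (and wakeups raised) once the
           * ring buffer is mapped: one metadata page + 8 data pages. */
          buf = mmap(NULL, (1 + 8) * page_size, PROT_READ | PROT_WRITE,
                     MAP_SHARED, fd, 0);
          if (buf == MAP_FAILED) {
                  perror("mmap");
                  return 1;
          }

          /* Standard fasync setup: SIGIO goes to the owner on wakeup. */
          signal(SIGIO, handler);
          fcntl(fd, F_SETOWN, getpid());
          fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);

          while (wakeups < 5)
                  pause();

          printf("got %d wakeups\n", (int)wakeups);
          close(fd);
          return 0;
  }

With this in place, one SIGIO should arrive per buffer wakeup; without
the new bit, signal delivery remains tied to the event_limit/refresh
path.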

Signed-off-by: Naveen N. Rao <naveen.n....@linux.vnet.ibm.com>
---
 include/uapi/linux/perf_event.h |  3 ++-
 kernel/events/core.c            | 18 +++++++++++-------
 kernel/events/ring_buffer.c     |  3 +++
 3 files changed, 16 insertions(+), 8 deletions(-)

diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index b1c0b187acfe..e5810b1d74a4 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -345,7 +345,8 @@ struct perf_event_attr {
                                context_switch :  1, /* context switch data */
                                write_backward :  1, /* Write ring buffer from end to beginning */
                                namespaces     :  1, /* include namespaces data */
-                               __reserved_1   : 35;
+                               signal_on_wakeup : 1, /* send signal on wakeup */
+                               __reserved_1   : 34;
 
        union {
                __u32           wakeup_events;    /* wakeup every n events */
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 6c4e523dc1e2..812fcfc767f4 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2679,7 +2679,8 @@ static int _perf_event_refresh(struct perf_event *event, int refresh)
        /*
         * not supported on inherited events
         */
-       if (event->attr.inherit || !is_sampling_event(event))
+       if (event->attr.inherit || event->attr.signal_on_wakeup ||
+                       !is_sampling_event(event))
                return -EINVAL;
 
        atomic_add(refresh, &event->event_limit);
@@ -7339,7 +7340,6 @@ static int __perf_event_overflow(struct perf_event *event,
                                   int throttle, struct perf_sample_data *data,
                                   struct pt_regs *regs)
 {
-       int events = atomic_read(&event->event_limit);
        int ret = 0;
 
        /*
@@ -7362,12 +7362,15 @@ static int __perf_event_overflow(struct perf_event *event,
         * events
         */
 
-       event->pending_kill = POLL_IN;
-       if (events && atomic_dec_and_test(&event->event_limit)) {
-               ret = 1;
-               event->pending_kill = POLL_HUP;
+       if (!event->attr.signal_on_wakeup) {
+               int events = atomic_read(&event->event_limit);
+               event->pending_kill = POLL_IN;
+               if (events && atomic_dec_and_test(&event->event_limit)) {
+                       ret = 1;
+                       event->pending_kill = POLL_HUP;
 
-               perf_event_disable_inatomic(event);
+                       perf_event_disable_inatomic(event);
+               }
        }
 
        READ_ONCE(event->overflow_handler)(event, data, regs);
@@ -10408,6 +10411,7 @@ perf_event_exit_event(struct perf_event *child_event,
                perf_group_detach(child_event);
        list_del_event(child_event, child_ctx);
        child_event->state = PERF_EVENT_STATE_EXIT; /* is_event_hup() */
+       child_event->pending_kill = POLL_HUP;
        raw_spin_unlock_irq(&child_ctx->lock);
 
        /*
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 2831480c63a2..4e7c728569a8 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -21,6 +21,9 @@ static void perf_output_wakeup(struct perf_output_handle *handle)
 {
        atomic_set(&handle->rb->poll, POLLIN);
 
+       if (handle->event->attr.signal_on_wakeup)
+               handle->event->pending_kill = POLL_IN;
+
        handle->event->pending_wakeup = 1;
        irq_work_queue(&handle->event->pending);
 }
-- 
2.13.1
