By default, a process receives only the SMI events sent from its own
process. Add a per-client flag so that a client can receive events
from all processes; this requires superuser permission.

Events sent with pid 0 go to all processes, which keeps the default
behavior of the existing SMI events.
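
For reference, a minimal userspace sketch (illustration only, not part
of this patch) of how a privileged client could opt in to events from
all processes. It assumes smi_fd is the anonymous fd returned by the
AMDKFD_IOC_SMI_EVENTS ioctl, that the uapi header defines
KFD_SMI_EVENT_ALL_PROCESS, and that the 64-bit event mask is written
to the fd as raw bytes, as kfd_smi_ev_write() expects:

  #include <stdint.h>
  #include <unistd.h>
  #include <linux/kfd_ioctl.h>  /* kfd_smi_event enum */

  /* Mirrors KFD_SMI_EVENT_MASK_FROM_INDEX() in kfd_smi_events.c */
  #define SMI_EVENT_MASK_FROM_INDEX(i)  (1ULL << ((i) - 1))

  /*
   * Enable one SMI event plus the all-process filter bit. Without the
   * KFD_SMI_EVENT_ALL_PROCESS bit (and CAP_SYS_ADMIN on the opener),
   * events are delivered only when they carry this client's pid or
   * pid 0, the broadcast case used by the existing events.
   */
  static int smi_enable_all_process(int smi_fd, unsigned int event_index)
  {
          uint64_t mask = SMI_EVENT_MASK_FROM_INDEX(event_index) |
                          SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_ALL_PROCESS);

          if (write(smi_fd, &mask, sizeof(mask)) != sizeof(mask))
                  return -1;
          return 0;
  }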

Signed-off-by: Philip Yang <philip.y...@amd.com>
Reviewed-by: Felix Kuehling <felix.kuehl...@amd.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c | 29 ++++++++++++++++-----
 1 file changed, 22 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
index ce78bbd360da..6b743068057d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
@@ -38,6 +38,8 @@ struct kfd_smi_client {
        uint64_t events;
        struct kfd_dev *dev;
        spinlock_t lock;
+       pid_t pid;
+       bool suser;
 };
 
 #define MAX_KFIFO_SIZE 1024
@@ -151,16 +153,27 @@ static int kfd_smi_ev_release(struct inode *inode, struct file *filep)
        return 0;
 }
 
-static void add_event_to_kfifo(struct kfd_dev *dev, unsigned int smi_event,
-                             char *event_msg, int len)
+static bool kfd_smi_ev_enabled(pid_t pid, struct kfd_smi_client *client,
+                              unsigned int event)
+{
+       uint64_t all = KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_ALL_PROCESS);
+       uint64_t events = READ_ONCE(client->events);
+
+       if (pid && client->pid != pid && !(client->suser && (events & all)))
+               return false;
+
+       return events & KFD_SMI_EVENT_MASK_FROM_INDEX(event);
+}
+
+static void add_event_to_kfifo(pid_t pid, struct kfd_dev *dev,
+                              unsigned int smi_event, char *event_msg, int len)
 {
        struct kfd_smi_client *client;
 
        rcu_read_lock();
 
        list_for_each_entry_rcu(client, &dev->smi_clients, list) {
-               if (!(READ_ONCE(client->events) &
-                               KFD_SMI_EVENT_MASK_FROM_INDEX(smi_event)))
+               if (!kfd_smi_ev_enabled(pid, client, smi_event))
                        continue;
                spin_lock(&client->lock);
                if (kfifo_avail(&client->fifo) >= len) {
@@ -203,7 +216,7 @@ void kfd_smi_event_update_gpu_reset(struct kfd_dev *dev, bool post_reset)
        len = snprintf(fifo_in, sizeof(fifo_in), "%x %x\n", event,
                                                dev->reset_seq_num);
 
-       add_event_to_kfifo(dev, event, fifo_in, len);
+       add_event_to_kfifo(0, dev, event, fifo_in, len);
 }
 
 void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev,
@@ -226,7 +239,7 @@ void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev,
                       KFD_SMI_EVENT_THERMAL_THROTTLE, throttle_bitmask,
                       amdgpu_dpm_get_thermal_throttling_counter(dev->adev));
 
-       add_event_to_kfifo(dev, KFD_SMI_EVENT_THERMAL_THROTTLE, fifo_in, len);
+       add_event_to_kfifo(0, dev, KFD_SMI_EVENT_THERMAL_THROTTLE, fifo_in, len);
 }
 
 void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid)
@@ -251,7 +264,7 @@ void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid)
        len = snprintf(fifo_in, sizeof(fifo_in), "%x %x:%s\n", KFD_SMI_EVENT_VMFAULT,
                task_info.pid, task_info.task_name);
 
-       add_event_to_kfifo(dev, KFD_SMI_EVENT_VMFAULT, fifo_in, len);
+       add_event_to_kfifo(0, dev, KFD_SMI_EVENT_VMFAULT, fifo_in, len);
 }
 
 int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd)
@@ -283,6 +296,8 @@ int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd)
        spin_lock_init(&client->lock);
        client->events = 0;
        client->dev = dev;
+       client->pid = current->pid;
+       client->suser = capable(CAP_SYS_ADMIN);
 
        spin_lock(&dev->smi_lock);
        list_add_rcu(&client->list, &dev->smi_clients);
-- 
2.17.1
