Am 2022-01-20 um 18:13 schrieb Philip Yang:
By default, a process receives events only from the same process. Add a flag
to be able to receive events from all processes; this requires superuser
permission.

Events with pid 0 are sent to all processes, to keep the default behavior of
existing SMI events.

Signed-off-by: Philip Yang <philip.y...@amd.com>

Reviewed-by: Felix Kuehling <felix.kuehl...@amd.com>


---
  drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c | 29 ++++++++++++++++-----
  1 file changed, 22 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
index 18ed1b72f0f7..68c93701c5f7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
@@ -37,6 +37,8 @@ struct kfd_smi_client {
        uint64_t events;
        struct kfd_dev *dev;
        spinlock_t lock;
+       pid_t pid;
+       bool suser;
  };
#define MAX_KFIFO_SIZE 1024
@@ -150,16 +152,27 @@ static int kfd_smi_ev_release(struct inode *inode, struct file *filep)
        return 0;
  }
-static void add_event_to_kfifo(struct kfd_dev *dev, unsigned int smi_event,
-                             char *event_msg, int len)
+static bool kfd_smi_ev_enabled(pid_t pid, struct kfd_smi_client *client,
+                              unsigned int event)
+{
+       uint64_t all = KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_ALL_PROCESS);
+       uint64_t events = READ_ONCE(client->events);
+
+       if (pid && client->pid != pid && !(client->suser && (events & all)))
+               return false;
+
+       return events & KFD_SMI_EVENT_MASK_FROM_INDEX(event);
+}
+
+static void add_event_to_kfifo(pid_t pid, struct kfd_dev *dev,
+                              unsigned int smi_event, char *event_msg, int len)
  {
        struct kfd_smi_client *client;

        rcu_read_lock();
        list_for_each_entry_rcu(client, &dev->smi_clients, list) {
-               if (!(READ_ONCE(client->events) &
-                               KFD_SMI_EVENT_MASK_FROM_INDEX(smi_event)))
+               if (!kfd_smi_ev_enabled(pid, client, smi_event))
                        continue;
                spin_lock(&client->lock);
                if (kfifo_avail(&client->fifo) >= len) {
@@ -202,7 +215,7 @@ void kfd_smi_event_update_gpu_reset(struct kfd_dev *dev, bool post_reset)
        len = snprintf(fifo_in, sizeof(fifo_in), "%x %x\n", event,
                                                dev->reset_seq_num);
-       add_event_to_kfifo(dev, event, fifo_in, len);
+       add_event_to_kfifo(0, dev, event, fifo_in, len);
  }
void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev,
@@ -225,7 +238,7 @@ void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev,
                       KFD_SMI_EVENT_THERMAL_THROTTLE, throttle_bitmask,
                       amdgpu_dpm_get_thermal_throttling_counter(dev->adev));
-       add_event_to_kfifo(dev, KFD_SMI_EVENT_THERMAL_THROTTLE, fifo_in, len);
+       add_event_to_kfifo(0, dev, KFD_SMI_EVENT_THERMAL_THROTTLE, fifo_in, len);
  }
void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid)
@@ -250,7 +263,7 @@ void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid)
        len = snprintf(fifo_in, sizeof(fifo_in), "%x %x:%s\n", KFD_SMI_EVENT_VMFAULT,
                task_info.pid, task_info.task_name);
-       add_event_to_kfifo(dev, KFD_SMI_EVENT_VMFAULT, fifo_in, len);
+       add_event_to_kfifo(0, dev, KFD_SMI_EVENT_VMFAULT, fifo_in, len);
  }
int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd)
@@ -282,6 +295,8 @@ int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd)
        spin_lock_init(&client->lock);
        client->events = 0;
        client->dev = dev;
+       client->pid = current->pid;
+       client->suser = capable(CAP_SYS_ADMIN);
spin_lock(&dev->smi_lock);
        list_add_rcu(&client->list, &dev->smi_clients);

Reply via email to