Conceptually, events are the right abstraction to handle the GPU
runtime PM state: as long as any event is pending, the GPU cannot
be idle. Events are also properly freed and reallocated when the
GPU has been reset after a hang.

Signed-off-by: Lucas Stach <l.st...@pengutronix.de>
---
 drivers/gpu/drm/etnaviv/etnaviv_gem.h        |  1 -
 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c |  3 ---
 drivers/gpu/drm/etnaviv/etnaviv_gpu.c        | 27 ++++++++++++--------
 3 files changed, 16 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h 
b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index baa81cbf701a..a42d260cac2c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -97,7 +97,6 @@ struct etnaviv_gem_submit {
        struct list_head node; /* GPU active submit list */
        struct etnaviv_cmdbuf cmdbuf;
        struct pid *pid;       /* submitting process */
-       bool runtime_resumed;
        u32 exec_state;
        u32 flags;
        unsigned int nr_pmrs;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 
b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 45403ea38906..2416c526f9b0 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -362,9 +362,6 @@ static void submit_cleanup(struct kref *kref)
                        container_of(kref, struct etnaviv_gem_submit, refcount);
        unsigned i;
 
-       if (submit->runtime_resumed)
-               pm_runtime_put_autosuspend(submit->gpu->dev);
-
        if (submit->cmdbuf.suballoc)
                etnaviv_cmdbuf_free(&submit->cmdbuf);
 
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c 
b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 4e18aa8566c6..54a1249c5bca 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -1139,7 +1139,8 @@ static int event_alloc(struct etnaviv_gpu *gpu, unsigned 
nr_events,
        unsigned int *events)
 {
        unsigned long timeout = msecs_to_jiffies(10 * 10000);
-       unsigned i, acquired = 0;
+       unsigned i, acquired = 0, rpm_count = 0;
+       int ret;
 
        for (i = 0; i < nr_events; i++) {
                unsigned long ret;
@@ -1148,6 +1149,7 @@ static int event_alloc(struct etnaviv_gpu *gpu, unsigned 
nr_events,
 
                if (!ret) {
                        dev_err(gpu->dev, "wait_for_completion_timeout failed");
+                       ret = -EBUSY;
                        goto out;
                }
 
@@ -1167,13 +1169,23 @@ static int event_alloc(struct etnaviv_gpu *gpu, 
unsigned nr_events,
 
        spin_unlock(&gpu->event_spinlock);
 
+       for (i = 0; i < nr_events; i++) {
+               ret = pm_runtime_resume_and_get(gpu->dev);
+               if (ret)
+                       goto out_rpm;
+               rpm_count++;
+       }
+
        return 0;
 
+out_rpm:
+       for (i = 0; i < rpm_count; i++)
+               pm_runtime_put_autosuspend(gpu->dev);
 out:
        for (i = 0; i < acquired; i++)
                complete(&gpu->event_free);
 
-       return -EBUSY;
+       return ret;
 }
 
 static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
@@ -1185,6 +1197,8 @@ static void event_free(struct etnaviv_gpu *gpu, unsigned 
int event)
                clear_bit(event, gpu->event_bitmap);
                complete(&gpu->event_free);
        }
+
+       pm_runtime_put_autosuspend(gpu->dev);
 }
 
 /*
@@ -1327,15 +1341,6 @@ struct dma_fence *etnaviv_gpu_submit(struct 
etnaviv_gem_submit *submit)
        unsigned int i, nr_events = 1, event[3];
        int ret;
 
-       if (!submit->runtime_resumed) {
-               ret = pm_runtime_get_sync(gpu->dev);
-               if (ret < 0) {
-                       pm_runtime_put_noidle(gpu->dev);
-                       return NULL;
-               }
-               submit->runtime_resumed = true;
-       }
-
        /*
         * if there are performance monitor requests we need to have
         * - a sync point to re-configure gpu and process ETNA_PM_PROCESS_PRE
-- 
2.39.2

Reply via email to