Keep track of the temporary runtime-pm (rpm) wakerefs used for user
access to the device, so that we can cancel them upon release and
clearly identify any leaks.
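
For reference, every call site below moves to this pattern (a minimal
sketch built only from the signatures visible in the hunks; the error
handling at each real call site is as shown in the diff):

	intel_wakeref_t wakeref;

	/* Unconditional acquire: returns a cookie naming this reference. */
	wakeref = intel_runtime_pm_get(i915);
	/* ... access the hardware ... */
	intel_runtime_pm_put(i915, wakeref);

	/* Conditional acquire: a zero cookie means the device was asleep. */
	wakeref = intel_runtime_pm_get_if_in_use(i915);
	if (wakeref) {
		/* ... access the hardware ... */
		intel_runtime_pm_put(i915, wakeref);
	}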

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
Cc: Jani Nikula <jani.nik...@intel.com>
---
 drivers/gpu/drm/i915/i915_gem.c            | 47 +++++++++++++---------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  5 ++-
 drivers/gpu/drm/i915/i915_gem_fence_reg.c  |  6 ++-
 drivers/gpu/drm/i915/i915_gem_gtt.c        | 22 ++++++----
 drivers/gpu/drm/i915/i915_gem_shrinker.c   | 32 +++++++++------
 drivers/gpu/drm/i915/intel_engine_cs.c     | 12 ++++--
 drivers/gpu/drm/i915/intel_uncore.c        |  5 ++-
 7 files changed, 81 insertions(+), 48 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 27f207cbabd9..e04dadeca879 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -786,6 +786,8 @@ fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
 
 void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
 {
+       intel_wakeref_t wakeref;
+
        /*
         * No actual flushing is required for the GTT write domain for reads
         * from the GTT domain. Writes to it "immediately" go to main memory
@@ -812,13 +814,13 @@ void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
 
        i915_gem_chipset_flush(dev_priv);
 
-       intel_runtime_pm_get(dev_priv);
+       wakeref = intel_runtime_pm_get(dev_priv);
        spin_lock_irq(&dev_priv->uncore.lock);
 
        POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE));
 
        spin_unlock_irq(&dev_priv->uncore.lock);
-       intel_runtime_pm_put_unchecked(dev_priv);
+       intel_runtime_pm_put(dev_priv, wakeref);
 }
 
 static void
@@ -1070,6 +1072,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
 {
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_ggtt *ggtt = &i915->ggtt;
+       intel_wakeref_t wakeref;
        struct drm_mm_node node;
        struct i915_vma *vma;
        void __user *user_data;
@@ -1080,7 +1083,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
        if (ret)
                return ret;
 
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
        vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
                                       PIN_MAPPABLE |
                                       PIN_NONFAULT |
@@ -1153,7 +1156,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
                i915_vma_unpin(vma);
        }
 out_unlock:
-       intel_runtime_pm_put_unchecked(i915);
+       intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
 
        return ret;
@@ -1254,6 +1257,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 {
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_ggtt *ggtt = &i915->ggtt;
+       intel_wakeref_t wakeref;
        struct drm_mm_node node;
        struct i915_vma *vma;
        u64 remain, offset;
@@ -1272,13 +1276,14 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
                 * This easily dwarfs any performance advantage from
                 * using the cache bypass of indirect GGTT access.
                 */
-               if (!intel_runtime_pm_get_if_in_use(i915)) {
+               wakeref = intel_runtime_pm_get_if_in_use(i915);
+               if (!wakeref) {
                        ret = -EFAULT;
                        goto out_unlock;
                }
        } else {
                /* No backing pages, no fallback, we must force GGTT access */
-               intel_runtime_pm_get(i915);
+               wakeref = intel_runtime_pm_get(i915);
        }
 
        vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
@@ -1360,7 +1365,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
                i915_vma_unpin(vma);
        }
 out_rpm:
-       intel_runtime_pm_put_unchecked(i915);
+       intel_runtime_pm_put(i915, wakeref);
 out_unlock:
        mutex_unlock(&i915->drm.struct_mutex);
        return ret;
@@ -1865,6 +1870,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        bool write = area->vm_flags & VM_WRITE;
+       intel_wakeref_t wakeref;
        struct i915_vma *vma;
        pgoff_t page_offset;
        int ret;
@@ -1894,7 +1900,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
        if (ret)
                goto err;
 
-       intel_runtime_pm_get(dev_priv);
+       wakeref = intel_runtime_pm_get(dev_priv);
 
        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
@@ -1972,7 +1978,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 err_unlock:
        mutex_unlock(&dev->struct_mutex);
 err_rpm:
-       intel_runtime_pm_put_unchecked(dev_priv);
+       intel_runtime_pm_put(dev_priv, wakeref);
        i915_gem_object_unpin_pages(obj);
 err:
        switch (ret) {
@@ -2045,6 +2051,7 @@ void
 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
 {
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
+       intel_wakeref_t wakeref;
 
        /* Serialisation between user GTT access and our code depends upon
         * revoking the CPU's PTE whilst the mutex is held. The next user
@@ -2055,7 +2062,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
         * wakeref.
         */
        lockdep_assert_held(&i915->drm.struct_mutex);
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
 
        if (!obj->userfault_count)
                goto out;
@@ -2072,7 +2079,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
        wmb();
 
 out:
-       intel_runtime_pm_put_unchecked(i915);
+       intel_runtime_pm_put(i915, wakeref);
 }
 
 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
@@ -4707,8 +4714,9 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
                                    struct llist_node *freed)
 {
        struct drm_i915_gem_object *obj, *on;
+       intel_wakeref_t wakeref;
 
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
        llist_for_each_entry_safe(obj, on, freed, freed) {
                struct i915_vma *vma, *vn;
 
@@ -4769,7 +4777,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
                if (on)
                        cond_resched();
        }
-       intel_runtime_pm_put_unchecked(i915);
+       intel_runtime_pm_put(i915, wakeref);
 }
 
 static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
@@ -4878,11 +4886,13 @@ void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
 
 void i915_gem_sanitize(struct drm_i915_private *i915)
 {
+       intel_wakeref_t wakeref;
+
        GEM_TRACE("\n");
 
        mutex_lock(&i915->drm.struct_mutex);
 
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
        intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
 
        /*
@@ -4905,7 +4915,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
        intel_engines_sanitize(i915, false);
 
        intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
-       intel_runtime_pm_put_unchecked(i915);
+       intel_runtime_pm_put(i915, wakeref);
 
        i915_gem_contexts_lost(i915);
        mutex_unlock(&i915->drm.struct_mutex);
@@ -4913,11 +4923,12 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
 
 int i915_gem_suspend(struct drm_i915_private *i915)
 {
+       intel_wakeref_t wakeref;
        int ret;
 
        GEM_TRACE("\n");
 
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
        intel_suspend_gt_powersave(i915);
 
        mutex_lock(&i915->drm.struct_mutex);
@@ -4969,12 +4980,12 @@ int i915_gem_suspend(struct drm_i915_private *i915)
        if (WARN_ON(!intel_engines_are_idle(i915)))
                i915_gem_set_wedged(i915); /* no hope, discard everything */
 
-       intel_runtime_pm_put_unchecked(i915);
+       intel_runtime_pm_put(i915, wakeref);
        return 0;
 
 err_unlock:
        mutex_unlock(&i915->drm.struct_mutex);
-       intel_runtime_pm_put_unchecked(i915);
+       intel_runtime_pm_put(i915, wakeref);
        return ret;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index a52fa42ed8b1..76bb1a89e530 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -2203,6 +2203,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
        struct i915_execbuffer eb;
        struct dma_fence *in_fence = NULL;
        struct sync_file *out_fence = NULL;
+       intel_wakeref_t wakeref;
        int out_fence_fd = -1;
        int err;
 
@@ -2273,7 +2274,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
         * wakeref that we hold until the GPU has been idle for at least
         * 100ms.
         */
-       intel_runtime_pm_get(eb.i915);
+       wakeref = intel_runtime_pm_get(eb.i915);
 
        err = i915_mutex_lock_interruptible(dev);
        if (err)
@@ -2425,7 +2426,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
                eb_release_vmas(&eb);
        mutex_unlock(&dev->struct_mutex);
 err_rpm:
-       intel_runtime_pm_put_unchecked(eb.i915);
+       intel_runtime_pm_put(eb.i915, wakeref);
        i915_gem_context_put(eb.ctx);
 err_destroy:
        eb_destroy(&eb);
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index 1f72f5047945..e6edcd83450c 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -210,6 +210,7 @@ static void fence_write(struct drm_i915_fence_reg *fence,
 static int fence_update(struct drm_i915_fence_reg *fence,
                        struct i915_vma *vma)
 {
+       intel_wakeref_t wakeref;
        int ret;
 
        if (vma) {
@@ -257,9 +258,10 @@ static int fence_update(struct drm_i915_fence_reg *fence,
         * If the device is currently powered down, we will defer the write
         * to the runtime resume, see i915_gem_restore_fences().
         */
-       if (intel_runtime_pm_get_if_in_use(fence->i915)) {
+       wakeref = intel_runtime_pm_get_if_in_use(fence->i915);
+       if (wakeref) {
                fence_write(fence, vma);
-               intel_runtime_pm_put_unchecked(fence->i915);
+               intel_runtime_pm_put(fence->i915, wakeref);
        }
 
        if (vma) {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 6dac9614f7ba..4bec10286487 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2528,6 +2528,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
 {
        struct drm_i915_private *i915 = vma->vm->i915;
        struct drm_i915_gem_object *obj = vma->obj;
+       intel_wakeref_t wakeref;
        u32 pte_flags;
 
        /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
@@ -2535,9 +2536,9 @@ static int ggtt_bind_vma(struct i915_vma *vma,
        if (i915_gem_object_is_readonly(obj))
                pte_flags |= PTE_READ_ONLY;
 
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
        vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
-       intel_runtime_pm_put_unchecked(i915);
+       intel_runtime_pm_put(i915, wakeref);
 
        vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
 
@@ -2554,10 +2555,11 @@ static int ggtt_bind_vma(struct i915_vma *vma,
 static void ggtt_unbind_vma(struct i915_vma *vma)
 {
        struct drm_i915_private *i915 = vma->vm->i915;
+       intel_wakeref_t wakeref;
 
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
        vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
-       intel_runtime_pm_put_unchecked(i915);
+       intel_runtime_pm_put(i915, wakeref);
 }
 
 static int aliasing_gtt_bind_vma(struct i915_vma *vma,
@@ -2589,9 +2591,11 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
        }
 
        if (flags & I915_VMA_GLOBAL_BIND) {
-               intel_runtime_pm_get(i915);
+               intel_wakeref_t wakeref;
+
+               wakeref = intel_runtime_pm_get(i915);
                vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
-               intel_runtime_pm_put_unchecked(i915);
+               intel_runtime_pm_put(i915, wakeref);
        }
 
        return 0;
@@ -2602,9 +2606,11 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
        struct drm_i915_private *i915 = vma->vm->i915;
 
        if (vma->flags & I915_VMA_GLOBAL_BIND) {
-               intel_runtime_pm_get(i915);
+               intel_wakeref_t wakeref;
+
+               wakeref = intel_runtime_pm_get(i915);
                vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
-               intel_runtime_pm_put_unchecked(i915);
+               intel_runtime_pm_put(i915, wakeref);
        }
 
        if (vma->flags & I915_VMA_LOCAL_BIND) {
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 16693dd4d019..bc230e43b98f 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -154,6 +154,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
                { &i915->mm.bound_list, I915_SHRINK_BOUND },
                { NULL, 0 },
        }, *phase;
+       intel_wakeref_t wakeref = 0;
        unsigned long count = 0;
        unsigned long scanned = 0;
        bool unlock;
@@ -183,9 +184,11 @@ i915_gem_shrink(struct drm_i915_private *i915,
         * device just to recover a little memory. If absolutely necessary,
         * we will force the wake during oom-notifier.
         */
-       if ((flags & I915_SHRINK_BOUND) &&
-           !intel_runtime_pm_get_if_in_use(i915))
-               flags &= ~I915_SHRINK_BOUND;
+       if (flags & I915_SHRINK_BOUND) {
+               wakeref = intel_runtime_pm_get_if_in_use(i915);
+               if (!wakeref)
+                       flags &= ~I915_SHRINK_BOUND;
+       }
 
        /*
         * As we may completely rewrite the (un)bound list whilst unbinding
@@ -266,7 +269,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
        }
 
        if (flags & I915_SHRINK_BOUND)
-               intel_runtime_pm_put_unchecked(i915);
+               intel_runtime_pm_put(i915, wakeref);
 
        i915_retire_requests(i915);
 
@@ -293,14 +296,15 @@ i915_gem_shrink(struct drm_i915_private *i915,
  */
 unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
 {
+       intel_wakeref_t wakeref;
        unsigned long freed;
 
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
        freed = i915_gem_shrink(i915, -1UL, NULL,
                                I915_SHRINK_BOUND |
                                I915_SHRINK_UNBOUND |
                                I915_SHRINK_ACTIVE);
-       intel_runtime_pm_put_unchecked(i915);
+       intel_runtime_pm_put(i915, wakeref);
 
        return freed;
 }
@@ -371,14 +375,16 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
                                         I915_SHRINK_BOUND |
                                         I915_SHRINK_UNBOUND);
        if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) {
-               intel_runtime_pm_get(i915);
+               intel_wakeref_t wakeref;
+
+               wakeref = intel_runtime_pm_get(i915);
                freed += i915_gem_shrink(i915,
                                         sc->nr_to_scan - sc->nr_scanned,
                                         &sc->nr_scanned,
                                         I915_SHRINK_ACTIVE |
                                         I915_SHRINK_BOUND |
                                         I915_SHRINK_UNBOUND);
-               intel_runtime_pm_put_unchecked(i915);
+               intel_runtime_pm_put(i915, wakeref);
        }
 
        shrinker_unlock(i915, unlock);
@@ -418,12 +424,13 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
                container_of(nb, struct drm_i915_private, mm.oom_notifier);
        struct drm_i915_gem_object *obj;
        unsigned long unevictable, bound, unbound, freed_pages;
+       intel_wakeref_t wakeref;
 
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
        freed_pages = i915_gem_shrink(i915, -1UL, NULL,
                                      I915_SHRINK_BOUND |
                                      I915_SHRINK_UNBOUND);
-       intel_runtime_pm_put_unchecked(i915);
+       intel_runtime_pm_put(i915, wakeref);
 
        /* Because we may be allocating inside our own driver, we cannot
         * assert that there are no objects with pinned pages that are not
@@ -461,6 +468,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
                container_of(nb, struct drm_i915_private, mm.vmap_notifier);
        struct i915_vma *vma, *next;
        unsigned long freed_pages = 0;
+       intel_wakeref_t wakeref;
        bool unlock;
        int ret;
 
@@ -474,12 +482,12 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
        if (ret)
                goto out;
 
-       intel_runtime_pm_get(i915);
+       wakeref = intel_runtime_pm_get(i915);
        freed_pages += i915_gem_shrink(i915, -1UL, NULL,
                                       I915_SHRINK_BOUND |
                                       I915_SHRINK_UNBOUND |
                                       I915_SHRINK_VMAPS);
-       intel_runtime_pm_put_unchecked(i915);
+       intel_runtime_pm_put(i915, wakeref);
 
        /* We also want to clear any cached iomaps as they wrap vmap */
        list_for_each_entry_safe(vma, next,
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 85131166589c..bf4dae2649ab 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -913,10 +913,12 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
 static bool ring_is_idle(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
+       intel_wakeref_t wakeref;
        bool idle = true;
 
        /* If the whole device is asleep, the engine must be idle */
-       if (!intel_runtime_pm_get_if_in_use(dev_priv))
+       wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
+       if (!wakeref)
                return true;
 
        /* First check that no commands are left in the ring */
@@ -928,7 +930,7 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
        if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
                idle = false;
 
-       intel_runtime_pm_put_unchecked(dev_priv);
+       intel_runtime_pm_put(dev_priv, wakeref);
 
        return idle;
 }
@@ -1425,6 +1427,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
        const struct intel_engine_execlists * const execlists = &engine->execlists;
        struct i915_gpu_error * const error = &engine->i915->gpu_error;
        struct i915_request *rq, *last;
+       intel_wakeref_t wakeref;
        unsigned long flags;
        struct rb_node *rb;
        int count;
@@ -1483,9 +1486,10 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 
        rcu_read_unlock();
 
-       if (intel_runtime_pm_get_if_in_use(engine->i915)) {
+       wakeref = intel_runtime_pm_get_if_in_use(engine->i915);
+       if (wakeref) {
                intel_engine_print_registers(engine, m);
-               intel_runtime_pm_put_unchecked(engine->i915);
+               intel_runtime_pm_put(engine->i915, wakeref);
        } else {
                drm_printf(m, "\tDevice is asleep; skipping register dump\n");
        }
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 8d4c76ac0e7d..d494d92da02c 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1670,6 +1670,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_reg_read *reg = data;
        struct reg_whitelist const *entry;
+       intel_wakeref_t wakeref;
        unsigned int flags;
        int remain;
        int ret = 0;
@@ -1695,7 +1696,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
 
        flags = reg->offset & (entry->size - 1);
 
-       intel_runtime_pm_get(dev_priv);
+       wakeref = intel_runtime_pm_get(dev_priv);
        if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
                reg->val = I915_READ64_2x32(entry->offset_ldw,
                                            entry->offset_udw);
@@ -1709,7 +1710,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
                reg->val = I915_READ8(entry->offset_ldw);
        else
                ret = -EINVAL;
-       intel_runtime_pm_put_unchecked(dev_priv);
+       intel_runtime_pm_put(dev_priv, wakeref);
 
        return ret;
 }
-- 
2.20.1
