Re: [Intel-gfx] [PATCH 25/34] drm/i915: Track active timelines

2019-01-23 Thread Chris Wilson
Quoting John Harrison (2019-01-23 22:32:54)
> On 1/22/2019 07:17, Chris Wilson wrote:
> > Quoting Tvrtko Ursulin (2019-01-22 14:56:32)
> >> On 21/01/2019 22:21, Chris Wilson wrote:
> >>> +static void timeline_active(struct i915_timeline *tl)
> >>> +{
> >>> + struct i915_gt_timelines *gt = &tl->i915->gt.timelines;
> >>> +
> >>> + mutex_lock(&gt->mutex);
> >>> + list_add(&tl->link, &gt->active_list);
> >>> + mutex_unlock(&gt->mutex);
> >>> +}
> >>> +
> >>> +static void timeline_inactive(struct i915_timeline *tl)
> >>> +{
> >>> + struct i915_gt_timelines *gt = &tl->i915->gt.timelines;
> >>> +
> >>> + mutex_lock(&gt->mutex);
> >>> + list_del(&tl->link);
> >>> + mutex_unlock(&gt->mutex);
> >>> +}
> >> Bike shedding comments only:
> >> Would it be better to use a verb suffix? Even though timeline_activate
> >> also wouldn't sound perfect. Since it is file local - activate_timeline?
> >> Or even just inline to pin/unpin. Unless more gets put into them later..
> > Haven't got any plans for more here, yet, and was thinking this is a
> > pinned_list myself. I picked active_list since I was using 'active'
> > elsewhere for active_ring, active_engines, active_contexts, etc.
> >
> > I didn't like activate/deactivate enough to switch, and was trying to
> > avoid reusing pin/unpin along this path:
> >   i915_timeline_pin -> timeline_pin
> > begged confusion
> >
> > [snip]
> >> Never mind the bikeshedding comments:
> > There's time enough for someone to open a new pot of paint.
> I agree that having a verb in there would make things clearer. Maybe 
> timeline_make_(in)active? Or timeline_mark_(in)active?

mark more so than make (since the action we are doing is external).

How about

timeline_track_active()
timeline_untrack_active() / timeline_cancel_active()

or
timeline_add_to_active()
timeline_remove_from_active()

Finally!
-Chris
___
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx


Re: [Intel-gfx] [PATCH 25/34] drm/i915: Track active timelines

2019-01-23 Thread John Harrison

On 1/22/2019 07:17, Chris Wilson wrote:

Quoting Tvrtko Ursulin (2019-01-22 14:56:32)

On 21/01/2019 22:21, Chris Wilson wrote:

Now that we pin timelines around use, we have a clearly defined lifetime
and convenient points at which we can track only the active timelines.
This allows us to reduce the list iteration to only consider those
active timelines and not all.

Signed-off-by: Chris Wilson 
---
   drivers/gpu/drm/i915/i915_drv.h  |  2 +-
   drivers/gpu/drm/i915/i915_gem.c  |  4 +--
   drivers/gpu/drm/i915/i915_reset.c|  2 +-
   drivers/gpu/drm/i915/i915_timeline.c | 39 ++--
   4 files changed, 29 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index c00eaf2889fb..5577e0e1034f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1977,7 +1977,7 @@ struct drm_i915_private {
   
   struct i915_gt_timelines {

   struct mutex mutex; /* protects list, tainted by GPU */
- struct list_head list;
+ struct list_head active_list;
   
   /* Pack multiple timelines' seqnos into the same page */

   spinlock_t hwsp_lock;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4e0de22f0166..9c499edb4c13 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3246,7 +3246,7 @@ wait_for_timelines(struct drm_i915_private *i915,
   return timeout;
   
   mutex_lock(>mutex);

- list_for_each_entry(tl, >list, link) {
+ list_for_each_entry(tl, >active_list, link) {
   struct i915_request *rq;
   
   rq = i915_gem_active_get_unlocked(>last_request);

@@ -3274,7 +3274,7 @@ wait_for_timelines(struct drm_i915_private *i915,
   
   /* restart after reacquiring the lock */

   mutex_lock(>mutex);
- tl = list_entry(>list, typeof(*tl), link);
+ tl = list_entry(>active_list, typeof(*tl), link);
   }
   mutex_unlock(>mutex);
   
diff --git a/drivers/gpu/drm/i915/i915_reset.c b/drivers/gpu/drm/i915/i915_reset.c

index 09edf488f711..9b9169508139 100644
--- a/drivers/gpu/drm/i915/i915_reset.c
+++ b/drivers/gpu/drm/i915/i915_reset.c
@@ -852,7 +852,7 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
* No more can be submitted until we reset the wedged bit.
*/
   mutex_lock(>gt.timelines.mutex);
- list_for_each_entry(tl, >gt.timelines.list, link) {
+ list_for_each_entry(tl, >gt.timelines.active_list, link) {
   struct i915_request *rq;
   long timeout;
   
diff --git a/drivers/gpu/drm/i915/i915_timeline.c b/drivers/gpu/drm/i915/i915_timeline.c

index 69ee33dfa340..007348b1b469 100644
--- a/drivers/gpu/drm/i915/i915_timeline.c
+++ b/drivers/gpu/drm/i915/i915_timeline.c
@@ -117,7 +117,6 @@ int i915_timeline_init(struct drm_i915_private *i915,
  const char *name,
  struct i915_vma *hwsp)
   {
- struct i915_gt_timelines *gt = >gt.timelines;
   void *vaddr;
   
   /*

@@ -161,10 +160,6 @@ int i915_timeline_init(struct drm_i915_private *i915,
   
   i915_syncmap_init(>sync);
   
- mutex_lock(>mutex);

- list_add(>link, >list);
- mutex_unlock(>mutex);
-
   return 0;
   }
   
@@ -173,7 +168,7 @@ void i915_timelines_init(struct drm_i915_private *i915)

   struct i915_gt_timelines *gt = >gt.timelines;
   
   mutex_init(>mutex);

- INIT_LIST_HEAD(>list);
+ INIT_LIST_HEAD(>active_list);
   
   spin_lock_init(>hwsp_lock);

   INIT_LIST_HEAD(>hwsp_free_list);
@@ -182,6 +177,24 @@ void i915_timelines_init(struct drm_i915_private *i915)
   i915_gem_shrinker_taints_mutex(i915, >mutex);
   }
   
+static void timeline_active(struct i915_timeline *tl)

+{
+ struct i915_gt_timelines *gt = &tl->i915->gt.timelines;
+
+ mutex_lock(&gt->mutex);
+ list_add(&tl->link, &gt->active_list);
+ mutex_unlock(&gt->mutex);
+}
+
+static void timeline_inactive(struct i915_timeline *tl)
+{
+ struct i915_gt_timelines *gt = &tl->i915->gt.timelines;
+
+ mutex_lock(&gt->mutex);
+ list_del(&tl->link);
+ mutex_unlock(&gt->mutex);
+}

Bike shedding comments only:
Would it be better to use a verb suffix? Even though timeline_activate
also wouldn't sound perfect. Since it is file local - activate_timeline?
Or even just inline to pin/unpin. Unless more gets put into them later..

Haven't got any plans for more here, yet, and was thinking this is a
pinned_list myself. I picked active_list since I was using 'active'
elsewhere for active_ring, active_engines, active_contexts, etc.

I didn't like activate/deactivate enough to switch, and was trying to
avoid reusing pin/unpin along this path:
i915_timeline_pin -> timeline_pin
begged confusion

[snip]

Never mind the bikeshedding comments:

There's time enough for someone to 

Re: [Intel-gfx] [PATCH 25/34] drm/i915: Track active timelines

2019-01-22 Thread Chris Wilson
Quoting Tvrtko Ursulin (2019-01-22 14:56:32)
> 
> On 21/01/2019 22:21, Chris Wilson wrote:
> > Now that we pin timelines around use, we have a clearly defined lifetime
> > and convenient points at which we can track only the active timelines.
> > This allows us to reduce the list iteration to only consider those
> > active timelines and not all.
> > 
> > Signed-off-by: Chris Wilson 
> > ---
> >   drivers/gpu/drm/i915/i915_drv.h  |  2 +-
> >   drivers/gpu/drm/i915/i915_gem.c  |  4 +--
> >   drivers/gpu/drm/i915/i915_reset.c|  2 +-
> >   drivers/gpu/drm/i915/i915_timeline.c | 39 ++--
> >   4 files changed, 29 insertions(+), 18 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/i915/i915_drv.h 
> > b/drivers/gpu/drm/i915/i915_drv.h
> > index c00eaf2889fb..5577e0e1034f 100644
> > --- a/drivers/gpu/drm/i915/i915_drv.h
> > +++ b/drivers/gpu/drm/i915/i915_drv.h
> > @@ -1977,7 +1977,7 @@ struct drm_i915_private {
> >   
> >   struct i915_gt_timelines {
> >   struct mutex mutex; /* protects list, tainted by GPU 
> > */
> > - struct list_head list;
> > + struct list_head active_list;
> >   
> >   /* Pack multiple timelines' seqnos into the same page 
> > */
> >   spinlock_t hwsp_lock;
> > diff --git a/drivers/gpu/drm/i915/i915_gem.c 
> > b/drivers/gpu/drm/i915/i915_gem.c
> > index 4e0de22f0166..9c499edb4c13 100644
> > --- a/drivers/gpu/drm/i915/i915_gem.c
> > +++ b/drivers/gpu/drm/i915/i915_gem.c
> > @@ -3246,7 +3246,7 @@ wait_for_timelines(struct drm_i915_private *i915,
> >   return timeout;
> >   
> >   mutex_lock(>mutex);
> > - list_for_each_entry(tl, >list, link) {
> > + list_for_each_entry(tl, >active_list, link) {
> >   struct i915_request *rq;
> >   
> >   rq = i915_gem_active_get_unlocked(>last_request);
> > @@ -3274,7 +3274,7 @@ wait_for_timelines(struct drm_i915_private *i915,
> >   
> >   /* restart after reacquiring the lock */
> >   mutex_lock(>mutex);
> > - tl = list_entry(>list, typeof(*tl), link);
> > + tl = list_entry(>active_list, typeof(*tl), link);
> >   }
> >   mutex_unlock(>mutex);
> >   
> > diff --git a/drivers/gpu/drm/i915/i915_reset.c 
> > b/drivers/gpu/drm/i915/i915_reset.c
> > index 09edf488f711..9b9169508139 100644
> > --- a/drivers/gpu/drm/i915/i915_reset.c
> > +++ b/drivers/gpu/drm/i915/i915_reset.c
> > @@ -852,7 +852,7 @@ bool i915_gem_unset_wedged(struct drm_i915_private 
> > *i915)
> >* No more can be submitted until we reset the wedged bit.
> >*/
> >   mutex_lock(>gt.timelines.mutex);
> > - list_for_each_entry(tl, >gt.timelines.list, link) {
> > + list_for_each_entry(tl, >gt.timelines.active_list, link) {
> >   struct i915_request *rq;
> >   long timeout;
> >   
> > diff --git a/drivers/gpu/drm/i915/i915_timeline.c 
> > b/drivers/gpu/drm/i915/i915_timeline.c
> > index 69ee33dfa340..007348b1b469 100644
> > --- a/drivers/gpu/drm/i915/i915_timeline.c
> > +++ b/drivers/gpu/drm/i915/i915_timeline.c
> > @@ -117,7 +117,6 @@ int i915_timeline_init(struct drm_i915_private *i915,
> >  const char *name,
> >  struct i915_vma *hwsp)
> >   {
> > - struct i915_gt_timelines *gt = >gt.timelines;
> >   void *vaddr;
> >   
> >   /*
> > @@ -161,10 +160,6 @@ int i915_timeline_init(struct drm_i915_private *i915,
> >   
> >   i915_syncmap_init(>sync);
> >   
> > - mutex_lock(>mutex);
> > - list_add(>link, >list);
> > - mutex_unlock(>mutex);
> > -
> >   return 0;
> >   }
> >   
> > @@ -173,7 +168,7 @@ void i915_timelines_init(struct drm_i915_private *i915)
> >   struct i915_gt_timelines *gt = >gt.timelines;
> >   
> >   mutex_init(>mutex);
> > - INIT_LIST_HEAD(>list);
> > + INIT_LIST_HEAD(>active_list);
> >   
> >   spin_lock_init(>hwsp_lock);
> >   INIT_LIST_HEAD(>hwsp_free_list);
> > @@ -182,6 +177,24 @@ void i915_timelines_init(struct drm_i915_private *i915)
> >   i915_gem_shrinker_taints_mutex(i915, >mutex);
> >   }
> >   
> > +static void timeline_active(struct i915_timeline *tl)
> > +{
> > + struct i915_gt_timelines *gt = &tl->i915->gt.timelines;
> > +
> > + mutex_lock(&gt->mutex);
> > + list_add(&tl->link, &gt->active_list);
> > + mutex_unlock(&gt->mutex);
> > +}
> > +
> > +static void timeline_inactive(struct i915_timeline *tl)
> > +{
> > + struct i915_gt_timelines *gt = &tl->i915->gt.timelines;
> > +
> > + mutex_lock(&gt->mutex);
> > + list_del(&tl->link);
> > + mutex_unlock(&gt->mutex);
> > +}
> 
> Bike shedding comments only:
> Would it be better to use a verb suffix? Even though timeline_activate 
> also wouldn't sound perfect. Since it is file local - activate_timeline? 
> Or even just inline to pin/unpin. Unless more gets put into them later..

Haven't got any plans for more here, yet, and was 

Re: [Intel-gfx] [PATCH 25/34] drm/i915: Track active timelines

2019-01-22 Thread Tvrtko Ursulin


On 21/01/2019 22:21, Chris Wilson wrote:

Now that we pin timelines around use, we have a clearly defined lifetime
and convenient points at which we can track only the active timelines.
This allows us to reduce the list iteration to only consider those
active timelines and not all.

Signed-off-by: Chris Wilson 
---
  drivers/gpu/drm/i915/i915_drv.h  |  2 +-
  drivers/gpu/drm/i915/i915_gem.c  |  4 +--
  drivers/gpu/drm/i915/i915_reset.c|  2 +-
  drivers/gpu/drm/i915/i915_timeline.c | 39 ++--
  4 files changed, 29 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index c00eaf2889fb..5577e0e1034f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1977,7 +1977,7 @@ struct drm_i915_private {
  
  		struct i915_gt_timelines {

struct mutex mutex; /* protects list, tainted by GPU */
-   struct list_head list;
+   struct list_head active_list;
  
  			/* Pack multiple timelines' seqnos into the same page */

spinlock_t hwsp_lock;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4e0de22f0166..9c499edb4c13 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3246,7 +3246,7 @@ wait_for_timelines(struct drm_i915_private *i915,
return timeout;
  
  	mutex_lock(>mutex);

-   list_for_each_entry(tl, >list, link) {
+   list_for_each_entry(tl, >active_list, link) {
struct i915_request *rq;
  
  		rq = i915_gem_active_get_unlocked(>last_request);

@@ -3274,7 +3274,7 @@ wait_for_timelines(struct drm_i915_private *i915,
  
  		/* restart after reacquiring the lock */

mutex_lock(>mutex);
-   tl = list_entry(>list, typeof(*tl), link);
+   tl = list_entry(>active_list, typeof(*tl), link);
}
mutex_unlock(>mutex);
  
diff --git a/drivers/gpu/drm/i915/i915_reset.c b/drivers/gpu/drm/i915/i915_reset.c

index 09edf488f711..9b9169508139 100644
--- a/drivers/gpu/drm/i915/i915_reset.c
+++ b/drivers/gpu/drm/i915/i915_reset.c
@@ -852,7 +852,7 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
 * No more can be submitted until we reset the wedged bit.
 */
mutex_lock(>gt.timelines.mutex);
-   list_for_each_entry(tl, >gt.timelines.list, link) {
+   list_for_each_entry(tl, >gt.timelines.active_list, link) {
struct i915_request *rq;
long timeout;
  
diff --git a/drivers/gpu/drm/i915/i915_timeline.c b/drivers/gpu/drm/i915/i915_timeline.c

index 69ee33dfa340..007348b1b469 100644
--- a/drivers/gpu/drm/i915/i915_timeline.c
+++ b/drivers/gpu/drm/i915/i915_timeline.c
@@ -117,7 +117,6 @@ int i915_timeline_init(struct drm_i915_private *i915,
   const char *name,
   struct i915_vma *hwsp)
  {
-   struct i915_gt_timelines *gt = >gt.timelines;
void *vaddr;
  
  	/*

@@ -161,10 +160,6 @@ int i915_timeline_init(struct drm_i915_private *i915,
  
  	i915_syncmap_init(>sync);
  
-	mutex_lock(>mutex);

-   list_add(>link, >list);
-   mutex_unlock(>mutex);
-
return 0;
  }
  
@@ -173,7 +168,7 @@ void i915_timelines_init(struct drm_i915_private *i915)

struct i915_gt_timelines *gt = >gt.timelines;
  
  	mutex_init(>mutex);

-   INIT_LIST_HEAD(>list);
+   INIT_LIST_HEAD(>active_list);
  
  	spin_lock_init(>hwsp_lock);

INIT_LIST_HEAD(>hwsp_free_list);
@@ -182,6 +177,24 @@ void i915_timelines_init(struct drm_i915_private *i915)
i915_gem_shrinker_taints_mutex(i915, >mutex);
  }
  
+static void timeline_active(struct i915_timeline *tl)

+{
+   struct i915_gt_timelines *gt = &tl->i915->gt.timelines;
+
+   mutex_lock(&gt->mutex);
+   list_add(&tl->link, &gt->active_list);
+   mutex_unlock(&gt->mutex);
+}
+
+static void timeline_inactive(struct i915_timeline *tl)
+{
+   struct i915_gt_timelines *gt = &tl->i915->gt.timelines;
+
+   mutex_lock(&gt->mutex);
+   list_del(&tl->link);
+   mutex_unlock(&gt->mutex);
+}


Bike shedding comments only:
Would it be better to use a verb suffix? Even though timeline_activate 
also wouldn't sound perfect. Since it is file local - activate_timeline? 
Or even just inline to pin/unpin. Unless more gets put into them later..



+
  /**
   * i915_timelines_park - called when the driver idles
   * @i915: the drm_i915_private device
@@ -198,7 +211,7 @@ void i915_timelines_park(struct drm_i915_private *i915)
struct i915_timeline *timeline;
  
  	mutex_lock(>mutex);

-   list_for_each_entry(timeline, >list, link) {
+   list_for_each_entry(timeline, >active_list, link) {
/*
 * All known fences are completed so we can scrap
 * the current sync point tracking and start afresh,
@@ -212,15 +225,9 @@ void i915_timelines_park(struct 

[Intel-gfx] [PATCH 25/34] drm/i915: Track active timelines

2019-01-21 Thread Chris Wilson
Now that we pin timelines around use, we have a clearly defined lifetime
and convenient points at which we can track only the active timelines.
This allows us to reduce the list iteration to only consider those
active timelines and not all.

Signed-off-by: Chris Wilson 
---
 drivers/gpu/drm/i915/i915_drv.h  |  2 +-
 drivers/gpu/drm/i915/i915_gem.c  |  4 +--
 drivers/gpu/drm/i915/i915_reset.c|  2 +-
 drivers/gpu/drm/i915/i915_timeline.c | 39 ++--
 4 files changed, 29 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index c00eaf2889fb..5577e0e1034f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1977,7 +1977,7 @@ struct drm_i915_private {
 
struct i915_gt_timelines {
struct mutex mutex; /* protects list, tainted by GPU */
-   struct list_head list;
+   struct list_head active_list;
 
/* Pack multiple timelines' seqnos into the same page */
spinlock_t hwsp_lock;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4e0de22f0166..9c499edb4c13 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3246,7 +3246,7 @@ wait_for_timelines(struct drm_i915_private *i915,
return timeout;
 
mutex_lock(>mutex);
-   list_for_each_entry(tl, >list, link) {
+   list_for_each_entry(tl, >active_list, link) {
struct i915_request *rq;
 
rq = i915_gem_active_get_unlocked(>last_request);
@@ -3274,7 +3274,7 @@ wait_for_timelines(struct drm_i915_private *i915,
 
/* restart after reacquiring the lock */
mutex_lock(>mutex);
-   tl = list_entry(>list, typeof(*tl), link);
+   tl = list_entry(>active_list, typeof(*tl), link);
}
mutex_unlock(>mutex);
 
diff --git a/drivers/gpu/drm/i915/i915_reset.c 
b/drivers/gpu/drm/i915/i915_reset.c
index 09edf488f711..9b9169508139 100644
--- a/drivers/gpu/drm/i915/i915_reset.c
+++ b/drivers/gpu/drm/i915/i915_reset.c
@@ -852,7 +852,7 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
 * No more can be submitted until we reset the wedged bit.
 */
mutex_lock(>gt.timelines.mutex);
-   list_for_each_entry(tl, >gt.timelines.list, link) {
+   list_for_each_entry(tl, >gt.timelines.active_list, link) {
struct i915_request *rq;
long timeout;
 
diff --git a/drivers/gpu/drm/i915/i915_timeline.c 
b/drivers/gpu/drm/i915/i915_timeline.c
index 69ee33dfa340..007348b1b469 100644
--- a/drivers/gpu/drm/i915/i915_timeline.c
+++ b/drivers/gpu/drm/i915/i915_timeline.c
@@ -117,7 +117,6 @@ int i915_timeline_init(struct drm_i915_private *i915,
   const char *name,
   struct i915_vma *hwsp)
 {
-   struct i915_gt_timelines *gt = >gt.timelines;
void *vaddr;
 
/*
@@ -161,10 +160,6 @@ int i915_timeline_init(struct drm_i915_private *i915,
 
i915_syncmap_init(>sync);
 
-   mutex_lock(&gt->mutex);
-   list_add(&timeline->link, &gt->list);
-   mutex_unlock(&gt->mutex);
-
return 0;
 }
 
@@ -173,7 +168,7 @@ void i915_timelines_init(struct drm_i915_private *i915)
struct i915_gt_timelines *gt = >gt.timelines;
 
mutex_init(>mutex);
-   INIT_LIST_HEAD(>list);
+   INIT_LIST_HEAD(>active_list);
 
spin_lock_init(>hwsp_lock);
INIT_LIST_HEAD(>hwsp_free_list);
@@ -182,6 +177,24 @@ void i915_timelines_init(struct drm_i915_private *i915)
i915_gem_shrinker_taints_mutex(i915, >mutex);
 }
 
+static void timeline_active(struct i915_timeline *tl)
+{
+   struct i915_gt_timelines *gt = &tl->i915->gt.timelines;
+
+   mutex_lock(&gt->mutex);
+   list_add(&tl->link, &gt->active_list);
+   mutex_unlock(&gt->mutex);
+}
+
+static void timeline_inactive(struct i915_timeline *tl)
+{
+   struct i915_gt_timelines *gt = &tl->i915->gt.timelines;
+
+   mutex_lock(&gt->mutex);
+   list_del(&tl->link);
+   mutex_unlock(&gt->mutex);
+}
+
 /**
  * i915_timelines_park - called when the driver idles
  * @i915: the drm_i915_private device
@@ -198,7 +211,7 @@ void i915_timelines_park(struct drm_i915_private *i915)
struct i915_timeline *timeline;
 
mutex_lock(>mutex);
-   list_for_each_entry(timeline, >list, link) {
+   list_for_each_entry(timeline, >active_list, link) {
/*
 * All known fences are completed so we can scrap
 * the current sync point tracking and start afresh,
@@ -212,15 +225,9 @@ void i915_timelines_park(struct drm_i915_private *i915)
 
 void i915_timeline_fini(struct i915_timeline *timeline)
 {
-   struct i915_gt_timelines *gt = >i915->gt.timelines;
-
GEM_BUG_ON(timeline->pin_count);
GEM_BUG_ON(!list_empty(>requests));
 
-