Re: [PATCH 03/10] drm/vkms: Rename vkms_output.state_lock to crc_lock

2019-06-13 Thread Daniel Vetter
On Wed, Jun 12, 2019 at 10:38:23AM -0300, Rodrigo Siqueira wrote:
> On Thu, Jun 6, 2019 at 7:28 PM Daniel Vetter  wrote:
> >
> > Plus add a comment about what it actually protects. It's very little.
> >
> > Signed-off-by: Daniel Vetter 
> > Cc: Rodrigo Siqueira 
> > Cc: Haneen Mohammed 
> > Cc: Daniel Vetter 
> > ---
> >  drivers/gpu/drm/vkms/vkms_crc.c  | 4 ++--
> >  drivers/gpu/drm/vkms/vkms_crtc.c | 6 +++---
> >  drivers/gpu/drm/vkms/vkms_drv.h  | 5 +++--
> >  3 files changed, 8 insertions(+), 7 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/vkms/vkms_crc.c b/drivers/gpu/drm/vkms/vkms_crc.c
> > index 883e36fe7b6e..96806cd35ad4 100644
> > --- a/drivers/gpu/drm/vkms/vkms_crc.c
> > +++ b/drivers/gpu/drm/vkms/vkms_crc.c
> > @@ -168,14 +168,14 @@ void vkms_crc_work_handle(struct work_struct *work)
> > u64 frame_start, frame_end;
> > bool crc_pending;
> >
> > -   spin_lock_irq(&out->state_lock);
> > +   spin_lock_irq(&out->crc_lock);
> > frame_start = crtc_state->frame_start;
> > frame_end = crtc_state->frame_end;
> > crc_pending = crtc_state->crc_pending;
> > crtc_state->frame_start = 0;
> > crtc_state->frame_end = 0;
> > crtc_state->crc_pending = false;
> > -   spin_unlock_irq(&out->state_lock);
> > +   spin_unlock_irq(&out->crc_lock);
> >
> > /*
> >  * We raced with the vblank hrtimer and previous work already 
> > computed
> > diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
> > index c727d8486e97..55b16d545fe7 100644
> > --- a/drivers/gpu/drm/vkms/vkms_crtc.c
> > +++ b/drivers/gpu/drm/vkms/vkms_crtc.c
> > @@ -29,7 +29,7 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
> > /* update frame_start only if a queued vkms_crc_work_handle()
> >  * has read the data
> >  */
> > -   spin_lock(&output->state_lock);
> > +   spin_lock(&output->crc_lock);
> > if (!state->crc_pending)
> > state->frame_start = frame;
> > else
> > @@ -37,7 +37,7 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
> >  state->frame_start, frame);
> > state->frame_end = frame;
> > state->crc_pending = true;
> > -   spin_unlock(&output->state_lock);
> > +   spin_unlock(&output->crc_lock);
> >
> > ret = queue_work(output->crc_workq, &state->crc_work);
> > if (!ret)
> > @@ -224,7 +224,7 @@ int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
> > drm_crtc_helper_add(crtc, &vkms_crtc_helper_funcs);
> >
> > spin_lock_init(&vkms_out->lock);
> > -   spin_lock_init(&vkms_out->state_lock);
> > +   spin_lock_init(&vkms_out->crc_lock);
> >
> > vkms_out->crc_workq = alloc_ordered_workqueue("vkms_crc_workq", 0);
> > if (!vkms_out->crc_workq)
> > diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
> > index 3c7e06b19efd..43d3f98289fe 100644
> > --- a/drivers/gpu/drm/vkms/vkms_drv.h
> > +++ b/drivers/gpu/drm/vkms/vkms_drv.h
> > @@ -57,6 +57,7 @@ struct vkms_crtc_state {
> > struct drm_crtc_state base;
> > struct work_struct crc_work;
> >
> > +   /* below three are protected by vkms_output.crc_lock */
> > bool crc_pending;
> > u64 frame_start;
> > u64 frame_end;
> > @@ -74,8 +75,8 @@ struct vkms_output {
> > struct workqueue_struct *crc_workq;
> > /* protects concurrent access to crc_data */
> > spinlock_t lock;
> 
> It's not really specific to this patch, but after reviewing it, I was
> thinking about the use of the 'lock' field in struct vkms_output.
> Do we really need it? It looks like crc_lock has just replaced it.

Yeah, they're a bit redundant, but the other way round: crc_lock only
protects the per-state data in vkms_crtc_state, and we constantly recreate
that state. So if we want to remove one of those locks, crc_lock is the one
that needs to go. Only vkms_output->lock is global.
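
Concretely, this is everything crc_lock covers, condensed from the quoted
hunks above (snippet only; the surrounding code and the DRM_DEBUG in the
else branch are elided):

/* vblank hrtimer side, vkms_crtc.c: */
spin_lock(&output->crc_lock);
if (!state->crc_pending)
	state->frame_start = frame;
state->frame_end = frame;
state->crc_pending = true;
spin_unlock(&output->crc_lock);

/* CRC worker side, vkms_crc.c: */
spin_lock_irq(&out->crc_lock);
frame_start = crtc_state->frame_start;
frame_end = crtc_state->frame_end;
crc_pending = crtc_state->crc_pending;
crtc_state->frame_start = 0;
crtc_state->frame_end = 0;
crtc_state->crc_pending = false;
spin_unlock_irq(&out->crc_lock);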

I figured it's not something we absolutely have to do right away :-)

> Additionally, looking a little deeper into the lock field in struct
> vkms_output, I was also asking myself: which critical section do we want
> to protect with this lock? After inspecting its uses, I noticed two
> different places that take it:
> 
> 1. In the function vkms_vblank_simulate()
> 
> Note that we cover a vast area with ‘output->lock’. IMHO we only need
> to protect the state variable (the line “state = output->crc_state;”),
> and we could use crc_lock for this case as well. Does that make sense?

Hm, very tricky, but good point. Going through that entire function, as it
is after my patch series:

- hrtimer_forward_now: I think with all the patches to fix the ordering it
  should be safe to take that out of vkms_output->lock. In the
  get_vblank_timestamp function we also don't take the lock, so 

Re: [PATCH 03/10] drm/vkms: Rename vkms_output.state_lock to crc_lock

2019-06-12 Thread Rodrigo Siqueira
On Thu, Jun 6, 2019 at 7:28 PM Daniel Vetter  wrote:
>
> Plus add a comment about what it actually protects. It's very little.
>
> Signed-off-by: Daniel Vetter 
> Cc: Rodrigo Siqueira 
> Cc: Haneen Mohammed 
> Cc: Daniel Vetter 
> ---
>  drivers/gpu/drm/vkms/vkms_crc.c  | 4 ++--
>  drivers/gpu/drm/vkms/vkms_crtc.c | 6 +++---
>  drivers/gpu/drm/vkms/vkms_drv.h  | 5 +++--
>  3 files changed, 8 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/gpu/drm/vkms/vkms_crc.c b/drivers/gpu/drm/vkms/vkms_crc.c
> index 883e36fe7b6e..96806cd35ad4 100644
> --- a/drivers/gpu/drm/vkms/vkms_crc.c
> +++ b/drivers/gpu/drm/vkms/vkms_crc.c
> @@ -168,14 +168,14 @@ void vkms_crc_work_handle(struct work_struct *work)
> u64 frame_start, frame_end;
> bool crc_pending;
>
> -   spin_lock_irq(&out->state_lock);
> +   spin_lock_irq(&out->crc_lock);
> frame_start = crtc_state->frame_start;
> frame_end = crtc_state->frame_end;
> crc_pending = crtc_state->crc_pending;
> crtc_state->frame_start = 0;
> crtc_state->frame_end = 0;
> crtc_state->crc_pending = false;
> -   spin_unlock_irq(&out->state_lock);
> +   spin_unlock_irq(&out->crc_lock);
>
> /*
>  * We raced with the vblank hrtimer and previous work already computed
> diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
> index c727d8486e97..55b16d545fe7 100644
> --- a/drivers/gpu/drm/vkms/vkms_crtc.c
> +++ b/drivers/gpu/drm/vkms/vkms_crtc.c
> @@ -29,7 +29,7 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
> /* update frame_start only if a queued vkms_crc_work_handle()
>  * has read the data
>  */
> -   spin_lock(&output->state_lock);
> +   spin_lock(&output->crc_lock);
> if (!state->crc_pending)
> state->frame_start = frame;
> else
> @@ -37,7 +37,7 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
>  state->frame_start, frame);
> state->frame_end = frame;
> state->crc_pending = true;
> -   spin_unlock(&output->state_lock);
> +   spin_unlock(&output->crc_lock);
>
> ret = queue_work(output->crc_workq, &state->crc_work);
> if (!ret)
> @@ -224,7 +224,7 @@ int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
> drm_crtc_helper_add(crtc, &vkms_crtc_helper_funcs);
>
> spin_lock_init(&vkms_out->lock);
> -   spin_lock_init(&vkms_out->state_lock);
> +   spin_lock_init(&vkms_out->crc_lock);
>
> vkms_out->crc_workq = alloc_ordered_workqueue("vkms_crc_workq", 0);
> if (!vkms_out->crc_workq)
> diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
> index 3c7e06b19efd..43d3f98289fe 100644
> --- a/drivers/gpu/drm/vkms/vkms_drv.h
> +++ b/drivers/gpu/drm/vkms/vkms_drv.h
> @@ -57,6 +57,7 @@ struct vkms_crtc_state {
> struct drm_crtc_state base;
> struct work_struct crc_work;
>
> +   /* below three are protected by vkms_output.crc_lock */
> bool crc_pending;
> u64 frame_start;
> u64 frame_end;
> @@ -74,8 +75,8 @@ struct vkms_output {
> struct workqueue_struct *crc_workq;
> /* protects concurrent access to crc_data */
> spinlock_t lock;

It's not really specific to this patch, but after reviewing it, I was
thinking about the use of the 'lock' field in struct vkms_output.
Do we really need it? It looks like crc_lock has just replaced it.

Additionally, looking a little deeper into the lock field in struct
vkms_output, I was also asking myself: which critical section do we want
to protect with this lock? After inspecting its uses, I noticed two
different places that take it:

1. In the function vkms_vblank_simulate()

Note that we cover a vast area with ‘output->lock’. IMHO we only need
to protect the state variable (the line “state = output->crc_state;”),
and we could use crc_lock for this case as well; a rough sketch follows
after point 2 below. Does that make sense?

2. In the function vkms_crtc_atomic_begin()

We also take output->lock in vkms_crtc_atomic_begin() and only
release it in vkms_crtc_atomic_flush(). Do we still need it?
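
To illustrate the idea in point 1, a minimal sketch of narrowing the
critical section to just the crc_state read (hypothetical code, not the
actual driver; the helper name vkms_get_crc_state is made up here):

/* Hypothetical sketch only: serialize just the read of output->crc_state
 * with crc_lock instead of holding output->lock across the whole vblank
 * handler.
 */
static struct vkms_crtc_state *vkms_get_crc_state(struct vkms_output *output)
{
	struct vkms_crtc_state *state;

	spin_lock(&output->crc_lock);
	state = output->crc_state;
	spin_unlock(&output->crc_lock);

	return state;
}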

> -   /* protects concurrent access to crtc_state */
> -   spinlock_t state_lock;
> +
> +   spinlock_t crc_lock;
>  };

Maybe add a kernel-doc comment on top of crc_lock?
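
Something along these lines, perhaps (only a suggested wording, mirroring
the comment this patch adds in vkms_crtc_state):

struct vkms_output {
	/* other members unchanged */
	struct workqueue_struct *crc_workq;
	/* protects concurrent access to crc_data */
	spinlock_t lock;
	/**
	 * @crc_lock: protects the crc_pending, frame_start and frame_end
	 * fields of struct vkms_crtc_state, handed over from the vblank
	 * hrtimer to the CRC worker.
	 */
	spinlock_t crc_lock;
};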

>
>  struct vkms_device {
> --
> 2.20.1
>


-- 

Rodrigo Siqueira
https://siqueira.tech

[PATCH 03/10] drm/vkms: Rename vkms_output.state_lock to crc_lock

2019-06-06 Thread Daniel Vetter
Plus add a comment about what it actually protects. It's very little.

Signed-off-by: Daniel Vetter 
Cc: Rodrigo Siqueira 
Cc: Haneen Mohammed 
Cc: Daniel Vetter 
---
 drivers/gpu/drm/vkms/vkms_crc.c  | 4 ++--
 drivers/gpu/drm/vkms/vkms_crtc.c | 6 +++---
 drivers/gpu/drm/vkms/vkms_drv.h  | 5 +++--
 3 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/vkms/vkms_crc.c b/drivers/gpu/drm/vkms/vkms_crc.c
index 883e36fe7b6e..96806cd35ad4 100644
--- a/drivers/gpu/drm/vkms/vkms_crc.c
+++ b/drivers/gpu/drm/vkms/vkms_crc.c
@@ -168,14 +168,14 @@ void vkms_crc_work_handle(struct work_struct *work)
u64 frame_start, frame_end;
bool crc_pending;
 
-   spin_lock_irq(&out->state_lock);
+   spin_lock_irq(&out->crc_lock);
frame_start = crtc_state->frame_start;
frame_end = crtc_state->frame_end;
crc_pending = crtc_state->crc_pending;
crtc_state->frame_start = 0;
crtc_state->frame_end = 0;
crtc_state->crc_pending = false;
-   spin_unlock_irq(&out->state_lock);
+   spin_unlock_irq(&out->crc_lock);
 
/*
 * We raced with the vblank hrtimer and previous work already computed
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index c727d8486e97..55b16d545fe7 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -29,7 +29,7 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
/* update frame_start only if a queued vkms_crc_work_handle()
 * has read the data
 */
-   spin_lock(&output->state_lock);
+   spin_lock(&output->crc_lock);
if (!state->crc_pending)
state->frame_start = frame;
else
@@ -37,7 +37,7 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
 state->frame_start, frame);
state->frame_end = frame;
state->crc_pending = true;
-   spin_unlock(&output->state_lock);
+   spin_unlock(&output->crc_lock);
 
ret = queue_work(output->crc_workq, &state->crc_work);
if (!ret)
@@ -224,7 +224,7 @@ int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
drm_crtc_helper_add(crtc, &vkms_crtc_helper_funcs);
 
spin_lock_init(&vkms_out->lock);
-   spin_lock_init(&vkms_out->state_lock);
+   spin_lock_init(&vkms_out->crc_lock);
 
vkms_out->crc_workq = alloc_ordered_workqueue("vkms_crc_workq", 0);
if (!vkms_out->crc_workq)
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 3c7e06b19efd..43d3f98289fe 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -57,6 +57,7 @@ struct vkms_crtc_state {
struct drm_crtc_state base;
struct work_struct crc_work;
 
+   /* below three are protected by vkms_output.crc_lock */
bool crc_pending;
u64 frame_start;
u64 frame_end;
@@ -74,8 +75,8 @@ struct vkms_output {
struct workqueue_struct *crc_workq;
/* protects concurrent access to crc_data */
spinlock_t lock;
-   /* protects concurrent access to crtc_state */
-   spinlock_t state_lock;
+
+   spinlock_t crc_lock;
 };
 
 struct vkms_device {
-- 
2.20.1
