Re: [Intel-gfx] [PATCH 3/3] drm/i915: Flush extra hard after writing relocations through the GTT

2019-08-01 Thread Kumar Valsan, Prathap
On Tue, Jul 30, 2019 at 12:21:51PM +0100, Chris Wilson wrote:
> Recently discovered in commit bdae33b8b82b ("drm/i915: Use maximum write
> flush for pwrite_gtt") was that we needed to do our full write barrier
> before changing the GGTT PTE to ensure that our indirect writes through
> the GTT landed before the PTE changed (and the writes end up in a
> different page). That also applies to our GGTT relocation path.
> 
> Signed-off-by: Chris Wilson 
> Cc: sta...@vger.kernel.org

Reviewed-by: Prathap Kumar Valsan 
> ---
>  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 9 +
>  1 file changed, 5 insertions(+), 4 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 
> b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
> index 8a2047c4e7c3..01901dad33f7 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
> @@ -1019,11 +1019,12 @@ static void reloc_cache_reset(struct reloc_cache 
> *cache)
>   kunmap_atomic(vaddr);
>   i915_gem_object_finish_access((struct drm_i915_gem_object 
> *)cache->node.mm);
>   } else {
> - wmb();
> + struct i915_ggtt *ggtt = cache_to_ggtt(cache);
> +
> + intel_gt_flush_ggtt_writes(ggtt->vm.gt);
>   io_mapping_unmap_atomic((void __iomem *)vaddr);
> - if (cache->node.allocated) {
> - struct i915_ggtt *ggtt = cache_to_ggtt(cache);
>  
> + if (cache->node.allocated) {
> >   ggtt->vm.clear_range(&ggtt->vm,
>cache->node.start,
>cache->node.size);
> @@ -1078,6 +1079,7 @@ static void *reloc_iomap(struct drm_i915_gem_object 
> *obj,
>   void *vaddr;
>  
>   if (cache->vaddr) {
> + intel_gt_flush_ggtt_writes(ggtt->vm.gt);
>   io_mapping_unmap_atomic((void __force __iomem *) 
> unmask_page(cache->vaddr));
>   } else {
>   struct i915_vma *vma;
> @@ -1119,7 +1121,6 @@ static void *reloc_iomap(struct drm_i915_gem_object 
> *obj,
>  
>   offset = cache->node.start;
>   if (cache->node.allocated) {
> - wmb();
> >   ggtt->vm.insert_page(&ggtt->vm,
>i915_gem_object_get_dma_address(obj, page),
>offset, I915_CACHE_NONE, 0);
> -- 
> 2.22.0
> 
> ___
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gfx
___
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Re: [Intel-gfx] [PATCH 3/3] drm/i915: Flush extra hard after writing relocations through the GTT

2019-08-01 Thread Chris Wilson
Quoting Kumar Valsan, Prathap (2019-08-01 21:33:44)
> On Tue, Jul 30, 2019 at 12:21:51PM +0100, Chris Wilson wrote:
> > Recently discovered in commit bdae33b8b82b ("drm/i915: Use maximum write
> > flush for pwrite_gtt") was that we needed to do our full write barrier
> > before changing the GGTT PTE to ensure that our indirect writes through
> > the GTT landed before the PTE changed (and the writes end up in a
> > different page). That also applies to our GGTT relocation path.
> 
> Chris,
> 
> As I understand, changing the GGTT PTE is also an indirect write. If so,
> shouldn't a wmb()
> be good enough?

Ha! If only that was true.
-Chris
___
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Re: [Intel-gfx] [PATCH 3/3] drm/i915: Flush extra hard after writing relocations through the GTT

2019-08-01 Thread Kumar Valsan, Prathap
On Tue, Jul 30, 2019 at 12:21:51PM +0100, Chris Wilson wrote:
> Recently discovered in commit bdae33b8b82b ("drm/i915: Use maximum write
> flush for pwrite_gtt") was that we needed to do our full write barrier
> before changing the GGTT PTE to ensure that our indirect writes through
> the GTT landed before the PTE changed (and the writes end up in a
> different page). That also applies to our GGTT relocation path.

Chris,

As I understand, changing the GGTT PTE is also an indirect write. If so,
shouldn't a wmb()
be good enough?

Thanks,
Prathap
> 
> Signed-off-by: Chris Wilson 
> Cc: sta...@vger.kernel.org
> ---
>  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 9 +
>  1 file changed, 5 insertions(+), 4 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 
> b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
> index 8a2047c4e7c3..01901dad33f7 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
> @@ -1019,11 +1019,12 @@ static void reloc_cache_reset(struct reloc_cache 
> *cache)
>   kunmap_atomic(vaddr);
>   i915_gem_object_finish_access((struct drm_i915_gem_object 
> *)cache->node.mm);
>   } else {
> - wmb();
> + struct i915_ggtt *ggtt = cache_to_ggtt(cache);
> +
> + intel_gt_flush_ggtt_writes(ggtt->vm.gt);
>   io_mapping_unmap_atomic((void __iomem *)vaddr);
> - if (cache->node.allocated) {
> - struct i915_ggtt *ggtt = cache_to_ggtt(cache);
>  
> + if (cache->node.allocated) {
> >   ggtt->vm.clear_range(&ggtt->vm,
>cache->node.start,
>cache->node.size);
> @@ -1078,6 +1079,7 @@ static void *reloc_iomap(struct drm_i915_gem_object 
> *obj,
>   void *vaddr;
>  
>   if (cache->vaddr) {
> + intel_gt_flush_ggtt_writes(ggtt->vm.gt);
>   io_mapping_unmap_atomic((void __force __iomem *) 
> unmask_page(cache->vaddr));
>   } else {
>   struct i915_vma *vma;
> @@ -1119,7 +1121,6 @@ static void *reloc_iomap(struct drm_i915_gem_object 
> *obj,
>  
>   offset = cache->node.start;
>   if (cache->node.allocated) {
> - wmb();
> >   ggtt->vm.insert_page(&ggtt->vm,
>i915_gem_object_get_dma_address(obj, page),
>offset, I915_CACHE_NONE, 0);
> -- 
> 2.22.0
> 
> ___
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gfx
___
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

[Intel-gfx] [PATCH 3/3] drm/i915: Flush extra hard after writing relocations through the GTT

2019-07-30 Thread Chris Wilson
Recently discovered in commit bdae33b8b82b ("drm/i915: Use maximum write
flush for pwrite_gtt") was that we needed to do our full write barrier
before changing the GGTT PTE to ensure that our indirect writes through
the GTT landed before the PTE changed (and the writes end up in a
different page). That also applies to our GGTT relocation path.

Signed-off-by: Chris Wilson 
Cc: sta...@vger.kernel.org
---
 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 9 +
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 
b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 8a2047c4e7c3..01901dad33f7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1019,11 +1019,12 @@ static void reloc_cache_reset(struct reloc_cache *cache)
kunmap_atomic(vaddr);
i915_gem_object_finish_access((struct drm_i915_gem_object 
*)cache->node.mm);
} else {
-   wmb();
+   struct i915_ggtt *ggtt = cache_to_ggtt(cache);
+
+   intel_gt_flush_ggtt_writes(ggtt->vm.gt);
io_mapping_unmap_atomic((void __iomem *)vaddr);
-   if (cache->node.allocated) {
-   struct i915_ggtt *ggtt = cache_to_ggtt(cache);
 
+   if (cache->node.allocated) {
ggtt->vm.clear_range(&ggtt->vm,
 cache->node.start,
 cache->node.size);
@@ -1078,6 +1079,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
void *vaddr;
 
if (cache->vaddr) {
+   intel_gt_flush_ggtt_writes(ggtt->vm.gt);
io_mapping_unmap_atomic((void __force __iomem *) 
unmask_page(cache->vaddr));
} else {
struct i915_vma *vma;
@@ -1119,7 +1121,6 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
 
offset = cache->node.start;
if (cache->node.allocated) {
-   wmb();
ggtt->vm.insert_page(&ggtt->vm,
 i915_gem_object_get_dma_address(obj, page),
 offset, I915_CACHE_NONE, 0);
-- 
2.22.0

___
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx