Lower IFETCH latency on misses. Shader code is written once and read many times,
so GTT doesn't make much sense anyway.

If it turns out to fragment the CPU-visible VRAM too much, we can upload with SDMA instead.
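
For reference, a rough sketch of what that SDMA fallback could look like. It
reuses the winsys buffer_create/buffer_map calls touched by this patch;
radv_sdma_copy_buffer() is a made-up placeholder for whatever copy submission
path we would actually use, so treat this as an assumption, not a proposal:

static struct radeon_winsys_bo *
upload_shader_via_sdma(struct radv_device *device,
                       const void *code, unsigned code_size)
{
   struct radeon_winsys *ws = device->ws;

   /* Final home for the shader code: VRAM, no CPU access required. */
   struct radeon_winsys_bo *bo =
      ws->buffer_create(ws, code_size, 256, RADEON_DOMAIN_VRAM, 0);

   /* Temporary staging buffer the CPU can write directly. */
   struct radeon_winsys_bo *staging =
      ws->buffer_create(ws, code_size, 256, RADEON_DOMAIN_GTT,
                        RADEON_FLAG_CPU_ACCESS);

   void *ptr = ws->buffer_map(staging);
   memcpy(ptr, code, code_size);
   ws->buffer_unmap(staging);

   /* Hypothetical helper: submit an SDMA copy from staging to the VRAM BO
    * and make sure it completes before the shader can execute. */
   radv_sdma_copy_buffer(device, staging, bo, code_size);

   ws->buffer_destroy(staging);
   return bo;
}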

Signed-off-by: Bas Nieuwenhuizen <ba...@google.com>
---
 src/amd/vulkan/radv_pipeline.c       | 2 +-
 src/amd/vulkan/radv_pipeline_cache.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/amd/vulkan/radv_pipeline.c b/src/amd/vulkan/radv_pipeline.c
index 723c32c4aaf..ce228df04a8 100644
--- a/src/amd/vulkan/radv_pipeline.c
+++ b/src/amd/vulkan/radv_pipeline.c
@@ -410,7 +410,7 @@ static void radv_fill_shader_variant(struct radv_device *device,
                S_00B848_FLOAT_MODE(variant->config.float_mode);
 
        variant->bo = device->ws->buffer_create(device->ws, binary->code_size, 256,
-                                               RADEON_DOMAIN_GTT, RADEON_FLAG_CPU_ACCESS);
+                                               RADEON_DOMAIN_VRAM, RADEON_FLAG_CPU_ACCESS);
 
        void *ptr = device->ws->buffer_map(variant->bo);
        memcpy(ptr, binary->code, binary->code_size);
diff --git a/src/amd/vulkan/radv_pipeline_cache.c b/src/amd/vulkan/radv_pipeline_cache.c
index 7fc4e781ac3..703400b5573 100644
--- a/src/amd/vulkan/radv_pipeline_cache.c
+++ b/src/amd/vulkan/radv_pipeline_cache.c
@@ -171,7 +171,7 @@ radv_create_shader_variant_from_pipeline_cache(struct radv_device *device,
                variant->ref_count = 1;
 
                variant->bo = device->ws->buffer_create(device->ws, entry->code_size, 256,
-                                               RADEON_DOMAIN_GTT, RADEON_FLAG_CPU_ACCESS);
+                                               RADEON_DOMAIN_VRAM, RADEON_FLAG_CPU_ACCESS);
 
                void *ptr = device->ws->buffer_map(variant->bo);
                memcpy(ptr, entry->code, entry->code_size);
-- 
2.11.1
