Instead of reserving a fixed number of GTT pages for VCE 1.0 at GTT
manager init, this commit uses amdgpu_gtt_mgr_alloc_entries() to
allocate the pages when initializing VCE 1.0.

While at it, remove the "does the VCPU BO already have a 32-bit
address" check, as suggested by Timur.

This decouples VCE init from GTT init.
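
For reference, the resulting allocate/map/release flow is roughly the
sketch below (simplified; the 32-bit address checks and error handling
of vce_v1_0_ensure_vcpu_bo_32bit_addr are left out, and num_pages, pa
and flags stand for the values computed in that function):

    /* in vce_v1_0_ensure_vcpu_bo_32bit_addr: reserve GART pages low in GTT */
    r = amdgpu_gtt_mgr_alloc_entries(&adev->mman.gtt_mgr,
                                     &adev->vce.node, num_pages,
                                     DRM_MM_INSERT_LOW);
    if (r)
            return r;

    /* map the VCPU BO through the allocated pages and record its address */
    amdgpu_gart_map_vram_range(adev, pa, adev->vce.node.start,
                               num_pages, flags, adev->gart.ptr);
    adev->vce.gpu_addr = adev->gmc.gart_start +
                         adev->vce.node.start * AMDGPU_GPU_PAGE_SIZE;

    /* in vce_v1_0_sw_fini: release the GART pages again */
    amdgpu_gtt_mgr_free_entries(&adev->mman.gtt_mgr, &adev->vce.node);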

Signed-off-by: Pierre-Eric Pelloux-Prayer <[email protected]>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c |  1 -
 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c     | 18 ------------
 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h     |  2 +-
 drivers/gpu/drm/amd/amdgpu/vce_v1_0.c       | 32 +++++++++++----------
 4 files changed, 18 insertions(+), 35 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index dd9b845d5783..f2e89fb4b666 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -332,7 +332,6 @@ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size)
        ttm_resource_manager_init(man, &adev->mman.bdev, gtt_size);
 
        start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
-       start += amdgpu_vce_required_gart_pages(adev);
        size = (adev->gmc.gart_size >> PAGE_SHIFT) - start;
        drm_mm_init(&mgr->mm, start, size);
        spin_lock_init(&mgr->lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index a7d8f1ce6ac2..eb4a15db2ef2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -450,24 +450,6 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
        }
 }
 
-/**
- * amdgpu_vce_required_gart_pages() - gets number of GART pages required by VCE
- *
- * @adev: amdgpu_device pointer
- *
- * Returns how many GART pages we need before GTT for the VCE IP block.
- * For VCE1, see vce_v1_0_ensure_vcpu_bo_32bit_addr for details.
- * For VCE2+, this is not needed so return zero.
- */
-u32 amdgpu_vce_required_gart_pages(struct amdgpu_device *adev)
-{
-       /* VCE IP block not added yet, so can't use amdgpu_ip_version */
-       if (adev->family == AMDGPU_FAMILY_SI)
-               return 512;
-
-       return 0;
-}
-
 /**
  * amdgpu_vce_get_create_msg - generate a VCE create msg
  *
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index 1c3464ce5037..a59d87e09004 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -52,6 +52,7 @@ struct amdgpu_vce {
        uint32_t                srbm_soft_reset;
        unsigned                num_rings;
        uint32_t                keyselect;
+       struct drm_mm_node      node;
 };
 
 int amdgpu_vce_early_init(struct amdgpu_device *adev);
@@ -61,7 +62,6 @@ int amdgpu_vce_entity_init(struct amdgpu_device *adev, struct amdgpu_ring *ring)
 int amdgpu_vce_suspend(struct amdgpu_device *adev);
 int amdgpu_vce_resume(struct amdgpu_device *adev);
 void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
-u32 amdgpu_vce_required_gart_pages(struct amdgpu_device *adev);
 int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
                             struct amdgpu_ib *ib);
 int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p,
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v1_0.c
index 9ae424618556..bca34a30dbf3 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v1_0.c
@@ -47,11 +47,6 @@
 #define VCE_V1_0_DATA_SIZE     (7808 * (AMDGPU_MAX_VCE_HANDLES + 1))
 #define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK  0x02
 
-#define VCE_V1_0_GART_PAGE_START \
-       (AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS)
-#define VCE_V1_0_GART_ADDR_START \
-       (VCE_V1_0_GART_PAGE_START * AMDGPU_GPU_PAGE_SIZE)
-
 static void vce_v1_0_set_ring_funcs(struct amdgpu_device *adev);
 static void vce_v1_0_set_irq_funcs(struct amdgpu_device *adev);
 
@@ -541,21 +536,24 @@ static int vce_v1_0_ensure_vcpu_bo_32bit_addr(struct amdgpu_device *adev)
        u64 num_pages = ALIGN(bo_size, AMDGPU_GPU_PAGE_SIZE) / AMDGPU_GPU_PAGE_SIZE;
        u64 pa = amdgpu_gmc_vram_pa(adev, adev->vce.vcpu_bo);
        u64 flags = AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE | AMDGPU_PTE_VALID;
+       u64 vce_gart_start;
+       int r;
 
-       /*
-        * Check if the VCPU BO already has a 32-bit address.
-        * Eg. if MC is configured to put VRAM in the low address range.
-        */
-       if (gpu_addr <= max_vcpu_bo_addr)
-               return 0;
+       r = amdgpu_gtt_mgr_alloc_entries(&adev->mman.gtt_mgr,
+                                        &adev->vce.node, num_pages,
+                                        DRM_MM_INSERT_LOW);
+       if (r)
+               return r;
+
+       vce_gart_start = adev->vce.node.start * AMDGPU_GPU_PAGE_SIZE;
 
        /* Check if we can map the VCPU BO in GART to a 32-bit address. */
-       if (adev->gmc.gart_start + VCE_V1_0_GART_ADDR_START > max_vcpu_bo_addr)
+       if (adev->gmc.gart_start + vce_gart_start > max_vcpu_bo_addr)
                return -EINVAL;
 
-       amdgpu_gart_map_vram_range(adev, pa, VCE_V1_0_GART_PAGE_START,
+       amdgpu_gart_map_vram_range(adev, pa, adev->vce.node.start,
                                   num_pages, flags, adev->gart.ptr);
-       adev->vce.gpu_addr = adev->gmc.gart_start + VCE_V1_0_GART_ADDR_START;
+       adev->vce.gpu_addr = adev->gmc.gart_start + vce_gart_start;
        if (adev->vce.gpu_addr > max_vcpu_bo_addr)
                return -EINVAL;
 
@@ -610,7 +608,11 @@ static int vce_v1_0_sw_fini(struct amdgpu_ip_block *ip_block)
        if (r)
                return r;
 
-       return amdgpu_vce_sw_fini(adev);
+       r = amdgpu_vce_sw_fini(adev);
+
+       amdgpu_gtt_mgr_free_entries(&adev->mman.gtt_mgr, &adev->vce.node);
+
+       return r;
 }
 
 /**
-- 
2.43.0
