Store the VM flush/emit state (vm_flush_needed, cleaner_shader_needed, pasid_mapping_needed) in the job rather than as local variables. No intended functional change. Needed to help separate the VM flush and emit logic.
Signed-off-by: Alex Deucher <[email protected]> --- drivers/gpu/drm/amd/amdgpu/amdgpu_job.h | 3 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 54 ++++++++++++------------- 2 files changed, 30 insertions(+), 27 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h index 21e1941ce356a..d53c13322a648 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h @@ -105,6 +105,9 @@ struct amdgpu_job { bool need_pipe_sync; u32 pipe_sync_seq; bool need_ctx_switch; + bool vm_flush_needed; + bool cleaner_shader_needed; + bool pasid_mapping_needed; uint32_t num_ibs; struct amdgpu_ib ibs[]; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index cea359f2084ca..e480a65dbdb1c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -774,42 +774,40 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job) unsigned vmhub = ring->vm_hub; struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; struct amdgpu_vmid *id = &id_mgr->ids[job->vmid]; - bool spm_update_needed = job->spm_update_needed; - bool gds_switch_needed = ring->funcs->emit_gds_switch && - job->gds_switch_needed; - bool vm_flush_needed = job->vm_needs_flush; - bool cleaner_shader_needed = false; - bool pasid_mapping_needed = false; struct dma_fence *fence = NULL; unsigned int patch; int r; + job->gds_switch_needed = ring->funcs->emit_gds_switch && + job->gds_switch_needed; + job->vm_flush_needed = job->vm_needs_flush; + if (amdgpu_vmid_had_gpu_reset(adev, id)) { - gds_switch_needed = true; - vm_flush_needed = true; - pasid_mapping_needed = true; - spm_update_needed = true; + job->gds_switch_needed = true; + job->vm_flush_needed = true; + job->pasid_mapping_needed = true; + job->spm_update_needed = true; } mutex_lock(&id_mgr->lock); if (id->pasid != job->pasid || !id->pasid_mapping || 
!dma_fence_is_signaled(id->pasid_mapping)) - pasid_mapping_needed = true; + job->pasid_mapping_needed = true; mutex_unlock(&id_mgr->lock); - gds_switch_needed &= !!ring->funcs->emit_gds_switch; - vm_flush_needed &= !!ring->funcs->emit_vm_flush && - job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET; - pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping && + job->gds_switch_needed &= !!ring->funcs->emit_gds_switch; + job->vm_flush_needed &= !!ring->funcs->emit_vm_flush && + job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET; + job->pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping && ring->funcs->emit_wreg; - cleaner_shader_needed = job->run_cleaner_shader && + job->cleaner_shader_needed = job->run_cleaner_shader && adev->gfx.enable_cleaner_shader && ring->funcs->emit_cleaner_shader && job->base.s_fence && &job->base.s_fence->scheduled == isolation->spearhead; - if (!vm_flush_needed && !gds_switch_needed && !job->need_pipe_sync && - !cleaner_shader_needed) + if (!job->vm_flush_needed && !job->gds_switch_needed && !job->need_pipe_sync && + !job->cleaner_shader_needed) return 0; amdgpu_ring_ib_begin(ring); @@ -820,29 +818,31 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job) if (job->need_pipe_sync) amdgpu_ring_emit_pipeline_sync(ring, job->pipe_sync_seq); - if (cleaner_shader_needed) + if (job->cleaner_shader_needed) ring->funcs->emit_cleaner_shader(ring); - if (vm_flush_needed) { + if (job->vm_flush_needed) { trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr); amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr); } - if (pasid_mapping_needed) + if (job->pasid_mapping_needed) amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid); - if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid) + if (job->spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid) adev->gfx.rlc.funcs->update_spm_vmid(adev, ring->xcc_id, ring, job->vmid); if (ring->funcs->emit_gds_switch && - gds_switch_needed) { + 
job->gds_switch_needed) { amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base, job->gds_size, job->gws_base, job->gws_size, job->oa_base, job->oa_size); } - if (vm_flush_needed || pasid_mapping_needed || cleaner_shader_needed) { + if (job->vm_flush_needed || + job->pasid_mapping_needed || + job->cleaner_shader_needed) { r = amdgpu_fence_emit(ring, job->hw_vm_fence, 0); if (r) return r; @@ -851,7 +851,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job) dma_fence_get(fence); } - if (vm_flush_needed) { + if (job->vm_flush_needed) { mutex_lock(&id_mgr->lock); dma_fence_put(id->last_flush); id->last_flush = dma_fence_get(fence); @@ -860,7 +860,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job) mutex_unlock(&id_mgr->lock); } - if (pasid_mapping_needed) { + if (job->pasid_mapping_needed) { mutex_lock(&id_mgr->lock); id->pasid = job->pasid; dma_fence_put(id->pasid_mapping); @@ -872,7 +872,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job) * Make sure that all other submissions wait for the cleaner shader to * finish before we push them to the HW. */ - if (cleaner_shader_needed) { + if (job->cleaner_shader_needed) { trace_amdgpu_cleaner_shader(ring, fence); mutex_lock(&adev->enforce_isolation_mutex); dma_fence_put(isolation->spearhead); -- 2.52.0
