Currently this works well for 32-bit addresses, but the upper 32 bits of the address end up incorrect when a buffer is allocated in the 48-bit address space (above 4GB).
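To make the failure mode concrete, here is a rough sketch of the pattern this series converts (not the exact driver code; "bo" stands in for whichever buffer object is being programmed). The old sequence attaches a relocation only to the low DWord and hard-codes the upper DWord to 0, so the upper 32 bits are never fixed up; OUT_RELOC64/OUT_BCS_RELOC64 emits both DWords of the address under one relocation:

    /* Old pattern: 32-bit relocation plus a literal upper DWord.
     * The 0 written for the upper half is never relocated, so a buffer
     * placed above 4GB gets a wrong upper 32-bit address.
     */
    OUT_BCS_RELOC(batch, bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
    OUT_BCS_BATCH(batch, 0);

    /* New pattern: a single OUT_BCS_RELOC64 writes the low and high DWords
     * and lets the relocation cover the full 48-bit address.
     */
    OUT_BCS_RELOC64(batch, bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);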
Signed-off-by: Zhao Yakui <yakui.z...@intel.com>
---
 src/gen75_vpp_vebox.c      | 39 +++++++-------------
 src/gen8_mfc.c             | 73 ++++++++++++++++---------------------
 src/gen8_mfd.c             | 89 +++++++++++++++++++++++-----------------------
 src/gen8_post_processing.c |  3 +-
 src/gen8_vme.c             |  9 ++---
 src/gen9_mfc_hevc.c        | 13 +++----
 src/gen9_mfd.c             |  4 +--
 src/gen9_vme.c             | 12 +++----
 8 files changed, 101 insertions(+), 141 deletions(-)

diff --git a/src/gen75_vpp_vebox.c b/src/gen75_vpp_vebox.c
index eee8e76..f6f541a 100644
--- a/src/gen75_vpp_vebox.c
+++ b/src/gen75_vpp_vebox.c
@@ -1882,29 +1882,22 @@ void bdw_veb_state_command(VADriverContextP ctx, struct intel_vebox_context *pro
                   0 << 1 |    // ColorGamutCompressionEnable
                   0 ) ;       // ColorGamutExpansionEnable.
-    OUT_RELOC(batch,
+    OUT_RELOC64(batch,
               proc_ctx->dndi_state_table.bo,
               I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
-    OUT_VEB_BATCH(batch, 0);
-
-    OUT_RELOC(batch,
+    OUT_RELOC64(batch,
               proc_ctx->iecp_state_table.bo,
               I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
-    OUT_VEB_BATCH(batch, 0);
-
-    OUT_RELOC(batch,
+    OUT_RELOC64(batch,
               proc_ctx->gamut_state_table.bo,
               I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
-    OUT_VEB_BATCH(batch, 0);
-
-    OUT_RELOC(batch,
+    OUT_RELOC64(batch,
               proc_ctx->vertex_state_table.bo,
               I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
-    OUT_VEB_BATCH(batch, 0);
     OUT_VEB_BATCH(batch, 0);/*caputre pipe state pointer*/
     OUT_VEB_BATCH(batch, 0);
@@ -1927,45 +1920,37 @@ void bdw_veb_dndi_iecp_command(VADriverContextP ctx, struct intel_vebox_context
     OUT_VEB_BATCH(batch, VEB_DNDI_IECP_STATE | (0x14 - 2));//DWord 0
     OUT_VEB_BATCH(batch, (width64 - 1));
-    OUT_RELOC(batch,
+    OUT_RELOC64(batch,
               proc_ctx->frame_store[FRAME_IN_CURRENT].obj_surface->bo,
               I915_GEM_DOMAIN_RENDER, 0, frame_ctrl_bits);//DWord 2
-    OUT_VEB_BATCH(batch,0);//DWord 3
-    OUT_RELOC(batch,
+    OUT_RELOC64(batch,
               proc_ctx->frame_store[FRAME_IN_PREVIOUS].obj_surface->bo,
               I915_GEM_DOMAIN_RENDER, 0, frame_ctrl_bits);//DWord 4
-    OUT_VEB_BATCH(batch,0);//DWord 5
-    OUT_RELOC(batch,
+    OUT_RELOC64(batch,
               proc_ctx->frame_store[FRAME_IN_STMM].obj_surface->bo,
               I915_GEM_DOMAIN_RENDER, 0, frame_ctrl_bits);//DWord 6
-    OUT_VEB_BATCH(batch,0);//DWord 7
-    OUT_RELOC(batch,
+    OUT_RELOC64(batch,
               proc_ctx->frame_store[FRAME_OUT_STMM].obj_surface->bo,
               I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);//DWord 8
-    OUT_VEB_BATCH(batch,0);//DWord 9
-    OUT_RELOC(batch,
+    OUT_RELOC64(batch,
               proc_ctx->frame_store[FRAME_OUT_CURRENT_DN].obj_surface->bo,
               I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);//DWord 10
-    OUT_VEB_BATCH(batch,0);//DWord 11
-    OUT_RELOC(batch,
+    OUT_RELOC64(batch,
               proc_ctx->frame_store[FRAME_OUT_CURRENT].obj_surface->bo,
               I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);//DWord 12
-    OUT_VEB_BATCH(batch,0);//DWord 13
-    OUT_RELOC(batch,
+    OUT_RELOC64(batch,
               proc_ctx->frame_store[FRAME_OUT_PREVIOUS].obj_surface->bo,
               I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);//DWord 14
-    OUT_VEB_BATCH(batch,0);//DWord 15
-    OUT_RELOC(batch,
+    OUT_RELOC64(batch,
               proc_ctx->frame_store[FRAME_OUT_STATISTIC].obj_surface->bo,
               I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, frame_ctrl_bits);//DWord 16
-    OUT_VEB_BATCH(batch,0);//DWord 17
     OUT_VEB_BATCH(batch,0);//DWord 18
     OUT_VEB_BATCH(batch,0);//DWord 19
diff --git a/src/gen8_mfc.c b/src/gen8_mfc.c
index 9b50f9a..bac1e2f 100644
--- a/src/gen8_mfc.c
+++ b/src/gen8_mfc.c
@@ -249,11 +249,10 @@ gen8_mfc_ind_obj_base_addr_state(VADriverContextP ctx,
     /* the DW4-5 is the MFX upper bound */
     if (encoder_context->codec == CODEC_VP8) {
-        OUT_BCS_RELOC(batch,
+        OUT_BCS_RELOC64(batch,
                       mfc_context->mfc_indirect_pak_bse_object.bo,
                       I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                       mfc_context->mfc_indirect_pak_bse_object.end_offset);
-        OUT_BCS_BATCH(batch, 0);
     } else {
         OUT_BCS_BATCH(batch, 0);
         OUT_BCS_BATCH(batch, 0);
@@ -262,11 +261,9 @@ gen8_mfc_ind_obj_base_addr_state(VADriverContextP ctx,
     if(encoder_context->codec != CODEC_JPEG) {
         vme_size = vme_context->vme_output.size_block * vme_context->vme_output.num_blocks;
         /* the DW6-10 is for MFX Indirect MV Object Base Address */
-        OUT_BCS_RELOC(batch, vme_context->vme_output.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
-        OUT_BCS_BATCH(batch, 0);
+        OUT_BCS_RELOC64(batch, vme_context->vme_output.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
         OUT_BCS_BATCH(batch, i965->intel.mocs_state);
-        OUT_BCS_RELOC(batch, vme_context->vme_output.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, vme_size);
-        OUT_BCS_BATCH(batch, 0);
+        OUT_BCS_RELOC64(batch, vme_context->vme_output.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, vme_size);
     } else {
         /* No VME for JPEG */
         OUT_BCS_BATCH(batch, 0);
@@ -292,18 +289,16 @@ gen8_mfc_ind_obj_base_addr_state(VADriverContextP ctx,
     /* the DW21-25 is for MFC Indirect PAK-BSE Object Base Address for Encoder*/
     bse_offset = (encoder_context->codec == CODEC_JPEG) ? (mfc_context->mfc_indirect_pak_bse_object.offset) : 0;
-    OUT_BCS_RELOC(batch,
+    OUT_BCS_RELOC64(batch,
                   mfc_context->mfc_indirect_pak_bse_object.bo,
                   I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                   bse_offset);
-    OUT_BCS_BATCH(batch, 0);
     OUT_BCS_BATCH(batch, i965->intel.mocs_state);
-    OUT_BCS_RELOC(batch,
+    OUT_BCS_RELOC64(batch,
                   mfc_context->mfc_indirect_pak_bse_object.bo,
                   I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                   mfc_context->mfc_indirect_pak_bse_object.end_offset);
-    OUT_BCS_BATCH(batch, 0);
     ADVANCE_BCS_BATCH(batch);
 }
@@ -654,79 +649,76 @@ gen8_mfc_pipe_buf_addr_state(VADriverContextP ctx,
     /* the DW1-3 is for pre_deblocking */
     if (mfc_context->pre_deblocking_output.bo)
-        OUT_BCS_RELOC(batch, mfc_context->pre_deblocking_output.bo,
+        OUT_BCS_RELOC64(batch, mfc_context->pre_deblocking_output.bo,
                       I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                       0);
-    else
+    else {
+        OUT_BCS_BATCH(batch, 0);
         OUT_BCS_BATCH(batch, 0); /* pre output addr */
-    OUT_BCS_BATCH(batch, 0);
+    }
     OUT_BCS_BATCH(batch, i965->intel.mocs_state);
     /* the DW4-6 is for the post_deblocking */
     if (mfc_context->post_deblocking_output.bo)
-        OUT_BCS_RELOC(batch, mfc_context->post_deblocking_output.bo,
+        OUT_BCS_RELOC64(batch, mfc_context->post_deblocking_output.bo,
                       I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                       0); /* post output addr */
-    else
+    else {
         OUT_BCS_BATCH(batch, 0);
+        OUT_BCS_BATCH(batch, 0);
+    }
-    OUT_BCS_BATCH(batch, 0);
     OUT_BCS_BATCH(batch, i965->intel.mocs_state);
     /* the DW7-9 is for the uncompressed_picture */
-    OUT_BCS_RELOC(batch, mfc_context->uncompressed_picture_source.bo,
+    OUT_BCS_RELOC64(batch, mfc_context->uncompressed_picture_source.bo,
                   I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                   0); /* uncompressed data */
-    OUT_BCS_BATCH(batch, 0);
     OUT_BCS_BATCH(batch, i965->intel.mocs_state);
     /* the DW10-12 is for the mb status */
-    OUT_BCS_RELOC(batch, mfc_context->macroblock_status_buffer.bo,
+    OUT_BCS_RELOC64(batch, mfc_context->macroblock_status_buffer.bo,
                   I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                   0); /* StreamOut data*/
-    OUT_BCS_BATCH(batch, 0);
     OUT_BCS_BATCH(batch, i965->intel.mocs_state);
     /* the DW13-15 is for the intra_row_store_scratch */
-    OUT_BCS_RELOC(batch, mfc_context->intra_row_store_scratch_buffer.bo,
+    OUT_BCS_RELOC64(batch, mfc_context->intra_row_store_scratch_buffer.bo,
                   I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                   0);
-    OUT_BCS_BATCH(batch, 0);
     OUT_BCS_BATCH(batch, i965->intel.mocs_state);
     /* the DW16-18 is for the deblocking filter */
-    OUT_BCS_RELOC(batch, mfc_context->deblocking_filter_row_store_scratch_buffer.bo,
+    OUT_BCS_RELOC64(batch, mfc_context->deblocking_filter_row_store_scratch_buffer.bo,
                   I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                   0);
-    OUT_BCS_BATCH(batch, 0);
     OUT_BCS_BATCH(batch, i965->intel.mocs_state);
     /* the DW 19-50 is for Reference pictures*/
     for (i = 0; i < ARRAY_ELEMS(mfc_context->reference_surfaces); i++) {
         if ( mfc_context->reference_surfaces[i].bo != NULL) {
-            OUT_BCS_RELOC(batch, mfc_context->reference_surfaces[i].bo,
+            OUT_BCS_RELOC64(batch, mfc_context->reference_surfaces[i].bo,
                           I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                           0);
         } else {
             OUT_BCS_BATCH(batch, 0);
+            OUT_BCS_BATCH(batch, 0);
         }
-        OUT_BCS_BATCH(batch, 0);
     }
     OUT_BCS_BATCH(batch, i965->intel.mocs_state);
     /* The DW 52-54 is for the MB status buffer */
-    OUT_BCS_RELOC(batch, mfc_context->macroblock_status_buffer.bo,
+    OUT_BCS_RELOC64(batch, mfc_context->macroblock_status_buffer.bo,
                   I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                   0); /* Macroblock status buffer*/
-    OUT_BCS_BATCH(batch, 0);
     OUT_BCS_BATCH(batch, i965->intel.mocs_state);
     /* the DW 55-57 is the ILDB buffer */
@@ -760,10 +752,9 @@ gen8_mfc_avc_directmode_state(VADriverContextP ctx,
     /* the DW1-32 is for the direct MV for reference */
     for(i = 0; i < NUM_MFC_DMV_BUFFERS - 2; i += 2) {
         if ( mfc_context->direct_mv_buffers[i].bo != NULL) {
-            OUT_BCS_RELOC(batch, mfc_context->direct_mv_buffers[i].bo,
+            OUT_BCS_RELOC64(batch, mfc_context->direct_mv_buffers[i].bo,
                           I915_GEM_DOMAIN_INSTRUCTION, 0,
                           0);
-            OUT_BCS_BATCH(batch, 0);
         } else {
             OUT_BCS_BATCH(batch, 0);
             OUT_BCS_BATCH(batch, 0);
@@ -773,11 +764,10 @@ gen8_mfc_avc_directmode_state(VADriverContextP ctx,
     OUT_BCS_BATCH(batch, i965->intel.mocs_state);
     /* the DW34-36 is the MV for the current reference */
-    OUT_BCS_RELOC(batch, mfc_context->direct_mv_buffers[NUM_MFC_DMV_BUFFERS - 2].bo,
+    OUT_BCS_RELOC64(batch, mfc_context->direct_mv_buffers[NUM_MFC_DMV_BUFFERS - 2].bo,
                   I915_GEM_DOMAIN_INSTRUCTION, 0,
                   0);
-    OUT_BCS_BATCH(batch, 0);
     OUT_BCS_BATCH(batch, i965->intel.mocs_state);
     /* POL list */
@@ -802,10 +792,9 @@ gen8_mfc_bsp_buf_base_addr_state(VADriverContextP ctx,
     BEGIN_BCS_BATCH(batch, 10);
     OUT_BCS_BATCH(batch, MFX_BSP_BUF_BASE_ADDR_STATE | (10 - 2));
-    OUT_BCS_RELOC(batch, mfc_context->bsd_mpc_row_store_scratch_buffer.bo,
+    OUT_BCS_RELOC64(batch, mfc_context->bsd_mpc_row_store_scratch_buffer.bo,
                   I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                   0);
-    OUT_BCS_BATCH(batch, 0);
     OUT_BCS_BATCH(batch, i965->intel.mocs_state);
     /* the DW4-6 is for MPR Row Store Scratch Buffer Base Address */
@@ -1707,11 +1696,10 @@ gen8_mfc_avc_pipeline_programing(VADriverContextP ctx,
     BEGIN_BCS_BATCH(batch, 3);
     OUT_BCS_BATCH(batch, MI_BATCH_BUFFER_START | (1 << 8) | (1 << 0));
-    OUT_BCS_RELOC(batch,
+    OUT_BCS_RELOC64(batch,
                   slice_batch_bo,
                   I915_GEM_DOMAIN_COMMAND, 0,
                   0);
-    OUT_BCS_BATCH(batch, 0);
     ADVANCE_BCS_BATCH(batch);
     // end programing
@@ -2393,12 +2381,11 @@ gen8_mfc_mpeg2_pipeline_programing(VADriverContextP ctx,
     BEGIN_BCS_BATCH(batch, 4);
     OUT_BCS_BATCH(batch, MI_BATCH_BUFFER_START | (1 << 8) | (1 << 0));
-    OUT_BCS_RELOC(batch,
+    OUT_BCS_RELOC64(batch,
                   slice_batch_bo,
                   I915_GEM_DOMAIN_COMMAND, 0,
                   0);
     OUT_BCS_BATCH(batch, 0);
-    OUT_BCS_BATCH(batch, 0);
     ADVANCE_BCS_BATCH(batch);
     // end programing
@@ -4060,13 +4047,14 @@ gen8_mfc_vp8_pic_state(VADriverContextP ctx,
 #define OUT_VP8_BUFFER(bo, offset) \
     if (bo) \
-        OUT_BCS_RELOC(batch, \
+        OUT_BCS_RELOC64(batch, \
                       bo, \
                       I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION, \
                       offset); \
-    else \
+    else { \
+        OUT_BCS_BATCH(batch, 0); \
         OUT_BCS_BATCH(batch, 0); \
-        OUT_BCS_BATCH(batch, 0); \
+    } \
     OUT_BCS_BATCH(batch, i965->intel.mocs_state);
@@ -4370,12 +4358,11 @@ gen8_mfc_vp8_pipeline_programing(VADriverContextP ctx,
     BEGIN_BCS_BATCH(batch, 4);
     OUT_BCS_BATCH(batch, MI_BATCH_BUFFER_START | (1 << 8) | (1 << 0));
-    OUT_BCS_RELOC(batch,
+    OUT_BCS_RELOC64(batch,
                   slice_batch_bo,
                   I915_GEM_DOMAIN_COMMAND, 0,
                   0);
     OUT_BCS_BATCH(batch, 0);
-    OUT_BCS_BATCH(batch, 0);
     ADVANCE_BCS_BATCH(batch);
     // end programing
diff --git a/src/gen8_mfd.c b/src/gen8_mfd.c
index 61999b3..f603c46 100644
--- a/src/gen8_mfd.c
+++ b/src/gen8_mfd.c
@@ -187,24 +187,26 @@ gen8_mfd_pipe_buf_addr_state(VADriverContextP ctx,
     OUT_BCS_BATCH(batch, MFX_PIPE_BUF_ADDR_STATE | (61 - 2));
     /* Pre-deblock 1-3 */
     if (gen7_mfd_context->pre_deblocking_output.valid)
-        OUT_BCS_RELOC(batch, gen7_mfd_context->pre_deblocking_output.bo,
+        OUT_BCS_RELOC64(batch, gen7_mfd_context->pre_deblocking_output.bo,
                       I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                       0);
-    else
+    else {
         OUT_BCS_BATCH(batch, 0);
         OUT_BCS_BATCH(batch, 0);
+    }
     OUT_BCS_BATCH(batch, i965->intel.mocs_state);
     /* Post-debloing 4-6 */
     if (gen7_mfd_context->post_deblocking_output.valid)
-        OUT_BCS_RELOC(batch, gen7_mfd_context->post_deblocking_output.bo,
+        OUT_BCS_RELOC64(batch, gen7_mfd_context->post_deblocking_output.bo,
                       I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                       0);
-    else
+    else {
         OUT_BCS_BATCH(batch, 0);
         OUT_BCS_BATCH(batch, 0);
+    }
     OUT_BCS_BATCH(batch, i965->intel.mocs_state);
     /* uncompressed-video & stream out 7-12 */
@@ -217,23 +219,25 @@ gen8_mfd_pipe_buf_addr_state(VADriverContextP ctx,
     /* intra row-store scratch 13-15 */
     if (gen7_mfd_context->intra_row_store_scratch_buffer.valid)
-        OUT_BCS_RELOC(batch, gen7_mfd_context->intra_row_store_scratch_buffer.bo,
+        OUT_BCS_RELOC64(batch, gen7_mfd_context->intra_row_store_scratch_buffer.bo,
                       I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                       0);
-    else
+    else {
         OUT_BCS_BATCH(batch, 0);
         OUT_BCS_BATCH(batch, 0);
+    }
     OUT_BCS_BATCH(batch, i965->intel.mocs_state);
     /* deblocking-filter-row-store 16-18 */
     if (gen7_mfd_context->deblocking_filter_row_store_scratch_buffer.valid)
-        OUT_BCS_RELOC(batch, gen7_mfd_context->deblocking_filter_row_store_scratch_buffer.bo,
+        OUT_BCS_RELOC64(batch, gen7_mfd_context->deblocking_filter_row_store_scratch_buffer.bo,
                       I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                       0);
-    else
+    else {
         OUT_BCS_BATCH(batch, 0);
         OUT_BCS_BATCH(batch, 0);
+    }
     OUT_BCS_BATCH(batch, i965->intel.mocs_state);
@@ -246,14 +250,14 @@ gen8_mfd_pipe_buf_addr_state(VADriverContextP ctx,
             gen7_mfd_context->reference_surface[i].obj_surface->bo) {
             obj_surface = gen7_mfd_context->reference_surface[i].obj_surface;
-            OUT_BCS_RELOC(batch, obj_surface->bo,
+            OUT_BCS_RELOC64(batch, obj_surface->bo,
                           I915_GEM_DOMAIN_INSTRUCTION, 0,
                           0);
         } else {
             OUT_BCS_BATCH(batch, 0);
+            OUT_BCS_BATCH(batch, 0);
         }
-        OUT_BCS_BATCH(batch, 0);
     }
     /* reference property 51 */
@@ -287,8 +291,7 @@ gen8_mfd_ind_obj_base_addr_state(VADriverContextP ctx,
     BEGIN_BCS_BATCH(batch, 26);
     OUT_BCS_BATCH(batch, MFX_IND_OBJ_BASE_ADDR_STATE | (26 - 2));
     /* MFX In BS 1-5 */
-    OUT_BCS_RELOC(batch, slice_data_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0); /* MFX Indirect Bitstream Object Base Address */
-    OUT_BCS_BATCH(batch, 0);
+    OUT_BCS_RELOC64(batch, slice_data_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0); /* MFX Indirect Bitstream Object Base Address */
     OUT_BCS_BATCH(batch, i965->intel.mocs_state);
     /* Upper bound 4-5 */
     OUT_BCS_BATCH(batch, 0);
@@ -338,33 +341,36 @@ gen8_mfd_bsp_buf_base_addr_state(VADriverContextP ctx,
     OUT_BCS_BATCH(batch, MFX_BSP_BUF_BASE_ADDR_STATE | (10 - 2));
     if (gen7_mfd_context->bsd_mpc_row_store_scratch_buffer.valid)
-        OUT_BCS_RELOC(batch, gen7_mfd_context->bsd_mpc_row_store_scratch_buffer.bo,
+        OUT_BCS_RELOC64(batch, gen7_mfd_context->bsd_mpc_row_store_scratch_buffer.bo,
                       I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                       0);
-    else
+    else {
         OUT_BCS_BATCH(batch, 0);
+        OUT_BCS_BATCH(batch, 0);
+    }
-    OUT_BCS_BATCH(batch, 0);
     OUT_BCS_BATCH(batch, i965->intel.mocs_state);
     /* MPR Row Store Scratch buffer 4-6 */
     if (gen7_mfd_context->mpr_row_store_scratch_buffer.valid)
-        OUT_BCS_RELOC(batch, gen7_mfd_context->mpr_row_store_scratch_buffer.bo,
+        OUT_BCS_RELOC64(batch, gen7_mfd_context->mpr_row_store_scratch_buffer.bo,
                       I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                       0);
-    else
+    else {
+        OUT_BCS_BATCH(batch, 0);
         OUT_BCS_BATCH(batch, 0);
+    }
-    OUT_BCS_BATCH(batch, 0);
     OUT_BCS_BATCH(batch, i965->intel.mocs_state);
     /* Bitplane 7-9 */
     if (gen7_mfd_context->bitplane_read_buffer.valid)
-        OUT_BCS_RELOC(batch, gen7_mfd_context->bitplane_read_buffer.bo,
+        OUT_BCS_RELOC64(batch, gen7_mfd_context->bitplane_read_buffer.bo,
                       I915_GEM_DOMAIN_INSTRUCTION, 0,
                       0);
-    else
+    else {
         OUT_BCS_BATCH(batch, 0);
-    OUT_BCS_BATCH(batch, 0);
+        OUT_BCS_BATCH(batch, 0);
+    }
     OUT_BCS_BATCH(batch, i965->intel.mocs_state);
     ADVANCE_BCS_BATCH(batch);
 }
@@ -535,10 +541,9 @@ gen8_mfd_avc_directmode_state(VADriverContextP ctx,
             obj_surface = gen7_mfd_context->reference_surface[i].obj_surface;
             gen7_avc_surface = obj_surface->private_data;
-            OUT_BCS_RELOC(batch, gen7_avc_surface->dmv_top,
+            OUT_BCS_RELOC64(batch, gen7_avc_surface->dmv_top,
                           I915_GEM_DOMAIN_INSTRUCTION, 0,
                           0);
-            OUT_BCS_BATCH(batch, 0);
         } else {
             OUT_BCS_BATCH(batch, 0);
             OUT_BCS_BATCH(batch, 0);
@@ -553,11 +558,10 @@ gen8_mfd_avc_directmode_state(VADriverContextP ctx,
     assert(obj_surface->bo && obj_surface->private_data);
     gen7_avc_surface = obj_surface->private_data;
-    OUT_BCS_RELOC(batch, gen7_avc_surface->dmv_top,
+    OUT_BCS_RELOC64(batch, gen7_avc_surface->dmv_top,
                   I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                   0);
-    OUT_BCS_BATCH(batch, 0);
     OUT_BCS_BATCH(batch, i965->intel.mocs_state);
     /* POC List */
@@ -1740,23 +1744,25 @@ gen8_mfd_vc1_directmode_state(VADriverContextP ctx,
     OUT_BCS_BATCH(batch, MFX_VC1_DIRECTMODE_STATE | (7 - 2));
     if (dmv_write_buffer)
-        OUT_BCS_RELOC(batch, dmv_write_buffer,
+        OUT_BCS_RELOC64(batch, dmv_write_buffer,
                       I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                       0);
-    else
+    else {
+        OUT_BCS_BATCH(batch, 0);
         OUT_BCS_BATCH(batch, 0);
+    }
-    OUT_BCS_BATCH(batch, 0);
     OUT_BCS_BATCH(batch, i965->intel.mocs_state);
     if (dmv_read_buffer)
-        OUT_BCS_RELOC(batch, dmv_read_buffer,
+        OUT_BCS_RELOC64(batch, dmv_read_buffer,
                       I915_GEM_DOMAIN_INSTRUCTION, 0,
                       0);
-    else
+    else {
+        OUT_BCS_BATCH(batch, 0);
         OUT_BCS_BATCH(batch, 0);
+    }
-    OUT_BCS_BATCH(batch, 0);
     OUT_BCS_BATCH(batch, i965->intel.mocs_state);
     ADVANCE_BCS_BATCH(batch);
@@ -2311,11 +2317,10 @@ gen8_jpeg_wa_pipe_buf_addr_state(VADriverContextP ctx,
     BEGIN_BCS_BATCH(batch, 61);
     OUT_BCS_BATCH(batch, MFX_PIPE_BUF_ADDR_STATE | (61 - 2));
-    OUT_BCS_RELOC(batch,
+    OUT_BCS_RELOC64(batch,
                   obj_surface->bo,
                   I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                   0);
-    OUT_BCS_BATCH(batch, 0);
     OUT_BCS_BATCH(batch, i965->intel.mocs_state);
@@ -2332,11 +2337,10 @@ gen8_jpeg_wa_pipe_buf_addr_state(VADriverContextP ctx,
     OUT_BCS_BATCH(batch, 0);
     /* the DW 13-15 is for intra row store scratch */
-    OUT_BCS_RELOC(batch,
+    OUT_BCS_RELOC64(batch,
                   intra_bo,
                   I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                   0);
-    OUT_BCS_BATCH(batch, 0);
     OUT_BCS_BATCH(batch, i965->intel.mocs_state);
@@ -2390,19 +2394,17 @@ gen8_jpeg_wa_bsp_buf_base_addr_state(VADriverContextP ctx,
     BEGIN_BCS_BATCH(batch, 10);
     OUT_BCS_BATCH(batch, MFX_BSP_BUF_BASE_ADDR_STATE | (10 - 2));
-    OUT_BCS_RELOC(batch,
+    OUT_BCS_RELOC64(batch,
                   bsd_mpc_bo,
                   I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                   0);
-    OUT_BCS_BATCH(batch, 0);
     OUT_BCS_BATCH(batch, i965->intel.mocs_state);
-    OUT_BCS_RELOC(batch,
+    OUT_BCS_RELOC64(batch,
                   mpr_bo,
                   I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                   0);
-    OUT_BCS_BATCH(batch, 0);
     OUT_BCS_BATCH(batch, i965->intel.mocs_state);
     OUT_BCS_BATCH(batch, 0);
@@ -2513,11 +2515,10 @@ gen8_jpeg_wa_ind_obj_base_addr_state(VADriverContextP ctx,
     BEGIN_BCS_BATCH(batch, 11);
     OUT_BCS_BATCH(batch, MFX_IND_OBJ_BASE_ADDR_STATE | (11 - 2));
-    OUT_BCS_RELOC(batch,
+    OUT_BCS_RELOC64(batch,
                   gen7_mfd_context->jpeg_wa_slice_data_bo,
                   I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
-    OUT_BCS_BATCH(batch, 0);
     OUT_BCS_BATCH(batch, i965->intel.mocs_state);
     OUT_BCS_BATCH(batch, 0);
     OUT_BCS_BATCH(batch, 0); /* ignore for VLD mode */
@@ -2899,10 +2900,9 @@ gen8_mfd_vp8_pic_state(VADriverContextP ctx,
     /* CoeffProbability table for non-key frame, DW16-DW18 */
     if (probs_bo) {
-        OUT_BCS_RELOC(batch, probs_bo,
+        OUT_BCS_RELOC64(batch, probs_bo,
                       0, I915_GEM_DOMAIN_INSTRUCTION,
                       0);
-        OUT_BCS_BATCH(batch, 0);
         OUT_BCS_BATCH(batch, i965->intel.mocs_state);
     } else {
         OUT_BCS_BATCH(batch, 0);
@@ -2957,10 +2957,9 @@ gen8_mfd_vp8_pic_state(VADriverContextP ctx,
     /* segmentation id stream base address, DW35-DW37 */
     if (enable_segmentation) {
-        OUT_BCS_RELOC(batch, gen7_mfd_context->segmentation_buffer.bo,
+        OUT_BCS_RELOC64(batch, gen7_mfd_context->segmentation_buffer.bo,
                       0, I915_GEM_DOMAIN_INSTRUCTION,
                       0);
-        OUT_BCS_BATCH(batch, 0);
         OUT_BCS_BATCH(batch, i965->intel.mocs_state);
     } else {
diff --git a/src/gen8_post_processing.c b/src/gen8_post_processing.c
index db15894..35e46f9 100644
--- a/src/gen8_post_processing.c
+++ b/src/gen8_post_processing.c
@@ -1481,9 +1481,8 @@ gen8_pp_object_walker(VADriverContextP ctx,
     BEGIN_BATCH(batch, 3);
     OUT_BATCH(batch, MI_BATCH_BUFFER_START | (1 << 8) | (1 << 0));
-    OUT_RELOC(batch, command_buffer,
+    OUT_RELOC64(batch, command_buffer,
               I915_GEM_DOMAIN_COMMAND, 0, 0);
-    OUT_BATCH(batch, 0);
     ADVANCE_BATCH(batch);
     dri_bo_unreference(command_buffer);
diff --git a/src/gen8_vme.c b/src/gen8_vme.c
index 7a9ed6b..65b8e25 100644
--- a/src/gen8_vme.c
+++ b/src/gen8_vme.c
@@ -719,11 +719,10 @@ static void gen8_vme_pipeline_programing(VADriverContextP ctx,
     gen8_gpe_pipeline_setup(ctx, &vme_context->gpe_context, batch);
     BEGIN_BATCH(batch, 3);
     OUT_BATCH(batch, MI_BATCH_BUFFER_START | (1 << 8) | (1 << 0));
-    OUT_RELOC(batch,
+    OUT_RELOC64(batch,
               vme_context->vme_batchbuffer.bo,
               I915_GEM_DOMAIN_COMMAND, 0,
               0);
-    OUT_BATCH(batch, 0);
     ADVANCE_BATCH(batch);
     intel_batchbuffer_end_atomic(batch);
@@ -1110,12 +1109,11 @@ gen8_vme_mpeg2_pipeline_programing(VADriverContextP ctx,
     gen8_gpe_pipeline_setup(ctx, &vme_context->gpe_context, batch);
     BEGIN_BATCH(batch, 4);
     OUT_BATCH(batch, MI_BATCH_BUFFER_START | (1 << 8) | (1 << 0));
-    OUT_RELOC(batch,
+    OUT_RELOC64(batch,
               vme_context->vme_batchbuffer.bo,
               I915_GEM_DOMAIN_COMMAND, 0,
               0);
     OUT_BATCH(batch, 0);
-    OUT_BATCH(batch, 0);
     ADVANCE_BATCH(batch);
     intel_batchbuffer_end_atomic(batch);
@@ -1250,12 +1248,11 @@ gen8_vme_vp8_pipeline_programing(VADriverContextP ctx,
     gen8_gpe_pipeline_setup(ctx, &vme_context->gpe_context, batch);
     BEGIN_BATCH(batch, 4);
     OUT_BATCH(batch, MI_BATCH_BUFFER_START | (1 << 8) | (1 << 0));
-    OUT_RELOC(batch,
+    OUT_RELOC64(batch,
               vme_context->vme_batchbuffer.bo,
               I915_GEM_DOMAIN_COMMAND, 0,
               0);
     OUT_BATCH(batch, 0);
-    OUT_BATCH(batch, 0);
     ADVANCE_BATCH(batch);
     intel_batchbuffer_end_atomic(batch);
diff --git a/src/gen9_mfc_hevc.c b/src/gen9_mfc_hevc.c
index 4234cf7..ea22aed 100644
--- a/src/gen9_mfc_hevc.c
+++ b/src/gen9_mfc_hevc.c
@@ -83,15 +83,15 @@ typedef enum _gen6_brc_status {
 #define OUT_BUFFER_X(buf_bo, is_target, ma) do { \
     if (buf_bo) { \
-        OUT_BCS_RELOC(batch, \
+        OUT_BCS_RELOC64(batch, \
                       buf_bo, \
                       I915_GEM_DOMAIN_INSTRUCTION, \
                       is_target ? I915_GEM_DOMAIN_INSTRUCTION : 0, \
                       0); \
     } else { \
         OUT_BCS_BATCH(batch, 0); \
+        OUT_BCS_BATCH(batch, 0); \
     } \
-    OUT_BCS_BATCH(batch, 0); \
     if (ma) \
         OUT_BCS_BATCH(batch, i965->intel.mocs_state); \
 } while (0)
@@ -330,17 +330,15 @@ gen9_hcpe_ind_obj_base_addr_state(VADriverContextP ctx,
     OUT_BUFFER_NMA_REFERENCE(NULL); /* DW 4..5, Upper Bound */
     OUT_BUFFER_MA_TARGET(mfc_context->hcp_indirect_cu_object.bo); /* DW 6..8, CU */
     /* DW 9..11, PAK-BSE */
-    OUT_BCS_RELOC(batch,
+    OUT_BCS_RELOC64(batch,
                   mfc_context->hcp_indirect_pak_bse_object.bo,
                   I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                   mfc_context->hcp_indirect_pak_bse_object.offset);
-    OUT_BCS_BATCH(batch, 0);
     OUT_BCS_BATCH(batch, i965->intel.mocs_state);
-    OUT_BCS_RELOC(batch,
+    OUT_BCS_RELOC64(batch,
                   mfc_context->hcp_indirect_pak_bse_object.bo,
                   I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                   mfc_context->hcp_indirect_pak_bse_object.end_offset);
-    OUT_BCS_BATCH(batch, 0);
     ADVANCE_BCS_BATCH(batch);
 }
@@ -1966,11 +1964,10 @@ gen9_hcpe_hevc_pipeline_programing(VADriverContextP ctx,
     BEGIN_BCS_BATCH(batch, 3);
     OUT_BCS_BATCH(batch, MI_BATCH_BUFFER_START | (1 << 8) | (1 << 0));
-    OUT_BCS_RELOC(batch,
+    OUT_BCS_RELOC64(batch,
                   slice_batch_bo,
                   I915_GEM_DOMAIN_COMMAND, 0,
                   0);
-    OUT_BCS_BATCH(batch, 0);
     ADVANCE_BCS_BATCH(batch);
     // end programing
diff --git a/src/gen9_mfd.c b/src/gen9_mfd.c
index 6c4435d..fed1bc1 100644
--- a/src/gen9_mfd.c
+++ b/src/gen9_mfd.c
@@ -43,15 +43,15 @@
 #define OUT_BUFFER(buf_bo, is_target, ma) do { \
     if (buf_bo) { \
-        OUT_BCS_RELOC(batch, \
+        OUT_BCS_RELOC64(batch, \
                       buf_bo, \
                       I915_GEM_DOMAIN_RENDER, \
                       is_target ? I915_GEM_DOMAIN_RENDER : 0, \
                       0); \
     } else { \
         OUT_BCS_BATCH(batch, 0); \
+        OUT_BCS_BATCH(batch, 0); \
     } \
-    OUT_BCS_BATCH(batch, 0); \
     if (ma) \
         OUT_BCS_BATCH(batch, i965->intel.mocs_state); \
 } while (0)
diff --git a/src/gen9_vme.c b/src/gen9_vme.c
index 11602a8..e98dc71 100644
--- a/src/gen9_vme.c
+++ b/src/gen9_vme.c
@@ -768,11 +768,10 @@ static void gen9_vme_pipeline_programing(VADriverContextP ctx,
     gen9_gpe_pipeline_setup(ctx, &vme_context->gpe_context, batch);
     BEGIN_BATCH(batch, 3);
     OUT_BATCH(batch, MI_BATCH_BUFFER_START | (1 << 8) | (1 << 0));
-    OUT_RELOC(batch,
+    OUT_RELOC64(batch,
               vme_context->vme_batchbuffer.bo,
               I915_GEM_DOMAIN_COMMAND, 0,
               0);
-    OUT_BATCH(batch, 0);
     ADVANCE_BATCH(batch);
     gen9_gpe_pipeline_end(ctx, &vme_context->gpe_context, batch);
@@ -1162,12 +1161,11 @@ gen9_vme_mpeg2_pipeline_programing(VADriverContextP ctx,
     gen9_gpe_pipeline_setup(ctx, &vme_context->gpe_context, batch);
     BEGIN_BATCH(batch, 4);
     OUT_BATCH(batch, MI_BATCH_BUFFER_START | (1 << 8) | (1 << 0));
-    OUT_RELOC(batch,
+    OUT_RELOC64(batch,
               vme_context->vme_batchbuffer.bo,
               I915_GEM_DOMAIN_COMMAND, 0,
               0);
     OUT_BATCH(batch, 0);
-    OUT_BATCH(batch, 0);
     ADVANCE_BATCH(batch);
     gen9_gpe_pipeline_end(ctx, &vme_context->gpe_context, batch);
@@ -1303,12 +1301,11 @@ gen9_vme_vp8_pipeline_programing(VADriverContextP ctx,
     gen9_gpe_pipeline_setup(ctx, &vme_context->gpe_context, batch);
     BEGIN_BATCH(batch, 4);
     OUT_BATCH(batch, MI_BATCH_BUFFER_START | (1 << 8) | (1 << 0));
-    OUT_RELOC(batch,
+    OUT_RELOC64(batch,
               vme_context->vme_batchbuffer.bo,
               I915_GEM_DOMAIN_COMMAND, 0,
               0);
     OUT_BATCH(batch, 0);
-    OUT_BATCH(batch, 0);
     ADVANCE_BATCH(batch);
     gen9_gpe_pipeline_end(ctx, &vme_context->gpe_context, batch);
@@ -1737,11 +1734,10 @@ static void gen9_vme_hevc_pipeline_programing(VADriverContextP ctx,
     gen9_gpe_pipeline_setup(ctx, &vme_context->gpe_context, batch);
     BEGIN_BATCH(batch, 3);
     OUT_BATCH(batch, MI_BATCH_BUFFER_START | (1 << 8) | (1 << 0));
-    OUT_RELOC(batch,
+    OUT_RELOC64(batch,
               vme_context->vme_batchbuffer.bo,
               I915_GEM_DOMAIN_COMMAND, 0,
               0);
-    OUT_BATCH(batch, 0);
     ADVANCE_BATCH(batch);
     gen9_gpe_pipeline_end(ctx, &vme_context->gpe_context, batch);
-- 
1.9.1