Accessing the TIMA from specific ring/offset combinations can trigger a special operation, with or without side effects. It is implemented in QEMU with an array of special operations that accesses are compared against. Since the presenter on P10 is very similar to P9, we only had the full array defined for P9, with a special case for P10 to treat one access differently. With a recent change, 6f2cbd133d4 ("pnv/xive2: Handle TIMA access through all ports"), we now ignore some bits of the TIMA address, but that patch broke the detection of the P10 special case.
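
(For readers not familiar with this code: the dispatch is purely table-driven. Each entry describes the TIMA page, offset and access size an MMIO must match, plus the handler to call; the lookup walks the table and falls back to a raw TIMA access when nothing matches. Below is a minimal, standalone sketch of the idea, with hypothetical names and example offsets; the real structure is XiveTmOp in hw/intc/xive.c.)

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for QEMU's XiveTmOp; handlers reduced to a name. */
typedef struct TmOp {
    uint8_t  page;       /* TIMA page the entry is defined for          */
    uint32_t op_offset;  /* offset of the operation within the page     */
    unsigned size;       /* required MMIO access size                   */
    const char *handler; /* the real struct holds read/write callbacks  */
} TmOp;

/* Lower page number means more privileged in this example. */
enum { HV_PAGE = 1, OS_PAGE = 2 };

static const TmOp ops[] = {
    { OS_PAGE, 0x810, 2, "ack_os_reg"  },  /* example offsets only */
    { HV_PAGE, 0x818, 8, "pull_os_ctx" },
};

static const TmOp *find_op(uint8_t page, uint32_t op_offset, unsigned size)
{
    for (unsigned i = 0; i < sizeof(ops) / sizeof(ops[0]); i++) {
        /* an access from a more privileged page may use the entry too */
        if (ops[i].page >= page && ops[i].op_offset == op_offset &&
            ops[i].size == size) {
            return &ops[i];
        }
    }
    return NULL; /* no special op: plain TIMA register access */
}

int main(void)
{
    const TmOp *op = find_op(HV_PAGE, 0x818, 8);
    printf("%s\n", op ? op->handler : "raw access");
    return 0;
}

(An entry defined for a given page also matches accesses from a more privileged page, which is what the "xto->page_offset >= page_offset" test in xive_tm_find_op() implements, per the comment in the code.)
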
To clean that up, this patch introduces a full array of special ops to be
used for P10. The code to detect a special access is common with P9, only
the array of operations differs. The presenter can pick the correct array
of special ops based on its configuration introduced in a previous patch.

Fixes: Coverity CID 1512997, 1512998
Fixes: 6f2cbd133d4 ("pnv/xive2: Handle TIMA access through all ports")
Signed-off-by: Frederic Barrat <fbar...@linux.ibm.com>
---
 hw/intc/pnv_xive2.c | 32 ----------------------------
 hw/intc/xive.c      | 52 +++++++++++++++++++++++++++++++++++++--------
 2 files changed, 43 insertions(+), 41 deletions(-)

diff --git a/hw/intc/pnv_xive2.c b/hw/intc/pnv_xive2.c
index 59534f6843..ed438a20ed 100644
--- a/hw/intc/pnv_xive2.c
+++ b/hw/intc/pnv_xive2.c
@@ -1656,17 +1656,6 @@ static const MemoryRegionOps pnv_xive2_ic_tm_indirect_ops = {
 /*
  * TIMA ops
  */
-
-/*
- * Special TIMA offsets to handle accesses in a POWER10 way.
- *
- * Only the CAM line updates done by the hypervisor should be handled
- * specifically.
- */
-#define HV_PAGE_OFFSET (XIVE_TM_HV_PAGE << TM_SHIFT)
-#define HV_PUSH_OS_CTX_OFFSET (HV_PAGE_OFFSET | (TM_QW1_OS + TM_WORD2))
-#define HV_PULL_OS_CTX_OFFSET (HV_PAGE_OFFSET | TM_SPC_PULL_OS_CTX)
-
 static void pnv_xive2_tm_write(void *opaque, hwaddr offset,
                                uint64_t value, unsigned size)
 {
@@ -1674,18 +1663,7 @@ static void pnv_xive2_tm_write(void *opaque, hwaddr offset,
     PnvXive2 *xive = pnv_xive2_tm_get_xive(cpu);
     XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
     XivePresenter *xptr = XIVE_PRESENTER(xive);
-    bool gen1_tima_os =
-        xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS;
-
-    offset &= TM_ADDRESS_MASK;
-    /* TODO: should we switch the TM ops table instead ? */
-    if (!gen1_tima_os && offset == HV_PUSH_OS_CTX_OFFSET) {
-        xive2_tm_push_os_ctx(xptr, tctx, offset, value, size);
-        return;
-    }
-
-    /* Other TM ops are the same as XIVE1 */
     xive_tctx_tm_write(xptr, tctx, offset, value, size);
 }

@@ -1695,17 +1673,7 @@ static uint64_t pnv_xive2_tm_read(void *opaque, hwaddr offset, unsigned size)
     PnvXive2 *xive = pnv_xive2_tm_get_xive(cpu);
     XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
     XivePresenter *xptr = XIVE_PRESENTER(xive);
-    bool gen1_tima_os =
-        xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS;
-
-    offset &= TM_ADDRESS_MASK;
-
-    /* TODO: should we switch the TM ops table instead ? */
-    if (!gen1_tima_os && offset == HV_PULL_OS_CTX_OFFSET) {
-        return xive2_tm_pull_os_ctx(xptr, tctx, offset, size);
-    }
-    /* Other TM ops are the same as XIVE1 */
     return xive_tctx_tm_read(xptr, tctx, offset, size);
 }

diff --git a/hw/intc/xive.c b/hw/intc/xive.c
index 34a868b185..84c079b034 100644
--- a/hw/intc/xive.c
+++ b/hw/intc/xive.c
@@ -20,6 +20,7 @@
 #include "monitor/monitor.h"
 #include "hw/irq.h"
 #include "hw/ppc/xive.h"
+#include "hw/ppc/xive2.h"
 #include "hw/ppc/xive_regs.h"
 #include "trace.h"

@@ -461,7 +462,7 @@ static void xive_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
     }
 }

-static __attribute__((unused)) uint32_t xive_presenter_get_config(XivePresenter *xptr)
+static uint32_t xive_presenter_get_config(XivePresenter *xptr)
 {
     XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr);

@@ -504,14 +505,47 @@ static const XiveTmOp xive_tm_operations[] = {
     { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL, xive_tm_pull_pool_ctx },
 };

-static const XiveTmOp *xive_tm_find_op(hwaddr offset, unsigned size, bool write)
+static const XiveTmOp xive2_tm_operations[] = {
+    /*
+     * MMIOs below 2K : raw values and special operations without side
+     * effects
+     */
+    { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr, NULL },
+    { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive2_tm_push_os_ctx, NULL },
+    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr, NULL },
+    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push, NULL },
+    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL, xive_tm_vt_poll },
+
+    /* MMIOs above 2K : special operations with side effects */
+    { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL, xive_tm_ack_os_reg },
+    { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending, NULL },
+    { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL, xive2_tm_pull_os_ctx },
+    { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL, xive2_tm_pull_os_ctx },
+    { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL, xive_tm_ack_hv_reg },
+    { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL, xive_tm_pull_pool_ctx },
+    { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL, xive_tm_pull_pool_ctx },
+};
+
+static const XiveTmOp *xive_tm_find_op(XivePresenter *xptr, hwaddr offset,
+                                       unsigned size, bool write)
 {
     uint8_t page_offset = (offset >> TM_SHIFT) & 0x3;
     uint32_t op_offset = offset & TM_ADDRESS_MASK;
-    int i;
+    const XiveTmOp *tm_ops;
+    int i, tm_ops_count;
+    uint32_t cfg;
+
+    cfg = xive_presenter_get_config(xptr);
+    if (cfg & XIVE_PRESENTER_GEN1_TIMA_OS) {
+        tm_ops = xive_tm_operations;
+        tm_ops_count = ARRAY_SIZE(xive_tm_operations);
+    } else {
+        tm_ops = xive2_tm_operations;
+        tm_ops_count = ARRAY_SIZE(xive2_tm_operations);
+    }

-    for (i = 0; i < ARRAY_SIZE(xive_tm_operations); i++) {
-        const XiveTmOp *xto = &xive_tm_operations[i];
+    for (i = 0; i < tm_ops_count; i++) {
+        const XiveTmOp *xto = &tm_ops[i];

         /* Accesses done from a more privileged TIMA page is allowed */
         if (xto->page_offset >= page_offset &&
@@ -542,7 +576,7 @@ void xive_tctx_tm_write(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
      * First, check for special operations in the 2K region
      */
     if (offset & TM_SPECIAL_OP) {
-        xto = xive_tm_find_op(offset, size, true);
+        xto = xive_tm_find_op(tctx->xptr, offset, size, true);
         if (!xto) {
             qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA "
                           "@%"HWADDR_PRIx"\n", offset);
@@ -555,7 +589,7 @@ void xive_tctx_tm_write(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
     /*
      * Then, for special operations in the region below 2K.
      */
-    xto = xive_tm_find_op(offset, size, true);
+    xto = xive_tm_find_op(tctx->xptr, offset, size, true);
     if (xto) {
         xto->write_handler(xptr, tctx, offset, value, size);
         return;
@@ -581,7 +615,7 @@ uint64_t xive_tctx_tm_read(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
      * First, check for special operations in the 2K region
      */
     if (offset & TM_SPECIAL_OP) {
-        xto = xive_tm_find_op(offset, size, false);
+        xto = xive_tm_find_op(tctx->xptr, offset, size, false);
         if (!xto) {
             qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access to TIMA"
                           "@%"HWADDR_PRIx"\n", offset);
@@ -594,7 +628,7 @@ uint64_t xive_tctx_tm_read(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
     /*
      * Then, for special operations in the region below 2K.
      */
-    xto = xive_tm_find_op(offset, size, false);
+    xto = xive_tm_find_op(tctx->xptr, offset, size, false);
     if (xto) {
         ret = xto->read_handler(xptr, tctx, offset, size);
         goto out;
-- 
2.41.0
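
(Not part of the patch, but a quick standalone illustration of the resulting behaviour: with the presenter's GEN1_TIMA_OS bit clear, i.e. a P10 presenter, the hypervisor push/pull OS context accesses that used to be special-cased in pnv_xive2.c are now matched by the common lookup against xive2_tm_operations. The shift, mask and offset values below are assumptions for the example only; the real TM_* constants are defined in QEMU's XIVE headers.)

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Assumed example values; the real TM_* constants differ. */
#define PAGE_SHIFT    12
#define ADDR_MASK     0xfff
#define HV_PAGE       1
#define PULL_OS_CTX   0x818

/* Stand-ins for the two tables selected in xive_tm_find_op(). */
static const char *gen1_table = "xive_tm_operations";   /* P9  */
static const char *gen2_table = "xive2_tm_operations";  /* P10 */

int main(void)
{
    bool gen1_tima_os = false;   /* P10 presenter configuration */
    uint64_t offset = ((uint64_t)HV_PAGE << PAGE_SHIFT) | PULL_OS_CTX;

    /* same decomposition as the common lookup */
    unsigned page_offset = (offset >> PAGE_SHIFT) & 0x3;
    unsigned op_offset = offset & ADDR_MASK;

    printf("table: %s, page %u, op offset 0x%x\n",
           gen1_tima_os ? gen1_table : gen2_table, page_offset, op_offset);
    return 0;
}

(On a P9-style presenter, with GEN1_TIMA_OS set, the same access would be looked up in xive_tm_operations instead, which is exactly the switch the new xive_tm_find_op() performs.)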