Add access control for the command queue and event queue related
registers to ensure they can only be modified under the proper
conditions.

For the command queue (CMDQ):
- smmu_cmdq_disabled_stable(): checks that CMDQEN is clear in both
  CR0 and CR0ACK
- smmu_cmdq_base_writable(): checks IDR1.QUEUES_PRESET == 0 and that
  the CMDQ is disabled

For the event queue (EVTQ):
- smmu_eventq_disabled_stable(): checks that EVENTQEN is clear in both
  CR0 and CR0ACK
- smmu_eventq_base_writable(): checks IDR1.QUEUES_PRESET == 0 and that
  the EVTQ is disabled
- smmu_eventq_irq_cfg_writable(): checks that MSIs are supported and
  that IRQ_CTRL.EVENTQ_IRQEN is clear

Additionally, mask out reserved bits on writes, using
SMMU_QUEUE_BASE_RESERVED for the queue base registers and
SMMU_EVENTQ_IRQ_CFG0_RESERVED for EVENTQ_IRQ_CFG0.

Fixes: fae4be38b35d ("hw/arm/smmuv3: Implement MMIO write operations")
Signed-off-by: Tao Tang <[email protected]>
---
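Note on the expected guest sequence: the checks added here model the
SMMUv3 update protocol where software first clears CR0.CMDQEN, polls
CR0ACK until the disable is acknowledged, and only then reprograms
CMDQ_BASE (and likewise for the event queue). A minimal guest-side
sketch of that sequence follows; the register offsets, the CMDQEN mask
and the mmio/guest_reprogram_cmdq names are illustrative, not taken
from this patch:

#include <stdint.h>

#define SMMU_CR0        0x020   /* illustrative MMIO offsets */
#define SMMU_CR0ACK     0x024
#define SMMU_CMDQ_BASE  0x090
#define CR0_CMDQEN      (1u << 3)

/* Disable the CMDQ, wait for the disable to become stable, then
 * program a new CMDQ_BASE value. */
static void guest_reprogram_cmdq(volatile uint8_t *mmio, uint64_t cmdq_base)
{
    volatile uint32_t *cr0    = (volatile uint32_t *)(mmio + SMMU_CR0);
    volatile uint32_t *cr0ack = (volatile uint32_t *)(mmio + SMMU_CR0ACK);
    volatile uint64_t *base   = (volatile uint64_t *)(mmio + SMMU_CMDQ_BASE);

    *cr0 &= ~CR0_CMDQEN;               /* request CMDQ disable */
    while (*cr0ack & CR0_CMDQEN) {
        ;                              /* wait until CR0ACK.CMDQEN reads 0 */
    }
    *base = cmdq_base;                 /* CMDQ_BASE is writable only now */
}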
hw/arm/smmuv3.c | 157 +++++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 154 insertions(+), 3 deletions(-)
diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
index 163c07adce4..9c09ea0716e 100644
--- a/hw/arm/smmuv3.c
+++ b/hw/arm/smmuv3.c
@@ -1421,6 +1421,73 @@ static bool smmu_strtab_base_writable(SMMUv3State *s, SMMUSecSID sec_sid)
return smmuv3_smmu_disabled_stable(s, sec_sid);
}

+static inline int smmuv3_get_cr0_cmdqen(SMMUv3State *s, SMMUSecSID sec_sid)
+{
+ return FIELD_EX32(s->bank[sec_sid].cr[0], CR0, CMDQEN);
+}
+
+static inline int smmuv3_get_cr0ack_cmdqen(SMMUv3State *s, SMMUSecSID sec_sid)
+{
+ return FIELD_EX32(s->bank[sec_sid].cr0ack, CR0, CMDQEN);
+}
+
+static inline int smmuv3_get_cr0_eventqen(SMMUv3State *s, SMMUSecSID sec_sid)
+{
+ return FIELD_EX32(s->bank[sec_sid].cr[0], CR0, EVENTQEN);
+}
+
+static inline int smmuv3_get_cr0ack_eventqen(SMMUv3State *s, SMMUSecSID sec_sid)
+{
+ return FIELD_EX32(s->bank[sec_sid].cr0ack, CR0, EVENTQEN);
+}
+
+/* Check whether the CMDQ is disabled and the state is stable (CR0 matches CR0ACK) */
+static bool smmu_cmdq_disabled_stable(SMMUv3State *s, SMMUSecSID sec_sid)
+{
+ int cr0_cmdqen = smmuv3_get_cr0_cmdqen(s, sec_sid);
+ int cr0ack_cmdqen = smmuv3_get_cr0ack_cmdqen(s, sec_sid);
+ return (cr0_cmdqen == 0 && cr0ack_cmdqen == 0);
+}
+
+/* Check if CMDQ_BASE register is writable */
+static bool smmu_cmdq_base_writable(SMMUv3State *s, SMMUSecSID sec_sid)
+{
+ /* IDR1.QUEUES_PRESET lives in the NS bank and applies to all security states */
+ if (FIELD_EX32(s->bank[SMMU_SEC_SID_NS].idr[1], IDR1, QUEUES_PRESET)) {
+ return false;
+ }
+
+ return smmu_cmdq_disabled_stable(s, sec_sid);
+}
+
+/* Check whether the EVENTQ is disabled and the state is stable (CR0 matches CR0ACK) */
+static bool smmu_eventq_disabled_stable(SMMUv3State *s, SMMUSecSID sec_sid)
+{
+ int cr0_eventqen = smmuv3_get_cr0_eventqen(s, sec_sid);
+ int cr0ack_eventqen = smmuv3_get_cr0ack_eventqen(s, sec_sid);
+ return (cr0_eventqen == 0 && cr0ack_eventqen == 0);
+}
+
+/* Check if EVENTQ_BASE register is writable */
+static bool smmu_eventq_base_writable(SMMUv3State *s, SMMUSecSID sec_sid)
+{
+ if (FIELD_EX32(s->bank[SMMU_SEC_SID_NS].idr[1], IDR1, QUEUES_PRESET)) {
+ return false;
+ }
+
+ return smmu_eventq_disabled_stable(s, sec_sid);
+}
+
+/* Check if EVENTQ_IRQ_CFGx is writable */
+static bool smmu_eventq_irq_cfg_writable(SMMUv3State *s, SMMUSecSID sec_sid)
+{
+ if (!smmu_msi_supported(s)) {
+ return false;
+ }
+
+ return (FIELD_EX32(s->bank[sec_sid].irq_ctrl, IRQ_CTRL, EVENTQ_IRQEN) == 0);
+}
+
static int smmuv3_cmdq_consume(SMMUv3State *s, Error **errp, SMMUSecSID sec_sid)
{
SMMUState *bs = ARM_SMMU(s);
@@ -1741,21 +1808,39 @@ static MemTxResult smmu_writell(SMMUv3State *s, hwaddr offset,
bank->strtab_base = data & SMMU_STRTAB_BASE_RESERVED;
return MEMTX_OK;
case A_CMDQ_BASE:
- bank->cmdq.base = data;
+ if (!smmu_cmdq_base_writable(s, reg_sec_sid)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "CMDQ_BASE write ignored: register is RO\n");
+ return MEMTX_OK;
+ }
+
+ bank->cmdq.base = data & SMMU_QUEUE_BASE_RESERVED;
bank->cmdq.log2size = extract64(bank->cmdq.base, 0, 5);
if (bank->cmdq.log2size > SMMU_CMDQS) {
bank->cmdq.log2size = SMMU_CMDQS;
}
return MEMTX_OK;
case A_EVENTQ_BASE:
- bank->eventq.base = data;
+ if (!smmu_eventq_base_writable(s, reg_sec_sid)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "EVENTQ_BASE write ignored: register is RO\n");
+ return MEMTX_OK;
+ }
+
+ bank->eventq.base = data & SMMU_QUEUE_BASE_RESERVED;
bank->eventq.log2size = extract64(bank->eventq.base, 0, 5);
if (bank->eventq.log2size > SMMU_EVENTQS) {
bank->eventq.log2size = SMMU_EVENTQS;
}
return MEMTX_OK;
case A_EVENTQ_IRQ_CFG0:
- bank->eventq_irq_cfg0 = data;
+ if (!smmu_eventq_irq_cfg_writable(s, reg_sec_sid)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "EVENTQ_IRQ_CFG0 write ignored: register is RO\n");
+ return MEMTX_OK;
+ }
+
+ bank->eventq_irq_cfg0 = data & SMMU_EVENTQ_IRQ_CFG0_RESERVED;
return MEMTX_OK;
default:
qemu_log_mask(LOG_UNIMP,
@@ -1880,6 +1965,13 @@ static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
}
break;
case A_CMDQ_BASE: /* 64b */
+ if (!smmu_cmdq_base_writable(s, reg_sec_sid)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "CMDQ_BASE write ignored: register is RO\n");
+ return MEMTX_OK;
+ }
+
+ data &= SMMU_QUEUE_BASE_RESERVED;
bank->cmdq.base = deposit64(bank->cmdq.base, 0, 32, data);
bank->cmdq.log2size = extract64(bank->cmdq.base, 0, 5);
if (bank->cmdq.log2size > SMMU_CMDQS) {
@@ -1887,6 +1979,13 @@ static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
}
break;
case A_CMDQ_BASE + 4: /* 64b */
+ if (!smmu_cmdq_base_writable(s, reg_sec_sid)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "CMDQ_BASE + 4 write ignored: register is RO\n");
+ return MEMTX_OK;
+ }
+
+ data &= SMMU_QUEUE_BASE_RESERVED;
bank->cmdq.base = deposit64(bank->cmdq.base, 32, 32, data);
break;
case A_CMDQ_PROD:
@@ -1894,9 +1993,22 @@ static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
smmuv3_cmdq_consume(s, &local_err, reg_sec_sid);
break;
case A_CMDQ_CONS:
+ if (!smmu_cmdq_disabled_stable(s, reg_sec_sid)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "CMDQ_CONS write ignored: register is RO\n");
+ return MEMTX_OK;
+ }
+
bank->cmdq.cons = data;
break;
case A_EVENTQ_BASE: /* 64b */
+ if (!smmu_eventq_base_writable(s, reg_sec_sid)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "EVENTQ_BASE write ignored: register is RO\n");
+ return MEMTX_OK;
+ }
+
+ data &= SMMU_QUEUE_BASE_RESERVED;
bank->eventq.base = deposit64(bank->eventq.base, 0, 32, data);
bank->eventq.log2size = extract64(bank->eventq.base, 0, 5);
if (bank->eventq.log2size > SMMU_EVENTQS) {
@@ -1904,24 +2016,63 @@ static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
}
break;
case A_EVENTQ_BASE + 4:
+ if (!smmu_eventq_base_writable(s, reg_sec_sid)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "EVENTQ_BASE + 4 write ignored: register is RO\n");
+ return MEMTX_OK;
+ }
+
+ data &= SMMU_QUEUE_BASE_RESERVED;
bank->eventq.base = deposit64(bank->eventq.base, 32, 32, data);
break;
case A_EVENTQ_PROD:
+ if (!smmu_eventq_disabled_stable(s, reg_sec_sid)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "EVENTQ_PROD write ignored: register is RO\n");
+ return MEMTX_OK;
+ }
+
bank->eventq.prod = data;
break;
case A_EVENTQ_CONS:
bank->eventq.cons = data;
break;
case A_EVENTQ_IRQ_CFG0: /* 64b */
+ if (!smmu_eventq_irq_cfg_writable(s, reg_sec_sid)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "EVENTQ_IRQ_CFG0 write ignored: register is RO\n");
+ return MEMTX_OK;
+ }
+
+ data &= SMMU_EVENTQ_IRQ_CFG0_RESERVED;
bank->eventq_irq_cfg0 = deposit64(bank->eventq_irq_cfg0, 0, 32, data);
break;
case A_EVENTQ_IRQ_CFG0 + 4:
+ if (!smmu_eventq_irq_cfg_writable(s, reg_sec_sid)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "EVENTQ_IRQ_CFG0+4 write ignored: register is RO\n");
+ return MEMTX_OK;
+ }
+
+ data &= SMMU_EVENTQ_IRQ_CFG0_RESERVED;
bank->eventq_irq_cfg0 = deposit64(bank->eventq_irq_cfg0, 32, 32, data);
break;
case A_EVENTQ_IRQ_CFG1:
+ if (!smmu_eventq_irq_cfg_writable(s, reg_sec_sid)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "EVENTQ_IRQ_CFG1 write ignored: register is RO\n");
+ return MEMTX_OK;
+ }
+
bank->eventq_irq_cfg1 = data;
break;
case A_EVENTQ_IRQ_CFG2:
+ if (!smmu_eventq_irq_cfg_writable(s, reg_sec_sid)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "EVENTQ_IRQ_CFG2 write ignored: register is RO\n");
+ return MEMTX_OK;
+ }
+
bank->eventq_irq_cfg2 = data;
break;
default:
--
2.34.1