On 11/27/21 04:00, David Gibson wrote:
On Fri, Nov 26, 2021 at 04:39:40PM -0300, Leandro Lupori wrote:
When updating the R bit of a PTE, the Hash64 MMU was using the wrong byte
offset, causing the first byte of the adjacent PTE to be corrupted.
This caused a panic when booting FreeBSD with the Hash MMU.
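
For reference, a rough sketch of the HPTE layout involved, as I read it
(the byte numbers are the ones this patch turns into defines):

  /*
   * One hashed PTE is HASH_PTE_SIZE_64 (16) bytes:
   *   bytes  0..7    word 0 (pte0): valid bit, AVPN, ...
   *   bytes  8..15   word 1 (pte1): pp, WIMG, R, C, ...
   *   byte   14      holds the R bit (HPTE64_R_R == 0x100 in pte1)
   *   byte   15      holds the C bit (HPTE64_R_C == 0x80 in pte1)
   *
   * Writing the R byte at offset 16 instead of 14 therefore lands on
   * byte 0 of the next PTE, which is what corrupted the adjacent entry.
   */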

Fixes: a2dd4e83e76b ("ppc/hash64: Rework R and C bit updates")
Signed-off-by: Leandro Lupori <leandro.lup...@eldorado.org.br>
---
Changes from v2:
- Add new defines for the byte offset of PTE bit C and
   HASH_PTE_SIZE_64 / 2 (pte1)
- Use new defines in hash64 and spapr code
---
  hw/ppc/spapr.c          | 8 ++++----
  hw/ppc/spapr_softmmu.c  | 2 +-
  target/ppc/mmu-hash64.c | 4 ++--
  target/ppc/mmu-hash64.h | 5 +++++
  4 files changed, 12 insertions(+), 7 deletions(-)

diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 163c90388a..8ebf85bad8 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -1414,7 +1414,7 @@ void spapr_store_hpte(PowerPCCPU *cpu, hwaddr ptex,
          kvmppc_write_hpte(ptex, pte0, pte1);
      } else {
          if (pte0 & HPTE64_V_VALID) {
-            stq_p(spapr->htab + offset + HASH_PTE_SIZE_64 / 2, pte1);
+            stq_p(spapr->htab + offset + HPTE64_R_BYTE_OFFSET, pte1);

Urgh.. so, initially I thought this was wrong because I was confusing
HPTE64_R_BYTE_OFFSET with HPTE64_R_R_BYTE_OFFSET.  I doubt I'd be the
only one.

Calling something a BYTE_OFFSET and then doing an stq to it is pretty
misleading, I think.  WORD1_OFFSET or R_WORD_OFFSET might be better?

How about (inspired by XIVE):

 #define HPTE64_W1    (HASH_PTE_SIZE_64 / 2)
 #define HPTE64_W1_R  14 // or HPTE64_W1 + 6
 #define HPTE64_W1_C  15 // or HPTE64_W1 + 7
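
Just to make that concrete (these names are only the proposal above, not
what is in the tree), the hunks below would then read something like:

  stq_p(spapr->htab + offset + HPTE64_W1, pte1);
  hwaddr offset = ptex * HASH_PTE_SIZE_64 + HPTE64_W1_R;   /* set_r */
  hwaddr offset = ptex * HASH_PTE_SIZE_64 + HPTE64_W1_C;   /* set_c */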


Really, these should be bit field definitions describing both words, like
we have for XIVE (see include/hw/ppc/xive_regs.h). Is there a reason why
not?
Or you could change these writebacks to byte writes, as was already done
for powernv.
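
To illustrate the byte-write idea, here is a rough, untested sketch of an
R update done with a single byte store, reusing the offset define from
this patch (the | 0x01 assumes R is the low-order bit of byte 14, i.e.
HPTE64_R_R == 0x100 in pte1):

  static void spapr_hpte_set_r(PPCVirtualHypervisor *vhyp, hwaddr ptex,
                               uint64_t pte1)
  {
      hwaddr offset = ptex * HASH_PTE_SIZE_64 + HPTE64_R_R_BYTE_OFFSET;
      SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);

      if (!spapr->htab) {
          return; /* no QEMU-side HPT (e.g. KVM-managed), nothing to do */
      }

      /* write back only the byte that holds R, with R set */
      stb_p(spapr->htab + offset, ((pte1 >> 8) & 0xff) | 0x01);
  }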

That's a bigger change. It depends on whether we want this fix for 6.2 or
for 7.0.

Thanks,

C.


I'm not sure that's necessary in the case of pseries, since there the HPT
doesn't exist within the guest's address space.

              /*
               * When setting valid, we write PTE1 first. This ensures
               * proper synchronization with the reading code in
@@ -1430,7 +1430,7 @@ void spapr_store_hpte(PowerPCCPU *cpu, hwaddr ptex,
               * ppc_hash64_pteg_search()
               */
              smp_wmb();
-            stq_p(spapr->htab + offset + HASH_PTE_SIZE_64 / 2, pte1);
+            stq_p(spapr->htab + offset + HPTE64_R_BYTE_OFFSET, pte1);
          }
      }
  }
@@ -1438,7 +1438,7 @@ void spapr_store_hpte(PowerPCCPU *cpu, hwaddr ptex,
  static void spapr_hpte_set_c(PPCVirtualHypervisor *vhyp, hwaddr ptex,
                               uint64_t pte1)
  {
-    hwaddr offset = ptex * HASH_PTE_SIZE_64 + 15;
+    hwaddr offset = ptex * HASH_PTE_SIZE_64 + HPTE64_R_C_BYTE_OFFSET;
      SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);

      if (!spapr->htab) {
@@ -1454,7 +1454,7 @@ static void spapr_hpte_set_c(PPCVirtualHypervisor *vhyp, hwaddr ptex,
  static void spapr_hpte_set_r(PPCVirtualHypervisor *vhyp, hwaddr ptex,
                               uint64_t pte1)
  {
-    hwaddr offset = ptex * HASH_PTE_SIZE_64 + 14;
+    hwaddr offset = ptex * HASH_PTE_SIZE_64 + HPTE64_R_R_BYTE_OFFSET;
      SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);

      if (!spapr->htab) {
diff --git a/hw/ppc/spapr_softmmu.c b/hw/ppc/spapr_softmmu.c
index f8924270ef..03676c4448 100644
--- a/hw/ppc/spapr_softmmu.c
+++ b/hw/ppc/spapr_softmmu.c
@@ -426,7 +426,7 @@ static void new_hpte_store(void *htab, uint64_t pteg, int slot,
      addr += slot * HASH_PTE_SIZE_64;

      stq_p(addr, pte0);
-    stq_p(addr + HASH_PTE_SIZE_64 / 2, pte1);
+    stq_p(addr + HPTE64_R_BYTE_OFFSET, pte1);
  }

  static int rehash_hpte(PowerPCCPU *cpu,
diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c
index 19832c4b46..168d397c26 100644
--- a/target/ppc/mmu-hash64.c
+++ b/target/ppc/mmu-hash64.c
@@ -786,7 +786,7 @@ static void ppc_hash64_set_dsi(CPUState *cs, int mmu_idx, uint64_t dar, uint64_t

static void ppc_hash64_set_r(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
  {
-    hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + 16;
+    hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + HPTE64_R_R_BYTE_OFFSET;

      if (cpu->vhyp) {
          PPCVirtualHypervisorClass *vhc =
@@ -803,7 +803,7 @@ static void ppc_hash64_set_r(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)

static void ppc_hash64_set_c(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
  {
-    hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + 15;
+    hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + HPTE64_R_C_BYTE_OFFSET;

      if (cpu->vhyp) {
          PPCVirtualHypervisorClass *vhc =
diff --git a/target/ppc/mmu-hash64.h b/target/ppc/mmu-hash64.h
index c5b2f97ff7..2a46763f70 100644
--- a/target/ppc/mmu-hash64.h
+++ b/target/ppc/mmu-hash64.h
@@ -97,6 +97,11 @@ void ppc_hash64_finalize(PowerPCCPU *cpu);
  #define HPTE64_V_1TB_SEG        0x4000000000000000ULL
  #define HPTE64_V_VRMA_MASK      0x4001ffffff000000ULL

+/* PTE byte offsets */
+#define HPTE64_R_R_BYTE_OFFSET  14
+#define HPTE64_R_C_BYTE_OFFSET  15
+#define HPTE64_R_BYTE_OFFSET    (HASH_PTE_SIZE_64 / 2)
+
  /* Format changes for ARCH v3 */
  #define HPTE64_V_COMMON_BITS    0x000fffffffffffffULL
  #define HPTE64_R_3_0_SSIZE_SHIFT 58


