diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 1c65a59..5ff936b 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -143,6 +143,10 @@ struct mmu_psize_def
        unsigned long   sllp;   /* SLB L||LP (exact mask to use in slbmte) */
  };

+struct virt_addr {
+       unsigned long addr;
+};
+
  #endif /* __ASSEMBLY__ */

  /*


@@ -1153,13 +1153,13 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
  /* WARNING: This is called from hash_low_64.S, if you change this prototype,
   *          do not forget to update the assembly call site !
   */

I'd suggest having a similar warning next to the definition of struct virt_addr, since any change to it means we'll need to adjust hash_low_64.S as well.
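Something along these lines would do, just mirroring the existing warning above flush_hash_page (exact wording and placement up to you):

/* WARNING: hash_low_64.S knows about the layout of this struct, so if you
 *          change it, do not forget to adjust the assembly there as well !
 */
struct virt_addr {
	unsigned long addr;
};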

-void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int ssize,
+void flush_hash_page(struct virt_addr va, real_pte_t pte, int psize, int ssize,
                     int local)
  {
        unsigned long hash, index, shift, hidx, slot;

-       DBG_LOW("flush_hash_page(va=%016lx)\n", va);
-       pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
+       DBG_LOW("flush_hash_page(va=%016lx)\n", va.addr);
+       pte_iterate_hashed_subpages(pte, psize, va.addr, index, shift) {
                hash = hpt_hash(va, shift, ssize);
                hidx = __rpte_to_hidx(pte, index);
                if (hidx & _PTEIDX_SECONDARY)

--
Cody
