Re: [RFC v4 09/17] powerpc: call the hash functions with the correct pkey value
On Tue, Jun 27, 2017 at 08:54:07PM +0530, Aneesh Kumar K.V wrote: > > > On Tuesday 27 June 2017 03:41 PM, Ram Pai wrote: > >Pass the correct protection key value to the hash functions on > >page fault. > > > >Signed-off-by: Ram Pai> >--- > > arch/powerpc/include/asm/pkeys.h | 11 +++ > > arch/powerpc/mm/hash_utils_64.c | 4 > > arch/powerpc/mm/mem.c| 6 ++ > > 3 files changed, 21 insertions(+) > > > >diff --git a/arch/powerpc/include/asm/pkeys.h > >b/arch/powerpc/include/asm/pkeys.h > >index ef1c601..1370b3f 100644 > >--- a/arch/powerpc/include/asm/pkeys.h > >+++ b/arch/powerpc/include/asm/pkeys.h > >@@ -74,6 +74,17 @@ static inline bool mm_pkey_is_allocated(struct mm_struct > >*mm, int pkey) > > } > > > > /* > >+ * return the protection key of the vma corresponding to the > >+ * given effective address @ea. > >+ */ > >+static inline int mm_pkey(struct mm_struct *mm, unsigned long ea) > >+{ > >+struct vm_area_struct *vma = find_vma(mm, ea); > >+int pkey = vma ? vma_pkey(vma) : 0; > >+return pkey; > >+} > >+ > >+/* > > > > That is not going to work in hash fault path right ? We can't do a > find_vma there without holding the mmap_sem There is a fundamental problem with this new design. Looks like we can't hold a lock in that path, without badly hurting the performance. I am moving back to the old design. Can't bypass the pte. The keys will be programmed into the pte, which will then be used to program the hpte. RP
Re: [RFC v4 09/17] powerpc: call the hash functions with the correct pkey value
On Tuesday 27 June 2017 03:41 PM, Ram Pai wrote: Pass the correct protection key value to the hash functions on page fault. Signed-off-by: Ram Pai--- arch/powerpc/include/asm/pkeys.h | 11 +++ arch/powerpc/mm/hash_utils_64.c | 4 arch/powerpc/mm/mem.c| 6 ++ 3 files changed, 21 insertions(+) diff --git a/arch/powerpc/include/asm/pkeys.h b/arch/powerpc/include/asm/pkeys.h index ef1c601..1370b3f 100644 --- a/arch/powerpc/include/asm/pkeys.h +++ b/arch/powerpc/include/asm/pkeys.h @@ -74,6 +74,17 @@ static inline bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey) } /* + * return the protection key of the vma corresponding to the + * given effective address @ea. + */ +static inline int mm_pkey(struct mm_struct *mm, unsigned long ea) +{ + struct vm_area_struct *vma = find_vma(mm, ea); + int pkey = vma ? vma_pkey(vma) : 0; + return pkey; +} + +/* That is not going to work in hash fault path right ? We can't do a find_vma there without holding the mmap_sem -aneesh
[RFC v4 09/17] powerpc: call the hash functions with the correct pkey value
Pass the correct protection key value to the hash functions on page fault. Signed-off-by: Ram Pai--- arch/powerpc/include/asm/pkeys.h | 11 +++ arch/powerpc/mm/hash_utils_64.c | 4 arch/powerpc/mm/mem.c| 6 ++ 3 files changed, 21 insertions(+) diff --git a/arch/powerpc/include/asm/pkeys.h b/arch/powerpc/include/asm/pkeys.h index ef1c601..1370b3f 100644 --- a/arch/powerpc/include/asm/pkeys.h +++ b/arch/powerpc/include/asm/pkeys.h @@ -74,6 +74,17 @@ static inline bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey) } /* + * return the protection key of the vma corresponding to the + * given effective address @ea. + */ +static inline int mm_pkey(struct mm_struct *mm, unsigned long ea) +{ + struct vm_area_struct *vma = find_vma(mm, ea); + int pkey = vma ? vma_pkey(vma) : 0; + return pkey; +} + +/* * Returns a positive, 5-bit key on success, or -1 on failure. */ static inline int mm_pkey_alloc(struct mm_struct *mm) diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 7e67dea..403f75d 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -1319,6 +1319,10 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea, goto bail; } +#ifdef CONFIG_PPC64_MEMORY_PROTECTION_KEYS + pkey = mm_pkey(mm, ea); +#endif /* CONFIG_PPC64_MEMORY_PROTECTION_KEYS */ + if (hugeshift) { if (is_thp) rc = __hash_page_thp(ea, access, vsid, (pmd_t *)ptep, diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index ec890d3..0fcaa48 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -541,8 +541,14 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, return; } +#ifdef CONFIG_PPC64_MEMORY_PROTECTION_KEYS + hash_preload_pkey(vma->vm_mm, address, access, trap, vma_pkey(vma)); +#else hash_preload(vma->vm_mm, address, access, trap); +#endif /* CONFIG_PPC64_MEMORY_PROTECTION_KEYS */ + #endif /* CONFIG_PPC_STD_MMU */ + #if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \ && 
defined(CONFIG_HUGETLB_PAGE) if (is_vm_hugetlb_page(vma)) -- 1.8.3.1