Michael Neuling <mi...@neuling.org> writes:
> From: Ian Munsie <imun...@au1.ibm.com>
>
> __spu_trap_data_seg() currently contains code to determine the VSID and ESID
> required for a particular EA and mm struct.
>
> This code is generically useful for other co-processors. This moves the code
> out of the cell platform so it can be used by other powerpc code. It also
> adds 1TB segment handling which Cell didn't have.
>
> Signed-off-by: Ian Munsie <imun...@au1.ibm.com>
> Signed-off-by: Michael Neuling <mi...@neuling.org>
> ---
>  arch/powerpc/include/asm/mmu-hash64.h  |  7 ++++-
>  arch/powerpc/mm/copro_fault.c          | 48 ++++++++++++++++++++++++++++++++++
>  arch/powerpc/mm/slb.c                  |  3 ---
>  arch/powerpc/platforms/cell/spu_base.c | 41 +++++----------------------
>  4 files changed, 58 insertions(+), 41 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
> index d765144..6d0b7a2 100644
> --- a/arch/powerpc/include/asm/mmu-hash64.h
> +++ b/arch/powerpc/include/asm/mmu-hash64.h
> @@ -189,7 +189,12 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
>  #define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)
>
>  #ifndef __ASSEMBLY__
> -
> +static inline int slb_vsid_shift(int ssize)
> +{
> +	if (ssize == MMU_SEGSIZE_256M)
> +		return SLB_VSID_SHIFT;
> +	return SLB_VSID_SHIFT_1T;
> +}
>  static inline int segment_shift(int ssize)
>  {
>  	if (ssize == MMU_SEGSIZE_256M)
> diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
> index ba7df14..b865697 100644
> --- a/arch/powerpc/mm/copro_fault.c
> +++ b/arch/powerpc/mm/copro_fault.c
> @@ -90,3 +90,51 @@ out_unlock:
>  	return ret;
>  }
>  EXPORT_SYMBOL_GPL(copro_handle_mm_fault);
> +
> +int copro_data_segment(struct mm_struct *mm, u64 ea, u64 *esid, u64 *vsid)
> +{
> +	int psize, ssize;
> +
> +	*esid = (ea & ESID_MASK) | SLB_ESID_V;
> +
> +	switch (REGION_ID(ea)) {
> +	case USER_REGION_ID:
> +		pr_devel("copro_data_segment: 0x%llx -- USER_REGION_ID\n", ea);
> +#ifdef CONFIG_PPC_MM_SLICES
> +		psize = get_slice_psize(mm, ea);
> +#else
> +		psize = mm->context.user_psize;
> +#endif
> +		ssize = user_segment_size(ea);
> +		*vsid = (get_vsid(mm->context.id, ea, ssize)
> +			 << slb_vsid_shift(ssize)) | SLB_VSID_USER;
> +		break;
> +	case VMALLOC_REGION_ID:
> +		pr_devel("copro_data_segment: 0x%llx -- VMALLOC_REGION_ID\n", ea);
> +		if (ea < VMALLOC_END)
> +			psize = mmu_vmalloc_psize;
> +		else
> +			psize = mmu_io_psize;
> +		ssize = mmu_kernel_ssize;
> +		*vsid = (get_kernel_vsid(ea, mmu_kernel_ssize)
> +			 << SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
Why not

	*vsid = (get_kernel_vsid(ea, mmu_kernel_ssize)
		 << slb_vsid_shift(ssize)) | SLB_VSID_KERNEL;

for the vmalloc and kernel regions? We could end up using 1T segments for the kernel mapping too.

-aneesh
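
Spelled out, the suggestion would amount to something like the sketch below for the VMALLOC_REGION_ID case. This is only an illustration of the suggestion, not code from the patch as posted or merged; it simply reuses the slb_vsid_shift() helper added above so that a 1T kernel segment size would be honoured as well as 256M:

	case VMALLOC_REGION_ID:
		pr_devel("copro_data_segment: 0x%llx -- VMALLOC_REGION_ID\n", ea);
		if (ea < VMALLOC_END)
			psize = mmu_vmalloc_psize;
		else
			psize = mmu_io_psize;
		ssize = mmu_kernel_ssize;
		/* Derive the shift from the kernel segment size instead of
		 * hard-coding the 256M shift, so 1T segments encode correctly. */
		*vsid = (get_kernel_vsid(ea, mmu_kernel_ssize)
			 << slb_vsid_shift(ssize)) | SLB_VSID_KERNEL;
		break;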