Use qemu_real_host_page_size instead.

Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
---
 linux-user/mmap.c | 66 +++++++++++++++++++++++------------------------
 1 file changed, 33 insertions(+), 33 deletions(-)
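(Reviewer note, not part of the patch: a minimal stand-alone sketch of the
masking identities this conversion relies on.  With host_page_size a power
of two, "x & -host_page_size" rounds x down to a host page boundary, which
is what "x & qemu_host_page_mask" used to do, and "x & (host_page_size - 1)"
yields the offset within the host page, replacing "x & ~qemu_host_page_mask".
The sketch uses sysconf(_SC_PAGESIZE) as a stand-in for
qemu_real_host_page_size(); the names and values below are illustrative only.)

  #include <assert.h>
  #include <stdint.h>
  #include <unistd.h>

  int main(void)
  {
      /* Stand-in for qemu_real_host_page_size(); always a power of two. */
      uint64_t host_page_size = sysconf(_SC_PAGESIZE);
      uint64_t x = 0x12345;

      /* Round down to a host page boundary (old: x & qemu_host_page_mask). */
      assert((x & -host_page_size) == x / host_page_size * host_page_size);

      /* Offset within the host page (old: x & ~qemu_host_page_mask). */
      assert((x & (host_page_size - 1)) == x % host_page_size);
      return 0;
  }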
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index 9aab48d4a3..fc23192a32 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -121,6 +121,7 @@ static int target_to_host_prot(int prot)
 /* NOTE: all the constants are the HOST ones, but addresses are target. */
 int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
 {
+    int host_page_size = qemu_real_host_page_size();
     abi_ulong starts[3];
     abi_ulong lens[3];
     int prots[3];
@@ -145,13 +146,13 @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
     }
 
     last = start + len - 1;
-    host_start = start & qemu_host_page_mask;
+    host_start = start & -host_page_size;
     host_last = HOST_PAGE_ALIGN(last) - 1;
     nranges = 0;
 
     mmap_lock();
 
-    if (host_last - host_start < qemu_host_page_size) {
+    if (host_last - host_start < host_page_size) {
         /* Single host page contains all guest pages: sum the prot. */
         prot1 = target_prot;
         for (abi_ulong a = host_start; a < start; a += TARGET_PAGE_SIZE) {
@@ -161,7 +162,7 @@
             prot1 |= page_get_flags(a + 1);
         }
         starts[nranges] = host_start;
-        lens[nranges] = qemu_host_page_size;
+        lens[nranges] = host_page_size;
         prots[nranges] = prot1;
         nranges++;
     } else {
@@ -174,10 +175,10 @@
             /* If the resulting sum differs, create a new range. */
             if (prot1 != target_prot) {
                 starts[nranges] = host_start;
-                lens[nranges] = qemu_host_page_size;
+                lens[nranges] = host_page_size;
                 prots[nranges] = prot1;
                 nranges++;
-                host_start += qemu_host_page_size;
+                host_start += host_page_size;
             }
         }
 
@@ -189,9 +190,9 @@
             }
             /* If the resulting sum differs, create a new range. */
             if (prot1 != target_prot) {
-                host_last -= qemu_host_page_size;
+                host_last -= host_page_size;
                 starts[nranges] = host_last + 1;
-                lens[nranges] = qemu_host_page_size;
+                lens[nranges] = host_page_size;
                 prots[nranges] = prot1;
                 nranges++;
             }
@@ -226,6 +227,7 @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
 static bool mmap_frag(abi_ulong real_start, abi_ulong start, abi_ulong last,
                       int prot, int flags, int fd, off_t offset)
 {
+    int host_page_size = qemu_real_host_page_size();
     abi_ulong real_last;
     void *host_start;
     int prot_old, prot_new;
@@ -242,7 +244,7 @@ static bool mmap_frag(abi_ulong real_start, abi_ulong start, abi_ulong last,
         return false;
     }
 
-    real_last = real_start + qemu_host_page_size - 1;
+    real_last = real_start + host_page_size - 1;
     host_start = g2h_untagged(real_start);
 
     /* Get the protection of the target pages outside the mapping. */
@@ -260,12 +262,12 @@ static bool mmap_frag(abi_ulong real_start, abi_ulong start, abi_ulong last,
          * outside of the fragment we need to map. Allocate a new host
          * page to cover, discarding whatever else may have been present.
          */
-        void *p = mmap(host_start, qemu_host_page_size,
+        void *p = mmap(host_start, host_page_size,
                        target_to_host_prot(prot),
                        flags | MAP_ANONYMOUS, -1, 0);
         if (p != host_start) {
             if (p != MAP_FAILED) {
-                munmap(p, qemu_host_page_size);
+                munmap(p, host_page_size);
                 errno = EEXIST;
             }
             return false;
@@ -280,7 +282,7 @@ static bool mmap_frag(abi_ulong real_start, abi_ulong start, abi_ulong last,
     /* Adjust protection to be able to write. */
     if (!(host_prot_old & PROT_WRITE)) {
         host_prot_old |= PROT_WRITE;
-        mprotect(host_start, qemu_host_page_size, host_prot_old);
+        mprotect(host_start, host_page_size, host_prot_old);
     }
 
     /* Read or zero the new guest pages. */
@@ -294,7 +296,7 @@ static bool mmap_frag(abi_ulong real_start, abi_ulong start, abi_ulong last,
 
     /* Put final protection */
     if (host_prot_new != host_prot_old) {
-        mprotect(host_start, qemu_host_page_size, host_prot_new);
+        mprotect(host_start, host_page_size, host_prot_new);
     }
     return true;
 }
@@ -329,17 +331,18 @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
  */
 abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
 {
+    int host_page_size = qemu_real_host_page_size();
     void *ptr, *prev;
     abi_ulong addr;
     int wrapped, repeat;
 
-    align = MAX(align, qemu_host_page_size);
+    align = MAX(align, host_page_size);
 
     /* If 'start' == 0, then a default start address is used. */
     if (start == 0) {
         start = mmap_next_start;
     } else {
-        start &= qemu_host_page_mask;
+        start &= -host_page_size;
     }
     start = ROUND_UP(start, align);
 
@@ -448,6 +451,7 @@ abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
 abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
                      int flags, int fd, off_t offset)
 {
+    int host_page_size = qemu_real_host_page_size();
     abi_ulong ret, last, real_start, real_last, retaddr, host_len;
     abi_ulong passthrough_start = -1, passthrough_last = 0;
     int page_flags;
@@ -493,8 +497,8 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
         }
     }
 
-    real_start = start & qemu_host_page_mask;
-    host_offset = offset & qemu_host_page_mask;
+    real_start = start & -host_page_size;
+    host_offset = offset & -host_page_size;
 
     /*
      * If the user is asking for the kernel to find a location, do that
@@ -523,8 +527,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
      * may need to truncate file maps at EOF and add extra anonymous pages
      * up to the targets page boundary.
      */
-    if ((qemu_real_host_page_size() < qemu_host_page_size) &&
-        !(flags & MAP_ANONYMOUS)) {
+    if (host_page_size < TARGET_PAGE_SIZE && !(flags & MAP_ANONYMOUS)) {
         struct stat sb;
 
         if (fstat(fd, &sb) == -1) {
@@ -551,11 +554,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
         host_len = HOST_PAGE_ALIGN(host_len);
         host_prot = target_to_host_prot(target_prot);
 
-        /*
-         * Note: we prefer to control the mapping address. It is
-         * especially important if qemu_host_page_size >
-         * qemu_real_host_page_size.
-         */
+        /* Note: we prefer to control the mapping address. */
         p = mmap(g2h_untagged(start), host_len, host_prot,
                  flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
         if (p == MAP_FAILED) {
@@ -621,7 +620,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
          * aligned, so we read it
          */
         if (!(flags & MAP_ANONYMOUS) &&
-            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
+            (offset & (host_page_size - 1)) != (start & (host_page_size - 1))) {
             /*
              * msync() won't work here, so we return an error if write is
              * possible while it is a shared mapping
@@ -650,7 +649,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
 
         /* handle the start of the mapping */
         if (start > real_start) {
-            if (real_last == real_start + qemu_host_page_size - 1) {
+            if (real_last == real_start + host_page_size - 1) {
                 /* one single host page */
                 if (!mmap_frag(real_start, start, last,
                                target_prot, flags, fd, offset)) {
@@ -659,21 +658,21 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
                 goto the_end1;
             }
             if (!mmap_frag(real_start, start,
-                           real_start + qemu_host_page_size - 1,
+                           real_start + host_page_size - 1,
                            target_prot, flags, fd, offset)) {
                 goto fail;
             }
-            real_start += qemu_host_page_size;
+            real_start += host_page_size;
         }
         /* handle the end of the mapping */
         if (last < real_last) {
-            abi_ulong real_page = real_last - qemu_host_page_size + 1;
+            abi_ulong real_page = real_last - host_page_size + 1;
             if (!mmap_frag(real_page, real_page, last,
                            target_prot, flags, fd,
                            offset + real_page - start)) {
                 goto fail;
             }
-            real_last -= qemu_host_page_size;
+            real_last -= host_page_size;
         }
 
         /* map the middle (easier) */
@@ -739,6 +738,7 @@ fail:
 
 static void mmap_reserve_or_unmap(abi_ulong start, abi_ulong len)
 {
+    int host_page_size = qemu_real_host_page_size();
     abi_ulong real_start;
     abi_ulong real_last;
     abi_ulong real_len;
@@ -748,7 +748,7 @@ static void mmap_reserve_or_unmap(abi_ulong start, abi_ulong len)
     int prot;
 
     last = start + len - 1;
-    real_start = start & qemu_host_page_mask;
+    real_start = start & -host_page_size;
     real_last = HOST_PAGE_ALIGN(last) - 1;
 
     /*
@@ -757,7 +757,7 @@ static void mmap_reserve_or_unmap(abi_ulong start, abi_ulong len)
      * The single page special case is required for the last page,
      * lest real_start overflow to zero.
      */
-    if (real_last - real_start < qemu_host_page_size) {
+    if (real_last - real_start < host_page_size) {
         prot = 0;
         for (a = real_start; a < start; a += TARGET_PAGE_SIZE) {
             prot |= page_get_flags(a);
@@ -773,14 +773,14 @@ static void mmap_reserve_or_unmap(abi_ulong start, abi_ulong len)
             prot |= page_get_flags(a);
         }
         if (prot != 0) {
-            real_start += qemu_host_page_size;
+            real_start += host_page_size;
         }
 
         for (prot = 0, a = last; a < real_last; a += TARGET_PAGE_SIZE) {
             prot |= page_get_flags(a + 1);
         }
         if (prot != 0) {
-            real_last -= qemu_host_page_size;
+            real_last -= host_page_size;
         }
 
         if (real_last < real_start) {
-- 
2.34.1