Module Name:    src
Committed By:   thorpej
Date:           Wed Mar  4 01:21:17 UTC 2020

Modified Files:
        src/libexec/ld.elf_so: headers.c map_object.c rtld.c

Log Message:
PT_GNU_RELRO segments are arranged such that their vaddr + memsz ends
on a linker common page size boundary.  However, if the common page size
used by the linker is smaller than the VM page size used by the kernel,
the end of the segment can fall in the middle of a VM page.  When the
RELRO region is then write-protected, objects in the neighboring .data
section can be incorrectly write-protected as well, resulting in a crash
when that data is later written.

Avoid this situation by calculating the end of the RELRO region not by
rounding memsz up to the VM page size, but rather by computing
vaddr + memsz and then truncating the result down to a VM page boundary.
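
For illustration, a minimal standalone sketch (not the committed code) of
the old and new calculations.  The values and the VM_PAGE_SIZE constant
are hypothetical; the real round_up()/round_down() macros in ld.elf_so
round to the VM page size discovered at run time:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Hypothetical: 16 KB VM pages, object laid out with 4 KB common pages. */
#define VM_PAGE_SIZE	0x4000UL
#define round_down(x)	((x) & ~(VM_PAGE_SIZE - 1))
#define round_up(x)	round_down((x) + VM_PAGE_SIZE - 1)

int
main(void)
{
	/* A PT_GNU_RELRO segment ending on a 4 KB boundary that falls
	 * in the middle of a 16 KB VM page. */
	uintptr_t vaddr = 0x20000;	/* VM-page-aligned start */
	size_t memsz = 0x7000;		/* segment ends at 0x27000 */

	/* Old: round memsz up, protecting 0x20000..0x28000 and thus
	 * write-protecting .data living in 0x27000..0x28000. */
	printf("old: %#lx..%#lx\n", (unsigned long)round_down(vaddr),
	    (unsigned long)(round_down(vaddr) + round_up(memsz)));

	/* New: truncate vaddr + memsz down to a VM page boundary,
	 * protecting only 0x20000..0x24000. */
	printf("new: %#lx..%#lx\n", (unsigned long)round_down(vaddr),
	    (unsigned long)round_down(vaddr + memsz));
	return 0;
}

As the new calculation shows, a partial trailing VM page is simply left
unprotected; that may protect less than intended, but it cannot clobber
adjacent .data.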

Fixes PR toolchain/55043.

XXX pullup-9


To generate a diff of this commit:
cvs rdiff -u -r1.67 -r1.68 src/libexec/ld.elf_so/headers.c
cvs rdiff -u -r1.60 -r1.61 src/libexec/ld.elf_so/map_object.c
cvs rdiff -u -r1.202 -r1.203 src/libexec/ld.elf_so/rtld.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/libexec/ld.elf_so/headers.c
diff -u src/libexec/ld.elf_so/headers.c:1.67 src/libexec/ld.elf_so/headers.c:1.68
--- src/libexec/ld.elf_so/headers.c:1.67	Sat Feb 29 18:53:55 2020
+++ src/libexec/ld.elf_so/headers.c	Wed Mar  4 01:21:17 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: headers.c,v 1.67 2020/02/29 18:53:55 kamil Exp $	 */
+/*	$NetBSD: headers.c,v 1.68 2020/03/04 01:21:17 thorpej Exp $	 */
 
 /*
  * Copyright 1996 John D. Polstra.
@@ -40,7 +40,7 @@
 
 #include <sys/cdefs.h>
 #ifndef lint
-__RCSID("$NetBSD: headers.c,v 1.67 2020/02/29 18:53:55 kamil Exp $");
+__RCSID("$NetBSD: headers.c,v 1.68 2020/03/04 01:21:17 thorpej Exp $");
 #endif /* not lint */
 
 #include <err.h>
@@ -516,9 +516,9 @@ _rtld_digest_phdr(const Elf_Phdr *phdr, 
 
 #ifdef GNU_RELRO
 		case PT_GNU_RELRO:
-			obj->relro_page = obj->relocbase
-			    + round_down(ph->p_vaddr);
-			obj->relro_size = round_up(ph->p_memsz);
+			/* rounding happens later. */
+			obj->relro_page = obj->relocbase + ph->p_vaddr;
+			obj->relro_size = ph->p_memsz;
 			dbg(("headers: %s %p phsize %" PRImemsz,
 			    "PT_GNU_RELRO", (void *)(uintptr_t)vaddr,
 			     ph->p_memsz));

Index: src/libexec/ld.elf_so/map_object.c
diff -u src/libexec/ld.elf_so/map_object.c:1.60 src/libexec/ld.elf_so/map_object.c:1.61
--- src/libexec/ld.elf_so/map_object.c:1.60	Sun Jan  6 19:44:54 2019
+++ src/libexec/ld.elf_so/map_object.c	Wed Mar  4 01:21:17 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: map_object.c,v 1.60 2019/01/06 19:44:54 joerg Exp $	 */
+/*	$NetBSD: map_object.c,v 1.61 2020/03/04 01:21:17 thorpej Exp $	 */
 
 /*
  * Copyright 1996 John D. Polstra.
@@ -34,7 +34,7 @@
 
 #include <sys/cdefs.h>
 #ifndef lint
-__RCSID("$NetBSD: map_object.c,v 1.60 2019/01/06 19:44:54 joerg Exp $");
+__RCSID("$NetBSD: map_object.c,v 1.61 2020/03/04 01:21:17 thorpej Exp $");
 #endif /* not lint */
 
 #include <errno.h>
@@ -406,8 +406,9 @@ _rtld_map_object(const char *path, int f
 	obj->relocbase = mapbase - base_vaddr;
 
 #ifdef GNU_RELRO
-	obj->relro_page = obj->relocbase + round_down(relro_page);
-	obj->relro_size = round_up(relro_size);
+	/* rounding happens later. */
+	obj->relro_page = obj->relocbase + relro_page;
+	obj->relro_size = relro_size;
 #endif
 
 	if (obj->dynamic)

Index: src/libexec/ld.elf_so/rtld.c
diff -u src/libexec/ld.elf_so/rtld.c:1.202 src/libexec/ld.elf_so/rtld.c:1.203
--- src/libexec/ld.elf_so/rtld.c:1.202	Sat Feb 29 04:23:05 2020
+++ src/libexec/ld.elf_so/rtld.c	Wed Mar  4 01:21:17 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: rtld.c,v 1.202 2020/02/29 04:23:05 kamil Exp $	 */
+/*	$NetBSD: rtld.c,v 1.203 2020/03/04 01:21:17 thorpej Exp $	 */
 
 /*
  * Copyright 1996 John D. Polstra.
@@ -40,7 +40,7 @@
 
 #include <sys/cdefs.h>
 #ifndef lint
-__RCSID("$NetBSD: rtld.c,v 1.202 2020/02/29 04:23:05 kamil Exp $");
+__RCSID("$NetBSD: rtld.c,v 1.203 2020/03/04 01:21:17 thorpej Exp $");
 #endif /* not lint */
 
 #include <sys/param.h>
@@ -1773,13 +1773,25 @@ int
 _rtld_relro(const Obj_Entry *obj, bool wantmain)
 {
 #ifdef GNU_RELRO
-	if (obj->relro_size == 0)
+	/*
+	 * If our VM page size is larger than the page size used by the
+	 * linker when laying out the object, we could end up making data
+	 * read-only that is unintended.  Detect and avoid this situation.
+	 * It may mean we are unable to protect everything we'd like, but
+	 * it's better than crashing.
+	 */
+	uintptr_t relro_end = (uintptr_t)obj->relro_page + obj->relro_size;
+	uintptr_t relro_start = round_down((uintptr_t)obj->relro_page);
+	assert(relro_end >= relro_start);
+	size_t relro_size = round_down(relro_end) - relro_start;
+
+	if (relro_size == 0)
 		return 0;
 	if (wantmain != (obj ==_rtld_objmain))
 		return 0;
 
-	dbg(("RELRO %s %p %zx\n", obj->path, obj->relro_page, obj->relro_size));
-	if (mprotect(obj->relro_page, obj->relro_size, PROT_READ) == -1) {
+	dbg(("RELRO %s %p %zx\n", obj->path, (void *)relro_start, relro_size));
+	if (mprotect((void *)relro_start, relro_size, PROT_READ) == -1) {
 		_rtld_error("%s: Cannot enforce relro " "protection: %s",
 		    obj->path, xstrerror(errno));
 		return -1;
