Use the new VM_HAS_SPECIAL_PERMS flag to handle freeing of specially
permissioned memory in vmalloc, and remove the places where memory was set
back to RW before freeing, since that is no longer needed. We also no longer
need a bit to track whether the memory is RO, because vmalloc now tracks it.
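
For reference, a minimal sketch of how set_vm_special() might mark the
backing allocation (the helper and the VM_HAS_SPECIAL_PERMS flag are
introduced earlier in this series; this sketch is only an assumption of the
implementation, not part of this patch):

	/* Sketch only: tag the vmalloc area so that vfree() knows to
	 * reset permissions before freeing the pages.
	 */
	static inline void set_vm_special(void *addr)
	{
		struct vm_struct *vm = find_vm_area(addr);

		if (vm)
			vm->flags |= VM_HAS_SPECIAL_PERMS;
	}

With the area marked, vfree() can restore default permissions itself, so
callers such as bpf_jit_free() no longer need a set_memory_rw() pass first.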

Cc: Daniel Borkmann <[email protected]>
Cc: Alexei Starovoitov <[email protected]>
Signed-off-by: Rick Edgecombe <[email protected]>
---
 include/linux/filter.h | 16 +++-------------
 kernel/bpf/core.c      |  1 -
 2 files changed, 3 insertions(+), 14 deletions(-)

diff --git a/include/linux/filter.h b/include/linux/filter.h
index 9cdfab7f383c..cc9581dd9c58 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -20,6 +20,7 @@
 #include <linux/set_memory.h>
 #include <linux/kallsyms.h>
 #include <linux/if_vlan.h>
+#include <linux/vmalloc.h>
 
 #include <net/sch_generic.h>
 
@@ -483,7 +484,6 @@ struct bpf_prog {
        u16                     pages;          /* Number of allocated pages */
        u16                     jited:1,        /* Is our filter JIT'ed? */
                                jit_requested:1,/* archs need to JIT the prog */
-                               undo_set_mem:1, /* Passed set_memory_ro() checkpoint */
                                gpl_compatible:1, /* Is filter GPL compatible? */
                                cb_access:1,    /* Is control block accessed? */
                                dst_needed:1,   /* Do we need dst entry? */
@@ -681,26 +681,17 @@ bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
 
 static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
 {
+       set_vm_special(fp);
        set_memory_ro((unsigned long)fp, fp->pages);
 }
 
-static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
-{
-       if (fp->undo_set_mem)
-               set_memory_rw((unsigned long)fp, fp->pages);
-}
-
 static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
 {
+       set_vm_special(hdr);
        set_memory_ro((unsigned long)hdr, hdr->pages);
        set_memory_x((unsigned long)hdr, hdr->pages);
 }
 
-static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
-{
-       set_memory_rw((unsigned long)hdr, hdr->pages);
-}
-
 static inline struct bpf_binary_header *
 bpf_jit_binary_hdr(const struct bpf_prog *fp)
 {
@@ -735,7 +726,6 @@ void __bpf_prog_free(struct bpf_prog *fp);
 
 static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
 {
-       bpf_prog_unlock_ro(fp);
        __bpf_prog_free(fp);
 }
 
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 19c49313c709..465c1c3623e8 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -804,7 +804,6 @@ void __weak bpf_jit_free(struct bpf_prog *fp)
        if (fp->jited) {
                struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
 
-               bpf_jit_binary_unlock_ro(hdr);
                bpf_jit_binary_free(hdr);
 
                WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
-- 
2.17.1
