tree:   https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/mm
head:   3950746d9d8ef981c1cb842384e0e86e8d1aad76
commit: c7b6f29b6257532792fc722b68fcc0e00b5a856c [14/35] bpf: Fail bpf_probe_write_user() while mm is switched
config: s390-defconfig (attached as .config)
compiler: s390x-linux-gnu-gcc (Debian 7.2.0-11) 7.2.0
reproduce:
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        git checkout c7b6f29b6257532792fc722b68fcc0e00b5a856c
        # save the attached .config to linux build tree
        GCC_VERSION=7.2.0 make.cross ARCH=s390 

If you fix the issue, kindly add the following tag:
Reported-by: kbuild test robot <l...@intel.com>

All errors (new ones prefixed by >>):

   In file included from include/linux/kernel.h:11:0,
                    from kernel/trace/bpf_trace.c:5:
   kernel/trace/bpf_trace.c: In function '____bpf_probe_write_user':
>> kernel/trace/bpf_trace.c:179:16: error: implicit declaration of function 'nmi_uaccess_okay'; did you mean '__access_ok'? [-Werror=implicit-function-declaration]
     if (unlikely(!nmi_uaccess_okay()))
                   ^
   include/linux/compiler.h:77:42: note: in definition of macro 'unlikely'
    # define unlikely(x) __builtin_expect(!!(x), 0)
                                             ^
   cc1: some warnings being treated as errors
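
The helper's new nmi_uaccess_okay() check is only visible on x86, where the
function is defined in arch/x86/include/asm/tlbflush.h; s390 (and every other
architecture) has no declaration in scope, hence the implicit-declaration
error. A minimal sketch of one possible fix, assuming the generic-header
route (bpf_trace.c already pulls in <asm/tlb.h>, see line 17 below): give
include/asm-generic/tlb.h an overridable default so non-x86 builds fall back
to "always okay".

        /*
         * Sketch only: architectures that switch mm from NMI-reachable
         * context (currently x86 with its temporary text-poke mm) override
         * this in their arch headers; everyone else defaults to true.
         */
        #ifndef nmi_uaccess_okay
        # define nmi_uaccess_okay() true
        #endif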

vim +179 kernel/trace/bpf_trace.c

   > 5  #include <linux/kernel.h>
     6  #include <linux/types.h>
     7  #include <linux/slab.h>
     8  #include <linux/bpf.h>
     9  #include <linux/bpf_perf_event.h>
    10  #include <linux/filter.h>
    11  #include <linux/uaccess.h>
    12  #include <linux/ctype.h>
    13  #include <linux/kprobes.h>
    14  #include <linux/syscalls.h>
    15  #include <linux/error-injection.h>
    16  
    17  #include <asm/tlb.h>
    18  
    19  #include "trace_probe.h"
    20  #include "trace.h"
    21  
    22  #ifdef CONFIG_MODULES
    23  struct bpf_trace_module {
    24          struct module *module;
    25          struct list_head list;
    26  };
    27  
    28  static LIST_HEAD(bpf_trace_modules);
    29  static DEFINE_MUTEX(bpf_module_mutex);
    30  
    31  static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
    32  {
    33          struct bpf_raw_event_map *btp, *ret = NULL;
    34          struct bpf_trace_module *btm;
    35          unsigned int i;
    36  
    37          mutex_lock(&bpf_module_mutex);
    38          list_for_each_entry(btm, &bpf_trace_modules, list) {
    39                  for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
    40                          btp = &btm->module->bpf_raw_events[i];
    41                          if (!strcmp(btp->tp->name, name)) {
    42                                  if (try_module_get(btm->module))
    43                                          ret = btp;
    44                                  goto out;
    45                          }
    46                  }
    47          }
    48  out:
    49          mutex_unlock(&bpf_module_mutex);
    50          return ret;
    51  }
    52  #else
    53  static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
    54  {
    55          return NULL;
    56  }
    57  #endif /* CONFIG_MODULES */
    58  
    59  u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
    60  u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
    61  
    62  /**
    63   * trace_call_bpf - invoke BPF program
    64   * @call: tracepoint event
    65   * @ctx: opaque context pointer
    66   *
    67   * kprobe handlers execute BPF programs via this helper.
    68   * Can be used from static tracepoints in the future.
    69   *
    70   * Return: BPF programs always return an integer which is interpreted by
    71   * kprobe handler as:
    72   * 0 - return from kprobe (event is filtered out)
    73   * 1 - store kprobe event into ring buffer
    74   * Other values are reserved and currently alias to 1
    75   */
    76  unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
    77  {
    78          unsigned int ret;
    79  
    80          if (in_nmi()) /* not supported yet */
    81                  return 1;
    82  
    83          preempt_disable();
    84  
    85          if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
    86                  /*
    87                   * since some bpf program is already running on this cpu,
    88                   * don't call into another bpf program (same or different)
    89                   * and don't send kprobe event into ring-buffer,
    90                   * so return zero here
    91                   */
    92                  ret = 0;
    93                  goto out;
    94          }
    95  
    96          /*
    97           * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
    98           * to all call sites, we did a bpf_prog_array_valid() there to check
    99           * whether call->prog_array is empty or not, which is
   100           * a heuristic to speed up execution.
   101           *
   102           * If bpf_prog_array_valid() fetched prog_array was
   103           * non-NULL, we go into trace_call_bpf() and do the actual
   104           * proper rcu_dereference() under RCU lock.
   105           * If it turns out that prog_array is NULL then, we bail out.
   106           * For the opposite, if the bpf_prog_array_valid() fetched pointer
   107           * was NULL, you'll skip the prog_array with the risk of missing
   108           * out of events when it was updated in between this and the
   109           * rcu_dereference() which is accepted risk.
   110           */
   111          ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);
   112  
   113   out:
   114          __this_cpu_dec(bpf_prog_active);
   115          preempt_enable();
   116  
   117          return ret;
   118  }
   119  EXPORT_SYMBOL_GPL(trace_call_bpf);
   120  
   121  #ifdef CONFIG_BPF_KPROBE_OVERRIDE
   122  BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
   123  {
   124          regs_set_return_value(regs, rc);
   125          override_function_with_return(regs);
   126          return 0;
   127  }
   128  
   129  static const struct bpf_func_proto bpf_override_return_proto = {
   130          .func           = bpf_override_return,
   131          .gpl_only       = true,
   132          .ret_type       = RET_INTEGER,
   133          .arg1_type      = ARG_PTR_TO_CTX,
   134          .arg2_type      = ARG_ANYTHING,
   135  };
   136  #endif
   137  
   138  BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
   139  {
   140          int ret;
   141  
   142          ret = probe_kernel_read(dst, unsafe_ptr, size);
   143          if (unlikely(ret < 0))
   144                  memset(dst, 0, size);
   145  
   146          return ret;
   147  }
   148  
   149  static const struct bpf_func_proto bpf_probe_read_proto = {
   150          .func           = bpf_probe_read,
   151          .gpl_only       = true,
   152          .ret_type       = RET_INTEGER,
   153          .arg1_type      = ARG_PTR_TO_UNINIT_MEM,
   154          .arg2_type      = ARG_CONST_SIZE_OR_ZERO,
   155          .arg3_type      = ARG_ANYTHING,
   156  };
   157  
   158  BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src,
   159             u32, size)
   160  {
   161          /*
   162           * Ensure we're in user context which is safe for the helper to
   163           * run. This helper has no business in a kthread.
   164           *
   165           * access_ok() should prevent writing to non-user memory, but in
   166           * some situations (nommu, temporary switch, etc) access_ok() does
   167           * not provide enough validation, hence the check on KERNEL_DS.
   168           *
   169           * nmi_uaccess_okay() ensures the probe is not run in an interim
   170           * state, when the task or mm are switched. This is specifically
   171           * required to prevent the use of temporary mm.
   172           */
   173  
   174          if (unlikely(in_interrupt() ||
   175                       current->flags & (PF_KTHREAD | PF_EXITING)))
   176                  return -EPERM;
   177          if (unlikely(uaccess_kernel()))
   178                  return -EPERM;
 > 179          if (unlikely(!nmi_uaccess_okay()))
   180                  return -EPERM;
   181          if (!access_ok(unsafe_ptr, size))
   182                  return -EPERM;
   183  
   184          return probe_kernel_write(unsafe_ptr, src, size);
   185  }
   186  
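
For reference, each failed check in bpf_probe_write_user() above surfaces to
the tracing program as -EPERM at run time. A hypothetical libbpf-style kprobe
program exercising the helper (attach point, names and buffer handling are
illustrative, not taken from this report):

        // SPDX-License-Identifier: GPL-2.0
        #include <linux/bpf.h>
        #include <bpf/bpf_helpers.h>
        #include <bpf/bpf_tracing.h>

        /* Overwrite the start of the user buffer passed to read(2). If the
         * task is a kthread or exiting, runs under KERNEL_DS, or sits in the
         * middle of an mm/task switch, the helper returns -EPERM instead.
         */
        SEC("kprobe/ksys_read")
        int BPF_KPROBE(write_user_demo, unsigned int fd, char *ubuf, size_t count)
        {
                const char msg[] = "bpf";

                if (count >= sizeof(msg))
                        bpf_probe_write_user(ubuf, msg, sizeof(msg));
                return 0;
        }

        /* bpf_probe_write_user() is gpl_only, so a GPL license is required. */
        char LICENSE[] SEC("license") = "GPL";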

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation
