The user-space event fd returned by the perf_event_open() syscall is converted
to a pointer to struct perf_event and stored in the map.

Signed-off-by: kaixu xia <xiaka...@huawei.com>
---
 include/linux/perf_event.h |    2 ++
 kernel/bpf/syscall.c       |   68 ++++++++++++++++++++++++++++++++++++++++++++
 kernel/events/core.c       |   22 ++++++++++++++
 3 files changed, 92 insertions(+)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 2027809..2ea4067 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -641,6 +641,7 @@ extern int perf_event_init_task(struct task_struct *child);
 extern void perf_event_exit_task(struct task_struct *child);
 extern void perf_event_free_task(struct task_struct *task);
 extern void perf_event_delayed_put(struct task_struct *task);
+extern struct perf_event *perf_event_get(unsigned int fd);
 extern void perf_event_print_debug(void);
 extern void perf_pmu_disable(struct pmu *pmu);
 extern void perf_pmu_enable(struct pmu *pmu);
@@ -979,6 +980,13 @@ static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
 static inline void perf_event_exit_task(struct task_struct *child)	{ }
 static inline void perf_event_free_task(struct task_struct *task)	{ }
 static inline void perf_event_delayed_put(struct task_struct *task)	{ }
+/*
+ * !CONFIG_PERF_EVENTS stub: must be "static inline" (a plain static
+ * definition in a header warns as defined-but-unused in every includer)
+ * and must return ERR_PTR() rather than NULL, since callers only check
+ * IS_ERR() and would dereference a NULL return.
+ */
+static inline struct perf_event *perf_event_get(unsigned int fd)	{ return ERR_PTR(-EINVAL); }
 static inline void perf_event_print_debug(void)				{ }
 static inline int perf_event_task_disable(void)				{ return -EINVAL; }
 static inline int perf_event_task_enable(void)				{ return -EINVAL; }
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 4c2d9e6..ac76792 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -17,6 +17,7 @@
 #include <linux/license.h>
 #include <linux/filter.h>
 #include <linux/version.h>
+#include <linux/perf_event.h>
 
 static LIST_HEAD(bpf_map_types);
 
@@ -65,6 +66,24 @@ void bpf_map_put(struct bpf_map *map)
 	}
 }
 
+/*
+ * Release the perf event stashed in one map slot.  Called via
+ * map_traverse_elem() at map release time.  An empty slot (no fd was
+ * ever stored) is not an error: return 0 so traversal keeps going and
+ * every remaining event is still released.
+ */
+static int bpf_map_perf_event_put(void *value)
+{
+	struct perf_event *event;
+
+	event = (struct perf_event *)(*(unsigned long *)value);
+	if (!event)
+		return 0;
+
+	perf_event_release_kernel(event);
+
+	return 0;
+}
+
 static int bpf_map_release(struct inode *inode, struct file *filp)
 {
 	struct bpf_map *map = filp->private_data;
@@ -75,6 +89,18 @@ static int bpf_map_release(struct inode *inode, struct file *filp)
 		 */
 		bpf_prog_array_map_clear(map);
 
+	/*
+	 * Drop the references taken on the stored perf events.  ->release()
+	 * return values are ignored by the VFS, so never bail out before
+	 * bpf_map_put(): an early error return here would leak the map and
+	 * leave rcu_read_lock() held.
+	 */
+	if (map->flags & BPF_MAP_FLAG_PERF_EVENT) {
+		rcu_read_lock();
+		map->ops->map_traverse_elem(bpf_map_perf_event_put, map);
+		rcu_read_unlock();
+	}
+
 	bpf_map_put(map);
 	return 0;
 }
@@ -176,6 +197,16 @@ static int map_lookup_elem(union bpf_attr *attr)
 	if (IS_ERR(map))
 		return PTR_ERR(map);
 
+	if (map->flags & BPF_MAP_FLAG_PERF_EVENT) {
+		/*
+		 * Prevent user space from reading elems of a PMU map.
+		 * Use the err_put path: a bare return here would leak the
+		 * map reference taken by bpf_map_get() above.
+		 */
+		err = -EACCES;
+		goto err_put;
+	}
+
 	err = -ENOMEM;
 	key = kmalloc(map->key_size, GFP_USER);
 	if (!key)
@@ -215,6 +240,58 @@ err_put:
 	return err;
 }
 
+/*
+ * Convert the user-supplied perf event fd in *value into a kernel
+ * struct perf_event pointer, stored back in place.  Returns 0 on
+ * success or a negative errno: -EBADF/-EINVAL from perf_event_get(),
+ * -ENOENT if the event is already being torn down.
+ *
+ * NOTE(review): the event is not pinned between the fdput() inside
+ * perf_event_get() and the refcount increment below, so a racing
+ * close() could free it first; the reference should be taken while
+ * the fd is still held -- confirm before merging.
+ */
+static int replace_map_with_perf_event(void *value)
+{
+	struct perf_event *event;
+	u32 fd = *(u32 *)value;
+
+	event = perf_event_get(fd);
+	if (IS_ERR(event))
+		return PTR_ERR(event);
+
+	/* don't take a new ref on an event whose last ref is dropping */
+	if (!atomic_long_inc_not_zero(&event->refcount))
+		return -ENOENT;
+
+	memcpy(value, &event, sizeof(event));
+
+	return 0;
+}
+
+/*
+ * Return true if the slot at @key already holds a non-zero value, i.e.
+ * a perf event pointer was stored there earlier.  Lookup runs under
+ * RCU like the other map accessors.
+ *
+ * NOTE(review): this check and the subsequent store are not atomic;
+ * two racing updates could both observe "empty" -- confirm that
+ * map_update_elem() callers are serialized.
+ */
+static bool check_map_perf_event_stored(struct bpf_map *map, void *key)
+{
+	void *value;
+	bool is_stored = false;
+
+	rcu_read_lock();
+	value = map->ops->map_lookup_elem(map, key);
+	if (value && (*(unsigned long *)value))
+		is_stored = true;
+	rcu_read_unlock();
+
+	return is_stored;
+}
+
 #define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags
 
 static int map_update_elem(union bpf_attr *attr)
@@ -252,6 +310,22 @@ static int map_update_elem(union bpf_attr *attr)
 	if (copy_from_user(value, uvalue, map->value_size) != 0)
 		goto free_value;
 
+	if (map->flags & BPF_MAP_FLAG_PERF_EVENT) {
+		/* each slot may hold at most one perf event */
+		err = -EINVAL;
+		if (check_map_perf_event_stored(map, key))
+			goto free_value;
+
+		/*
+		 * Propagate the precise failure from
+		 * replace_map_with_perf_event() (-EBADF, -EINVAL or
+		 * -ENOENT) instead of collapsing everything to -EBADF.
+		 */
+		err = replace_map_with_perf_event(value);
+		if (err)
+			goto free_value;
+	}
+
 	/* eBPF program that use maps are running under rcu_read_lock(),
 	 * therefore all map accessors rely on this fact, so do the same here
 	 */
diff --git a/kernel/events/core.c b/kernel/events/core.c
index e965cfa..c4e34b7 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -8582,6 +8582,39 @@ void perf_event_delayed_put(struct task_struct *task)
 		WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
 }
 
+/*
+ * perf_event_get - look up the perf event behind user fd @fd
+ *
+ * Returns the struct perf_event associated with @fd, ERR_PTR(-EBADF)
+ * if @fd is not open, or ERR_PTR(-EINVAL) if it is not a perf event fd.
+ *
+ * NOTE(review): no reference is taken on the event before fdput(), so
+ * the returned pointer is only safe while the fd is known to stay
+ * open; callers currently bump event->refcount themselves -- taking
+ * the reference here, while the fd is still held, would close that
+ * race window.
+ */
+struct perf_event *perf_event_get(unsigned int fd)
+{
+	struct perf_event *event;
+	struct fd f;
+
+	f = fdget(fd);
+
+	if (!f.file)
+		return ERR_PTR(-EBADF);
+
+	if (f.file->f_op != &perf_fops) {
+		fdput(f);
+		return ERR_PTR(-EINVAL);
+	}
+
+	event = f.file->private_data;
+
+	fdput(f);
+
+	return event;
+}
+
 /*
  * inherit a event from parent task to child task:
  */
-- 
1.7.10.4

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to