Move reusable code from map_lookup_elem to helper functions to avoid code
duplication in kernel/bpf/syscall.c

Suggested-by: Stanislav Fomichev <s...@google.com>
Signed-off-by: Brian Vazquez <bria...@google.com>
---
 kernel/bpf/syscall.c | 134 +++++++++++++++++++++++--------------------
 1 file changed, 73 insertions(+), 61 deletions(-)

diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 7713cf39795a4..a1823a50f9be0 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -126,6 +126,76 @@ static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
        return map;
 }
 
+static u32 bpf_map_value_size(struct bpf_map *map)
+{
+       if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+           map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
+           map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
+           map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
+               return round_up(map->value_size, 8) * num_possible_cpus();
+       else if (IS_FD_MAP(map))
+               return sizeof(u32);
+       else
+               return map->value_size;
+}
+
+static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
+                             __u64 flags)
+{
+       void *ptr;
+       int err;
+
+       if (bpf_map_is_dev_bound(map))
+               return bpf_map_offload_lookup_elem(map, key, value);
+
+       preempt_disable();
+       this_cpu_inc(bpf_prog_active);
+       if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+           map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
+               err = bpf_percpu_hash_copy(map, key, value);
+       } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
+               err = bpf_percpu_array_copy(map, key, value);
+       } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
+               err = bpf_percpu_cgroup_storage_copy(map, key, value);
+       } else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
+               err = bpf_stackmap_copy(map, key, value);
+       } else if (IS_FD_ARRAY(map)) {
+               err = bpf_fd_array_map_lookup_elem(map, key, value);
+       } else if (IS_FD_HASH(map)) {
+               err = bpf_fd_htab_map_lookup_elem(map, key, value);
+       } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
+               err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
+       } else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
+                  map->map_type == BPF_MAP_TYPE_STACK) {
+               err = map->ops->map_peek_elem(map, value);
+       } else {
+               rcu_read_lock();
+               if (map->ops->map_lookup_elem_sys_only)
+                       ptr = map->ops->map_lookup_elem_sys_only(map, key);
+               else
+                       ptr = map->ops->map_lookup_elem(map, key);
+               if (IS_ERR(ptr)) {
+                       err = PTR_ERR(ptr);
+               } else if (!ptr) {
+                       err = -ENOENT;
+               } else {
+                       err = 0;
+                       if (flags & BPF_F_LOCK)
+                               /* lock 'ptr' and copy everything but lock */
+                               copy_map_value_locked(map, value, ptr, true);
+                       else
+                               copy_map_value(map, value, ptr);
+                       /* mask lock, since value wasn't zero inited */
+                       check_and_init_map_lock(map, value);
+               }
+               rcu_read_unlock();
+       }
+       this_cpu_dec(bpf_prog_active);
+       preempt_enable();
+
+       return err;
+}
+
 void *bpf_map_area_alloc(size_t size, int numa_node)
 {
        /* We really just want to fail instead of triggering OOM killer
@@ -729,7 +799,7 @@ static int map_lookup_elem(union bpf_attr *attr)
        void __user *uvalue = u64_to_user_ptr(attr->value);
        int ufd = attr->map_fd;
        struct bpf_map *map;
-       void *key, *value, *ptr;
+       void *key, *value;
        u32 value_size;
        struct fd f;
        int err;
@@ -761,72 +831,14 @@ static int map_lookup_elem(union bpf_attr *attr)
                goto err_put;
        }
 
-       if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
-           map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
-           map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
-           map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
-               value_size = round_up(map->value_size, 8) * num_possible_cpus();
-       else if (IS_FD_MAP(map))
-               value_size = sizeof(u32);
-       else
-               value_size = map->value_size;
+       value_size = bpf_map_value_size(map);
 
        err = -ENOMEM;
        value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
        if (!value)
                goto free_key;
 
-       if (bpf_map_is_dev_bound(map)) {
-               err = bpf_map_offload_lookup_elem(map, key, value);
-               goto done;
-       }
-
-       preempt_disable();
-       this_cpu_inc(bpf_prog_active);
-       if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
-           map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
-               err = bpf_percpu_hash_copy(map, key, value);
-       } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
-               err = bpf_percpu_array_copy(map, key, value);
-       } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
-               err = bpf_percpu_cgroup_storage_copy(map, key, value);
-       } else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
-               err = bpf_stackmap_copy(map, key, value);
-       } else if (IS_FD_ARRAY(map)) {
-               err = bpf_fd_array_map_lookup_elem(map, key, value);
-       } else if (IS_FD_HASH(map)) {
-               err = bpf_fd_htab_map_lookup_elem(map, key, value);
-       } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
-               err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
-       } else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
-                  map->map_type == BPF_MAP_TYPE_STACK) {
-               err = map->ops->map_peek_elem(map, value);
-       } else {
-               rcu_read_lock();
-               if (map->ops->map_lookup_elem_sys_only)
-                       ptr = map->ops->map_lookup_elem_sys_only(map, key);
-               else
-                       ptr = map->ops->map_lookup_elem(map, key);
-               if (IS_ERR(ptr)) {
-                       err = PTR_ERR(ptr);
-               } else if (!ptr) {
-                       err = -ENOENT;
-               } else {
-                       err = 0;
-                       if (attr->flags & BPF_F_LOCK)
-                               /* lock 'ptr' and copy everything but lock */
-                               copy_map_value_locked(map, value, ptr, true);
-                       else
-                               copy_map_value(map, value, ptr);
-                       /* mask lock, since value wasn't zero inited */
-                       check_and_init_map_lock(map, value);
-               }
-               rcu_read_unlock();
-       }
-       this_cpu_dec(bpf_prog_active);
-       preempt_enable();
-
-done:
+       err = bpf_map_copy_value(map, key, value, attr->flags);
        if (err)
                goto free_value;
 
-- 
2.22.0.410.gd8fdbe21b5-goog

Reply via email to