Use the new map_alloc_check callback to perform allocation checks for
hash maps. The checks move out of htab_map_alloc() into a separate
htab_map_alloc_check(), which fd_htab_map_alloc_check() can reuse;
htab_of_map_alloc() now calls htab_map_alloc() directly, since the
fd-specific value_size check already runs via the callback.
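
For context, the core is expected to invoke the new callback before
->map_alloc(), roughly along these lines (a sketch based on the
callback signatures used below; the actual core change is in an
earlier patch of this series and may differ in detail):

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
        const struct bpf_map_ops *ops = bpf_map_types[attr->map_type];
        int err;

        if (ops->map_alloc_check) {
                /* validate attributes before any allocation happens */
                err = ops->map_alloc_check(attr);
                if (err)
                        return ERR_PTR(err);
        }
        return ops->map_alloc(attr);
}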

Signed-off-by: Jakub Kicinski <jakub.kicin...@netronome.com>
Reviewed-by: Quentin Monnet <quentin.mon...@netronome.com>
---
 kernel/bpf/hashtab.c | 55 +++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 39 insertions(+), 16 deletions(-)
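
Note for reviewers: user-visible behaviour is unchanged, only the
point where bad attributes are rejected moves. For example, an
oversized key is still refused with -E2BIG, now from
htab_map_alloc_check(). A minimal userspace sketch using the raw
bpf(2) syscall (513 exceeds MAX_BPF_STACK, which is 512):

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.map_type    = BPF_MAP_TYPE_HASH;
        attr.key_size    = 513; /* > MAX_BPF_STACK (512) -> -E2BIG */
        attr.value_size  = 4;
        attr.max_entries = 1;

        /* fails with errno == E2BIG, before and after this patch */
        return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}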

diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index b80f42adf068..7fd6519444d3 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -227,7 +227,7 @@ static int alloc_extra_elems(struct bpf_htab *htab)
 }
 
 /* Called from syscall */
-static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
+static int htab_map_alloc_check(union bpf_attr *attr)
 {
        bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
                       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
@@ -241,9 +241,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
        bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
        bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
        int numa_node = bpf_map_attr_numa_node(attr);
-       struct bpf_htab *htab;
-       int err, i;
-       u64 cost;
 
        BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
                     offsetof(struct htab_elem, hash_node.pprev));
@@ -254,33 +251,33 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
                /* LRU implementation is much complicated than other
                 * maps.  Hence, limit to CAP_SYS_ADMIN for now.
                 */
-               return ERR_PTR(-EPERM);
+               return -EPERM;
 
        if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK)
                /* reserved bits should not be used */
-               return ERR_PTR(-EINVAL);
+               return -EINVAL;
 
        if (!lru && percpu_lru)
-               return ERR_PTR(-EINVAL);
+               return -EINVAL;
 
        if (lru && !prealloc)
-               return ERR_PTR(-ENOTSUPP);
+               return -ENOTSUPP;
 
        if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru))
-               return ERR_PTR(-EINVAL);
+               return -EINVAL;
 
        /* check sanity of attributes.
         * value_size == 0 may be allowed in the future to use map as a set
         */
        if (attr->max_entries == 0 || attr->key_size == 0 ||
            attr->value_size == 0)
-               return ERR_PTR(-EINVAL);
+               return -EINVAL;
 
        if (attr->key_size > MAX_BPF_STACK)
                /* eBPF programs initialize keys on stack, so they cannot be
                 * larger than max stack size
                 */
-               return ERR_PTR(-E2BIG);
+               return -E2BIG;
 
        if (attr->value_size >= KMALLOC_MAX_SIZE -
            MAX_BPF_STACK - sizeof(struct htab_elem))
@@ -289,7 +286,28 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
                 * sure that the elem_size doesn't overflow and it's
                 * kmalloc-able later in htab_map_update_elem()
                 */
-               return ERR_PTR(-E2BIG);
+               return -E2BIG;
+
+       return 0;
+}
+
+static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
+{
+       bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+                      attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
+       bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
+                   attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
+       /* percpu_lru means each cpu has its own LRU list.
+        * it is different from BPF_MAP_TYPE_PERCPU_HASH where
+        * the map's value itself is percpu.  percpu_lru has
+        * nothing to do with the map's value.
+        */
+       bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
+       bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
+       int numa_node = bpf_map_attr_numa_node(attr);
+       struct bpf_htab *htab;
+       int err, i;
+       u64 cost;
 
        htab = kzalloc(sizeof(*htab), GFP_USER);
        if (!htab)
@@ -1142,6 +1160,7 @@ static void htab_map_free(struct bpf_map *map)
 }
 
 const struct bpf_map_ops htab_map_ops = {
+       .map_alloc_check = htab_map_alloc_check,
        .map_alloc = htab_map_alloc,
        .map_free = htab_map_free,
        .map_get_next_key = htab_map_get_next_key,
@@ -1152,6 +1171,7 @@ const struct bpf_map_ops htab_map_ops = {
 };
 
 const struct bpf_map_ops htab_lru_map_ops = {
+       .map_alloc_check = htab_map_alloc_check,
        .map_alloc = htab_map_alloc,
        .map_free = htab_map_free,
        .map_get_next_key = htab_map_get_next_key,
@@ -1235,6 +1255,7 @@ int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
 }
 
 const struct bpf_map_ops htab_percpu_map_ops = {
+       .map_alloc_check = htab_map_alloc_check,
        .map_alloc = htab_map_alloc,
        .map_free = htab_map_free,
        .map_get_next_key = htab_map_get_next_key,
@@ -1244,6 +1265,7 @@ const struct bpf_map_ops htab_percpu_map_ops = {
 };
 
 const struct bpf_map_ops htab_lru_percpu_map_ops = {
+       .map_alloc_check = htab_map_alloc_check,
        .map_alloc = htab_map_alloc,
        .map_free = htab_map_free,
        .map_get_next_key = htab_map_get_next_key,
@@ -1252,11 +1274,11 @@ const struct bpf_map_ops htab_lru_percpu_map_ops = {
        .map_delete_elem = htab_lru_map_delete_elem,
 };
 
-static struct bpf_map *fd_htab_map_alloc(union bpf_attr *attr)
+static int fd_htab_map_alloc_check(union bpf_attr *attr)
 {
        if (attr->value_size != sizeof(u32))
-               return ERR_PTR(-EINVAL);
-       return htab_map_alloc(attr);
+               return -EINVAL;
+       return htab_map_alloc_check(attr);
 }
 
 static void fd_htab_map_free(struct bpf_map *map)
@@ -1327,7 +1349,7 @@ static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr)
        if (IS_ERR(inner_map_meta))
                return inner_map_meta;
 
-       map = fd_htab_map_alloc(attr);
+       map = htab_map_alloc(attr);
        if (IS_ERR(map)) {
                bpf_map_meta_free(inner_map_meta);
                return map;
@@ -1371,6 +1393,7 @@ static void htab_of_map_free(struct bpf_map *map)
 }
 
 const struct bpf_map_ops htab_of_maps_map_ops = {
+       .map_alloc_check = fd_htab_map_alloc_check,
        .map_alloc = htab_of_map_alloc,
        .map_free = htab_of_map_free,
        .map_get_next_key = htab_map_get_next_key,
-- 
2.15.1
