Re: [PATCH bpf-next v3 2/3] bpf: do not walk twice the hash map on free

2024-04-30 Thread Kumar Kartikeya Dwivedi
On Tue, 30 Apr 2024 at 12:44, Benjamin Tissoires  wrote:
>
> If someone stores both a timer and a workqueue in a hash map, on free, we
> would walk it twice.
> Add a check in htab_free_malloced_timers_or_wq and free the timers
> and workqueues if they are present.
>
> Fixes: 246331e3f1ea ("bpf: allow struct bpf_wq to be embedded in arraymaps and hashmaps")
> Signed-off-by: Benjamin Tissoires 
>
> ---

I had forgotten how the extra_elems logic is working, turns out
everything is in the preallocated elems array and per-cpu extra_elems
stores the *pointer* to them for recycling without hitting the pcpu
allocator, so should be fine (just in case this confuses anyone else).

>
> changes in v3:
> - fix unloading of bpf_wq, again
>
> changes in v2:
> - fix wq being not freed (and static call not used)
> ---

Acked-by: Kumar Kartikeya Dwivedi 



[PATCH bpf-next v3 2/3] bpf: do not walk twice the hash map on free

2024-04-30 Thread Benjamin Tissoires
If someone stores both a timer and a workqueue in a hash map, on free, we
would walk it twice.
Add a check in htab_free_malloced_timers_or_wq and free the timers
and workqueues if they are present.

Fixes: 246331e3f1ea ("bpf: allow struct bpf_wq to be embedded in arraymaps and hashmaps")
Signed-off-by: Benjamin Tissoires 

---

changes in v3:
- fix unloading of bpf_wq, again

changes in v2:
- fix wq being not freed (and static call not used)
---
 kernel/bpf/hashtab.c | 49 +
 1 file changed, 13 insertions(+), 36 deletions(-)

diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 0179183c543a..06115f8728e8 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -221,13 +221,11 @@ static bool htab_has_extra_elems(struct bpf_htab *htab)
return !htab_is_percpu(htab) && !htab_is_lru(htab);
 }
 
-static void htab_free_prealloced_timers(struct bpf_htab *htab)
+static void htab_free_prealloced_timers_and_wq(struct bpf_htab *htab)
 {
u32 num_entries = htab->map.max_entries;
int i;
 
-   if (!btf_record_has_field(htab->map.record, BPF_TIMER))
-   return;
if (htab_has_extra_elems(htab))
num_entries += num_possible_cpus();
 
@@ -235,27 +233,12 @@ static void htab_free_prealloced_timers(struct bpf_htab *htab)
struct htab_elem *elem;
 
elem = get_htab_elem(htab, i);
-   bpf_obj_free_timer(htab->map.record, elem->key + round_up(htab->map.key_size, 8));
-   cond_resched();
-   }
-}
-
-static void htab_free_prealloced_wq(struct bpf_htab *htab)
-{
-   u32 num_entries = htab->map.max_entries;
-   int i;
-
-   if (!btf_record_has_field(htab->map.record, BPF_WORKQUEUE))
-   return;
-   if (htab_has_extra_elems(htab))
-   num_entries += num_possible_cpus();
-
-   for (i = 0; i < num_entries; i++) {
-   struct htab_elem *elem;
-
-   elem = get_htab_elem(htab, i);
-   bpf_obj_free_workqueue(htab->map.record,
-  elem->key + round_up(htab->map.key_size, 8));
+   if (btf_record_has_field(htab->map.record, BPF_TIMER))
+   bpf_obj_free_timer(htab->map.record,
+  elem->key + round_up(htab->map.key_size, 8));
+   if (btf_record_has_field(htab->map.record, BPF_WORKQUEUE))
+   bpf_obj_free_workqueue(htab->map.record,
+  elem->key + round_up(htab->map.key_size, 8));
cond_resched();
}
 }
@@ -1515,7 +1498,7 @@ static void delete_all_elements(struct bpf_htab *htab)
migrate_enable();
 }
 
-static void htab_free_malloced_timers_or_wq(struct bpf_htab *htab, bool is_timer)
+static void htab_free_malloced_timers_and_wq(struct bpf_htab *htab)
 {
int i;
 
@@ -1527,10 +1510,10 @@ static void htab_free_malloced_timers_or_wq(struct bpf_htab *htab, bool is_timer
 
hlist_nulls_for_each_entry(l, n, head, hash_node) {
/* We only free timer on uref dropping to zero */
-   if (is_timer)
+   if (btf_record_has_field(htab->map.record, BPF_TIMER))
bpf_obj_free_timer(htab->map.record,
   l->key + round_up(htab->map.key_size, 8));
-   else
+   if (btf_record_has_field(htab->map.record, BPF_WORKQUEUE))
bpf_obj_free_workqueue(htab->map.record,
   l->key + round_up(htab->map.key_size, 8));
}
@@ -1544,17 +1527,11 @@ static void htab_map_free_timers_and_wq(struct bpf_map *map)
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
 
/* We only free timer and workqueue on uref dropping to zero */
-   if (btf_record_has_field(htab->map.record, BPF_TIMER)) {
-   if (!htab_is_prealloc(htab))
-   htab_free_malloced_timers_or_wq(htab, true);
-   else
-   htab_free_prealloced_timers(htab);
-   }
-   if (btf_record_has_field(htab->map.record, BPF_WORKQUEUE)) {
+   if (btf_record_has_field(htab->map.record, BPF_TIMER | BPF_WORKQUEUE)) {
if (!htab_is_prealloc(htab))
-   htab_free_malloced_timers_or_wq(htab, false);
+   htab_free_malloced_timers_and_wq(htab);
else
-   htab_free_prealloced_wq(htab);
+   htab_free_prealloced_timers_and_wq(htab);
}
 }
 

-- 
2.44.0