On Mon, Apr 01, 2019 at 10:58:19AM +0200, Daniel Bristot de Oliveira wrote:
> diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
> index 8aa65fbbd764..ab75b222a7e2 100644
> --- a/arch/x86/kernel/jump_label.c
> +++ b/arch/x86/kernel/jump_label.c
> @@ -15,6 +15,7 @@
>  #include <asm/kprobes.h>
>  #include <asm/alternative.h>
>  #include <asm/text-patching.h>
> +#include <linux/slab.h>
>  
>  union jump_code_union {
>       char code[JUMP_LABEL_NOP_SIZE];
> @@ -111,6 +112,93 @@ void arch_jump_label_transform(struct jump_entry *entry,
>       mutex_unlock(&text_mutex);
>  }
>  
> +unsigned int entry_vector_max_elem __read_mostly;
> +struct text_patch_loc *entry_vector;
> +unsigned int entry_vector_nr_elem;
> +
> +void arch_jump_label_init(void)
> +{
> +     entry_vector = (void *) __get_free_page(GFP_KERNEL);
> +
> +     if (WARN_ON_ONCE(!entry_vector))
> +             return;
> +
> +     entry_vector_max_elem = PAGE_SIZE / sizeof(struct text_patch_loc);
> +     return;
> +}
> +
> +int arch_jump_label_transform_queue(struct jump_entry *entry,
> +                                  enum jump_label_type type)
> +{
> +     struct text_patch_loc *tp;
> +     void *entry_code;
> +
> +     /*
> +      * Batch mode is disabled until we are able to allocate memory:
> +      * fall back to the non-batching mode.
> +      */
> +     if (unlikely(!entry_vector_max_elem)) {
> +             if (!slab_is_available() || early_boot_irqs_disabled)

See, the thing is, you never use slab, so that slab check is completely
wrong.
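
That is, the only allocation in the whole patch is:

	entry_vector = (void *) __get_free_page(GFP_KERNEL);

which goes through the page allocator; slab_is_available() is testing
an allocator this code never touches.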

> +                     goto fallback;
> +
> +             arch_jump_label_init();
> +     }
> +
> +     /*
> +      * No more space in the vector: tell the upper layer to apply
> +      * the queue before continuing.
> +      */
> +     if (entry_vector_nr_elem == entry_vector_max_elem)
> +             return -ENOSPC;
> +
> +     tp = &entry_vector[entry_vector_nr_elem];
> +
> +     entry_code = (void *)jump_entry_code(entry);
> +
> +     /*
> +      * The int3 handler will do a bsearch in the queue, so we need entries
> +      * to be sorted. We can survive an unsorted list by rejecting the entry,
> +      * forcing the generic jump_label code to apply the queue. Warn once to
> +      * draw attention to the unsorted case, which had better not happen:
> +      * in the worst case we perform just as we would without batching,
> +      * plus some extra overhead.
> +      */
> +     if (entry_vector_nr_elem > 0) {
> +             int prev_idx = entry_vector_nr_elem - 1;
> +             struct text_patch_loc *prev_tp = &entry_vector[prev_idx];
> +
> +             if (WARN_ON_ONCE(prev_tp->addr > entry_code))
> +                     return -EINVAL;
> +     }
> +
> +     __jump_label_set_jump_code(entry, type,
> +                                (union jump_code_union *) &tp->opcode, 0);
> +
> +     tp->addr = entry_code;
> +     tp->detour = entry_code + JUMP_LABEL_NOP_SIZE;
> +     tp->len = JUMP_LABEL_NOP_SIZE;
> +
> +     entry_vector_nr_elem++;
> +
> +     return 0;
> +
> +fallback:
> +     arch_jump_label_transform(entry, type);
> +     return 0;
> +}

So how about we do something like:

+static struct bp_patching_desc {
+       int nr_entries;
+       struct text_patch_loc vec[PAGE_SIZE / sizeof(struct text_patch_loc)];
+} bp_patching;

and call it a day?

Then we have static storage, no allocation, no fail paths.
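
Your arch_jump_label_transform_queue() then reduces to something like
the below (completely untested, and assuming the text_patch_loc layout
from your patch): no init function, no fallback, no early-boot special
cases.

int arch_jump_label_transform_queue(struct jump_entry *entry,
				    enum jump_label_type type)
{
	struct text_patch_loc *tp;
	void *entry_code;

	/* No more room in the vector; have the caller flush the queue. */
	if (bp_patching.nr_entries == ARRAY_SIZE(bp_patching.vec))
		return -ENOSPC;

	tp = &bp_patching.vec[bp_patching.nr_entries];
	entry_code = (void *)jump_entry_code(entry);

	/*
	 * The int3 handler bsearches the vector, so entries must stay
	 * sorted; reject an out-of-order entry and let the generic code
	 * flush the queue and retry.
	 */
	if (bp_patching.nr_entries &&
	    WARN_ON_ONCE(tp[-1].addr > entry_code))
		return -EINVAL;

	__jump_label_set_jump_code(entry, type,
				   (union jump_code_union *)&tp->opcode, 0);

	tp->addr = entry_code;
	tp->detour = entry_code + JUMP_LABEL_NOP_SIZE;
	tp->len = JUMP_LABEL_NOP_SIZE;

	bp_patching.nr_entries++;

	return 0;
}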

Also note that I removed that whole in_progress thing, as that is
completely redundant vs !!nr_entries.
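
IOW, assuming this all lives next to poke_int3_handler() in
alternative.c, the handler's early exit simply becomes:

	if (likely(!bp_patching.nr_entries))
		return 0;

and the separate flag can go away.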
