On 23.03.21 08:23, Dmitry Vyukov wrote:
>> diff --git a/kernel/kcov.c b/kernel/kcov.c
>> index 80bfe71bbe13..1f727043146a 100644
>> --- a/kernel/kcov.c
>> +++ b/kernel/kcov.c
>> @@ -24,6 +24,7 @@
>> #include <linux/refcount.h>
>> #include <linux/log2.h>
>> #include <asm/setup.h>
>> +#include <asm/sections.h>
>
> Is this for __always_inline?
> __always_inline is defined in include/linux/compiler_types.h.
>
This is for the symbols marking the start and end of the text segment
(_stext/_etext).
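Roughly, for anyone unfamiliar with them (a sketch, not the patch code;
pc_to_bit_index is a hypothetical helper mirroring the index computation
in __sanitizer_cov_trace_pc below, minus canonicalize_ip()):

    /* Provided by the linker script, declared via asm/sections.h. */
    extern char _stext[], _etext[];

    /*
     * The unique-PC mode maps every address in [_stext, _etext) to
     * one bit in the coverage area; dividing by 4 assumes a minimum
     * instruction alignment of four bytes.
     */
    static unsigned long pc_to_bit_index(unsigned long ip)
    {
            return (ip - (unsigned long)_stext) / 4;
    }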
>
>>
>> #define kcov_debug(fmt, ...) pr_debug("%s: " fmt, __func__, ##__VA_ARGS__)
>>
>> @@ -151,10 +152,8 @@ static void kcov_remote_area_put(struct kcov_remote_area *area,
>> list_add(&area->list, &kcov_remote_areas);
>> }
>>
>> -static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
>> +static __always_inline notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t, unsigned int *mode)
>> {
>> - unsigned int mode;
>> -
>> /*
>> * We are interested in code coverage as a function of a syscall inputs,
>> * so we ignore code executed in interrupts, unless we are in a remote
>> @@ -162,7 +161,7 @@ static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_stru
>> */
>> if (!in_task() && !(in_serving_softirq() && t->kcov_softirq))
>> return false;
>> - mode = READ_ONCE(t->kcov_mode);
>> + *mode = READ_ONCE(t->kcov_mode);
>> /*
>> * There is some code that runs in interrupts but for which
>> * in_interrupt() returns false (e.g. preempt_schedule_irq()).
>> @@ -171,7 +170,7 @@ static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_stru
>> * kcov_start().
>> */
>> barrier();
>> - return mode == needed_mode;
>> + return ((int)(*mode & (KCOV_IN_CTXSW | needed_mode))) > 0;
>
> This logic and the rest of the patch looks good to me.
>
> Thanks
Thx.
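For the archive, the idea behind the new return expression: being in a
context switch has to force the masked value negative, which works when
KCOV_IN_CTXSW occupies the sign bit. A minimal sketch of the same check
(mode_enabled is just an illustrative name, and the 1U << 31 value is an
assumption; the quoted hunks don't show the actual definition):

    #define KCOV_IN_CTXSW (1U << 31)    /* assumed: the sign bit */

    static bool mode_enabled(unsigned int mode, unsigned int needed_mode)
    {
            /*
             * In a context switch bit 31 is set, so the (int) cast
             * makes the result negative and "> 0" fails. Otherwise
             * the result is positive iff one of the needed mode bits
             * is set in mode.
             */
            return ((int)(mode & (KCOV_IN_CTXSW | needed_mode))) > 0;
    }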
>
>> }
>>
>> static notrace unsigned long canonicalize_ip(unsigned long ip)
>> @@ -191,18 +190,27 @@ void notrace __sanitizer_cov_trace_pc(void)
>> struct task_struct *t;
>> unsigned long *area;
>> unsigned long ip = canonicalize_ip(_RET_IP_);
>> - unsigned long pos;
>> + unsigned long pos, idx;
>> + unsigned int mode;
>>
>> t = current;
>> - if (!check_kcov_mode(KCOV_MODE_TRACE_PC, t))
>> + if (!check_kcov_mode(KCOV_MODE_TRACE_PC | KCOV_MODE_UNIQUE_PC, t, &mode))
>> return;
>>
>> area = t->kcov_area;
>> - /* The first 64-bit word is the number of subsequent PCs. */
>> - pos = READ_ONCE(area[0]) + 1;
>> - if (likely(pos < t->kcov_size)) {
>> - area[pos] = ip;
>> - WRITE_ONCE(area[0], pos);
>> + if (likely(mode == KCOV_MODE_TRACE_PC)) {
>> + /* The first 64-bit word is the number of subsequent PCs. */
>> + pos = READ_ONCE(area[0]) + 1;
>> + if (likely(pos < t->kcov_size)) {
>> + area[pos] = ip;
>> + WRITE_ONCE(area[0], pos);
>> + }
>> + } else {
>> + idx = (ip - canonicalize_ip((unsigned long)&_stext)) / 4;
>> + pos = idx % BITS_PER_LONG;
>> + idx /= BITS_PER_LONG;
>> + if (likely(idx < t->kcov_size))
>> + WRITE_ONCE(area[idx], READ_ONCE(area[idx]) | 1L << pos);
>> }
>> }
>> EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
>> @@ -213,9 +221,10 @@ static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
>> struct task_struct *t;
>> u64 *area;
>> u64 count, start_index, end_pos, max_pos;
>> + unsigned int mode;
>>
>> t = current;
>> - if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t))
>> + if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t, &mode))
>> return;
>>
>> ip = canonicalize_ip(ip);
>> @@ -362,7 +371,7 @@ void kcov_task_init(struct task_struct *t)
>> static void kcov_reset(struct kcov *kcov)
>> {
>> kcov->t = NULL;
>> - kcov->mode = KCOV_MODE_INIT;
>> + kcov->mode = KCOV_MODE_INIT_TRACE;
>> kcov->remote = false;
>> kcov->remote_size = 0;
>> kcov->sequence++;
>> @@ -468,12 +477,13 @@ static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
>>
>> spin_lock_irqsave(&kcov->lock, flags);
>> size = kcov->size * sizeof(unsigned long);
>> - if (kcov->mode != KCOV_MODE_INIT || vma->vm_pgoff != 0 ||
>> + if (kcov->mode & ~(KCOV_INIT_TRACE | KCOV_INIT_UNIQUE) || vma->vm_pgoff != 0 ||
>> vma->vm_end - vma->vm_start != size) {
>> res = -EINVAL;
>> goto exit;
>> }
>> if (!kcov->area) {
>> + kcov_debug("mmap(): Allocating 0x%lx bytes\n", size);
>> kcov->area = area;
>> vma->vm_flags |= VM_DONTEXPAND;
>> spin_unlock_irqrestore(&kcov->lock, flags);
>> @@ -515,6 +525,8 @@ static int kcov_get_mode(unsigned long arg)
>> {
>> if (arg == KCOV_TRACE_PC)
>> return KCOV_MODE_TRACE_PC;
>> + else if (arg == KCOV_UNIQUE_PC)
>> + return KCOV_MODE_UNIQUE_PC;
>> else if (arg == KCOV_TRACE_CMP)
>> #ifdef CONFIG_KCOV_ENABLE_COMPARISONS
>> return KCOV_MODE_TRACE_CMP;
>> @@ -562,12 +574,14 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
>> {
>> struct task_struct *t;
>> unsigned long size, unused;
>> - int mode, i;
>> + int mode, i, text_size, ret = 0;
>> struct kcov_remote_arg *remote_arg;
>> struct kcov_remote *remote;
>> unsigned long flags;
>>
>> switch (cmd) {
>> + case KCOV_INIT_UNIQUE:
>> + /* fallthrough here */
>
> Looking at "git log --grep fallthrough", it seems that the modern way
> to say this is to use the fallthrough keyword.
>
> Please run checkpatch, it shows a bunch of other warnings as well:
>
> git diff HEAD^ | scripts/checkpatch.pl -
Yeah. I'll do that.
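Concretely, I'd replace the comment with the fallthrough pseudo-keyword
from include/linux/compiler_attributes.h, roughly like this (a sketch;
the quoted hunk above cuts off before showing which case it falls into):

    switch (cmd) {
    case KCOV_INIT_UNIQUE:
            fallthrough;
    case KCOV_INIT_TRACE:
            /* common initialisation, elided in the hunk above */
            break;
    }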
--
Alexander Lochmann PGP key: 0xBC3EF6FD
Heiliger Weg 72 phone: +49.231.28053964
D-44141 Dortmund mobile: +49.151.15738323