This is a preparatory commit for the upcoming addition of a new hardware tag-based (MTE-based) KASAN mode.
Rework print_memory_metadata() to make it agnostic with regard to the
way metadata is stored. Allow providing a separate metadata_fetch_row()
implementation for each KASAN mode. Hardware tag-based KASAN will
provide its own implementation that doesn't use shadow memory.

No functional changes for software modes.

Signed-off-by: Andrey Konovalov <andreyk...@google.com>
Signed-off-by: Vincenzo Frascino <vincenzo.frasc...@arm.com>
Reviewed-by: Marco Elver <el...@google.com>
Reviewed-by: Alexander Potapenko <gli...@google.com>
---
Change-Id: I5b0ed1d079ea776e620beca6a529a861e7dced95
---
 mm/kasan/kasan.h          |  8 ++++++
 mm/kasan/report.c         | 56 +++++++++++++++++++--------------------
 mm/kasan/report_generic.c |  5 ++++
 mm/kasan/report_sw_tags.c |  5 ++++
 4 files changed, 45 insertions(+), 29 deletions(-)

diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index c79d30c6fcdb..3b349a6e799d 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -58,6 +58,13 @@
 #define KASAN_ABI_VERSION 1
 #endif
 
+/* Metadata layout customization. */
+#define META_BYTES_PER_BLOCK 1
+#define META_BLOCKS_PER_ROW 16
+#define META_BYTES_PER_ROW (META_BLOCKS_PER_ROW * META_BYTES_PER_BLOCK)
+#define META_MEM_BYTES_PER_ROW (META_BYTES_PER_ROW * KASAN_GRANULE_SIZE)
+#define META_ROWS_AROUND_ADDR 2
+
 struct kasan_access_info {
 	const void *access_addr;
 	const void *first_bad_addr;
@@ -170,6 +177,7 @@ bool check_invalid_free(void *addr);
 
 void *find_first_bad_addr(void *addr, size_t size);
 const char *get_bug_type(struct kasan_access_info *info);
+void metadata_fetch_row(char *buffer, void *row);
 
 #if defined(CONFIG_KASAN_GENERIC) && CONFIG_KASAN_STACK
 void print_address_stack_frame(const void *addr);
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index ab28e350bf39..2c503b667413 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -33,12 +33,6 @@
 #include "kasan.h"
 #include "../slab.h"
 
-/* Metadata layout customization. */
-#define META_BYTES_PER_BLOCK 1
-#define META_BLOCKS_PER_ROW 16
-#define META_BYTES_PER_ROW (META_BLOCKS_PER_ROW * META_BYTES_PER_BLOCK)
-#define META_ROWS_AROUND_ADDR 2
-
 static unsigned long kasan_flags;
 
 #define KASAN_BIT_REPORTED	0
@@ -238,55 +232,59 @@ static void print_address_description(void *addr, u8 tag)
 	print_address_stack_frame(addr);
 }
 
-static bool row_is_guilty(const void *row, const void *guilty)
+static bool meta_row_is_guilty(const void *row, const void *addr)
 {
-	return (row <= guilty) && (guilty < row + META_BYTES_PER_ROW);
+	return (row <= addr) && (addr < row + META_MEM_BYTES_PER_ROW);
 }
 
-static int shadow_pointer_offset(const void *row, const void *shadow)
+static int meta_pointer_offset(const void *row, const void *addr)
 {
-	/* The length of ">ff00ff00ff00ff00: " is
-	 *    3 + (BITS_PER_LONG/8)*2 chars.
+	/*
+	 * Memory state around the buggy address:
+	 *  ff00ff00ff00ff00: 00 00 00 05 fe fe fe fe fe fe fe fe fe fe fe fe
+	 *  ...
+	 *
+	 * The length of ">ff00ff00ff00ff00: " is
+	 *    3 + (BITS_PER_LONG / 8) * 2 chars.
+	 * The length of each granule metadata is 2 bytes
+	 *    plus 1 byte for space.
	 */
-	return 3 + (BITS_PER_LONG/8)*2 + (shadow - row)*2 +
-		(shadow - row) / META_BYTES_PER_BLOCK + 1;
+	return 3 + (BITS_PER_LONG / 8) * 2 +
+		(addr - row) / KASAN_GRANULE_SIZE * 3 + 1;
 }
 
 static void print_memory_metadata(const void *addr)
 {
 	int i;
-	const void *shadow = kasan_mem_to_shadow(addr);
-	const void *shadow_row;
+	void *row;
 
-	shadow_row = (void *)round_down((unsigned long)shadow,
-					META_BYTES_PER_ROW)
-		- META_ROWS_AROUND_ADDR * META_BYTES_PER_ROW;
+	row = (void *)round_down((unsigned long)addr, META_MEM_BYTES_PER_ROW)
+			- META_ROWS_AROUND_ADDR * META_MEM_BYTES_PER_ROW;
 
 	pr_err("Memory state around the buggy address:\n");
 
 	for (i = -META_ROWS_AROUND_ADDR; i <= META_ROWS_AROUND_ADDR; i++) {
-		const void *kaddr = kasan_shadow_to_mem(shadow_row);
-		char buffer[4 + (BITS_PER_LONG/8)*2];
-		char shadow_buf[META_BYTES_PER_ROW];
+		char buffer[4 + (BITS_PER_LONG / 8) * 2];
+		char metadata[META_BYTES_PER_ROW];
 
 		snprintf(buffer, sizeof(buffer),
-				(i == 0) ? ">%px: " : " %px: ", kaddr);
+				(i == 0) ? ">%px: " : " %px: ", row);
+
 		/*
 		 * We should not pass a shadow pointer to generic
 		 * function, because generic functions may try to
 		 * access kasan mapping for the passed address.
 		 */
-		memcpy(shadow_buf, shadow_row, META_BYTES_PER_ROW);
+		metadata_fetch_row(&metadata[0], row);
+
 		print_hex_dump(KERN_ERR, buffer,
 			DUMP_PREFIX_NONE, META_BYTES_PER_ROW, 1,
-			shadow_buf, META_BYTES_PER_ROW, 0);
+			metadata, META_BYTES_PER_ROW, 0);
 
-		if (row_is_guilty(shadow_row, shadow))
-			pr_err("%*c\n",
-			       shadow_pointer_offset(shadow_row, shadow),
-			       '^');
+		if (meta_row_is_guilty(row, addr))
+			pr_err("%*c\n", meta_pointer_offset(row, addr), '^');
 
-		shadow_row += META_BYTES_PER_ROW;
+		row += META_MEM_BYTES_PER_ROW;
 	}
 }
 
diff --git a/mm/kasan/report_generic.c b/mm/kasan/report_generic.c
index 16ed550850e9..8a9c889872da 100644
--- a/mm/kasan/report_generic.c
+++ b/mm/kasan/report_generic.c
@@ -123,6 +123,11 @@ const char *get_bug_type(struct kasan_access_info *info)
 	return get_wild_bug_type(info);
 }
 
+void metadata_fetch_row(char *buffer, void *row)
+{
+	memcpy(buffer, kasan_mem_to_shadow(row), META_BYTES_PER_ROW);
+}
+
 #if CONFIG_KASAN_STACK
 static bool __must_check tokenize_frame_descr(const char **frame_descr,
 					      char *token, size_t max_tok_len,
diff --git a/mm/kasan/report_sw_tags.c b/mm/kasan/report_sw_tags.c
index c87d5a343b4e..add2dfe6169c 100644
--- a/mm/kasan/report_sw_tags.c
+++ b/mm/kasan/report_sw_tags.c
@@ -80,6 +80,11 @@ void *find_first_bad_addr(void *addr, size_t size)
 	return p;
 }
 
+void metadata_fetch_row(char *buffer, void *row)
+{
+	memcpy(buffer, kasan_mem_to_shadow(row), META_BYTES_PER_ROW);
+}
+
 void print_tags(u8 addr_tag, const void *addr)
 {
 	u8 *shadow = (u8 *)kasan_mem_to_shadow(addr);
-- 
2.29.2.299.gdc1121823c-goog
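
[Illustrative note, not part of the patch above.] With the fetch callback
factored out, a hardware tag-based mode only needs to supply its own
metadata_fetch_row() that reads memory tags directly instead of copying
shadow bytes. Below is a minimal sketch of what such an implementation
could look like, assuming a hypothetical helper hw_get_mem_tag() that
returns the MTE tag for a given address (the helper name and the file it
would live in are assumptions, not defined by this patch):

/* Sketch of a possible mm/kasan/report_hw_tags.c implementation. */
void metadata_fetch_row(char *buffer, void *row)
{
	int i;

	/* One metadata byte per KASAN_GRANULE_SIZE-aligned granule in the row. */
	for (i = 0; i < META_BYTES_PER_ROW; i++)
		buffer[i] = hw_get_mem_tag(row + i * KASAN_GRANULE_SIZE);
}

Because print_memory_metadata() now walks memory addresses (row) rather
than shadow addresses, an implementation like this never touches shadow
memory, which is what the hardware tag-based mode requires.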