On Fri, Dec 04 2020 at 20:27, Corentin Labbe wrote:
> On Fri, Dec 04, 2020 at 04:08:27PM +0100, Thomas Gleixner wrote:
>> On Fri, Dec 04 2020 at 14:26, Corentin Labbe wrote:
>> > On Fri, Dec 04, 2020 at 12:34:05AM +0100, Thomas Gleixner wrote:
>> >> The unmap comes from sg_miter_stop() and looking at the previous
>> >> map/unmap cycles there are never nested maps.
>> >> 
>> >> [  996.943030] cryptset-316       0d..4 73943317us : __kmap_local_pfn_prot: kmap_local_pfn: 1 ffefd000
>> >> 
>> >> is the first event which allocates a nested map. 
>> >> 
>> >> So something goes south either in sg_miter or in the crypto maze.
>> >> 
>> >> Enabling CONFIG_DEBUG_KMAP_LOCAL and function tracing might give us more 
>> >> clue.
>> >
>> > Done, http://kernel.montjoie.ovh/130466.log
>> 
>> Does not provide more information with the debug enabled. So can you
>> please enable CONFIG_FUNCTION_TRACER and add 'ftrace=function' to the
>> command line?
>
> Done, http://kernel.montjoie.ovh/130490.log

Aaargh. That overwrites everything while printing out that
warning.

Can you please replace the debug patch with the one below and try again?
That stops the trace right on the condition.

Thanks,

        tglx
---
diff --git a/mm/highmem.c b/mm/highmem.c
index b49364a306b8..8f8862f79d23 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -485,6 +485,7 @@ static inline bool kmap_high_unmap_local(unsigned long vaddr)
 {
 #ifdef ARCH_NEEDS_KMAP_HIGH_GET
        if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
+               trace_printk("kunmap_high: %lx\n", vaddr);
                kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
                return true;
        }
@@ -520,6 +521,7 @@ void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
        preempt_disable();
        idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+       trace_printk("kmap_local_pfn: %d %lx\n", idx, (unsigned long) vaddr);
        BUG_ON(!pte_none(*(kmap_pte - idx)));
        pteval = pfn_pte(pfn, prot);
        set_pte_at(&init_mm, vaddr, kmap_pte - idx, pteval);
@@ -545,8 +547,10 @@ void *__kmap_local_page_prot(struct page *page, pgprot_t prot)
 
        /* Try kmap_high_get() if architecture has it enabled */
        kmap = arch_kmap_local_high_get(page);
-       if (kmap)
+       if (kmap) {
+               trace_printk("kmap_local_high_get: %lx\n", (unsigned long) kmap);
                return kmap;
+       }
 
        return __kmap_local_pfn_prot(page_to_pfn(page), prot);
 }
@@ -578,7 +582,11 @@ void kunmap_local_indexed(void *vaddr)
 
        preempt_disable();
        idx = arch_kmap_local_unmap_idx(kmap_local_idx(), addr);
-       WARN_ON_ONCE(addr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+       trace_printk("kunmap_local: %i %lx\n", idx, (unsigned long) vaddr);
+       if (addr != __fix_to_virt(FIX_KMAP_BEGIN + idx)) {
+               tracing_off();
+               BUG();
+       }
 
        arch_kmap_local_pre_unmap(addr);
        pte_clear(&init_mm, addr, kmap_pte - idx);

Reply via email to