From: Masami Hiramatsu (Google) <[email protected]>

On real hardware, panic and reboot will not flush the hardware
cache to the persistent ring buffer, so the events written right
before the panic can be lost. Moreover, since there will be an
inconsistency between the commit counter (which is written
atomically via local_set()) and the data, validation will fail and
all data in the persistent ring buffer will be lost.

To avoid this issue, stop recording to the ring buffer and flush
the cache of the reserved memory on panic.

Fixes: e645535a954a ("tracing: Add option to use memmapped memory for trace boot instance")
Cc: [email protected]
Signed-off-by: Masami Hiramatsu (Google) <[email protected]>
---
 kernel/trace/ring_buffer.c |   21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index f7fd4bdf6560..d2b69221a94c 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -6,6 +6,7 @@
  */
 #include <linux/sched/isolation.h>
 #include <linux/trace_recursion.h>
+#include <linux/panic_notifier.h>
 #include <linux/trace_events.h>
 #include <linux/ring_buffer.h>
 #include <linux/trace_clock.h>
@@ -589,6 +590,7 @@ struct trace_buffer {
 
        unsigned long                   range_addr_start;
        unsigned long                   range_addr_end;
+       struct notifier_block           flush_nb;
 
        struct ring_buffer_meta         *meta;
 
@@ -2470,6 +2472,16 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
        kfree(cpu_buffer);
 }
 
+static int rb_flush_buffer_cb(struct notifier_block *nb,
+                              unsigned long event, void *data)
+{
+       struct trace_buffer *buffer = container_of(nb, struct trace_buffer,
+                                                  flush_nb);
+
+       ring_buffer_record_disable(buffer);
+       flush_kernel_vmap_range((void *)buffer->range_addr_start,
+                               buffer->range_addr_end - buffer->range_addr_start);
+       return NOTIFY_DONE;
+}
+
 static struct trace_buffer *alloc_buffer(unsigned long size, unsigned flags,
                                         int order, unsigned long start,
                                         unsigned long end,
@@ -2589,6 +2601,12 @@ static struct trace_buffer *alloc_buffer(unsigned long size, unsigned flags,
 
        mutex_init(&buffer->mutex);
 
+       /* Persistent ring buffer needs to flush cache before reboot. */
+       if (start && end) {
+               buffer->flush_nb.notifier_call = rb_flush_buffer_cb;
+               atomic_notifier_chain_register(&panic_notifier_list,
+                                              &buffer->flush_nb);
+       }
+
        return_ptr(buffer);
 
  fail_free_buffers:
@@ -2676,6 +2694,9 @@ ring_buffer_free(struct trace_buffer *buffer)
        int cpu;
 
+       if (buffer->range_addr_start && buffer->range_addr_end)
+               atomic_notifier_chain_unregister(&panic_notifier_list,
+                                                &buffer->flush_nb);
+
        cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
 
        irq_work_sync(&buffer->irq_work.work);


Reply via email to