Account lost messages in printk-safe and printk-safe-nmi
contexts and report those numbers during printk_safe_flush().

The patch also moves the lost-message counter to struct
`printk_safe_seq_buf' instead of having dedicated static
counters - this simplifies the code.

Signed-off-by: Sergey Senozhatsky <[email protected]>
---
 kernel/printk/internal.h    | 17 -----------------
 kernel/printk/printk.c      | 10 ----------
 kernel/printk/printk_safe.c | 38 ++++++++++++++++++++++++++++----------
 3 files changed, 28 insertions(+), 37 deletions(-)

diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h
index c65e36509f3b..1db044f808b7 100644
--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
@@ -16,23 +16,6 @@
  */
 #include <linux/percpu.h>
 
-#ifdef CONFIG_PRINTK_NMI
-
-extern atomic_t nmi_message_lost;
-static inline int get_nmi_message_lost(void)
-{
-       return atomic_xchg(&nmi_message_lost, 0);
-}
-
-#else /* CONFIG_PRINTK_NMI */
-
-static inline int get_nmi_message_lost(void)
-{
-       return 0;
-}
-
-#endif /* CONFIG_PRINTK_NMI */
-
 #ifdef CONFIG_PRINTK
 
 #define PRINTK_SAFE_CONTEXT_MASK       0x7fffffff
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 9cc9a97c2cc8..86f3f03faf6f 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -1676,7 +1676,6 @@ asmlinkage int vprintk_emit(int facility, int level,
        unsigned long flags;
        int this_cpu;
        int printed_len = 0;
-       int nmi_message_lost;
        bool in_sched = false;
        /* cpu currently holding logbuf_lock in this function */
        static unsigned int logbuf_cpu = UINT_MAX;
@@ -1727,15 +1726,6 @@ asmlinkage int vprintk_emit(int facility, int level,
                                         strlen(recursion_msg));
        }
 
-       nmi_message_lost = get_nmi_message_lost();
-       if (unlikely(nmi_message_lost)) {
-               text_len = scnprintf(textbuf, sizeof(textbuf),
-                                    "BAD LUCK: lost %d message(s) from NMI context!",
-                                    nmi_message_lost);
-               printed_len += log_store(0, 2, LOG_PREFIX|LOG_NEWLINE, 0,
-                                        NULL, 0, textbuf, text_len);
-       }
-
        /*
         * The printf needs to come first; we need the syslog
         * prefix which might be passed-in as a parameter.
diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
index 054c9128ade0..04fff7074cce 100644
--- a/kernel/printk/printk_safe.c
+++ b/kernel/printk/printk_safe.c
@@ -40,13 +40,15 @@
  * were handled or when IRQs are blocked.
  */
 static int printk_safe_irq_ready;
-atomic_t nmi_message_lost;
 
 #define SAFE_LOG_BUF_LEN ((1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT) -    \
-                        sizeof(atomic_t) - sizeof(struct irq_work))
+                               sizeof(atomic_t) -                      \
+                               sizeof(atomic_t) -                      \
+                               sizeof(struct irq_work))
 
 struct printk_safe_seq_buf {
        atomic_t                len;    /* length of written data */
+       atomic_t                message_lost;
        struct irq_work         work;   /* IRQ work that flushes the buffer */
        unsigned char           buffer[SAFE_LOG_BUF_LEN];
 };
@@ -58,6 +60,16 @@ static DEFINE_PER_CPU(int, printk_context);
 static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq);
 #endif
 
+/* Get flushed in a more safe context. */
+static void queue_flush_work(struct printk_safe_seq_buf *s)
+{
+       if (printk_safe_irq_ready) {
+               /* Make sure that IRQ work is really initialized. */
+               smp_rmb();
+               irq_work_queue(&s->work);
+       }
+}
+
 /*
  * Add a message to per-CPU context-dependent buffer. NMI and printk-safe
  * have dedicated buffers, because otherwise printk-safe preempted by
@@ -79,7 +91,8 @@ static int printk_safe_log_store(struct printk_safe_seq_buf *s,
 
        /* The trailing '\0' is not counted into len. */
        if (len >= sizeof(s->buffer) - 1) {
-               atomic_inc(&nmi_message_lost);
+               atomic_inc(&s->message_lost);
+               queue_flush_work(s);
                return 0;
        }
 
@@ -91,6 +104,8 @@ static int printk_safe_log_store(struct printk_safe_seq_buf *s,
                smp_rmb();
 
        add = vscnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, args);
+       if (!add)
+               return 0;
 
        /*
         * Do it once again if the buffer has been flushed in the meantime.
@@ -100,13 +115,7 @@ static int printk_safe_log_store(struct printk_safe_seq_buf *s,
        if (atomic_cmpxchg(&s->len, len, len + add) != len)
                goto again;
 
-       /* Get flushed in a more safe context. */
-       if (add && printk_safe_irq_ready) {
-               /* Make sure that IRQ work is really initialized. */
-               smp_rmb();
-               irq_work_queue(&s->work);
-       }
-
+       queue_flush_work(s);
        return add;
 }
 
@@ -168,6 +177,14 @@ static int printk_safe_flush_buffer(const char *start, size_t len)
        return len;
 }
 
+static void report_message_lost(struct printk_safe_seq_buf *s)
+{
+       int lost = atomic_xchg(&s->message_lost, 0);
+
+       if (lost)
+               printk_deferred("Lost %d message(s)!\n", lost);
+}
+
 /*
  * Flush data from the associated per-CPU buffer. The function
  * can be called either via IRQ work or independently.
@@ -225,6 +242,7 @@ static void __printk_safe_flush(struct irq_work *work)
                goto more;
 
 out:
+       report_message_lost(s);
        raw_spin_unlock_irqrestore(&read_lock, flags);
 }
 
-- 
2.11.0

Reply via email to