If persistent RAM is being used to record trace entries, allocate and
free the ring-buffer pages through ramtrace_alloc_page() and
ramtrace_free_page() instead of the regular page allocator.

Signed-off-by: Nachammai Karuppiah <nachukan...@gmail.com>
---
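Note for reviewers, not intended for the commit: a minimal sketch of the
ramtrace interface as this patch assumes it. The prototypes are inferred
purely from the call sites in the diff below (the actual declarations
come from the ramtrace backend earlier in this series), and the demo
caller is hypothetical, shown only to make the per-cpu alloc/free
pairing explicit.

	/*
	 * Inferred from the call sites below; the real declarations are
	 * provided by the ramtrace backend patch, not by this sketch.
	 */
	void *ramtrace_alloc_page(int cpu);	/* page-sized buffer from persistent RAM, or NULL */
	void ramtrace_free_page(void *page, int cpu);	/* return the buffer to the per-cpu pool */
	long ramtrace_available_mem(void);	/* pages still available, cf. si_mem_available() */

	/* Hypothetical caller: a page taken from a cpu goes back to that cpu. */
	static int ramtrace_page_roundtrip(int cpu)
	{
		void *page = ramtrace_alloc_page(cpu);

		if (!page)
			return -ENOMEM;	/* same fallback the ring buffer takes */
		/* ... record trace entries into the page ... */
		ramtrace_free_page(page, cpu);
		return 0;
	}

If the backend's signatures differ, the call sites below are the
authoritative reference.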
 kernel/trace/ring_buffer.c | 126 ++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 123 insertions(+), 3 deletions(-)

diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 34e50c1..c99719e 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -353,6 +353,23 @@ static void free_buffer_page(struct buffer_page *bpage)
        kfree(bpage);
 }
 
+#ifdef CONFIG_TRACE_EVENTS_TO_PSTORE
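+/*
+ * Free @bpage: hand its data page back to the per-cpu ramtrace pool
+ * when @use_pstore is set, or to the page allocator otherwise.
+ */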
+static void
+free_buffer_page_cpu(struct buffer_page *bpage, int cpu, bool use_pstore)
+{
+       if (use_pstore) {
+               ramtrace_free_page(bpage->page, cpu);
+               kfree(bpage);
+       } else {
+               free_buffer_page(bpage);
+       }
+}
+#endif
+
 /*
  * We need to fit the time_stamp delta into 27 bits.
  */
@@ -1200,7 +1212,12 @@ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
        return 0;
 }
 
+#ifdef CONFIG_TRACE_EVENTS_TO_PSTORE
+static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu,
+                               bool use_pstore)
+#else
 static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
+#endif
 {
        struct buffer_page *bpage, *tmp;
        bool user_thread = current->mm != NULL;
@@ -1214,6 +1231,11 @@ static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
         * to prevent doing any allocation when it is obvious that it is
         * not going to succeed.
         */
+#ifdef CONFIG_TRACE_EVENTS_TO_PSTORE
+       if (use_pstore)
+               i = ramtrace_available_mem();
+       else
+#endif
        i = si_mem_available();
        if (i < nr_pages)
                return -ENOMEM;
@@ -1246,10 +1268,22 @@ static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
 
                list_add(&bpage->list, pages);
 
+#ifdef CONFIG_TRACE_EVENTS_TO_PSTORE
+               if (use_pstore) {
+                       void *address = ramtrace_alloc_page(cpu);
+
+                       if (!address)
+                               goto free_pages;
+                       bpage->page = address;
+               } else {
+#endif
                page = alloc_pages_node(cpu_to_node(cpu), mflags, 0);
                if (!page)
                        goto free_pages;
                bpage->page = page_address(page);
+#ifdef CONFIG_TRACE_EVENTS_TO_PSTORE
+               }
+#endif
                rb_init_page(bpage->page);
 
                if (user_thread && fatal_signal_pending(current))
@@ -1263,7 +1297,11 @@ static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
 free_pages:
        list_for_each_entry_safe(bpage, tmp, pages, list) {
                list_del_init(&bpage->list);
+#ifdef CONFIG_TRACE_EVENTS_TO_PSTORE
+               free_buffer_page_cpu(bpage, cpu, use_pstore);
+#else
                free_buffer_page(bpage);
+#endif
        }
        if (user_thread)
                clear_current_oom_origin();
@@ -1278,7 +1316,12 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
 
        WARN_ON(!nr_pages);
 
+#ifdef CONFIG_TRACE_EVENTS_TO_PSTORE
+       if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu,
+                               cpu_buffer->use_pstore))
+#else
        if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu))
+#endif
                return -ENOMEM;
 
        /*
@@ -1414,10 +1457,23 @@ rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
        rb_check_bpage(cpu_buffer, bpage);
 
        cpu_buffer->reader_page = bpage;
+
+#ifdef CONFIG_TRACE_EVENTS_TO_PSTORE
+       if (cpu_buffer->use_pstore) {
+               void *address = ramtrace_alloc_page(cpu);
+
+               if (!address)
+                       goto fail_free_reader;
+               bpage->page = address;
+       } else {
+#endif
        page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
        if (!page)
                goto fail_free_reader;
        bpage->page = page_address(page);
+#ifdef CONFIG_TRACE_EVENTS_TO_PSTORE
+       }
+#endif
        rb_init_page(bpage->page);
 
        INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
@@ -1436,7 +1492,12 @@ rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
        return cpu_buffer;
 
  fail_free_reader:
+#ifdef CONFIG_TRACE_EVENTS_TO_PSTORE
+       free_buffer_page_cpu(cpu_buffer->reader_page, cpu,
+                            cpu_buffer->use_pstore);
+#else
        free_buffer_page(cpu_buffer->reader_page);
+#endif
 
  fail_free_buffer:
        kfree(cpu_buffer);
@@ -1447,18 +1508,32 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
 {
        struct list_head *head = cpu_buffer->pages;
        struct buffer_page *bpage, *tmp;
-
+#ifdef CONFIG_TRACE_EVENTS_TO_PSTORE
+       free_buffer_page_cpu(cpu_buffer->reader_page, cpu_buffer->cpu,
+                            cpu_buffer->use_pstore);
+#else
        free_buffer_page(cpu_buffer->reader_page);
+#endif
 
        rb_head_page_deactivate(cpu_buffer);
 
        if (head) {
                list_for_each_entry_safe(bpage, tmp, head, list) {
                        list_del_init(&bpage->list);
+#ifdef CONFIG_TRACE_EVENTS_TO_PSTORE
+                       free_buffer_page_cpu(bpage, cpu_buffer->cpu,
+                                            cpu_buffer->use_pstore);
+#else
                        free_buffer_page(bpage);
+#endif
                }
                bpage = list_entry(head, struct buffer_page, list);
+#ifdef CONFIG_TRACE_EVENTS_TO_PSTORE
+               free_buffer_page_cpu(bpage, cpu_buffer->cpu,
+                                    cpu_buffer->use_pstore);
+#else
                free_buffer_page(bpage);
+#endif
        }
 
        kfree(cpu_buffer);
@@ -1832,7 +1907,12 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
                 * We have already removed references to this list item, just
                 * free up the buffer_page and its page
                 */
+#ifdef CONFIG_TRACE_EVENTS_TO_PSTORE
+               free_buffer_page_cpu(to_remove_page, cpu_buffer->cpu,
+                                    cpu_buffer->use_pstore);
+#else
                free_buffer_page(to_remove_page);
+#endif
                nr_removed--;
 
        } while (to_remove_page != last_page);
@@ -1913,7 +1993,12 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
                list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
                                         list) {
                        list_del_init(&bpage->list);
+#ifdef CONFIG_TRACE_EVENTS_TO_PSTORE
+                       free_buffer_page_cpu(bpage, cpu_buffer->cpu,
+                                            cpu_buffer->use_pstore);
+#else
                        free_buffer_page(bpage);
+#endif
                }
        }
        return success;
@@ -2252,8 +2337,14 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
                         * allocated without receiving ENOMEM
                         */
                        INIT_LIST_HEAD(&cpu_buffer->new_pages);
+#ifdef CONFIG_TRACE_EVENTS_TO_PSTORE
+                       if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
+                                               &cpu_buffer->new_pages, cpu,
+                                               cpu_buffer->use_pstore)) {
+#else
                        if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
                                                &cpu_buffer->new_pages, cpu)) {
+#endif
                                /* not enough memory for new pages */
                                err = -ENOMEM;
                                goto out_err;
@@ -2319,7 +2410,12 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
                INIT_LIST_HEAD(&cpu_buffer->new_pages);
                if (cpu_buffer->nr_pages_to_update > 0 &&
                        __rb_allocate_pages(cpu_buffer->nr_pages_to_update,
+#ifdef CONFIG_TRACE_EVENTS_TO_PSTORE
+                                           &cpu_buffer->new_pages, cpu_id,
+                                           cpu_buffer->use_pstore)) {
+#else
                                            &cpu_buffer->new_pages, cpu_id)) {
+#endif
                        err = -ENOMEM;
                        goto out_err;
                }
@@ -2379,7 +2475,12 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
                list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
                                        list) {
                        list_del_init(&bpage->list);
+#ifdef CONFIG_TRACE_EVENTS_TO_PSTORE
+                       free_buffer_page_cpu(bpage, cpu,
+                                            cpu_buffer->use_pstore);
+#else
                        free_buffer_page(bpage);
+#endif
                }
        }
  out_err_unlock:
@@ -5184,13 +5285,22 @@ void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu)
        if (bpage)
                goto out;
 
+#ifdef CONFIG_TRACE_EVENTS_TO_PSTORE
+       if (cpu_buffer->use_pstore) {
+               bpage = (struct buffer_data_page *)ramtrace_alloc_page(cpu);
+               if (!bpage)
+                       return ERR_PTR(-ENOMEM);
+       } else {
+#endif
        page = alloc_pages_node(cpu_to_node(cpu),
                                GFP_KERNEL | __GFP_NORETRY, 0);
        if (!page)
                return ERR_PTR(-ENOMEM);
 
        bpage = page_address(page);
-
+#ifdef CONFIG_TRACE_EVENTS_TO_PSTORE
+       }
+#endif
  out:
        rb_init_page(bpage);
        down_read(&trace_read_sem);
@@ -5229,7 +5339,12 @@ void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data
        arch_spin_unlock(&cpu_buffer->lock);
        local_irq_restore(flags);
 
- out:
+out:
+#ifdef CONFIG_TRACE_EVENTS_TO_PSTORE
+       if (cpu_buffer->use_pstore)
+               ramtrace_free_page(bpage, cpu);
+       else
+#endif
        free_page((unsigned long)bpage);
        up_read(&trace_read_sem);
 }
-- 
2.7.4
