In preparation for the ring-buffer memory mapping, allocate the
ring-buffer sub-buffers as compound pages so that they can later be
mapped to user-space with vm_insert_pages().

Signed-off-by: Vincent Donnefort <vdonnef...@google.com>
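
For context, here is a minimal sketch (not part of this patch) of how an
mmap path could hand such __GFP_COMP sub-buffer pages to user-space with
vm_insert_pages(). The helper name rb_subbuf_mmap and its parameters are
hypothetical; only vm_insert_pages(), virt_to_page() and the allocation
flags come from the kernel itself:

#include <linux/mm.h>
#include <linux/slab.h>

/*
 * Hypothetical helper: insert the pages of one sub-buffer, allocated
 * with alloc_pages_node(... | __GFP_COMP | __GFP_ZERO, order), into a
 * user VMA.
 */
static int rb_subbuf_mmap(struct vm_area_struct *vma, void *subbuf_va,
			  unsigned long nr_pages)
{
	/* With __GFP_COMP, subbuf_va points at the compound head page. */
	struct page *head = virt_to_page(subbuf_va);
	struct page **pages;
	unsigned long i, nr = nr_pages;
	int ret;

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/* vm_insert_pages() takes an array of struct page pointers. */
	for (i = 0; i < nr_pages; i++)
		pages[i] = head + i;

	ret = vm_insert_pages(vma, vma->vm_start, pages, &nr);
	kfree(pages);
	return ret;
}

vm_insert_pages() batches the PTE insertions for the whole array, and
the compound allocation keeps the tail pages tied to a single refcounted
head page, which is what makes handing them out to user-space safe.
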

diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 25476ead681b..cc9ebe593571 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1524,7 +1524,7 @@ static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
                list_add(&bpage->list, pages);
 
                page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu),
-                                       mflags | __GFP_ZERO,
+                                       mflags | __GFP_COMP | __GFP_ZERO,
                                        cpu_buffer->buffer->subbuf_order);
                if (!page)
                        goto free_pages;
@@ -1609,7 +1609,7 @@ rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
 
        cpu_buffer->reader_page = bpage;
 
-       page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL | __GFP_ZERO,
+       page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL | __GFP_COMP | __GFP_ZERO,
                                cpu_buffer->buffer->subbuf_order);
        if (!page)
                goto fail_free_reader;
@@ -5579,7 +5579,7 @@ ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu)
                goto out;
 
        page = alloc_pages_node(cpu_to_node(cpu),
-                               GFP_KERNEL | __GFP_NORETRY | __GFP_ZERO,
+                               GFP_KERNEL | __GFP_NORETRY | __GFP_COMP | __GFP_ZERO,
                                cpu_buffer->buffer->subbuf_order);
        if (!page) {
                kfree(bpage);
-- 
2.44.0.769.g3c40516874-goog

