Some PMUs (such as BTS or Intel PT without multiple-entry ToPA capability)
don't support scatter-gather and will prefer larger contiguous areas for
their output regions.

This patch adds a new pmu capability to request higher order allocations.

Signed-off-by: Alexander Shishkin <alexander.shish...@linux.intel.com>
---
 include/linux/perf_event.h  |  1 +
 kernel/events/ring_buffer.c | 51 +++++++++++++++++++++++++++++++++++++++------
 2 files changed, 46 insertions(+), 6 deletions(-)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index cf62338421..4d9ede200f 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -170,6 +170,7 @@ struct perf_event;
  * pmu::capabilities flags
  */
 #define PERF_PMU_CAP_NO_INTERRUPT              0x01
+#define PERF_PMU_CAP_AUX_NO_SG                 0x02
 
 /**
  * struct pmu - generic performance monitoring unit
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 00708d5916..d10919ca42 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -242,29 +242,68 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
        spin_lock_init(&rb->event_lock);
 }
 
+#define PERF_AUX_GFP   (GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY)
+
+static struct page *rb_alloc_aux_page(int node, int order)
+{
+       struct page *page;
+
+       if (order > MAX_ORDER)
+               order = MAX_ORDER;
+
+       do {
+               page = alloc_pages_node(node, PERF_AUX_GFP, order);
+       } while (!page && order--);
+
+       if (page && order) {
+               /*
+                * Communicate the allocation size to the driver
+                */
+               split_page(page, order);
+               SetPagePrivate(page);
+               set_page_private(page, order);
+       }
+
+       return page;
+}
+
+static void rb_free_aux_page(struct ring_buffer *rb, int idx)
+{
+       struct page *page = virt_to_page(rb->aux_pages[idx]);
+
+       ClearPagePrivate(page);
+       page->mapping = NULL;
+       __free_page(page);
+}
+
 int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
                 pgoff_t pgoff, int nr_pages, int flags)
 {
        bool overwrite = !(flags & RING_BUFFER_WRITABLE);
        int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu);
-       int ret = -ENOMEM;
+       int ret = -ENOMEM, order = 0;
 
        if (!has_aux(event))
                return -ENOTSUPP;
 
+       if (event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG)
+               order = get_order(nr_pages * PAGE_SIZE);
+
        rb->aux_pages = kzalloc_node(nr_pages * sizeof(void *), GFP_KERNEL, node);
        if (!rb->aux_pages)
                return -ENOMEM;
 
-       for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;
-            rb->aux_nr_pages++) {
+       for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) {
                struct page *page;
+               int last;
 
-               page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
+               page = rb_alloc_aux_page(node, order);
                if (!page)
                        goto out;
 
-               rb->aux_pages[rb->aux_nr_pages] = page_address(page);
+               for (last = rb->aux_nr_pages + (1 << page_private(page));
+                    last > rb->aux_nr_pages; rb->aux_nr_pages++)
+                       rb->aux_pages[rb->aux_nr_pages] = page_address(page++);
        }
 
        rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages,
@@ -298,7 +337,7 @@ void rb_free_aux(struct ring_buffer *rb, struct perf_event *event)
        }
 
        for (pg = 0; pg < rb->aux_nr_pages; pg++)
-               free_page((unsigned long)rb->aux_pages[pg]);
+               rb_free_aux_page(rb, pg);
 
        kfree(rb->aux_pages);
        rb->aux_nr_pages = 0;
-- 
2.1.0.rc1

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to