From: Masami Hiramatsu (Google) <[email protected]>
Since the MSBs of rb_data_page::commit are used for storing
RB_MISSED_EVENTS and RB_MISSED_STORED, we need to mask out those bits
when it is used for finding the size of data pages.
Fixes: 5f3b6e839f3c ("ring-buffer: Validate boot range memory events")
Fixes: 5b7be9c709e1 ("ring-buffer: Add test to validate the time stamp deltas")
Cc: [email protected]
Signed-off-by: Masami Hiramatsu (Google) <[email protected]>
---
Changes in v4:
- Fix to move rb_commit_index() after ring_buffer_per_cpu definition.
---
kernel/trace/ring_buffer.c | 32 ++++++++++++++++----------------
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 3eb124c93d72..67bc652ba02a 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -395,6 +395,12 @@ static __always_inline unsigned int rb_page_commit(struct buffer_page *bpage)
return local_read(&bpage->page->commit);
}
+/* Size is determined by what has been committed */
+static __always_inline unsigned rb_page_size(struct buffer_page *bpage)
+{
+ return rb_page_commit(bpage) & ~RB_MISSED_MASK;
+}
+
static void free_buffer_page(struct buffer_page *bpage)
{
/* Range pages are not to be freed */
@@ -614,6 +620,12 @@ struct ring_buffer_iter {
int missed_events;
};
+static __always_inline unsigned
+rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
+{
+ return rb_page_commit(cpu_buffer->commit_page);
+}
+
int ring_buffer_print_page_header(struct trace_buffer *buffer, struct trace_seq *s)
{
struct buffer_data_page field;
@@ -1907,7 +1919,7 @@ static int rb_validate_buffer(struct buffer_data_page *dpage, int cpu)
u64 delta;
int tail;
- tail = local_read(&dpage->commit);
+ tail = local_read(&dpage->commit) & ~RB_MISSED_MASK;
return rb_read_data_buffer(dpage, tail, cpu, &ts, &delta);
}
@@ -1934,7 +1946,7 @@ static void rb_meta_validate_events(struct ring_buffer_per_cpu *cpu_buffer)
goto invalid;
}
entries += ret;
- entry_bytes += local_read(&cpu_buffer->reader_page->page->commit);
+ entry_bytes += rb_page_size(cpu_buffer->reader_page);
local_set(&cpu_buffer->reader_page->entries, ret);
ts = head_page->page->time_stamp;
@@ -2054,7 +2066,7 @@ static void rb_meta_validate_events(struct ring_buffer_per_cpu *cpu_buffer)
local_inc(&cpu_buffer->pages_touched);
entries += ret;
- entry_bytes += local_read(&head_page->page->commit);
+ entry_bytes += rb_page_size(head_page);
local_set(&cpu_buffer->head_page->entries, ret);
if (head_page == cpu_buffer->commit_page)
@@ -3257,18 +3269,6 @@ rb_iter_head_event(struct ring_buffer_iter *iter)
return NULL;
}
-/* Size is determined by what has been committed */
-static __always_inline unsigned rb_page_size(struct buffer_page *bpage)
-{
- return rb_page_commit(bpage) & ~RB_MISSED_MASK;
-}
-
-static __always_inline unsigned
-rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
-{
- return rb_page_commit(cpu_buffer->commit_page);
-}
-
static __always_inline unsigned
rb_event_index(struct ring_buffer_per_cpu *cpu_buffer, struct ring_buffer_event *event)
{
@@ -4433,7 +4433,7 @@ static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
if (tail == CHECK_FULL_PAGE) {
full = true;
- tail = local_read(&bpage->commit);
+ tail = local_read(&bpage->commit) & ~RB_MISSED_MASK;
} else if (info->add_timestamp &
(RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)) {
/* Ignore events with absolute time stamps */