----- Original Message -----
> On 06/18/2012 11:15 PM, Dave Anderson wrote:
> >
> > ----- Original Message -----
> >> Since Linux v3.4 (specifically, commit 438ced1720b584000 "ring-buffer:
> >> Add per_cpu ring buffer control files"), the trace buffer size is now
> >> per-cpu. The patch below updates the trace extension to handle this.
> >>
> >> Rabin
>
>
> It looks very good to me, but for possible future convenience, it would
> be better if buffers[i].nr_pages were also set when !per_cpu_buffer_sizes.
>
> ACK-ed.
>
> Thanks,
> Lai
OK thanks -- I have updated Rabin's patch with your additional suggestion.
The updated patch for crash-6.0.8 is attached.
Thanks,
Dave
>
> >> @@ -362,6 +374,10 @@ static int ftrace_init_buffers(struct ring_buffer_per_cpu *buffers,
> >>  		buffer_read_value(reader_page);
> >>  		buffer_read_value(overrun);
> >>  		buffer_read_value(entries);
> >> +		if (per_cpu_buffer_sizes) {
> >> +			buffer_read_value(nr_pages);
> >> +			pages = buffers[i].nr_pages;
> >> +		}
>
> 		else {
> 			buffers[i].nr_pages = pages;
> 		}
>
>
>
> >>  #undef buffer_read_value
> >> 
> >>  		if (ftrace_init_pages(buffers + i, pages) < 0)
>
>
>
> >> --
> >> 1.7.9.5
> >>
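For context, the kernel-side change Rabin refers to moved the page count
from the top-level ring buffer into each per-cpu buffer. Roughly, it looks
like this (a simplified sketch of commit 438ced1720b5, with unrelated fields
omitted and the layout assumed, not the literal kernel declarations):

/* Before v3.4: one page count shared by all cpus */
struct ring_buffer {
	unsigned			pages;
	/* ... */
	struct ring_buffer_per_cpu	**buffers;
};

/* v3.4 and later: each cpu's buffer carries its own count */
struct ring_buffer_per_cpu {
	int				cpu;
	unsigned int			nr_pages;
	/* ... */
};

That is why the extension can no longer read a single ring_buffer.pages
value and must instead pick up nr_pages from each ring_buffer_per_cpu, as
the hunk above and the patch below do.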
--- trace.c 11 May 2012 15:10:19 -0000 1.5
+++ trace.c 19 Jun 2012 13:01:42 -0000
@@ -24,6 +24,7 @@
  * lockless ring_buffer and old non-lockless ring_buffer are both supported.
  */
 static int lockless_ring_buffer;
+static int per_cpu_buffer_sizes;
 
 #define koffset(struct, member) struct##_##member##_offset
 
@@ -37,6 +38,7 @@
 
 static int koffset(ring_buffer_per_cpu, cpu);
 static int koffset(ring_buffer_per_cpu, pages);
+static int koffset(ring_buffer_per_cpu, nr_pages);
 static int koffset(ring_buffer_per_cpu, head_page);
 static int koffset(ring_buffer_per_cpu, tail_page);
 static int koffset(ring_buffer_per_cpu, commit_page);
@@ -71,6 +73,7 @@
 	ulong real_head_page;
 
 	int head_page_index;
+	unsigned int nr_pages;
 	ulong *pages;
 
 	ulong *linear_pages;
@@ -144,7 +147,14 @@
 	init_offset(trace_array, buffer);
 	init_offset(tracer, name);
 
-	init_offset(ring_buffer, pages);
+	if (MEMBER_EXISTS("ring_buffer_per_cpu", "nr_pages")) {
+		per_cpu_buffer_sizes = 1;
+		if (verbose)
+			fprintf(fp, "per cpu buffer sizes\n");
+	}
+
+	if (!per_cpu_buffer_sizes)
+		init_offset(ring_buffer, pages);
 	init_offset(ring_buffer, flags);
 	init_offset(ring_buffer, cpus);
 	init_offset(ring_buffer, buffers);
@@ -155,6 +165,8 @@
 			fprintf(fp, "lockless\n");
 	}
 
+	if (per_cpu_buffer_sizes)
+		init_offset(ring_buffer_per_cpu, nr_pages);
 	init_offset(ring_buffer_per_cpu, cpu);
 	init_offset(ring_buffer_per_cpu, pages);
 	init_offset(ring_buffer_per_cpu, head_page);
@@ -362,6 +374,12 @@
 		buffer_read_value(reader_page);
 		buffer_read_value(overrun);
 		buffer_read_value(entries);
+		if (per_cpu_buffer_sizes) {
+			buffer_read_value(nr_pages);
+			pages = buffers[i].nr_pages;
+		} else
+			buffers[i].nr_pages = pages;
+
 #undef buffer_read_value
 
 		if (ftrace_init_pages(buffers + i, pages) < 0)
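For readers without the full trace.c at hand: the buffer_read_value() macro
is defined just above the final hunk and is not shown in the patch. A call
such as buffer_read_value(nr_pages) plausibly boils down to a readmem() of
the member at its koffset within the per-cpu struct, along these lines
(a sketch only; readmem(), KVADDR, and FAULT_ON_ERROR are crash's real
interfaces, but the kaddr field name and the exact macro shape are
assumptions):

/* Hypothetical expansion of buffer_read_value(nr_pages) */
readmem(buffers[i].kaddr + koffset(ring_buffer_per_cpu, nr_pages),
	KVADDR, &buffers[i].nr_pages, sizeof(buffers[i].nr_pages),
	"ring_buffer_per_cpu nr_pages", FAULT_ON_ERROR);

With Lai's suggestion folded in, buffers[i].nr_pages is valid on both old
and new kernels, so later code can use it unconditionally.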
--
Crash-utility mailing list
Crash-utility@redhat.com
https://www.redhat.com/mailman/listinfo/crash-utility