Signed-off-by: Francis Deslauriers <fdesl...@gmail.com>
Reviewed-by: Raphaël Beamonte <raphael.beamo...@gmail.com>
---
 arch/x86/mm/fault.c |    7 +++++++
 mm/memory.c         |    5 +++++
 2 files changed, 12 insertions(+)

diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 2b97525..6ceaaaa 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -20,6 +20,9 @@
 #include <asm/fixmap.h>                        /* VSYSCALL_START               */
 #include <asm/context_tracking.h>      /* exception_enter(), ...       */
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/fault.h>                /* trace_page_fault_*(), ...    */
+
 /*
  * Page fault error code bits:
  *
@@ -754,12 +757,14 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 
                if (likely(show_unhandled_signals))
                        show_signal_msg(regs, error_code, address, tsk);
+               trace_page_fault_entry(regs, address, error_code & PF_WRITE);
 
                tsk->thread.cr2         = address;
                tsk->thread.error_code  = error_code;
                tsk->thread.trap_nr     = X86_TRAP_PF;
 
                force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0);
+               trace_page_fault_exit(-1);
 
                return;
        }
@@ -1183,7 +1188,9 @@ good_area:
         * make sure we exit gracefully rather than endlessly redo
         * the fault:
         */
+       trace_page_fault_entry(regs, address, write);
        fault = handle_mm_fault(mm, vma, address, flags);
+       trace_page_fault_exit(fault);
 
        if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
                if (mm_fault_error(regs, error_code, address, fault))
diff --git a/mm/memory.c b/mm/memory.c
index 494526a..49a8119 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -67,6 +67,8 @@
 #include <asm/tlbflush.h>
 #include <asm/pgtable.h>
 
+#include <trace/events/fault.h>
+
 #include "internal.h"
 
 #ifdef LAST_NID_NOT_IN_PAGE_FLAGS
@@ -1828,8 +1830,11 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                                if (foll_flags & FOLL_NOWAIT)
                                        fault_flags |= (FAULT_FLAG_ALLOW_RETRY 
| FAULT_FLAG_RETRY_NOWAIT);
 
+                               trace_page_fault_entry(0, start,
+                                               foll_flags & FOLL_WRITE);
                                ret = handle_mm_fault(mm, vma, start,
                                                        fault_flags);
+                               trace_page_fault_exit(ret);
 
                                if (ret & VM_FAULT_ERROR) {
                                        if (ret & VM_FAULT_OOM)
-- 
1.7.10.4


_______________________________________________
lttng-dev mailing list
lttng-dev@lists.lttng.org
http://lists.lttng.org/cgi-bin/mailman/listinfo/lttng-dev

Reply via email to