On 4/25/19 10:31 PM, Palmer Dabbelt wrote:
On Wed, 10 Apr 2019 15:44:48 PDT (-0700), [email protected] wrote:
The TLB flush counters under vmstat seem to be very helpful while
debugging TLB flush performance on RISC-V.

Update the counters in each of the TLB flush methods.

Signed-off-by: Atish Patra <[email protected]>
---
  arch/riscv/include/asm/tlbflush.h |  5 +++++
  arch/riscv/mm/tlbflush.c          | 12 ++++++++++++
  2 files changed, 17 insertions(+)
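
Note: the count_vm_tlb_event() events only exist when CONFIG_DEBUG_TLBFLUSH
is enabled (the NR_TLB_* items live in include/linux/vm_event_item.h under
the same option), so the calls added below compile away to nothing
otherwise.  Roughly, the generic gating looks like this (a paraphrased
sketch of include/linux/vmstat.h from this era, not part of this patch):

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)	count_vm_event(x)	/* bump the per-cpu event */
#else
#define count_vm_tlb_event(x)	do {} while (0)		/* no-op without the option */
#endif

In other words, kernels built without CONFIG_DEBUG_TLBFLUSH should see no
overhead from this change.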

diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index 29a780ca232a..19779a083f52 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -9,6 +9,7 @@
  #define _ASM_RISCV_TLBFLUSH_H

  #include <linux/mm_types.h>
+#include <linux/vmstat.h>

  /*
   * Flush entire local TLB.  'sfence.vma' implicitly fences with the instruction
@@ -16,11 +17,13 @@
   */
  static inline void local_flush_tlb_all(void)
  {
+       count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
        __asm__ __volatile__ ("sfence.vma" : : : "memory");
  }

  static inline void local_flush_tlb_mm(struct mm_struct *mm)
  {
+       count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
        /* Flush ASID 0 so that global mappings are not affected */
        __asm__ __volatile__ ("sfence.vma x0, %0" : : "r" (0) : "memory");
  }
@@ -28,6 +31,7 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
  static inline void local_flush_tlb_page(struct vm_area_struct *vma,
        unsigned long addr)
  {
+       count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
        __asm__ __volatile__ ("sfence.vma %0, %1"
                              : : "r" (addr), "r" (0)
                              : "memory");
@@ -35,6 +39,7 @@ static inline void local_flush_tlb_page(struct vm_area_struct *vma,

  static inline void local_flush_tlb_kernel_page(unsigned long addr)
  {
+       count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
        __asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory");
  }

diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index ceee76f14a0a..8072d7da32bb 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -4,6 +4,8 @@
   */

  #include <linux/mm.h>
+#include <linux/vmstat.h>
+#include <linux/cpumask.h>
  #include <asm/sbi.h>

  #define SFENCE_VMA_FLUSH_ALL ((unsigned long) -1)
@@ -110,6 +112,7 @@ static void ipi_remote_sfence_vma(void *info)
        unsigned long size = data->size;
        unsigned long i;

+       count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
        if (size == SFENCE_VMA_FLUSH_ALL) {
                local_flush_tlb_all();
        }
@@ -129,6 +132,8 @@ static void ipi_remote_sfence_vma_asid(void *info)
        unsigned long size = data->size;
        unsigned long i;

+       count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
+       /* Flush entire MM context */
        if (size == SFENCE_VMA_FLUSH_ALL) {
                __asm__ __volatile__ ("sfence.vma x0, %0"
                                      : : "r" (asid)
@@ -158,6 +163,13 @@ static void remote_sfence_vma(unsigned long start, unsigned long size)
  static void remote_sfence_vma_asid(cpumask_t *mask, unsigned long start,
                                   unsigned long size, unsigned long asid)
  {
+       int cpuid = smp_processor_id();
+
+       if (cpumask_equal(mask, cpumask_of(cpuid)))
+               count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+       else
+               count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
+
        if (tlbi_ipi) {
                struct tlbi info = {
                        .start = start,
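
For anyone who wants to poke at these counters: with CONFIG_DEBUG_TLBFLUSH=y
they are exported through /proc/vmstat as nr_tlb_local_flush_all,
nr_tlb_local_flush_one, nr_tlb_remote_flush and nr_tlb_remote_flush_received.
A minimal userspace sketch (purely illustrative, not part of the patch) that
dumps just those lines:

/*
 * Illustrative only: print the TLB flush counters from /proc/vmstat.
 * Assumes a kernel built with CONFIG_DEBUG_TLBFLUSH=y; otherwise the
 * nr_tlb_* lines simply will not be present.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/vmstat", "r");
	char line[128];

	if (!f) {
		perror("/proc/vmstat");
		return 1;
	}

	while (fgets(line, sizeof(line), f))
		if (strncmp(line, "nr_tlb_", 7) == 0)
			fputs(line, stdout);

	fclose(f);
	return 0;
}

Sampling these before and after a workload gives a rough picture of which
flush paths dominate on a given configuration.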

Looks good, but it's not applying on for-next (based on rc6).  Do you mind
re-spinning the patches?



This patch is based on Gary's TLB flush patch series:

https://patchwork.kernel.org/project/linux-riscv/list/?series=97315

So it should only be merged on top of that series.

Regards,
Atish

