Zap at least 10 pages before releasing mmu-lock to reduce the overhead
caused by reacquiring the lock

[ It improves kernel build time by 0.6% ~ 1% ]

Signed-off-by: Xiao Guangrong <xiaoguangr...@linux.vnet.ibm.com>
---
 arch/x86/kvm/mmu.c |   14 ++++++++++++--
 1 files changed, 12 insertions(+), 2 deletions(-)
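The idea in isolation: keep zapping under mmu_lock until at least
BATCH_ZAP_PAGES pages have been processed, and only then consider
dropping the lock for waiters, so each lock acquisition does a useful
amount of work. Below is a minimal user-space sketch of that pattern,
not kernel code: pthread_mutex_t stands in for kvm->mmu_lock and
need_yield() for cond_resched_lock(); all names are illustrative.

#include <pthread.h>
#include <stdio.h>

#define BATCH_ZAP_PAGES	10
#define NR_PAGES	100

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Pretend a waiter shows up every so often (hypothetical stand-in). */
static int need_yield(int iter)
{
	return iter % 7 == 0;
}

static void zap_all(void)
{
	int batch = 0, zapped = 0, yields = 0, i;

	pthread_mutex_lock(&lock);
	for (i = 0; i < NR_PAGES; i++) {
		/*
		 * Only drop the lock once at least BATCH_ZAP_PAGES
		 * pages have been zapped since the last release.
		 */
		if (batch >= BATCH_ZAP_PAGES && need_yield(i)) {
			pthread_mutex_unlock(&lock);
			yields++;
			batch = 0;
			pthread_mutex_lock(&lock);
		}
		batch++;	/* "zap" one page */
		zapped++;
	}
	pthread_mutex_unlock(&lock);

	printf("zapped %d pages, released lock %d times\n",
	       zapped, yields);
}

int main(void)
{
	zap_all();
	return 0;
}

Without the batch check, every need_yield() hit would drop the lock
after arbitrarily little work; with it, the lock is released at most
once per BATCH_ZAP_PAGES pages.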

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 891ad2c..7ad0e50 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4207,14 +4207,18 @@ restart:
        spin_unlock(&kvm->mmu_lock);
 }
 
+#define BATCH_ZAP_PAGES        10
 static void kvm_zap_obsolete_pages(struct kvm *kvm)
 {
        struct kvm_mmu_page *sp, *node;
        LIST_HEAD(invalid_list);
+       int batch = 0;
 
 restart:
        list_for_each_entry_safe_reverse(sp, node,
              &kvm->arch.active_mmu_pages, link) {
+               int ret;
+
                /*
                 * No obsolete page exists before new created page since
                 * active_mmu_pages is the FIFO list.
@@ -4252,10 +4256,16 @@ restart:
                 * Need not flush tlb since we only zap the sp with invalid
                 * generation number.
                 */
-               if (cond_resched_lock(&kvm->mmu_lock))
+               if ((batch >= BATCH_ZAP_PAGES) &&
+                     cond_resched_lock(&kvm->mmu_lock)) {
+                       batch = 0;
                        goto restart;
+               }
 
-               if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
+               ret = kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
+               batch += ret;
+
+               if (ret)
                        goto restart;
        }
 
-- 
1.7.7.6
