KVM: MMU: Introduce a helper function for FIFO zapping
author Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Wed, 6 Mar 2013 07:06:58 +0000 (16:06 +0900)
committer Marcelo Tosatti <mtosatti@redhat.com>
Thu, 7 Mar 2013 20:26:27 +0000 (17:26 -0300)
Move the code for zapping the oldest mmu page, i.e. the one at the tail of the
active list, into a separate helper function.
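For illustration only (not part of the patch), here is a minimal userspace C
sketch of the FIFO pattern the new helper enables: callers repeatedly zap the
entry at the tail of an active list until a goal is met or the list is empty,
then commit the whole batch. The names prepare_zap_oldest() and commit_zap()
are hypothetical stand-ins for prepare_zap_oldest_mmu_page() and
kvm_mmu_commit_zap_page(); the toy list replaces the kernel's list_head.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for struct kvm_mmu_page on the active list. */
struct page {
	int id;
	struct page *prev, *next;	/* prev = newer, next = older */
};

struct vm {
	struct page *head, *tail;	/* head = newest, tail = oldest */
	int used;			/* like n_used_mmu_pages */
};

/*
 * Unlink the oldest page (the list tail) and collect it for a later
 * "commit" pass; return false when the list is already empty.
 * Mirrors the role of prepare_zap_oldest_mmu_page() in the patch.
 */
static bool prepare_zap_oldest(struct vm *vm, struct page **batch, int *n)
{
	struct page *p = vm->tail;

	if (!p)
		return false;

	vm->tail = p->prev;
	if (vm->tail)
		vm->tail->next = NULL;
	else
		vm->head = NULL;
	vm->used--;

	batch[(*n)++] = p;
	return true;
}

/* Free everything collected so far, like kvm_mmu_commit_zap_page(). */
static void commit_zap(struct page **batch, int n)
{
	for (int i = 0; i < n; i++)
		free(batch[i]);
}

int main(void)
{
	struct vm vm = { 0 };
	struct page *batch[16];
	int n = 0;

	/* Populate the active list with 8 pages, newest at the head. */
	for (int i = 0; i < 8; i++) {
		struct page *p = calloc(1, sizeof(*p));
		p->id = i;
		p->next = vm.head;
		if (vm.head)
			vm.head->prev = p;
		vm.head = p;
		if (!vm.tail)
			vm.tail = p;
		vm.used++;
	}

	/* Shrink toward a goal of 3 pages, oldest first, the same loop
	 * shape as kvm_mmu_change_mmu_pages() below. */
	while (vm.used > 3)
		if (!prepare_zap_oldest(&vm, batch, &n))
			break;

	commit_zap(batch, n);
	printf("pages remaining: %d\n", vm.used);
	return 0;
}

Keeping the "is the list empty?" check inside the helper lets every caller
drop its own list_empty() test and simply loop until the helper returns false,
which is what the three hunks below do.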

Reviewed-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
arch/x86/kvm/mmu.c

index 0f42645a063c11e0ea0c1db49cf7cbb8329d0e01..fdacabba6f62ea1f045d554c91df6d46147e4eb6 100644 (file)
@@ -2110,6 +2110,21 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
        }
 }
 
+static bool prepare_zap_oldest_mmu_page(struct kvm *kvm,
+                                       struct list_head *invalid_list)
+{
+       struct kvm_mmu_page *sp;
+
+       if (list_empty(&kvm->arch.active_mmu_pages))
+               return false;
+
+       sp = list_entry(kvm->arch.active_mmu_pages.prev,
+                       struct kvm_mmu_page, link);
+       kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
+
+       return true;
+}
+
 /*
  * Changing the number of mmu pages allocated to the vm
  * Note: if goal_nr_mmu_pages is too small, you will get dead lock
@@ -2117,23 +2132,15 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
 {
        LIST_HEAD(invalid_list);
-       /*
-        * If we set the number of mmu pages to be smaller be than the
-        * number of actived pages , we must to free some mmu pages before we
-        * change the value
-        */
 
        spin_lock(&kvm->mmu_lock);
 
        if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
-               while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages &&
-                       !list_empty(&kvm->arch.active_mmu_pages)) {
-                       struct kvm_mmu_page *page;
+               /* Need to free some mmu pages to achieve the goal. */
+               while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages)
+                       if (!prepare_zap_oldest_mmu_page(kvm, &invalid_list))
+                               break;
 
-                       page = container_of(kvm->arch.active_mmu_pages.prev,
-                                           struct kvm_mmu_page, link);
-                       kvm_mmu_prepare_zap_page(kvm, page, &invalid_list);
-               }
                kvm_mmu_commit_zap_page(kvm, &invalid_list);
                goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
        }
@@ -4007,13 +4014,10 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
        LIST_HEAD(invalid_list);
 
-       while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES &&
-              !list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
-               struct kvm_mmu_page *sp;
+       while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES) {
+               if (!prepare_zap_oldest_mmu_page(vcpu->kvm, &invalid_list))
+                       break;
 
-               sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
-                                 struct kvm_mmu_page, link);
-               kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
                ++vcpu->kvm->stat.mmu_recycled;
        }
        kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
@@ -4182,19 +4186,6 @@ restart:
        spin_unlock(&kvm->mmu_lock);
 }
 
-static void kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
-                                               struct list_head *invalid_list)
-{
-       struct kvm_mmu_page *page;
-
-       if (list_empty(&kvm->arch.active_mmu_pages))
-               return;
-
-       page = container_of(kvm->arch.active_mmu_pages.prev,
-                           struct kvm_mmu_page, link);
-       kvm_mmu_prepare_zap_page(kvm, page, invalid_list);
-}
-
 static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
 {
        struct kvm *kvm;
@@ -4229,7 +4220,7 @@ static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
                idx = srcu_read_lock(&kvm->srcu);
                spin_lock(&kvm->mmu_lock);
 
-               kvm_mmu_remove_some_alloc_mmu_pages(kvm, &invalid_list);
+               prepare_zap_oldest_mmu_page(kvm, &invalid_list);
                kvm_mmu_commit_zap_page(kvm, &invalid_list);
 
                spin_unlock(&kvm->mmu_lock);