KVM: MMU: Selectively set PageDirty when releasing guest memory
author    Izik Eidus <izike@localhost.localdomain>
          Tue, 20 Nov 2007 09:49:33 +0000 (11:49 +0200)
committer Avi Kivity <avi@qumranet.com>
          Wed, 30 Jan 2008 15:53:09 +0000 (17:53 +0200)
Improve dirty bit setting for pages that KVM releases: until now, every page
we released was marked dirty; from now on, only pages that could actually have
been dirtied are marked dirty.

Signed-off-by: Izik Eidus <izike@qumranet.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
drivers/kvm/kvm.h
drivers/kvm/kvm_main.c
drivers/kvm/mmu.c
drivers/kvm/paging_tmpl.h
drivers/kvm/x86.c
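
For orientation before the diff, here is a minimal C sketch of the release
pattern the patch introduces. The helper names (release_page_clean,
release_page_dirty, release_mapped_page) and the "writable" flag are
illustrative stand-ins, not part of the commit; the real functions are
kvm_release_page_clean()/kvm_release_page_dirty() in kvm_main.c, and the
callers below decide based on is_writeble_pte(*spte).

#include <linux/mm.h>

/* Illustrative sketch only; not code from this commit. */

/* The page was only ever mapped read-only: just drop the reference. */
static void release_page_clean(struct page *page)
{
	put_page(page);
}

/*
 * The page may have been written through a writable mapping: mark it
 * dirty before dropping the reference, unless it is a reserved page.
 */
static void release_page_dirty(struct page *page)
{
	if (!PageReserved(page))
		SetPageDirty(page);
	put_page(page);
}

/*
 * Caller-side decision, mirroring rmap_remove() below: only a shadow pte
 * that was mapped writable can have dirtied the page it points at.
 */
static void release_mapped_page(struct page *page, bool writable)
{
	if (writable)
		release_page_dirty(page);
	else
		release_page_clean(page);
}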

diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 52e80183e05018ae487ffb6b55d9856a31dc5f7b..c2acd74389fafe62f8e3c7e64f7e4e01f9fed04a 100644
@@ -393,7 +393,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
                            int user_alloc);
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
-void kvm_release_page(struct page *page);
+void kvm_release_page_clean(struct page *page);
+void kvm_release_page_dirty(struct page *page);
 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                        int len);
 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 4e1bd948847038e4483def237d5b5abefc8c4538..729573b844e5e860d4d108f68c5889f2d849ebc6 100644
@@ -543,13 +543,19 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 
 EXPORT_SYMBOL_GPL(gfn_to_page);
 
-void kvm_release_page(struct page *page)
+void kvm_release_page_clean(struct page *page)
+{
+       put_page(page);
+}
+EXPORT_SYMBOL_GPL(kvm_release_page_clean);
+
+void kvm_release_page_dirty(struct page *page)
 {
        if (!PageReserved(page))
                SetPageDirty(page);
        put_page(page);
 }
-EXPORT_SYMBOL_GPL(kvm_release_page);
+EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
 
 static int next_segment(unsigned long len, int offset)
 {
@@ -1055,7 +1061,7 @@ static struct page *kvm_vm_nopage(struct vm_area_struct *vma,
        /* current->mm->mmap_sem is already held so call lockless version */
        page = __gfn_to_page(kvm, pgoff);
        if (is_error_page(page)) {
-               kvm_release_page(page);
+               kvm_release_page_clean(page);
                return NOPAGE_SIGBUS;
        }
        if (type != NULL)
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 8add4d5c6840802f73a6b53b96cf9ad5ef9c010d..4624f3789b9a8df1d6ad490fee317dd564c44455 100644
@@ -420,14 +420,18 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
        struct kvm_rmap_desc *desc;
        struct kvm_rmap_desc *prev_desc;
        struct kvm_mmu_page *page;
+       struct page *release_page;
        unsigned long *rmapp;
        int i;
 
        if (!is_rmap_pte(*spte))
                return;
        page = page_header(__pa(spte));
-       kvm_release_page(pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >>
-                        PAGE_SHIFT));
+       release_page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
+       if (is_writeble_pte(*spte))
+               kvm_release_page_dirty(release_page);
+       else
+               kvm_release_page_clean(release_page);
        rmapp = gfn_to_rmap(kvm, page->gfns[spte - page->spt]);
        if (!*rmapp) {
                printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
@@ -893,7 +897,9 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
 {
        int level = PT32E_ROOT_LEVEL;
        hpa_t table_addr = vcpu->mmu.root_hpa;
+       struct page *page;
 
+       page = pfn_to_page(p >> PAGE_SHIFT);
        for (; ; level--) {
                u32 index = PT64_INDEX(v, level);
                u64 *table;
@@ -908,7 +914,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
                        pte = table[index];
                        was_rmapped = is_rmap_pte(pte);
                        if (is_shadow_present_pte(pte) && is_writeble_pte(pte)) {
-                               kvm_release_page(pfn_to_page(p >> PAGE_SHIFT));
+                               kvm_release_page_clean(page);
                                return 0;
                        }
                        mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
@@ -918,7 +924,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
                        if (!was_rmapped)
                                rmap_add(vcpu, &table[index], v >> PAGE_SHIFT);
                        else
-                               kvm_release_page(pfn_to_page(p >> PAGE_SHIFT));
+                               kvm_release_page_clean(page);
+
                        return 0;
                }
 
@@ -933,7 +940,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
                                                     1, 3, &table[index]);
                        if (!new_table) {
                                pgprintk("nonpaging_map: ENOMEM\n");
-                               kvm_release_page(pfn_to_page(p >> PAGE_SHIFT));
+                               kvm_release_page_clean(page);
                                return -ENOMEM;
                        }
 
@@ -1049,8 +1056,8 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
        paddr = gpa_to_hpa(vcpu->kvm, addr & PT64_BASE_ADDR_MASK);
 
        if (is_error_hpa(paddr)) {
-               kvm_release_page(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
-                                >> PAGE_SHIFT));
+               kvm_release_page_clean(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
+                                      >> PAGE_SHIFT));
                return 1;
        }
 
@@ -1580,7 +1587,7 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
                                       " valid guest gva %lx\n", audit_msg, va);
                        page = pfn_to_page((gpa & PT64_BASE_ADDR_MASK)
                                           >> PAGE_SHIFT);
-                       kvm_release_page(page);
+                       kvm_release_page_clean(page);
 
                }
        }
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index 77a2b22492bf20f512329dd9feb9a011a7b19ac0..bf15d127a48f817478cea2d504dda5a078e523fd 100644
@@ -212,8 +212,8 @@ static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
        if (is_error_hpa(paddr)) {
                set_shadow_pte(shadow_pte,
                               shadow_trap_nonpresent_pte | PT_SHADOW_IO_MARK);
-               kvm_release_page(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
-                                            >> PAGE_SHIFT));
+               kvm_release_page_clean(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
+                                      >> PAGE_SHIFT));
                return;
        }
 
@@ -259,12 +259,12 @@ unshadowed:
 
                        page = pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
                                           >> PAGE_SHIFT);
-                       kvm_release_page(page);
+                       kvm_release_page_clean(page);
                }
        }
        else
-               kvm_release_page(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
-                                >> PAGE_SHIFT));
+               kvm_release_page_clean(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
+                                      >> PAGE_SHIFT));
        if (!ptwrite || !*ptwrite)
                vcpu->last_pte_updated = shadow_pte;
 }
@@ -503,7 +503,7 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
                else
                        sp->spt[i] = shadow_notrap_nonpresent_pte;
        kunmap_atomic(gpt, KM_USER0);
-       kvm_release_page(page);
+       kvm_release_page_clean(page);
 }
 
 #undef pt_element_t
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
index 5a1b72fbaeaa5ff4153c3430cca09197da32e37b..6212984a2e6c6f5b557fa21029c7b6202a0c7707 100644
@@ -1472,7 +1472,7 @@ static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
 
        for (i = 0; i < ARRAY_SIZE(vcpu->pio.guest_pages); ++i)
                if (vcpu->pio.guest_pages[i]) {
-                       kvm_release_page(vcpu->pio.guest_pages[i]);
+                       kvm_release_page_dirty(vcpu->pio.guest_pages[i]);
                        vcpu->pio.guest_pages[i] = NULL;
                }
 }