mm: softdirty: enable write notifications on VMAs after VM_SOFTDIRTY cleared
author    Peter Feiner <pfeiner@google.com>
          Mon, 13 Oct 2014 22:55:46 +0000 (15:55 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Tue, 14 Oct 2014 00:18:28 +0000 (02:18 +0200)
For VMAs that don't want write notifications, PTEs created for read faults
have their write bit set.  If the read fault happens after VM_SOFTDIRTY is
cleared, then the PTE's softdirty bit will remain clear after subsequent
writes.

Here's a simple code snippet to demonstrate the bug:

  char* m = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
                 MAP_ANONYMOUS | MAP_SHARED, -1, 0);
  system("echo 4 > /proc/$PPID/clear_refs"); /* clear VM_SOFTDIRTY */
  assert(*m == '\0');     /* new PTE allows write access */
  assert(!soft_dirty(m)); /* soft-dirty bit starts clear */
  *m = 'x';               /* should dirty the page */
  assert(soft_dirty(m));  /* fails */
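
The soft_dirty() helper is not spelled out above; a minimal sketch, which
reads the soft-dirty flag (bit 55 of a pagemap entry) from
/proc/self/pagemap, might look like this:

  /* Hypothetical helper assumed by the snippet: returns the soft-dirty
   * flag (bit 55) of the pagemap entry covering addr. Error handling
   * elided for brevity. */
  #include <fcntl.h>
  #include <stdint.h>
  #include <unistd.h>

  static int soft_dirty(void *addr)
  {
          uint64_t entry;
          int fd = open("/proc/self/pagemap", O_RDONLY);
          off_t offset = ((uintptr_t)addr / getpagesize()) * sizeof(entry);

          pread(fd, &entry, sizeof(entry), offset);
          close(fd);
          return (entry >> 55) & 1;
  }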

With this patch, write notifications are enabled when VM_SOFTDIRTY is
cleared.  Furthermore, to avoid unnecessary faults, write notifications
are disabled when VM_SOFTDIRTY is set.
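
Mechanically (a simplified sketch, not the patch itself), enabling write
notifications means rebuilding vma->vm_page_prot as if the mapping were
private, so PTEs start out write-protected:

  /* Sketch: write-notification protections drop VM_SHARED, yielding
   * the read-only base protections of a private mapping. */
  if (vma_wants_writenotify(vma))
          vma->vm_page_prot = vm_get_page_prot(vma->vm_flags & ~VM_SHARED);

The first store then takes a fault, and the fault path's pte_mkdirty()
sets the soft-dirty bit alongside the hardware dirty bit.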

As a side effect of enabling and disabling write notifications with
care, this patch fixes a bug in mprotect where vm_page_prot bits set by
drivers were zapped on mprotect.  An analogous bug was fixed in mmap by
commit c9d0bf241451 ("mm: uncached vma support with writenotify").
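
To illustrate that bug with a hypothetical driver (illustration only; pfn
stands in for the device's page frame number):

  /* A ->mmap handler that marks its mapping uncached. Before this
   * patch, a later userspace mprotect() rebuilt vm_page_prot from
   * vm_flags alone, silently dropping the uncached attribute. */
  static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
  {
          vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
          return remap_pfn_range(vma, vma->vm_start, pfn,
                                 vma->vm_end - vma->vm_start,
                                 vma->vm_page_prot);
  }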

Signed-off-by: Peter Feiner <pfeiner@google.com>
Reported-by: Peter Feiner <pfeiner@google.com>
Suggested-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Pavel Emelyanov <xemul@parallels.com>
Cc: Jamie Liu <jamieliu@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
fs/proc/task_mmu.c
include/asm-generic/pgtable.h
include/linux/mm.h
mm/memory.c
mm/mmap.c
mm/mprotect.c

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index b7a7dc963a359229ee7607a8d32b9c28221810bf..4e0388cffe3db3f7ee88b9ab812d89c056d8c60b 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -827,8 +827,21 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                        .private = &cp,
                };
                down_read(&mm->mmap_sem);
-               if (type == CLEAR_REFS_SOFT_DIRTY)
+               if (type == CLEAR_REFS_SOFT_DIRTY) {
+                       for (vma = mm->mmap; vma; vma = vma->vm_next) {
+                               if (!(vma->vm_flags & VM_SOFTDIRTY))
+                                       continue;
+                               up_read(&mm->mmap_sem);
+                               down_write(&mm->mmap_sem);
+                               for (vma = mm->mmap; vma; vma = vma->vm_next) {
+                                       vma->vm_flags &= ~VM_SOFTDIRTY;
+                                       vma_set_page_prot(vma);
+                               }
+                               downgrade_write(&mm->mmap_sem);
+                               break;
+                       }
                        mmu_notifier_invalidate_range_start(mm, 0, -1);
+               }
                for (vma = mm->mmap; vma; vma = vma->vm_next) {
                        cp.vma = vma;
                        if (is_vm_hugetlb_page(vma))
@@ -848,10 +861,6 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                                continue;
                        if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
                                continue;
-                       if (type == CLEAR_REFS_SOFT_DIRTY) {
-                               if (vma->vm_flags & VM_SOFTDIRTY)
-                                       vma->vm_flags &= ~VM_SOFTDIRTY;
-                       }
                        walk_page_range(vma->vm_start, vma->vm_end,
                                        &clear_refs_walk);
                }
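
The locking dance above is needed because vm_flags and vm_page_prot may
only be modified with mmap_sem held for writing; the write lock is taken
only if some VMA still has VM_SOFTDIRTY set, and the inner loop restarts
from mm->mmap because the VMA list may change while the lock is dropped.
A condensed view (any_vma_has() and clear_all_softdirty() are invented
names for the open-coded loops):

  down_read(&mm->mmap_sem);
  if (any_vma_has(mm, VM_SOFTDIRTY)) {
          up_read(&mm->mmap_sem);         /* can't modify VMAs under read lock */
          down_write(&mm->mmap_sem);      /* VMA list may have changed here */
          clear_all_softdirty(mm);        /* clear flag, reset vm_page_prot */
          downgrade_write(&mm->mmap_sem); /* back to read mode for the walk */
  }
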
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 081ff8826bf6855630f1a7d5a3c51302d72b87c7..752e30d639045c2ec970b479c680a08d4381d4e1 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -253,6 +253,20 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
 #define pgprot_device pgprot_noncached
 #endif
 
+#ifndef pgprot_modify
+#define pgprot_modify pgprot_modify
+static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
+{
+       if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot)))
+               newprot = pgprot_noncached(newprot);
+       if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot)))
+               newprot = pgprot_writecombine(newprot);
+       if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot)))
+               newprot = pgprot_device(newprot);
+       return newprot;
+}
+#endif
+
 /*
  * When walking page tables, get the address of the next boundary,
  * or the end address of the range if that comes earlier.  Although no
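
The generic pgprot_modify() above carries an uncached, write-combined, or
device attribute from the old protections over to the new ones. Roughly
(oldprot and newflags here are hypothetical values for illustration):

  /* If oldprot was made uncached, the rebuilt protections are too. */
  pgprot_t oldprot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
  pgprot_t newprot = pgprot_modify(oldprot, vm_get_page_prot(newflags));
  /* newprot == pgprot_noncached(vm_get_page_prot(newflags)) */
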
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 4cd45cb95e6d375c88854b500dff684d283dd4d9..02d11ee7f19d1515ac47178575d653e52fc8e4e8 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1974,11 +1974,16 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
 
 #ifdef CONFIG_MMU
 pgprot_t vm_get_page_prot(unsigned long vm_flags);
+void vma_set_page_prot(struct vm_area_struct *vma);
 #else
 static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
 {
        return __pgprot(0);
 }
+static inline void vma_set_page_prot(struct vm_area_struct *vma)
+{
+       vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+}
 #endif
 
 #ifdef CONFIG_NUMA_BALANCING

diff --git a/mm/memory.c b/mm/memory.c
index e229970e4223ff02cb24b43b9e47e5ee211555d6..1cc6bfbd872ee17122b6e85859662696f595a363 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2053,7 +2053,8 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
        old_page = vm_normal_page(vma, address, orig_pte);
        if (!old_page) {
                /*
-                * VM_MIXEDMAP !pfn_valid() case
+                * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
+                * VM_PFNMAP VMA.
                 *
                 * We should not cow pages in a shared writeable mapping.
                 * Just mark the pages writable as we can't do any dirty
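
The updated comment covers the case where clearing VM_SOFTDIRTY has
write-protected a shared writable VM_PFNMAP mapping: with no struct page
to copy, do_wp_page() reuses the PTE, roughly:

  /* Sketch of do_wp_page()'s reuse path: no copy-on-write, just mark
   * the PTE young, dirty (pte_mkdirty() also sets soft-dirty on x86),
   * and writable again. */
  entry = pte_mkyoung(orig_pte);
  entry = maybe_mkwrite(pte_mkdirty(entry), vma);
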
diff --git a/mm/mmap.c b/mm/mmap.c
index 93d28c7e54201de5549dab1a3dd696d785a06302..7f855206e7fb2bb1f9a30fcf7745bff7a2ae3adb 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -89,6 +89,25 @@ pgprot_t vm_get_page_prot(unsigned long vm_flags)
 }
 EXPORT_SYMBOL(vm_get_page_prot);
 
+static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
+{
+       return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
+}
+
+/* Update vma->vm_page_prot to reflect vma->vm_flags. */
+void vma_set_page_prot(struct vm_area_struct *vma)
+{
+       unsigned long vm_flags = vma->vm_flags;
+
+       vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
+       if (vma_wants_writenotify(vma)) {
+               vm_flags &= ~VM_SHARED;
+               vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot,
+                                                    vm_flags);
+       }
+}
+
+
 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;  /* heuristic overcommit */
 int sysctl_overcommit_ratio __read_mostly = 50;        /* default is 50% */
 unsigned long sysctl_overcommit_kbytes __read_mostly;
@@ -1475,11 +1494,16 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
        if (vma->vm_ops && vma->vm_ops->page_mkwrite)
                return 1;
 
-       /* The open routine did something to the protections already? */
+       /* The open routine did something to the protections that pgprot_modify
+        * won't preserve? */
        if (pgprot_val(vma->vm_page_prot) !=
-           pgprot_val(vm_get_page_prot(vm_flags)))
+           pgprot_val(vm_pgprot_modify(vma->vm_page_prot, vm_flags)))
                return 0;
 
+       /* Do we need to track softdirty? */
+       if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(vm_flags & VM_SOFTDIRTY))
+               return 1;
+
        /* Specialty mapping? */
        if (vm_flags & VM_PFNMAP)
                return 0;
@@ -1615,21 +1639,6 @@ munmap_back:
                        goto free_vma;
        }
 
-       if (vma_wants_writenotify(vma)) {
-               pgprot_t pprot = vma->vm_page_prot;
-
-               /* Can vma->vm_page_prot have changed??
-                *
-                * Answer: Yes, drivers may have changed it in their
-                *         f_op->mmap method.
-                *
-                * Ensures that vmas marked as uncached stay that way.
-                */
-               vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
-               if (pgprot_val(pprot) == pgprot_val(pgprot_noncached(pprot)))
-                       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-       }
-
        vma_link(mm, vma, prev, rb_link, rb_parent);
        /* Once vma denies write, undo our temporary denial count */
        if (file) {
@@ -1663,6 +1672,8 @@ out:
         */
        vma->vm_flags |= VM_SOFTDIRTY;
 
+       vma_set_page_prot(vma);
+
        return addr;
 
 unmap_and_free_vma:

diff --git a/mm/mprotect.c b/mm/mprotect.c
index c43d557941f807471a3c39963f77a8ba3d32dd8a..ace93454ce8ebe10f0b3cf5278aafd0a0d5fe4df 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
-#ifndef pgprot_modify
-static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
-{
-       return newprot;
-}
-#endif
-
 /*
  * For a prot_numa update we only hold mmap_sem for read so there is a
  * potential race with faulting where a pmd was temporarily none. This
@@ -93,7 +86,9 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                                 * Avoid taking write faults for pages we
                                 * know to be dirty.
                                 */
-                               if (dirty_accountable && pte_dirty(ptent))
+                               if (dirty_accountable && pte_dirty(ptent) &&
+                                   (pte_soft_dirty(ptent) ||
+                                    !(vma->vm_flags & VM_SOFTDIRTY)))
                                        ptent = pte_mkwrite(ptent);
                                ptep_modify_prot_commit(mm, addr, pte, ptent);
                                updated = true;
@@ -320,13 +315,8 @@ success:
         * held in write mode.
         */
        vma->vm_flags = newflags;
-       vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
-                                         vm_get_page_prot(newflags));
-
-       if (vma_wants_writenotify(vma)) {
-               vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
-               dirty_accountable = 1;
-       }
+       dirty_accountable = vma_wants_writenotify(vma);
+       vma_set_page_prot(vma);
 
        change_protection(vma, start, end, vma->vm_page_prot,
                          dirty_accountable, 0);
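
Putting the demonstration together with the soft_dirty() helper sketched
earlier gives a rough standalone test (error handling elided); with this
patch applied, the final assertion passes:

  #include <assert.h>
  #include <stdlib.h>
  #include <sys/mman.h>
  #include <unistd.h>

  int main(void)
  {
          char *m = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
                         MAP_ANONYMOUS | MAP_SHARED, -1, 0);

          system("echo 4 > /proc/$PPID/clear_refs"); /* clear VM_SOFTDIRTY */
          assert(*m == '\0');     /* read fault; PTE is write-protected */
          assert(!soft_dirty(m));
          *m = 'x';               /* write fault sets soft-dirty */
          assert(soft_dirty(m));  /* failed before the patch, passes now */
          return 0;
  }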