git.openfabrics.org - ~emulex/infiniband.git/commitdiff
sh: Migrate from PG_mapped to PG_dcache_dirty.
author    Paul Mundt <lethal@linux-sh.org>
          Wed, 22 Jul 2009 10:20:49 +0000 (19:20 +0900)
committer Paul Mundt <lethal@linux-sh.org>
          Wed, 22 Jul 2009 10:20:49 +0000 (19:20 +0900)
This inverts the delayed dcache flush to bring it more in line with other
platforms, and in doing so opens the door to further optimization and
cleanup. Now that the update_mmu_cache() callsite only tests for the bit,
the implementation can gradually be split out and made generic, rather than
relying on a special implementation for each of the peculiar CPU types.

SH7705 in 32kB mode and SH-4 still need slightly different handling, but
this can remain isolated in the varying page copy/clear routines. On top of
that, SH-X3 is dcache coherent, so none of these tests are needed in the
PTEAEX version of update_mmu_cache(); that is killed off too.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
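
The deferred-flush scheme this converges on is worth spelling out. A minimal
sketch assembled from the hunks below (not verbatim kernel code;
__flush_dcache_page() stands in for the CPU-specific writeback):

    /* flush_dcache_page(): if no user mapping exists yet, just mark the
     * page dirty rather than flushing eagerly. */
    void flush_dcache_page(struct page *page)
    {
            struct address_space *mapping = page_mapping(page);

            if (mapping && !mapping_mapped(mapping))
                    set_bit(PG_dcache_dirty, &page->flags);
            else
                    __flush_dcache_page(page);  /* CPU-specific flush */
    }

    /* update_mmu_cache(): the deferred flush happens here, on the first
     * userspace fault, and only when the kernel alias of the page really
     * conflicts with the faulting user address. */
    void update_mmu_cache(struct vm_area_struct *vma,
                          unsigned long address, pte_t pte)
    {
            unsigned long pfn = pte_pfn(pte);

            if (pfn_valid(pfn)) {
                    struct page *page = pfn_to_page(pfn);

                    if (page_mapping(page) &&
                        test_and_clear_bit(PG_dcache_dirty, &page->flags)) {
                            unsigned long addr =
                                    (unsigned long)page_address(page);

                            if (pages_do_alias(addr, address & PAGE_MASK))
                                    __flush_wback_region((void *)addr,
                                                         PAGE_SIZE);
                    }
            }

            /* ... then load the TLB entry as before ... */
    }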
arch/sh/include/asm/page.h
arch/sh/include/asm/pgtable.h
arch/sh/include/cpu-sh3/cpu/cacheflush.h
arch/sh/include/cpu-sh4/cpu/cacheflush.h
arch/sh/mm/cache-sh4.c
arch/sh/mm/cache-sh7705.c
arch/sh/mm/pg-sh4.c
arch/sh/mm/pg-sh7705.c
arch/sh/mm/tlb-pteaex.c
arch/sh/mm/tlb-sh3.c
arch/sh/mm/tlb-sh4.c

diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h
index 49592c780a6e8d604df09d9ed45edb9728bcbdfc..a31ab40040f0a67c845be8413d9859980b7f1a4f 100644
@@ -50,6 +50,12 @@ extern unsigned long shm_align_mask;
 extern unsigned long max_low_pfn, min_low_pfn;
 extern unsigned long memory_start, memory_end;
 
+static inline unsigned long
+pages_do_alias(unsigned long addr1, unsigned long addr2)
+{
+       return (addr1 ^ addr2) & shm_align_mask;
+}
+
 extern void clear_page(void *to);
 extern void copy_page(void *to, void *from);
 
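
The new helper makes the aliasing test explicit: two virtual addresses can
alias in a virtually indexed dcache only when they differ in the index bits
above the page offset, which is exactly what masking the XOR with
shm_align_mask detects. For illustration only, assuming a 16kB cache way so
that shm_align_mask == 0x3fff:

    pages_do_alias(0x1000, 0x2000); /* 0x3000 & 0x3fff != 0: colors differ, flush needed */
    pages_do_alias(0x1000, 0x5000); /* 0x4000 & 0x3fff == 0: same color, no flush needed */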
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
index 2a011b18090b64418189db554c564c58bb59e7bf..d9f68f9c3cb341cc2e00e30a0c7a59043b5629f6 100644
@@ -133,13 +133,6 @@ typedef pte_t *pte_addr_t;
  */
 #define pgtable_cache_init()   do { } while (0)
 
-#if !defined(CONFIG_CACHE_OFF) && (defined(CONFIG_CPU_SH4) || \
-       defined(CONFIG_SH7705_CACHE_32KB))
-struct mm_struct;
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
-#endif
-
 struct vm_area_struct;
 extern void update_mmu_cache(struct vm_area_struct * vma,
                             unsigned long address, pte_t pte);
diff --git a/arch/sh/include/cpu-sh3/cpu/cacheflush.h b/arch/sh/include/cpu-sh3/cpu/cacheflush.h
index 1ac27aae6700d377f5f3ea3b46ad8b8ddcd7b021..6485ad5649adba34016fa1f95811d9868d1f2354 100644
  * SH4. Unlike the SH4 this is a unified cache so we need to do some work
  * in mmap when 'exec'ing a new binary
  */
- /* 32KB cache, 4kb PAGE sizes need to check bit 12 */
-#define CACHE_ALIAS 0x00001000
-
-#define PG_mapped      PG_arch_1
+#define PG_dcache_dirty        PG_arch_1
 
 void flush_cache_all(void);
 void flush_cache_mm(struct mm_struct *mm);
diff --git a/arch/sh/include/cpu-sh4/cpu/cacheflush.h b/arch/sh/include/cpu-sh4/cpu/cacheflush.h
index 065306d376eb694f59309d4f699ab84478c1b976..3564f1722195f4fa9422dfacbd6f8af965e066b8 100644
@@ -38,6 +38,6 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 /* Initialization of P3 area for copy_user_page */
 void p3_cache_init(void);
 
-#define PG_mapped      PG_arch_1
+#define PG_dcache_dirty        PG_arch_1
 
 #endif /* __ASM_CPU_SH4_CACHEFLUSH_H */
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 5cfe08dbb59ed0997c4451459577423b8713e817..c3a09b27f8d587c23d5175bafd9cdf7a1db5e80c 100644
@@ -14,6 +14,7 @@
 #include <linux/mm.h>
 #include <linux/io.h>
 #include <linux/mutex.h>
+#include <linux/fs.h>
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
@@ -246,7 +247,14 @@ static inline void flush_cache_4096(unsigned long start,
  */
 void flush_dcache_page(struct page *page)
 {
-       if (test_bit(PG_mapped, &page->flags)) {
+       struct address_space *mapping = page_mapping(page);
+
+#ifndef CONFIG_SMP
+       if (mapping && !mapping_mapped(mapping))
+               set_bit(PG_dcache_dirty, &page->flags);
+       else
+#endif
+       {
                unsigned long phys = PHYSADDR(page_address(page));
                unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
                int i, n;
diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
index 22dacc7788236c96162b9df9e6d3483b04c9a67b..fa37bff306b963d526df7922519df30c662983d4 100644
@@ -12,6 +12,7 @@
 #include <linux/init.h>
 #include <linux/mman.h>
 #include <linux/mm.h>
+#include <linux/fs.h>
 #include <linux/threads.h>
 #include <asm/addrspace.h>
 #include <asm/page.h>
@@ -128,7 +129,11 @@ static void __uses_jump_to_uncached __flush_dcache_page(unsigned long phys)
  */
 void flush_dcache_page(struct page *page)
 {
-       if (test_bit(PG_mapped, &page->flags))
+       struct address_space *mapping = page_mapping(page);
+
+       if (mapping && !mapping_mapped(mapping))
+               set_bit(PG_dcache_dirty, &page->flags);
+       else
                __flush_dcache_page(PHYSADDR(page_address(page)));
 }
 
diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c
index 2fe14da1f83909826d352249ba849488103fb4c2..f3c4b2a54fc756b2fc6b60e349d5bc20f1ebc8be 100644
@@ -15,8 +15,6 @@
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
-#define CACHE_ALIAS (current_cpu_data.dcache.alias_mask)
-
 #define kmap_get_fixmap_pte(vaddr)                                     \
        pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))
 
@@ -68,10 +66,9 @@ static inline void kunmap_coherent(struct page *page)
  */
 void clear_user_page(void *to, unsigned long address, struct page *page)
 {
-       __set_bit(PG_mapped, &page->flags);
-
        clear_page(to);
-       if ((((address & PAGE_MASK) ^ (unsigned long)to) & CACHE_ALIAS))
+
+       if (pages_do_alias((unsigned long)to, address & PAGE_MASK))
                __flush_wback_region(to, PAGE_SIZE);
 }
 
@@ -79,13 +76,14 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long vaddr, void *dst, const void *src,
                       unsigned long len)
 {
-       void *vto;
-
-       __set_bit(PG_mapped, &page->flags);
-
-       vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
-       memcpy(vto, src, len);
-       kunmap_coherent(vto);
+       if (page_mapped(page) && !test_bit(PG_dcache_dirty, &page->flags)) {
+               void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+               memcpy(vto, src, len);
+               kunmap_coherent(vto);
+       } else {
+               memcpy(dst, src, len);
+               set_bit(PG_dcache_dirty, &page->flags);
+       }
 
        if (vma->vm_flags & VM_EXEC)
                flush_cache_page(vma, vaddr, page_to_pfn(page));
@@ -95,13 +93,14 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
                         unsigned long vaddr, void *dst, const void *src,
                         unsigned long len)
 {
-       void *vfrom;
-
-       __set_bit(PG_mapped, &page->flags);
-
-       vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
-       memcpy(dst, vfrom, len);
-       kunmap_coherent(vfrom);
+       if (page_mapped(page) && !test_bit(PG_dcache_dirty, &page->flags)) {
+               void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+               memcpy(dst, vfrom, len);
+               kunmap_coherent(vfrom);
+       } else {
+               memcpy(dst, src, len);
+               set_bit(PG_dcache_dirty, &page->flags);
+       }
 }
 
 void copy_user_highpage(struct page *to, struct page *from,
@@ -109,14 +108,19 @@ void copy_user_highpage(struct page *to, struct page *from,
 {
        void *vfrom, *vto;
 
-       __set_bit(PG_mapped, &to->flags);
-
        vto = kmap_atomic(to, KM_USER1);
-       vfrom = kmap_coherent(from, vaddr);
-       copy_page(vto, vfrom);
-       kunmap_coherent(vfrom);
 
-       if (((vaddr ^ (unsigned long)vto) & CACHE_ALIAS))
+       if (page_mapped(from) && !test_bit(PG_dcache_dirty, &from->flags)) {
+               vfrom = kmap_coherent(from, vaddr);
+               copy_page(vto, vfrom);
+               kunmap_coherent(vfrom);
+       } else {
+               vfrom = kmap_atomic(from, KM_USER0);
+               copy_page(vto, vfrom);
+               kunmap_atomic(vfrom, KM_USER0);
+       }
+
+       if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
                __flush_wback_region(vto, PAGE_SIZE);
 
        kunmap_atomic(vto, KM_USER1);
@@ -124,23 +128,3 @@ void copy_user_highpage(struct page *to, struct page *from,
        smp_wmb();
 }
 EXPORT_SYMBOL(copy_user_highpage);
-
-/*
- * For SH-4, we have our own implementation for ptep_get_and_clear
- */
-pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-       pte_t pte = *ptep;
-
-       pte_clear(mm, addr, ptep);
-       if (!pte_not_present(pte)) {
-               unsigned long pfn = pte_pfn(pte);
-               if (pfn_valid(pfn)) {
-                       struct page *page = pfn_to_page(pfn);
-                       struct address_space *mapping = page_mapping(page);
-                       if (!mapping || !mapping_writably_mapped(mapping))
-                               __clear_bit(PG_mapped, &page->flags);
-               }
-       }
-       return pte;
-}
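
With the dirty bit set eagerly by flush_dcache_page() and consumed lazily by
update_mmu_cache(), there is no longer any per-page state to clear at PTE
teardown, so the custom ptep_get_and_clear() overrides (here and in
pg-sh7705.c below) can go. SH now falls back to the generic definition,
whose shape is roughly the following sketch (assuming the stock asm-generic
behaviour):

    static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
                                           unsigned long addr, pte_t *ptep)
    {
            pte_t pte = *ptep;
            pte_clear(mm, addr, ptep);
            return pte;
    }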
diff --git a/arch/sh/mm/pg-sh7705.c b/arch/sh/mm/pg-sh7705.c
index eaf25147194c80947a5ce74aadd93ca7e22ced26..684891b5c8c0e107c0b65f63e9dd4b6a76065213 100644
@@ -26,7 +26,7 @@
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
-static inline void __flush_purge_virtual_region(void *p1, void *virt, int size)
+static void __flush_purge_virtual_region(void *p1, void *virt, int size)
 {
        unsigned long v;
        unsigned long begin, end;
@@ -75,19 +75,13 @@ static inline void __flush_purge_virtual_region(void *p1, void *virt, int size)
  */
 void clear_user_page(void *to, unsigned long address, struct page *pg)
 {
-       struct page *page = virt_to_page(to);
-
-       __set_bit(PG_mapped, &page->flags);
-       if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) {
-               clear_page(to);
-               __flush_wback_region(to, PAGE_SIZE);
-       } else {
+       if (pages_do_alias(address, (unsigned long)to))
                __flush_purge_virtual_region(to,
                                             (void *)(address & 0xfffff000),
                                             PAGE_SIZE);
-               clear_page(to);
-               __flush_wback_region(to, PAGE_SIZE);
-       }
+
+       clear_page(to);
+       __flush_wback_region(to, PAGE_SIZE);
 }
 
 /*
@@ -98,41 +92,11 @@ void clear_user_page(void *to, unsigned long address, struct page *pg)
  */
 void copy_user_page(void *to, void *from, unsigned long address, struct page *pg)
 {
-       struct page *page = virt_to_page(to);
-
-
-       __set_bit(PG_mapped, &page->flags);
-       if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) {
-               copy_page(to, from);
-               __flush_wback_region(to, PAGE_SIZE);
-       } else {
+       if (pages_do_alias(address, (unsigned long)to))
                __flush_purge_virtual_region(to,
                                             (void *)(address & 0xfffff000),
                                             PAGE_SIZE);
-               copy_page(to, from);
-               __flush_wback_region(to, PAGE_SIZE);
-       }
-}
 
-/*
- * For SH7705, we have our own implementation for ptep_get_and_clear
- * Copied from pg-sh4.c
- */
-pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-       pte_t pte = *ptep;
-
-       pte_clear(mm, addr, ptep);
-       if (!pte_not_present(pte)) {
-               unsigned long pfn = pte_pfn(pte);
-               if (pfn_valid(pfn)) {
-                       struct page *page = pfn_to_page(pfn);
-                       struct address_space *mapping = page_mapping(page);
-                       if (!mapping || !mapping_writably_mapped(mapping))
-                               __clear_bit(PG_mapped, &page->flags);
-               }
-       }
-
-       return pte;
+       copy_page(to, from);
+       __flush_wback_region(to, PAGE_SIZE);
 }
-
diff --git a/arch/sh/mm/tlb-pteaex.c b/arch/sh/mm/tlb-pteaex.c
index 2aab3ea934d77909f74215759a5f4fc3072c12d3..c39b7736335226e70fa1710c5607ded0caf8d99c 100644
@@ -27,23 +27,6 @@ void update_mmu_cache(struct vm_area_struct * vma,
        if (vma && current->active_mm != vma->vm_mm)
                return;
 
-#ifndef CONFIG_CACHE_OFF
-       {
-               unsigned long pfn = pte_pfn(pte);
-
-               if (pfn_valid(pfn)) {
-                       struct page *page = pfn_to_page(pfn);
-
-                       if (!test_bit(PG_mapped, &page->flags)) {
-                               unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
-                               __flush_wback_region((void *)P1SEGADDR(phys),
-                                                    PAGE_SIZE);
-                               __set_bit(PG_mapped, &page->flags);
-                       }
-               }
-       }
-#endif
-
        local_irq_save(flags);
 
        /* Set PTEH register */
diff --git a/arch/sh/mm/tlb-sh3.c b/arch/sh/mm/tlb-sh3.c
index 17cb7c3adf2256b0206ea3d920578c47b9a0dbfc..9b8459c74abd2000d70105e06150cd8744af6d29 100644
@@ -33,25 +33,25 @@ void update_mmu_cache(struct vm_area_struct * vma,
        unsigned long flags;
        unsigned long pteval;
        unsigned long vpn;
+       unsigned long pfn = pte_pfn(pte);
+       struct page *page;
 
        /* Ptrace may call this routine. */
        if (vma && current->active_mm != vma->vm_mm)
                return;
 
+       page = pfn_to_page(pfn);
+       if (pfn_valid(pfn) && page_mapping(page)) {
 #if defined(CONFIG_SH7705_CACHE_32KB)
-       {
-               struct page *page = pte_page(pte);
-               unsigned long pfn = pte_pfn(pte);
+               int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
+               if (dirty) {
+                       unsigned long addr = (unsigned long)page_address(page);
 
-               if (pfn_valid(pfn) && !test_bit(PG_mapped, &page->flags)) {
-                       unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
-
-                       __flush_wback_region((void *)P1SEGADDR(phys),
-                                            PAGE_SIZE);
-                       __set_bit(PG_mapped, &page->flags);
+                       if (pages_do_alias(addr, address & PAGE_MASK))
+                               __flush_wback_region((void *)addr, PAGE_SIZE);
                }
-       }
 #endif
+       }
 
        local_irq_save(flags);
 
diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c
index f0c7b7397fa655804e59f161ffbf48ad37a801fd..cf50082d2435092433a8ec0a64e4090310c84e12 100644
@@ -21,27 +21,26 @@ void update_mmu_cache(struct vm_area_struct * vma,
        unsigned long flags;
        unsigned long pteval;
        unsigned long vpn;
+       unsigned long pfn = pte_pfn(pte);
+       struct page *page;
 
        /* Ptrace may call this routine. */
        if (vma && current->active_mm != vma->vm_mm)
                return;
 
-#ifndef CONFIG_CACHE_OFF
-       {
-               unsigned long pfn = pte_pfn(pte);
+       page = pfn_to_page(pfn);
+       if (pfn_valid(pfn) && page_mapping(page)) {
+#ifndef CONFIG_SMP
+               int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
+               if (dirty) {
 
-               if (pfn_valid(pfn)) {
-                       struct page *page = pfn_to_page(pfn);
+                       unsigned long addr = (unsigned long)page_address(page);
 
-                       if (!test_bit(PG_mapped, &page->flags)) {
-                               unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
-                               __flush_wback_region((void *)P1SEGADDR(phys),
-                                                    PAGE_SIZE);
-                               __set_bit(PG_mapped, &page->flags);
-                       }
+                       if (pages_do_alias(addr, address & PAGE_MASK))
+                               __flush_wback_region((void *)addr, PAGE_SIZE);
                }
-       }
 #endif
+       }
 
        local_irq_save(flags);