git.openfabrics.org - ~emulex/infiniband.git/commitdiff
sh: rename pg-mmu.c -> cache.c, enable generically.
author: Paul Mundt <lethal@linux-sh.org>
Sat, 15 Aug 2009 00:30:39 +0000 (09:30 +0900)
committer: Paul Mundt <lethal@linux-sh.org>
Sat, 15 Aug 2009 00:30:39 +0000 (09:30 +0900)
This builds in the newly created cache.c (renamed from pg-mmu.c) for both
MMU and NOMMU configurations. The kmap_coherent() stubs and alias
information recorded by each CPU family takes care of doing the right
thing while enabling the code to be commonly shared.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
arch/sh/include/asm/cacheflush.h
arch/sh/mm/Makefile_32
arch/sh/mm/cache.c [new file with mode: 0644]
arch/sh/mm/pg-mmu.c [deleted file]

index 0e87e87cc01fd4c37c39cfb68069804008abedba..4bf621e4146d9d551c861f87f028eb55826282da 100644 (file)
@@ -45,7 +45,6 @@ extern void __flush_purge_region(void *start, int size);
 extern void __flush_invalidate_region(void *start, int size);
 #endif
 
-#ifdef CONFIG_MMU
 #define ARCH_HAS_FLUSH_ANON_PAGE
 extern void __flush_anon_page(struct page *page, unsigned long);
 
@@ -55,7 +54,6 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
        if (boot_cpu_data.dcache.n_aliases && PageAnon(page))
                __flush_anon_page(page, vmaddr);
 }
-#endif
 
 #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
 static inline void flush_kernel_dcache_page(struct page *page)
index 30771b137f4829aad89969196dbd5b70baedd8fe..affcc9a15ceace86515db31554d226c399e8f629 100644 (file)
@@ -2,20 +2,20 @@
 # Makefile for the Linux SuperH-specific parts of the memory manager.
 #
 
-obj-y                  := init.o extable_32.o consistent.o mmap.o
+obj-y                  := cache.o init.o extable_32.o consistent.o mmap.o
 
 ifndef CONFIG_CACHE_OFF
-cache-$(CONFIG_CPU_SH2)                := cache-sh2.o
-cache-$(CONFIG_CPU_SH2A)       := cache-sh2a.o
-cache-$(CONFIG_CPU_SH3)                := cache-sh3.o
-cache-$(CONFIG_CPU_SH4)                := cache-sh4.o flush-sh4.o
-cache-$(CONFIG_SH7705_CACHE_32KB)      += cache-sh7705.o
+cacheops-$(CONFIG_CPU_SH2)             := cache-sh2.o
+cacheops-$(CONFIG_CPU_SH2A)            := cache-sh2a.o
+cacheops-$(CONFIG_CPU_SH3)             := cache-sh3.o
+cacheops-$(CONFIG_CPU_SH4)             := cache-sh4.o flush-sh4.o
+cacheops-$(CONFIG_SH7705_CACHE_32KB)   += cache-sh7705.o
 endif
 
-obj-y                  += $(cache-y)
+obj-y                  += $(cacheops-y)
 
 mmu-y                  := tlb-nommu.o pg-nommu.o
-mmu-$(CONFIG_MMU)      := fault_32.o kmap.o tlbflush_32.o ioremap_32.o pg-mmu.o
+mmu-$(CONFIG_MMU)      := fault_32.o kmap.o tlbflush_32.o ioremap_32.o
 
 obj-y                  += $(mmu-y)
 obj-$(CONFIG_DEBUG_FS) += asids-debugfs.o
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
new file mode 100644 (file)
index 0000000..f51d0a4
--- /dev/null
@@ -0,0 +1,129 @@
+/*
+ * arch/sh/mm/pg-mmu.c
+ *
+ * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
+ * Copyright (C) 2002 - 2009  Paul Mundt
+ *
+ * Released under the terms of the GNU GPL v2.0.
+ */
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/fs.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <asm/mmu_context.h>
+#include <asm/cacheflush.h>
+
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+                      unsigned long vaddr, void *dst, const void *src,
+                      unsigned long len)
+{
+       if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
+           !test_bit(PG_dcache_dirty, &page->flags)) {
+               void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+               memcpy(vto, src, len);
+               kunmap_coherent();
+       } else {
+               memcpy(dst, src, len);
+               if (boot_cpu_data.dcache.n_aliases)
+                       set_bit(PG_dcache_dirty, &page->flags);
+       }
+
+       if (vma->vm_flags & VM_EXEC)
+               flush_cache_page(vma, vaddr, page_to_pfn(page));
+}
+
+void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
+                        unsigned long vaddr, void *dst, const void *src,
+                        unsigned long len)
+{
+       if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
+           !test_bit(PG_dcache_dirty, &page->flags)) {
+               void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+               memcpy(dst, vfrom, len);
+               kunmap_coherent();
+       } else {
+               memcpy(dst, src, len);
+               if (boot_cpu_data.dcache.n_aliases)
+                       set_bit(PG_dcache_dirty, &page->flags);
+       }
+}
+
+void copy_user_highpage(struct page *to, struct page *from,
+                       unsigned long vaddr, struct vm_area_struct *vma)
+{
+       void *vfrom, *vto;
+
+       vto = kmap_atomic(to, KM_USER1);
+
+       if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
+           !test_bit(PG_dcache_dirty, &from->flags)) {
+               vfrom = kmap_coherent(from, vaddr);
+               copy_page(vto, vfrom);
+               kunmap_coherent();
+       } else {
+               vfrom = kmap_atomic(from, KM_USER0);
+               copy_page(vto, vfrom);
+               kunmap_atomic(vfrom, KM_USER0);
+       }
+
+       if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
+               __flush_wback_region(vto, PAGE_SIZE);
+
+       kunmap_atomic(vto, KM_USER1);
+       /* Make sure this page is cleared on other CPU's too before using it */
+       smp_wmb();
+}
+EXPORT_SYMBOL(copy_user_highpage);
+
+void clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+       void *kaddr = kmap_atomic(page, KM_USER0);
+
+       clear_page(kaddr);
+
+       if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
+               __flush_wback_region(kaddr, PAGE_SIZE);
+
+       kunmap_atomic(kaddr, KM_USER0);
+}
+EXPORT_SYMBOL(clear_user_highpage);
+
+void __update_cache(struct vm_area_struct *vma,
+                   unsigned long address, pte_t pte)
+{
+       struct page *page;
+       unsigned long pfn = pte_pfn(pte);
+
+       if (!boot_cpu_data.dcache.n_aliases)
+               return;
+
+       page = pfn_to_page(pfn);
+       if (pfn_valid(pfn) && page_mapping(page)) {
+               int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
+               if (dirty) {
+                       unsigned long addr = (unsigned long)page_address(page);
+
+                       if (pages_do_alias(addr, address & PAGE_MASK))
+                               __flush_wback_region((void *)addr, PAGE_SIZE);
+               }
+       }
+}
+
+void __flush_anon_page(struct page *page, unsigned long vmaddr)
+{
+       unsigned long addr = (unsigned long) page_address(page);
+
+       if (pages_do_alias(addr, vmaddr)) {
+               if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
+                   !test_bit(PG_dcache_dirty, &page->flags)) {
+                       void *kaddr;
+
+                       kaddr = kmap_coherent(page, vmaddr);
+                       __flush_wback_region((void *)kaddr, PAGE_SIZE);
+                       kunmap_coherent();
+               } else
+                       __flush_wback_region((void *)addr, PAGE_SIZE);
+       }
+}
diff --git a/arch/sh/mm/pg-mmu.c b/arch/sh/mm/pg-mmu.c
deleted file mode 100644 (file)
index f51d0a4..0000000
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * arch/sh/mm/pg-mmu.c
- *
- * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
- * Copyright (C) 2002 - 2009  Paul Mundt
- *
- * Released under the terms of the GNU GPL v2.0.
- */
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/mutex.h>
-#include <linux/fs.h>
-#include <linux/highmem.h>
-#include <linux/module.h>
-#include <asm/mmu_context.h>
-#include <asm/cacheflush.h>
-
-void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
-                      unsigned long vaddr, void *dst, const void *src,
-                      unsigned long len)
-{
-       if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
-           !test_bit(PG_dcache_dirty, &page->flags)) {
-               void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
-               memcpy(vto, src, len);
-               kunmap_coherent();
-       } else {
-               memcpy(dst, src, len);
-               if (boot_cpu_data.dcache.n_aliases)
-                       set_bit(PG_dcache_dirty, &page->flags);
-       }
-
-       if (vma->vm_flags & VM_EXEC)
-               flush_cache_page(vma, vaddr, page_to_pfn(page));
-}
-
-void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
-                        unsigned long vaddr, void *dst, const void *src,
-                        unsigned long len)
-{
-       if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
-           !test_bit(PG_dcache_dirty, &page->flags)) {
-               void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
-               memcpy(dst, vfrom, len);
-               kunmap_coherent();
-       } else {
-               memcpy(dst, src, len);
-               if (boot_cpu_data.dcache.n_aliases)
-                       set_bit(PG_dcache_dirty, &page->flags);
-       }
-}
-
-void copy_user_highpage(struct page *to, struct page *from,
-                       unsigned long vaddr, struct vm_area_struct *vma)
-{
-       void *vfrom, *vto;
-
-       vto = kmap_atomic(to, KM_USER1);
-
-       if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
-           !test_bit(PG_dcache_dirty, &from->flags)) {
-               vfrom = kmap_coherent(from, vaddr);
-               copy_page(vto, vfrom);
-               kunmap_coherent();
-       } else {
-               vfrom = kmap_atomic(from, KM_USER0);
-               copy_page(vto, vfrom);
-               kunmap_atomic(vfrom, KM_USER0);
-       }
-
-       if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
-               __flush_wback_region(vto, PAGE_SIZE);
-
-       kunmap_atomic(vto, KM_USER1);
-       /* Make sure this page is cleared on other CPU's too before using it */
-       smp_wmb();
-}
-EXPORT_SYMBOL(copy_user_highpage);
-
-void clear_user_highpage(struct page *page, unsigned long vaddr)
-{
-       void *kaddr = kmap_atomic(page, KM_USER0);
-
-       clear_page(kaddr);
-
-       if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
-               __flush_wback_region(kaddr, PAGE_SIZE);
-
-       kunmap_atomic(kaddr, KM_USER0);
-}
-EXPORT_SYMBOL(clear_user_highpage);
-
-void __update_cache(struct vm_area_struct *vma,
-                   unsigned long address, pte_t pte)
-{
-       struct page *page;
-       unsigned long pfn = pte_pfn(pte);
-
-       if (!boot_cpu_data.dcache.n_aliases)
-               return;
-
-       page = pfn_to_page(pfn);
-       if (pfn_valid(pfn) && page_mapping(page)) {
-               int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
-               if (dirty) {
-                       unsigned long addr = (unsigned long)page_address(page);
-
-                       if (pages_do_alias(addr, address & PAGE_MASK))
-                               __flush_wback_region((void *)addr, PAGE_SIZE);
-               }
-       }
-}
-
-void __flush_anon_page(struct page *page, unsigned long vmaddr)
-{
-       unsigned long addr = (unsigned long) page_address(page);
-
-       if (pages_do_alias(addr, vmaddr)) {
-               if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
-                   !test_bit(PG_dcache_dirty, &page->flags)) {
-                       void *kaddr;
-
-                       kaddr = kmap_coherent(page, vmaddr);
-                       __flush_wback_region((void *)kaddr, PAGE_SIZE);
-                       kunmap_coherent();
-               } else
-                       __flush_wback_region((void *)addr, PAGE_SIZE);
-       }
-}