From: Vitaly Bordug
Date: Thu, 6 Mar 2008 10:53:30 +0000 (+0300)
Subject: [POWERPC] 8xx: fix swap
X-Git-Tag: v2.6.25-rc6~30^2~18
X-Git-Url: https://openfabrics.org/gitweb/?a=commitdiff_plain;h=76db5bd26f2d79712459bf80ce0e5c0c5c31b769;p=~emulex%2Finfiniband.git

[POWERPC] 8xx: fix swap

This makes the swap routines operate correctly on ppc_8xx-based
machines.  The code has been revalidated on an mpc885ads (8MB SDRAM)
with a recent kernel.

Based on a patch from Yuri Tikhonov that does the same for arch/ppc.

The size of recent kernels makes swap very important on low-memory
platforms; they are effectively inoperable without it.

Signed-off-by: Yuri Tikhonov
Signed-off-by: Vitaly Bordug
Signed-off-by: Kumar Gala
---

diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index f7458396cd7..3c9452d4308 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -332,8 +332,18 @@ InstructionTLBMiss:
 	mfspr	r11, SPRN_MD_TWC	/* ....and get the pte address */
 	lwz	r10, 0(r11)	/* Get the pte */
 
+#ifdef CONFIG_SWAP
+	/* do not set the _PAGE_ACCESSED bit of a non-present page */
+	andi.	r11, r10, _PAGE_PRESENT
+	beq	4f
+	ori	r10, r10, _PAGE_ACCESSED
+	mfspr	r11, SPRN_MD_TWC	/* get the pte address again */
+	stw	r10, 0(r11)
+4:
+#else
 	ori	r10, r10, _PAGE_ACCESSED
 	stw	r10, 0(r11)
+#endif
 
 	/* The Linux PTE won't go exactly into the MMU TLB.
 	 * Software indicator bits 21, 22 and 28 must be clear.
@@ -398,8 +408,17 @@ DataStoreTLBMiss:
 	DO_8xx_CPU6(0x3b80, r3)
 	mtspr	SPRN_MD_TWC, r11
 
-	mfspr	r11, SPRN_MD_TWC	/* get the pte address again */
+#ifdef CONFIG_SWAP
+	/* do not set the _PAGE_ACCESSED bit of a non-present page */
+	andi.	r11, r10, _PAGE_PRESENT
+	beq	4f
 	ori	r10, r10, _PAGE_ACCESSED
+4:
+	/* and update pte in table */
+#else
+	ori	r10, r10, _PAGE_ACCESSED
+#endif
+	mfspr	r11, SPRN_MD_TWC	/* get the pte address again */
 	stw	r10, 0(r11)
 
 	/* The Linux PTE won't go exactly into the MMU TLB.
@@ -507,7 +526,16 @@ DataTLBError:
 
 	/* Update 'changed', among others.
 	*/
+#ifdef CONFIG_SWAP
+	ori	r10, r10, _PAGE_DIRTY|_PAGE_HWWRITE
+	/* do not set the _PAGE_ACCESSED bit of a non-present page */
+	andi.	r11, r10, _PAGE_PRESENT
+	beq	4f
+	ori	r10, r10, _PAGE_ACCESSED
+4:
+#else
 	ori	r10, r10, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
+#endif
 	mfspr	r11, SPRN_MD_TWC		/* Get pte address again */
 	stw	r10, 0(r11)		/* and update pte in table */
 
diff --git a/include/asm-powerpc/pgtable-ppc32.h b/include/asm-powerpc/pgtable-ppc32.h
index d1332bbcbd9..2c79f550272 100644
--- a/include/asm-powerpc/pgtable-ppc32.h
+++ b/include/asm-powerpc/pgtable-ppc32.h
@@ -339,14 +339,6 @@ extern int icache_44x_need_flush;
 #define _PMD_PAGE_MASK	0x000c
 #define _PMD_PAGE_8M	0x000c
 
-/*
- * The 8xx TLB miss handler allegedly sets _PAGE_ACCESSED in the PTE
- * for an address even if _PAGE_PRESENT is not set, as a performance
- * optimization.  This is a bug if you ever want to use swap unless
- * _PAGE_ACCESSED is 2, which it isn't, or unless you have 8xx-specific
- * definitions for __swp_entry etc. below, which would be gross.
- *  -- paulus
- */
 #define _PTE_NONE_MASK _PAGE_ACCESSED
 
 #else /* CONFIG_6xx */
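
Why the unconditional _PAGE_ACCESSED update broke swap (see the paulus
comment removed from pgtable-ppc32.h above): once a page is swapped out,
its PTE is marked non-present and the remaining bits are reused to hold
the swap entry (swap type and offset).  OR-ing _PAGE_ACCESSED into such
a PTE therefore silently changes the stored swap entry.  Below is a
minimal userspace C sketch of that failure mode; the bit positions and
helper names are hypothetical, chosen only to show the overlap, and are
not the real 8xx PTE layout or the kernel's __swp_entry encoding.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_PRESENT	0x001u	/* hypothetical "present" flag bit  */
#define PAGE_ACCESSED	0x020u	/* hypothetical "accessed" flag bit */

/* Hypothetical swap-entry encoding: the swap offset lives in the bits
 * above the low flag bits of a non-present PTE. */
static uint32_t swp_entry_to_pte(uint32_t offset) { return offset << 3; }
static uint32_t pte_to_swp_offset(uint32_t pte)   { return pte >> 3; }

int main(void)
{
	uint32_t pte = swp_entry_to_pte(42);	/* page swapped out at offset 42 */

	/* Old handler behaviour: set ACCESSED unconditionally. */
	uint32_t corrupted = pte | PAGE_ACCESSED;

	/* Patched behaviour: only touch PTEs that are actually present. */
	uint32_t fixed = (pte & PAGE_PRESENT) ? (pte | PAGE_ACCESSED) : pte;

	printf("offset: before=%u  unconditional-OR=%u  patched=%u\n",
	       pte_to_swp_offset(pte),
	       pte_to_swp_offset(corrupted),
	       pte_to_swp_offset(fixed));

	assert(pte_to_swp_offset(corrupted) != 42);	/* swap entry corrupted */
	assert(pte_to_swp_offset(fixed) == 42);		/* swap entry preserved */
	return 0;
}

Presumably this is also why the patch guards the new code with
CONFIG_SWAP: when swap is disabled, a non-present PTE does not carry a
swap entry, so the shorter unconditional update can be kept.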