From: Paul Mackerras Date: Mon, 31 Oct 2005 02:37:12 +0000 (+1100) Subject: Merge ../linux-2.6 by hand X-Git-Tag: v2.6.15-rc1~59^2~15 X-Git-Url: https://openfabrics.org/gitweb/?a=commitdiff_plain;h=23fd07750a789a66fe88cf173d52a18f1a387da4;p=~shefty%2Frdma-dev.git Merge ../linux-2.6 by hand --- 23fd07750a789a66fe88cf173d52a18f1a387da4 diff --cc arch/powerpc/mm/mem.c index e43e8ef7008,00000000000..117b00012e1 mode 100644,000000..100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@@ -1,484 -1,0 +1,564 @@@ +/* + * PowerPC version + * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) + * + * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) + * and Cort Dougan (PReP) (cort@cs.nmt.edu) + * Copyright (C) 1996 Paul Mackerras + * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk). + * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com) + * + * Derived from "arch/i386/mm/init.c" + * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_PPC64 +#include +#endif + +#include "mmu_decl.h" + +#ifndef CPU_FTR_COHERENT_ICACHE +#define CPU_FTR_COHERENT_ICACHE 0 /* XXX for now */ +#define CPU_FTR_NOEXECUTE 0 +#endif + +int init_bootmem_done; +int mem_init_done; +unsigned long memory_limit; + +/* + * This is called by /dev/mem to know if a given address has to + * be mapped non-cacheable or not + */ +int page_is_ram(unsigned long pfn) +{ + unsigned long paddr = (pfn << PAGE_SHIFT); + +#ifndef CONFIG_PPC64 /* XXX for now */ + return paddr < __pa(high_memory); +#else + int i; + for (i=0; i < lmb.memory.cnt; i++) { + unsigned long base; + + base = lmb.memory.region[i].base; + + if ((paddr >= base) && + (paddr < (base + lmb.memory.region[i].size))) { + return 1; + } + } + + return 0; +#endif +} +EXPORT_SYMBOL(page_is_ram); + +pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, + unsigned long size, pgprot_t vma_prot) +{ + if (ppc_md.phys_mem_access_prot) + return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot); + + if (!page_is_ram(pfn)) + vma_prot = __pgprot(pgprot_val(vma_prot) + | _PAGE_GUARDED | _PAGE_NO_CACHE); + return vma_prot; +} +EXPORT_SYMBOL(phys_mem_access_prot); + ++#ifdef CONFIG_MEMORY_HOTPLUG ++ ++void online_page(struct page *page) ++{ ++ ClearPageReserved(page); ++ free_cold_page(page); ++ totalram_pages++; ++ num_physpages++; ++} ++ ++/* ++ * This works only for the non-NUMA case. Later, we'll need a lookup ++ * to convert from real physical addresses to nid, that doesn't use ++ * pfn_to_nid(). ++ */ ++int __devinit add_memory(u64 start, u64 size) ++{ ++ struct pglist_data *pgdata = NODE_DATA(0); ++ struct zone *zone; ++ unsigned long start_pfn = start >> PAGE_SHIFT; ++ unsigned long nr_pages = size >> PAGE_SHIFT; ++ ++ /* this should work for most non-highmem platforms */ ++ zone = pgdata->node_zones; ++ ++ return __add_pages(zone, start_pfn, nr_pages); ++ ++ return 0; ++} ++ ++/* ++ * First pass at this code will check to determine if the remove ++ * request is within the RMO. 
Do not allow removal within the RMO. ++ */ ++int __devinit remove_memory(u64 start, u64 size) ++{ ++ struct zone *zone; ++ unsigned long start_pfn, end_pfn, nr_pages; ++ ++ start_pfn = start >> PAGE_SHIFT; ++ nr_pages = size >> PAGE_SHIFT; ++ end_pfn = start_pfn + nr_pages; ++ ++ printk("%s(): Attempting to remove memoy in range " ++ "%lx to %lx\n", __func__, start, start+size); ++ /* ++ * check for range within RMO ++ */ ++ zone = page_zone(pfn_to_page(start_pfn)); ++ ++ printk("%s(): memory will be removed from " ++ "the %s zone\n", __func__, zone->name); ++ ++ /* ++ * not handling removing memory ranges that ++ * overlap multiple zones yet ++ */ ++ if (end_pfn > (zone->zone_start_pfn + zone->spanned_pages)) ++ goto overlap; ++ ++ /* make sure it is NOT in RMO */ ++ if ((start < lmb.rmo_size) || ((start+size) < lmb.rmo_size)) { ++ printk("%s(): range to be removed must NOT be in RMO!\n", ++ __func__); ++ goto in_rmo; ++ } ++ ++ return __remove_pages(zone, start_pfn, nr_pages); ++ ++overlap: ++ printk("%s(): memory range to be removed overlaps " ++ "multiple zones!!!\n", __func__); ++in_rmo: ++ return -1; ++} ++#endif /* CONFIG_MEMORY_HOTPLUG */ ++ +void show_mem(void) +{ + unsigned long total = 0, reserved = 0; + unsigned long shared = 0, cached = 0; + unsigned long highmem = 0; + struct page *page; + pg_data_t *pgdat; + unsigned long i; + + printk("Mem-info:\n"); + show_free_areas(); + printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); + for_each_pgdat(pgdat) { ++ unsigned long flags; ++ pgdat_resize_lock(pgdat, &flags); + for (i = 0; i < pgdat->node_spanned_pages; i++) { + page = pgdat_page_nr(pgdat, i); + total++; + if (PageHighMem(page)) + highmem++; + if (PageReserved(page)) + reserved++; + else if (PageSwapCache(page)) + cached++; + else if (page_count(page)) + shared += page_count(page) - 1; + } ++ pgdat_resize_unlock(pgdat, &flags); + } + printk("%ld pages of RAM\n", total); +#ifdef CONFIG_HIGHMEM + printk("%ld pages of HIGHMEM\n", highmem); +#endif + printk("%ld reserved pages\n", reserved); + printk("%ld pages shared\n", shared); + printk("%ld pages swap cached\n", cached); +} + +/* + * Initialize the bootmem system and give it all the memory we + * have available. If we are using highmem, we only put the + * lowmem into the bootmem system. + */ +#ifndef CONFIG_NEED_MULTIPLE_NODES +void __init do_init_bootmem(void) +{ + unsigned long i; + unsigned long start, bootmap_pages; + unsigned long total_pages; + int boot_mapsize; + + max_pfn = total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT; +#ifdef CONFIG_HIGHMEM + total_pages = total_lowmem >> PAGE_SHIFT; +#endif + + /* + * Find an area to use for the bootmem bitmap. Calculate the size of + * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE. + * Add 1 additional page in case the address isn't page-aligned. + */ + bootmap_pages = bootmem_bootmap_pages(total_pages); + + start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE); + BUG_ON(!start); + + boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages); + + /* Add all physical memory to the bootmem map, mark each area + * present. 
+ */ + for (i = 0; i < lmb.memory.cnt; i++) { + unsigned long base = lmb.memory.region[i].base; + unsigned long size = lmb_size_bytes(&lmb.memory, i); +#ifdef CONFIG_HIGHMEM + if (base >= total_lowmem) + continue; + if (base + size > total_lowmem) + size = total_lowmem - base; +#endif + free_bootmem(base, size); + } + + /* reserve the sections we're already using */ + for (i = 0; i < lmb.reserved.cnt; i++) + reserve_bootmem(lmb.reserved.region[i].base, + lmb_size_bytes(&lmb.reserved, i)); + + /* XXX need to clip this if using highmem? */ + for (i = 0; i < lmb.memory.cnt; i++) + memory_present(0, lmb_start_pfn(&lmb.memory, i), + lmb_end_pfn(&lmb.memory, i)); + init_bootmem_done = 1; +} + +/* + * paging_init() sets up the page tables - in fact we've already done this. + */ +void __init paging_init(void) +{ + unsigned long zones_size[MAX_NR_ZONES]; + unsigned long zholes_size[MAX_NR_ZONES]; + unsigned long total_ram = lmb_phys_mem_size(); + unsigned long top_of_ram = lmb_end_of_DRAM(); + +#ifdef CONFIG_HIGHMEM + map_page(PKMAP_BASE, 0, 0); /* XXX gross */ + pkmap_page_table = pte_offset_kernel(pmd_offset(pgd_offset_k + (PKMAP_BASE), PKMAP_BASE), PKMAP_BASE); + map_page(KMAP_FIX_BEGIN, 0, 0); /* XXX gross */ + kmap_pte = pte_offset_kernel(pmd_offset(pgd_offset_k + (KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN); + kmap_prot = PAGE_KERNEL; +#endif /* CONFIG_HIGHMEM */ + + printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n", + top_of_ram, total_ram); + printk(KERN_INFO "Memory hole size: %ldMB\n", + (top_of_ram - total_ram) >> 20); + /* + * All pages are DMA-able so we put them all in the DMA zone. + */ + memset(zones_size, 0, sizeof(zones_size)); + memset(zholes_size, 0, sizeof(zholes_size)); + + zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT; + zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT; + +#ifdef CONFIG_HIGHMEM + zones_size[ZONE_DMA] = total_lowmem >> PAGE_SHIFT; + zones_size[ZONE_HIGHMEM] = (total_memory - total_lowmem) >> PAGE_SHIFT; + zholes_size[ZONE_HIGHMEM] = (top_of_ram - total_ram) >> PAGE_SHIFT; +#else + zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT; + zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT; +#endif /* CONFIG_HIGHMEM */ + + free_area_init_node(0, NODE_DATA(0), zones_size, + __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size); +} +#endif /* ! 
CONFIG_NEED_MULTIPLE_NODES */ + +void __init mem_init(void) +{ +#ifdef CONFIG_NEED_MULTIPLE_NODES + int nid; +#endif + pg_data_t *pgdat; + unsigned long i; + struct page *page; + unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize; + + num_physpages = max_pfn; /* RAM is assumed contiguous */ + high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); + +#ifdef CONFIG_NEED_MULTIPLE_NODES + for_each_online_node(nid) { + if (NODE_DATA(nid)->node_spanned_pages != 0) { + printk("freeing bootmem node %x\n", nid); + totalram_pages += + free_all_bootmem_node(NODE_DATA(nid)); + } + } +#else + max_mapnr = num_physpages; + totalram_pages += free_all_bootmem(); +#endif + for_each_pgdat(pgdat) { + for (i = 0; i < pgdat->node_spanned_pages; i++) { + page = pgdat_page_nr(pgdat, i); + if (PageReserved(page)) + reservedpages++; + } + } + + codesize = (unsigned long)&_sdata - (unsigned long)&_stext; + datasize = (unsigned long)&__init_begin - (unsigned long)&_sdata; + initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin; + bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start; + +#ifdef CONFIG_HIGHMEM + { + unsigned long pfn, highmem_mapnr; + + highmem_mapnr = total_lowmem >> PAGE_SHIFT; + for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) { + struct page *page = pfn_to_page(pfn); + + ClearPageReserved(page); + set_page_count(page, 1); + __free_page(page); + totalhigh_pages++; + } + totalram_pages += totalhigh_pages; + printk(KERN_INFO "High memory: %luk\n", + totalhigh_pages << (PAGE_SHIFT-10)); + } +#endif /* CONFIG_HIGHMEM */ + + printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, " + "%luk reserved, %luk data, %luk bss, %luk init)\n", + (unsigned long)nr_free_pages() << (PAGE_SHIFT-10), + num_physpages << (PAGE_SHIFT-10), + codesize >> 10, + reservedpages << (PAGE_SHIFT-10), + datasize >> 10, + bsssize >> 10, + initsize >> 10); + + mem_init_done = 1; + +#ifdef CONFIG_PPC64 + /* Initialize the vDSO */ + vdso_init(); +#endif +} + +/* + * This is called when a page has been modified by the kernel. + * It just marks the page as not i-cache clean. We do the i-cache + * flush later when the page is given to a user process, if necessary. 
+ */ +void flush_dcache_page(struct page *page) +{ + if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) + return; + /* avoid an atomic op if possible */ + if (test_bit(PG_arch_1, &page->flags)) + clear_bit(PG_arch_1, &page->flags); +} +EXPORT_SYMBOL(flush_dcache_page); + +void flush_dcache_icache_page(struct page *page) +{ +#ifdef CONFIG_BOOKE + void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE); + __flush_dcache_icache(start); + kunmap_atomic(start, KM_PPC_SYNC_ICACHE); +#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64) + /* On 8xx there is no need to kmap since highmem is not supported */ + __flush_dcache_icache(page_address(page)); +#else + __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT); +#endif + +} +void clear_user_page(void *page, unsigned long vaddr, struct page *pg) +{ + clear_page(page); + + if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) + return; + /* + * We shouldnt have to do this, but some versions of glibc + * require it (ld.so assumes zero filled pages are icache clean) + * - Anton + */ + + /* avoid an atomic op if possible */ + if (test_bit(PG_arch_1, &pg->flags)) + clear_bit(PG_arch_1, &pg->flags); +} +EXPORT_SYMBOL(clear_user_page); + +void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, + struct page *pg) +{ + copy_page(vto, vfrom); + + /* + * We should be able to use the following optimisation, however + * there are two problems. + * Firstly a bug in some versions of binutils meant PLT sections + * were not marked executable. + * Secondly the first word in the GOT section is blrl, used + * to establish the GOT address. Until recently the GOT was + * not marked executable. + * - Anton + */ +#if 0 + if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0)) + return; +#endif + + if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) + return; + + /* avoid an atomic op if possible */ + if (test_bit(PG_arch_1, &pg->flags)) + clear_bit(PG_arch_1, &pg->flags); +} + +void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, + unsigned long addr, int len) +{ + unsigned long maddr; + + maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK); + flush_icache_range(maddr, maddr + len); + kunmap(page); +} +EXPORT_SYMBOL(flush_icache_user_range); + +/* + * This is called at the end of handling a user page fault, when the + * fault has been handled by updating a PTE in the linux page tables. + * We use it to preload an HPTE into the hash table corresponding to + * the updated linux PTE. + * + * This must always be called with the mm->page_table_lock held + */ +void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, + pte_t pte) +{ + /* handle i-cache coherency */ + unsigned long pfn = pte_pfn(pte); +#ifdef CONFIG_PPC32 + pmd_t *pmd; +#else + unsigned long vsid; + void *pgdir; + pte_t *ptep; + int local = 0; + cpumask_t tmp; + unsigned long flags; +#endif + + /* handle i-cache coherency */ + if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) && + !cpu_has_feature(CPU_FTR_NOEXECUTE) && + pfn_valid(pfn)) { + struct page *page = pfn_to_page(pfn); + if (!PageReserved(page) + && !test_bit(PG_arch_1, &page->flags)) { + if (vma->vm_mm == current->active_mm) { +#ifdef CONFIG_8xx + /* On 8xx, cache control instructions (particularly + * "dcbst" from flush_dcache_icache) fault as write + * operation if there is an unpopulated TLB entry + * for the address in question. To workaround that, + * we invalidate the TLB here, thus avoiding dcbst + * misbehaviour. 
+ */ + _tlbie(address); +#endif + __flush_dcache_icache((void *) address); + } else + flush_dcache_icache_page(page); + set_bit(PG_arch_1, &page->flags); + } + } + +#ifdef CONFIG_PPC_STD_MMU + /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */ + if (!pte_young(pte) || address >= TASK_SIZE) + return; +#ifdef CONFIG_PPC32 + if (Hash == 0) + return; + pmd = pmd_offset(pgd_offset(vma->vm_mm, address), address); + if (!pmd_none(*pmd)) + add_hash_page(vma->vm_mm->context, address, pmd_val(*pmd)); +#else + pgdir = vma->vm_mm->pgd; + if (pgdir == NULL) + return; + + ptep = find_linux_pte(pgdir, address); + if (!ptep) + return; + + vsid = get_vsid(vma->vm_mm->context.id, address); + + local_irq_save(flags); + tmp = cpumask_of_cpu(smp_processor_id()); + if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp)) + local = 1; + + __hash_page(address, 0, vsid, ptep, 0x300, local); + local_irq_restore(flags); +#endif +#endif +} diff --cc arch/powerpc/mm/pgtable_64.c index 484d24f9208,00000000000..b79a7820613 mode 100644,000000..100644 --- a/arch/powerpc/mm/pgtable_64.c +++ b/arch/powerpc/mm/pgtable_64.c @@@ -1,349 -1,0 +1,347 @@@ +/* + * This file contains ioremap and related functions for 64-bit machines. + * + * Derived from arch/ppc64/mm/init.c + * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) + * + * Modifications by Paul Mackerras (PowerMac) (paulus@samba.org) + * and Cort Dougan (PReP) (cort@cs.nmt.edu) + * Copyright (C) 1996 Paul Mackerras + * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk). + * + * Derived from "arch/i386/mm/init.c" + * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds + * + * Dave Engebretsen + * Rework for PPC64 port. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +unsigned long ioremap_bot = IMALLOC_BASE; +static unsigned long phbs_io_bot = PHBS_IO_BASE; + +#ifdef CONFIG_PPC_ISERIES + +void __iomem *ioremap(unsigned long addr, unsigned long size) +{ + return (void __iomem *)addr; +} + +extern void __iomem *__ioremap(unsigned long addr, unsigned long size, + unsigned long flags) +{ + return (void __iomem *)addr; +} + +void iounmap(volatile void __iomem *addr) +{ + return; +} + +#else + +/* + * map_io_page currently only called by __ioremap + * map_io_page adds an entry to the ioremap page table + * and adds an entry to the HPT, possibly bolting it + */ +static int map_io_page(unsigned long ea, unsigned long pa, int flags) +{ + pgd_t *pgdp; + pud_t *pudp; + pmd_t *pmdp; + pte_t *ptep; + unsigned long vsid; + + if (mem_init_done) { - spin_lock(&init_mm.page_table_lock); + pgdp = pgd_offset_k(ea); + pudp = pud_alloc(&init_mm, pgdp, ea); + if (!pudp) + return -ENOMEM; + pmdp = pmd_alloc(&init_mm, pudp, ea); + if (!pmdp) + return -ENOMEM; - ptep = pte_alloc_kernel(&init_mm, pmdp, ea); ++ ptep = pte_alloc_kernel(pmdp, ea); + if (!ptep) + return -ENOMEM; + set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, + __pgprot(flags))); - spin_unlock(&init_mm.page_table_lock); + } else { + unsigned long va, vpn, hash, hpteg; + + /* + * If the mm subsystem is not fully up, we cannot create a + * linux page table entry for this mapping. Simply bolt an + * entry in the hardware page table. + */ + vsid = get_kernel_vsid(ea); + va = (vsid << 28) | (ea & 0xFFFFFFF); + vpn = va >> PAGE_SHIFT; + + hash = hpt_hash(vpn, 0); + + hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); + + /* Panic if a pte grpup is full */ + if (ppc_md.hpte_insert(hpteg, va, pa >> PAGE_SHIFT, + HPTE_V_BOLTED, + _PAGE_NO_CACHE|_PAGE_GUARDED|PP_RWXX) + == -1) { + panic("map_io_page: could not insert mapping"); + } + } + return 0; +} + + +static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa, + unsigned long ea, unsigned long size, + unsigned long flags) +{ + unsigned long i; + + if ((flags & _PAGE_PRESENT) == 0) + flags |= pgprot_val(PAGE_KERNEL); + + for (i = 0; i < size; i += PAGE_SIZE) + if (map_io_page(ea+i, pa+i, flags)) + return NULL; + + return (void __iomem *) (ea + (addr & ~PAGE_MASK)); +} + + +void __iomem * +ioremap(unsigned long addr, unsigned long size) +{ + return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED); +} + +void __iomem * __ioremap(unsigned long addr, unsigned long size, + unsigned long flags) +{ + unsigned long pa, ea; + void __iomem *ret; + + /* + * Choose an address to map it to. + * Once the imalloc system is running, we use it. + * Before that, we map using addresses going + * up from ioremap_bot. 
imalloc will use + * the addresses from ioremap_bot through + * IMALLOC_END + * + */ + pa = addr & PAGE_MASK; + size = PAGE_ALIGN(addr + size) - pa; + + if (size == 0) + return NULL; + + if (mem_init_done) { + struct vm_struct *area; + area = im_get_free_area(size); + if (area == NULL) + return NULL; + ea = (unsigned long)(area->addr); + ret = __ioremap_com(addr, pa, ea, size, flags); + if (!ret) + im_free(area->addr); + } else { + ea = ioremap_bot; + ret = __ioremap_com(addr, pa, ea, size, flags); + if (ret) + ioremap_bot += size; + } + return ret; +} + +#define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK)) + +int __ioremap_explicit(unsigned long pa, unsigned long ea, + unsigned long size, unsigned long flags) +{ + struct vm_struct *area; + void __iomem *ret; + + /* For now, require page-aligned values for pa, ea, and size */ + if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) || + !IS_PAGE_ALIGNED(size)) { + printk(KERN_ERR "unaligned value in %s\n", __FUNCTION__); + return 1; + } + + if (!mem_init_done) { + /* Two things to consider in this case: + * 1) No records will be kept (imalloc, etc) that the region + * has been remapped + * 2) It won't be easy to iounmap() the region later (because + * of 1) + */ + ; + } else { + area = im_get_area(ea, size, + IM_REGION_UNUSED|IM_REGION_SUBSET|IM_REGION_EXISTS); + if (area == NULL) { + /* Expected when PHB-dlpar is in play */ + return 1; + } + if (ea != (unsigned long) area->addr) { + printk(KERN_ERR "unexpected addr return from " + "im_get_area\n"); + return 1; + } + } + + ret = __ioremap_com(pa, pa, ea, size, flags); + if (ret == NULL) { + printk(KERN_ERR "ioremap_explicit() allocation failure !\n"); + return 1; + } + if (ret != (void *) ea) { + printk(KERN_ERR "__ioremap_com() returned unexpected addr\n"); + return 1; + } + + return 0; +} + +/* + * Unmap an IO region and remove it from imalloc'd list. + * Access to IO memory should be serialized by driver. + * This code is modeled after vmalloc code - unmap_vm_area() + * + * XXX what about calls before mem_init_done (ie python_countermeasures()) + */ +void iounmap(volatile void __iomem *token) +{ + void *addr; + + if (!mem_init_done) + return; + + addr = (void *) ((unsigned long __force) token & PAGE_MASK); + + im_free(addr); +} + +static int iounmap_subset_regions(unsigned long addr, unsigned long size) +{ + struct vm_struct *area; + + /* Check whether subsets of this region exist */ + area = im_get_area(addr, size, IM_REGION_SUPERSET); + if (area == NULL) + return 1; + + while (area) { + iounmap((void __iomem *) area->addr); + area = im_get_area(addr, size, + IM_REGION_SUPERSET); + } + + return 0; +} + +int iounmap_explicit(volatile void __iomem *start, unsigned long size) +{ + struct vm_struct *area; + unsigned long addr; + int rc; + + addr = (unsigned long __force) start & PAGE_MASK; + + /* Verify that the region either exists or is a subset of an existing + * region. In the latter case, split the parent region to create + * the exact region + */ + area = im_get_area(addr, size, + IM_REGION_EXISTS | IM_REGION_SUBSET); + if (area == NULL) { + /* Determine whether subset regions exist. If so, unmap */ + rc = iounmap_subset_regions(addr, size); + if (rc) { + printk(KERN_ERR + "%s() cannot unmap nonexistent range 0x%lx\n", + __FUNCTION__, addr); + return 1; + } + } else { + iounmap((void __iomem *) area->addr); + } + /* + * FIXME! 
This can't be right: + iounmap(area->addr); + * Maybe it should be "iounmap(area);" + */ + return 0; +} + +#endif + +EXPORT_SYMBOL(ioremap); +EXPORT_SYMBOL(__ioremap); +EXPORT_SYMBOL(iounmap); + +void __iomem * reserve_phb_iospace(unsigned long size) +{ + void __iomem *virt_addr; + + if (phbs_io_bot >= IMALLOC_BASE) + panic("reserve_phb_iospace(): phb io space overflow\n"); + + virt_addr = (void __iomem *) phbs_io_bot; + phbs_io_bot += size; + + return virt_addr; +} diff --cc include/asm-powerpc/rwsem.h index 0a5b83a3c94,00000000000..79bae4933b7 mode 100644,000000..100644 --- a/include/asm-powerpc/rwsem.h +++ b/include/asm-powerpc/rwsem.h @@@ -1,163 -1,0 +1,168 @@@ +#ifndef _ASM_POWERPC_RWSEM_H +#define _ASM_POWERPC_RWSEM_H + +#ifdef __KERNEL__ + +/* + * include/asm-ppc64/rwsem.h: R/W semaphores for PPC using the stuff + * in lib/rwsem.c. Adapted largely from include/asm-i386/rwsem.h + * by Paul Mackerras . + */ + +#include +#include +#include +#include + +/* + * the semaphore definition + */ +struct rw_semaphore { + /* XXX this should be able to be an atomic_t -- paulus */ + signed int count; +#define RWSEM_UNLOCKED_VALUE 0x00000000 +#define RWSEM_ACTIVE_BIAS 0x00000001 +#define RWSEM_ACTIVE_MASK 0x0000ffff +#define RWSEM_WAITING_BIAS (-0x00010000) +#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS +#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) + spinlock_t wait_lock; + struct list_head wait_list; +#if RWSEM_DEBUG + int debug; +#endif +}; + +/* + * initialisation + */ +#if RWSEM_DEBUG +#define __RWSEM_DEBUG_INIT , 0 +#else +#define __RWSEM_DEBUG_INIT /* */ +#endif + +#define __RWSEM_INITIALIZER(name) \ + { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ + LIST_HEAD_INIT((name).wait_list) \ + __RWSEM_DEBUG_INIT } + +#define DECLARE_RWSEM(name) \ + struct rw_semaphore name = __RWSEM_INITIALIZER(name) + +extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); +extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); +extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem); +extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); + +static inline void init_rwsem(struct rw_semaphore *sem) +{ + sem->count = RWSEM_UNLOCKED_VALUE; + spin_lock_init(&sem->wait_lock); + INIT_LIST_HEAD(&sem->wait_list); +#if RWSEM_DEBUG + sem->debug = 0; +#endif +} + +/* + * lock for reading + */ +static inline void __down_read(struct rw_semaphore *sem) +{ + if (unlikely(atomic_inc_return((atomic_t *)(&sem->count)) <= 0)) + rwsem_down_read_failed(sem); +} + +static inline int __down_read_trylock(struct rw_semaphore *sem) +{ + int tmp; + + while ((tmp = sem->count) >= 0) { + if (tmp == cmpxchg(&sem->count, tmp, + tmp + RWSEM_ACTIVE_READ_BIAS)) { + return 1; + } + } + return 0; +} + +/* + * lock for writing + */ +static inline void __down_write(struct rw_semaphore *sem) +{ + int tmp; + + tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS, + (atomic_t *)(&sem->count)); + if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS)) + rwsem_down_write_failed(sem); +} + +static inline int __down_write_trylock(struct rw_semaphore *sem) +{ + int tmp; + + tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE, + RWSEM_ACTIVE_WRITE_BIAS); + return tmp == RWSEM_UNLOCKED_VALUE; +} + +/* + * unlock after reading + */ +static inline void __up_read(struct rw_semaphore *sem) +{ + int tmp; + + tmp = atomic_dec_return((atomic_t *)(&sem->count)); + if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)) + rwsem_wake(sem); +} + +/* + * unlock after writing + */ 
+static inline void __up_write(struct rw_semaphore *sem) +{ + if (unlikely(atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS, + (atomic_t *)(&sem->count)) < 0)) + rwsem_wake(sem); +} + +/* + * implement atomic add functionality + */ +static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem) +{ + atomic_add(delta, (atomic_t *)(&sem->count)); +} + +/* + * downgrade write lock to read lock + */ +static inline void __downgrade_write(struct rw_semaphore *sem) +{ + int tmp; + + tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count)); + if (tmp < 0) + rwsem_downgrade_wake(sem); +} + +/* + * implement exchange and add functionality + */ +static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem) +{ + return atomic_add_return(delta, (atomic_t *)(&sem->count)); +} + ++static inline int rwsem_is_locked(struct rw_semaphore *sem) ++{ ++ return (sem->count != 0); ++} ++ +#endif /* __KERNEL__ */ +#endif /* _ASM_POWERPC_RWSEM_H */ diff --cc include/asm-powerpc/semaphore.h index fd42fe97158,00000000000..57369d2cade mode 100644,000000..100644 --- a/include/asm-powerpc/semaphore.h +++ b/include/asm-powerpc/semaphore.h @@@ -1,98 -1,0 +1,95 @@@ +#ifndef _ASM_POWERPC_SEMAPHORE_H +#define _ASM_POWERPC_SEMAPHORE_H + +/* + * Remove spinlock-based RW semaphores; RW semaphore definitions are + * now in rwsem.h and we use the generic lib/rwsem.c implementation. + * Rework semaphores to use atomic_dec_if_positive. + * -- Paul Mackerras (paulus@samba.org) + */ + +#ifdef __KERNEL__ + +#include +#include +#include +#include + +struct semaphore { + /* + * Note that any negative value of count is equivalent to 0, + * but additionally indicates that some process(es) might be + * sleeping on `wait'. + */ + atomic_t count; + wait_queue_head_t wait; +}; + +#define __SEMAPHORE_INITIALIZER(name, n) \ +{ \ + .count = ATOMIC_INIT(n), \ + .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ +} + - #define __MUTEX_INITIALIZER(name) \ - __SEMAPHORE_INITIALIZER(name, 1) - +#define __DECLARE_SEMAPHORE_GENERIC(name, count) \ + struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) + +#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1) +#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name, 0) + +static inline void sema_init (struct semaphore *sem, int val) +{ + atomic_set(&sem->count, val); + init_waitqueue_head(&sem->wait); +} + +static inline void init_MUTEX (struct semaphore *sem) +{ + sema_init(sem, 1); +} + +static inline void init_MUTEX_LOCKED (struct semaphore *sem) +{ + sema_init(sem, 0); +} + +extern void __down(struct semaphore * sem); +extern int __down_interruptible(struct semaphore * sem); +extern void __up(struct semaphore * sem); + +static inline void down(struct semaphore * sem) +{ + might_sleep(); + + /* + * Try to get the semaphore, take the slow path if we fail. 
+ */ + if (unlikely(atomic_dec_return(&sem->count) < 0)) + __down(sem); +} + +static inline int down_interruptible(struct semaphore * sem) +{ + int ret = 0; + + might_sleep(); + + if (unlikely(atomic_dec_return(&sem->count) < 0)) + ret = __down_interruptible(sem); + return ret; +} + +static inline int down_trylock(struct semaphore * sem) +{ + return atomic_dec_if_positive(&sem->count) < 0; +} + +static inline void up(struct semaphore * sem) +{ + if (unlikely(atomic_inc_return(&sem->count) <= 0)) + __up(sem); +} + +#endif /* __KERNEL__ */ + +#endif /* _ASM_POWERPC_SEMAPHORE_H */ diff --cc include/asm-powerpc/unistd.h index c2d039e338a,00000000000..0991dfceef1 mode 100644,000000..100644 --- a/include/asm-powerpc/unistd.h +++ b/include/asm-powerpc/unistd.h @@@ -1,510 -1,0 +1,509 @@@ +#ifndef _ASM_PPC_UNISTD_H_ +#define _ASM_PPC_UNISTD_H_ + +/* + * This file contains the system call numbers. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#define __NR_restart_syscall 0 +#define __NR_exit 1 +#define __NR_fork 2 +#define __NR_read 3 +#define __NR_write 4 +#define __NR_open 5 +#define __NR_close 6 +#define __NR_waitpid 7 +#define __NR_creat 8 +#define __NR_link 9 +#define __NR_unlink 10 +#define __NR_execve 11 +#define __NR_chdir 12 +#define __NR_time 13 +#define __NR_mknod 14 +#define __NR_chmod 15 +#define __NR_lchown 16 +#define __NR_break 17 +#define __NR_oldstat 18 +#define __NR_lseek 19 +#define __NR_getpid 20 +#define __NR_mount 21 +#define __NR_umount 22 +#define __NR_setuid 23 +#define __NR_getuid 24 +#define __NR_stime 25 +#define __NR_ptrace 26 +#define __NR_alarm 27 +#define __NR_oldfstat 28 +#define __NR_pause 29 +#define __NR_utime 30 +#define __NR_stty 31 +#define __NR_gtty 32 +#define __NR_access 33 +#define __NR_nice 34 +#define __NR_ftime 35 +#define __NR_sync 36 +#define __NR_kill 37 +#define __NR_rename 38 +#define __NR_mkdir 39 +#define __NR_rmdir 40 +#define __NR_dup 41 +#define __NR_pipe 42 +#define __NR_times 43 +#define __NR_prof 44 +#define __NR_brk 45 +#define __NR_setgid 46 +#define __NR_getgid 47 +#define __NR_signal 48 +#define __NR_geteuid 49 +#define __NR_getegid 50 +#define __NR_acct 51 +#define __NR_umount2 52 +#define __NR_lock 53 +#define __NR_ioctl 54 +#define __NR_fcntl 55 +#define __NR_mpx 56 +#define __NR_setpgid 57 +#define __NR_ulimit 58 +#define __NR_oldolduname 59 +#define __NR_umask 60 +#define __NR_chroot 61 +#define __NR_ustat 62 +#define __NR_dup2 63 +#define __NR_getppid 64 +#define __NR_getpgrp 65 +#define __NR_setsid 66 +#define __NR_sigaction 67 +#define __NR_sgetmask 68 +#define __NR_ssetmask 69 +#define __NR_setreuid 70 +#define __NR_setregid 71 +#define __NR_sigsuspend 72 +#define __NR_sigpending 73 +#define __NR_sethostname 74 +#define __NR_setrlimit 75 +#define __NR_getrlimit 76 +#define __NR_getrusage 77 +#define __NR_gettimeofday 78 +#define __NR_settimeofday 79 +#define __NR_getgroups 80 +#define __NR_setgroups 81 +#define __NR_select 82 +#define __NR_symlink 83 +#define __NR_oldlstat 84 +#define __NR_readlink 85 +#define __NR_uselib 86 +#define __NR_swapon 87 +#define __NR_reboot 88 +#define __NR_readdir 89 +#define __NR_mmap 90 +#define __NR_munmap 91 +#define __NR_truncate 92 +#define __NR_ftruncate 93 +#define __NR_fchmod 94 +#define __NR_fchown 95 +#define __NR_getpriority 96 +#define __NR_setpriority 97 +#define 
__NR_profil 98 +#define __NR_statfs 99 +#define __NR_fstatfs 100 +#define __NR_ioperm 101 +#define __NR_socketcall 102 +#define __NR_syslog 103 +#define __NR_setitimer 104 +#define __NR_getitimer 105 +#define __NR_stat 106 +#define __NR_lstat 107 +#define __NR_fstat 108 +#define __NR_olduname 109 +#define __NR_iopl 110 +#define __NR_vhangup 111 +#define __NR_idle 112 +#define __NR_vm86 113 +#define __NR_wait4 114 +#define __NR_swapoff 115 +#define __NR_sysinfo 116 +#define __NR_ipc 117 +#define __NR_fsync 118 +#define __NR_sigreturn 119 +#define __NR_clone 120 +#define __NR_setdomainname 121 +#define __NR_uname 122 +#define __NR_modify_ldt 123 +#define __NR_adjtimex 124 +#define __NR_mprotect 125 +#define __NR_sigprocmask 126 +#define __NR_create_module 127 +#define __NR_init_module 128 +#define __NR_delete_module 129 +#define __NR_get_kernel_syms 130 +#define __NR_quotactl 131 +#define __NR_getpgid 132 +#define __NR_fchdir 133 +#define __NR_bdflush 134 +#define __NR_sysfs 135 +#define __NR_personality 136 +#define __NR_afs_syscall 137 /* Syscall for Andrew File System */ +#define __NR_setfsuid 138 +#define __NR_setfsgid 139 +#define __NR__llseek 140 +#define __NR_getdents 141 +#define __NR__newselect 142 +#define __NR_flock 143 +#define __NR_msync 144 +#define __NR_readv 145 +#define __NR_writev 146 +#define __NR_getsid 147 +#define __NR_fdatasync 148 +#define __NR__sysctl 149 +#define __NR_mlock 150 +#define __NR_munlock 151 +#define __NR_mlockall 152 +#define __NR_munlockall 153 +#define __NR_sched_setparam 154 +#define __NR_sched_getparam 155 +#define __NR_sched_setscheduler 156 +#define __NR_sched_getscheduler 157 +#define __NR_sched_yield 158 +#define __NR_sched_get_priority_max 159 +#define __NR_sched_get_priority_min 160 +#define __NR_sched_rr_get_interval 161 +#define __NR_nanosleep 162 +#define __NR_mremap 163 +#define __NR_setresuid 164 +#define __NR_getresuid 165 +#define __NR_query_module 166 +#define __NR_poll 167 +#define __NR_nfsservctl 168 +#define __NR_setresgid 169 +#define __NR_getresgid 170 +#define __NR_prctl 171 +#define __NR_rt_sigreturn 172 +#define __NR_rt_sigaction 173 +#define __NR_rt_sigprocmask 174 +#define __NR_rt_sigpending 175 +#define __NR_rt_sigtimedwait 176 +#define __NR_rt_sigqueueinfo 177 +#define __NR_rt_sigsuspend 178 +#define __NR_pread64 179 +#define __NR_pwrite64 180 +#define __NR_chown 181 +#define __NR_getcwd 182 +#define __NR_capget 183 +#define __NR_capset 184 +#define __NR_sigaltstack 185 +#define __NR_sendfile 186 +#define __NR_getpmsg 187 /* some people actually want streams */ +#define __NR_putpmsg 188 /* some people actually want streams */ +#define __NR_vfork 189 +#define __NR_ugetrlimit 190 /* SuS compliant getrlimit */ +#define __NR_readahead 191 +#ifndef __powerpc64__ /* these are 32-bit only */ +#define __NR_mmap2 192 +#define __NR_truncate64 193 +#define __NR_ftruncate64 194 +#define __NR_stat64 195 +#define __NR_lstat64 196 +#define __NR_fstat64 197 +#endif +#define __NR_pciconfig_read 198 +#define __NR_pciconfig_write 199 +#define __NR_pciconfig_iobase 200 +#define __NR_multiplexer 201 +#define __NR_getdents64 202 +#define __NR_pivot_root 203 +#ifndef __powerpc64__ +#define __NR_fcntl64 204 +#endif +#define __NR_madvise 205 +#define __NR_mincore 206 +#define __NR_gettid 207 +#define __NR_tkill 208 +#define __NR_setxattr 209 +#define __NR_lsetxattr 210 +#define __NR_fsetxattr 211 +#define __NR_getxattr 212 +#define __NR_lgetxattr 213 +#define __NR_fgetxattr 214 +#define __NR_listxattr 215 +#define __NR_llistxattr 216 +#define 
__NR_flistxattr 217 +#define __NR_removexattr 218 +#define __NR_lremovexattr 219 +#define __NR_fremovexattr 220 +#define __NR_futex 221 +#define __NR_sched_setaffinity 222 +#define __NR_sched_getaffinity 223 +/* 224 currently unused */ +#define __NR_tuxcall 225 +#ifndef __powerpc64__ +#define __NR_sendfile64 226 +#endif +#define __NR_io_setup 227 +#define __NR_io_destroy 228 +#define __NR_io_getevents 229 +#define __NR_io_submit 230 +#define __NR_io_cancel 231 +#define __NR_set_tid_address 232 +#define __NR_fadvise64 233 +#define __NR_exit_group 234 +#define __NR_lookup_dcookie 235 +#define __NR_epoll_create 236 +#define __NR_epoll_ctl 237 +#define __NR_epoll_wait 238 +#define __NR_remap_file_pages 239 +#define __NR_timer_create 240 +#define __NR_timer_settime 241 +#define __NR_timer_gettime 242 +#define __NR_timer_getoverrun 243 +#define __NR_timer_delete 244 +#define __NR_clock_settime 245 +#define __NR_clock_gettime 246 +#define __NR_clock_getres 247 +#define __NR_clock_nanosleep 248 +#define __NR_swapcontext 249 +#define __NR_tgkill 250 +#define __NR_utimes 251 +#define __NR_statfs64 252 +#define __NR_fstatfs64 253 +#ifndef __powerpc64__ +#define __NR_fadvise64_64 254 +#endif +#define __NR_rtas 255 +#define __NR_sys_debug_setcontext 256 +/* Number 257 is reserved for vserver */ +/* 258 currently unused */ +#define __NR_mbind 259 +#define __NR_get_mempolicy 260 +#define __NR_set_mempolicy 261 +#define __NR_mq_open 262 +#define __NR_mq_unlink 263 +#define __NR_mq_timedsend 264 +#define __NR_mq_timedreceive 265 +#define __NR_mq_notify 266 +#define __NR_mq_getsetattr 267 +#define __NR_kexec_load 268 +#define __NR_add_key 269 +#define __NR_request_key 270 +#define __NR_keyctl 271 +#define __NR_waitid 272 +#define __NR_ioprio_set 273 +#define __NR_ioprio_get 274 +#define __NR_inotify_init 275 +#define __NR_inotify_add_watch 276 +#define __NR_inotify_rm_watch 277 + +#define __NR_syscalls 278 + +#ifdef __KERNEL__ +#define __NR__exit __NR_exit +#define NR_syscalls __NR_syscalls +#endif + +#ifndef __ASSEMBLY__ + +/* On powerpc a system call basically clobbers the same registers like a + * function call, with the exception of LR (which is needed for the + * "sc; bnslr" sequence) and CR (where only CR0.SO is clobbered to signal + * an error return status). + */ + +#define __syscall_nr(nr, type, name, args...) \ + unsigned long __sc_ret, __sc_err; \ + { \ + register unsigned long __sc_0 __asm__ ("r0"); \ + register unsigned long __sc_3 __asm__ ("r3"); \ + register unsigned long __sc_4 __asm__ ("r4"); \ + register unsigned long __sc_5 __asm__ ("r5"); \ + register unsigned long __sc_6 __asm__ ("r6"); \ + register unsigned long __sc_7 __asm__ ("r7"); \ + register unsigned long __sc_8 __asm__ ("r8"); \ + \ + __sc_loadargs_##nr(name, args); \ + __asm__ __volatile__ \ + ("sc \n\t" \ + "mfcr %0 " \ + : "=&r" (__sc_0), \ + "=&r" (__sc_3), "=&r" (__sc_4), \ + "=&r" (__sc_5), "=&r" (__sc_6), \ + "=&r" (__sc_7), "=&r" (__sc_8) \ + : __sc_asm_input_##nr \ + : "cr0", "ctr", "memory", \ + "r9", "r10","r11", "r12"); \ + __sc_ret = __sc_3; \ + __sc_err = __sc_0; \ + } \ + if (__sc_err & 0x10000000) \ + { \ + errno = __sc_ret; \ + __sc_ret = -1; \ + } \ + return (type) __sc_ret + +#define __sc_loadargs_0(name, dummy...) 
\ + __sc_0 = __NR_##name +#define __sc_loadargs_1(name, arg1) \ + __sc_loadargs_0(name); \ + __sc_3 = (unsigned long) (arg1) +#define __sc_loadargs_2(name, arg1, arg2) \ + __sc_loadargs_1(name, arg1); \ + __sc_4 = (unsigned long) (arg2) +#define __sc_loadargs_3(name, arg1, arg2, arg3) \ + __sc_loadargs_2(name, arg1, arg2); \ + __sc_5 = (unsigned long) (arg3) +#define __sc_loadargs_4(name, arg1, arg2, arg3, arg4) \ + __sc_loadargs_3(name, arg1, arg2, arg3); \ + __sc_6 = (unsigned long) (arg4) +#define __sc_loadargs_5(name, arg1, arg2, arg3, arg4, arg5) \ + __sc_loadargs_4(name, arg1, arg2, arg3, arg4); \ + __sc_7 = (unsigned long) (arg5) +#define __sc_loadargs_6(name, arg1, arg2, arg3, arg4, arg5, arg6) \ + __sc_loadargs_5(name, arg1, arg2, arg3, arg4, arg5); \ + __sc_8 = (unsigned long) (arg6) + +#define __sc_asm_input_0 "0" (__sc_0) +#define __sc_asm_input_1 __sc_asm_input_0, "1" (__sc_3) +#define __sc_asm_input_2 __sc_asm_input_1, "2" (__sc_4) +#define __sc_asm_input_3 __sc_asm_input_2, "3" (__sc_5) +#define __sc_asm_input_4 __sc_asm_input_3, "4" (__sc_6) +#define __sc_asm_input_5 __sc_asm_input_4, "5" (__sc_7) +#define __sc_asm_input_6 __sc_asm_input_5, "6" (__sc_8) + +#define _syscall0(type,name) \ +type name(void) \ +{ \ + __syscall_nr(0, type, name); \ +} + +#define _syscall1(type,name,type1,arg1) \ +type name(type1 arg1) \ +{ \ + __syscall_nr(1, type, name, arg1); \ +} + +#define _syscall2(type,name,type1,arg1,type2,arg2) \ +type name(type1 arg1, type2 arg2) \ +{ \ + __syscall_nr(2, type, name, arg1, arg2); \ +} + +#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ +type name(type1 arg1, type2 arg2, type3 arg3) \ +{ \ + __syscall_nr(3, type, name, arg1, arg2, arg3); \ +} + +#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ +type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \ +{ \ + __syscall_nr(4, type, name, arg1, arg2, arg3, arg4); \ +} + +#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \ +type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \ +{ \ + __syscall_nr(5, type, name, arg1, arg2, arg3, arg4, arg5); \ +} +#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5,type6,arg6) \ +type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) \ +{ \ + __syscall_nr(6, type, name, arg1, arg2, arg3, arg4, arg5, arg6); \ +} + +#ifdef __KERNEL__ + +#include +#include +#include +#include + +#define __ARCH_WANT_IPC_PARSE_VERSION +#define __ARCH_WANT_OLD_READDIR +#define __ARCH_WANT_STAT64 +#define __ARCH_WANT_SYS_ALARM +#define __ARCH_WANT_SYS_GETHOSTNAME +#define __ARCH_WANT_SYS_PAUSE +#define __ARCH_WANT_SYS_SGETMASK +#define __ARCH_WANT_SYS_SIGNAL +#define __ARCH_WANT_SYS_TIME +#define __ARCH_WANT_SYS_UTIME +#define __ARCH_WANT_SYS_WAITPID +#define __ARCH_WANT_SYS_SOCKETCALL +#define __ARCH_WANT_SYS_FADVISE64 +#define __ARCH_WANT_SYS_GETPGRP +#define __ARCH_WANT_SYS_LLSEEK +#define __ARCH_WANT_SYS_NICE +#define __ARCH_WANT_SYS_OLD_GETRLIMIT +#define __ARCH_WANT_SYS_OLDUMOUNT +#define __ARCH_WANT_SYS_SIGPENDING +#define __ARCH_WANT_SYS_SIGPROCMASK +#define __ARCH_WANT_SYS_RT_SIGACTION +#ifdef CONFIG_PPC32 +#define __ARCH_WANT_OLD_STAT +#endif +#ifdef CONFIG_PPC64 +#define __ARCH_WANT_COMPAT_SYS_TIME +#endif + +/* + * System call prototypes. 
+ */ +#ifdef __KERNEL_SYSCALLS__ +extern pid_t setsid(void); +extern int write(int fd, const char *buf, off_t count); +extern int read(int fd, char *buf, off_t count); +extern off_t lseek(int fd, off_t offset, int count); +extern int dup(int fd); +extern int execve(const char *file, char **argv, char **envp); +extern int open(const char *file, int flag, int mode); +extern int close(int fd); +extern pid_t waitpid(pid_t pid, int *wait_stat, int options); +#endif /* __KERNEL_SYSCALLS__ */ + +/* + * Functions that implement syscalls. + */ +unsigned long sys_mmap(unsigned long addr, size_t len, unsigned long prot, + unsigned long flags, unsigned long fd, off_t offset); +unsigned long sys_mmap2(unsigned long addr, size_t len, + unsigned long prot, unsigned long flags, + unsigned long fd, unsigned long pgoff); +struct pt_regs; +int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2, + unsigned long a3, unsigned long a4, unsigned long a5, + struct pt_regs *regs); +int sys_clone(unsigned long clone_flags, unsigned long usp, + int __user *parent_tidp, void __user *child_threadptr, + int __user *child_tidp, int p6, struct pt_regs *regs); +int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3, + unsigned long p4, unsigned long p5, unsigned long p6, + struct pt_regs *regs); +int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3, + unsigned long p4, unsigned long p5, unsigned long p6, + struct pt_regs *regs); +int sys_pipe(int __user *fildes); - int sys_ptrace(long request, long pid, long addr, long data); +struct sigaction; +long sys_rt_sigaction(int sig, const struct sigaction __user *act, + struct sigaction __user *oact, size_t sigsetsize); + +/* + * "Conditional" syscalls + * + * What we want is __attribute__((weak,alias("sys_ni_syscall"))), + * but it doesn't work on all toolchains, so we just do it by hand + */ +#ifdef CONFIG_PPC32 +#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall") +#else +#define cond_syscall(x) asm(".weak\t." #x "\n\t.set\t." #x ",.sys_ni_syscall") +#endif + +#endif /* __KERNEL__ */ + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_PPC_UNISTD_H_ */
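
For reference, a minimal usage sketch of the _syscallN() stub macros defined in the asm-powerpc/unistd.h hunk above. This is illustrative only and not part of the merge; treating the macros as usable from a userspace-style translation unit, and the choice of getpid as the example syscall, are assumptions about typical use, not code from this commit.

/*
 * Illustrative sketch (not from this commit): expanding _syscall0()
 * into an inline "sc" stub.  Assumes <asm/unistd.h> provides the
 * __NR_* numbers and the _syscallN()/__syscall_nr() macros shown
 * above, and that errno is available for the error path they expand to.
 */
#include <sys/types.h>      /* pid_t */
#include <errno.h>          /* errno, written by the expanded stub on failure */
#include <asm/unistd.h>     /* __NR_getpid, _syscall0(), __syscall_nr() */

/* Expands to: static pid_t getpid(void) { __syscall_nr(0, pid_t, getpid); } */
static _syscall0(pid_t, getpid)

int main(void)
{
        /*
         * The expanded stub loads r0 with __NR_getpid, issues "sc",
         * then reads CR with mfcr; if CR0.SO (the 0x10000000 bit the
         * macro tests) is set it stores r3 into errno and returns -1,
         * otherwise it returns r3 as the result.
         */
        return getpid() > 0 ? 0 : 1;
}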