powerpc: Move most remaining ppc64 files over to arch/powerpc
author     Paul Mackerras <paulus@samba.org>
           Mon, 14 Nov 2005 06:30:17 +0000 (17:30 +1100)
committer  Paul Mackerras <paulus@samba.org>
           Mon, 14 Nov 2005 06:30:17 +0000 (17:30 +1100)
Also deletes files in arch/ppc64 that are no longer used now that
we don't compile with ARCH=ppc64 any more.

Signed-off-by: Paul Mackerras <paulus@samba.org>
41 files changed:
arch/powerpc/Kconfig
arch/powerpc/kernel/Makefile
arch/powerpc/kernel/dma_64.c [new file with mode: 0644]
arch/powerpc/kernel/iomap.c [new file with mode: 0644]
arch/powerpc/kernel/iommu.c [new file with mode: 0644]
arch/powerpc/kernel/kprobes.c [new file with mode: 0644]
arch/powerpc/kernel/machine_kexec_64.c [new file with mode: 0644]
arch/powerpc/kernel/module_64.c [new file with mode: 0644]
arch/powerpc/kernel/pci_64.c [new file with mode: 0644]
arch/powerpc/kernel/pci_direct_iommu.c [new file with mode: 0644]
arch/powerpc/kernel/pci_dn.c [new file with mode: 0644]
arch/powerpc/kernel/pci_iommu.c [new file with mode: 0644]
arch/powerpc/platforms/pseries/Makefile
arch/powerpc/platforms/pseries/hvconsole.c [new file with mode: 0644]
arch/powerpc/platforms/pseries/hvcserver.c [new file with mode: 0644]
arch/ppc64/Kconfig [deleted file]
arch/ppc64/kernel/Makefile
arch/ppc64/kernel/asm-offsets.c [deleted file]
arch/ppc64/kernel/btext.c [deleted file]
arch/ppc64/kernel/dma.c [deleted file]
arch/ppc64/kernel/head.S [deleted file]
arch/ppc64/kernel/hvconsole.c [deleted file]
arch/ppc64/kernel/hvcserver.c [deleted file]
arch/ppc64/kernel/iomap.c [deleted file]
arch/ppc64/kernel/iommu.c [deleted file]
arch/ppc64/kernel/kprobes.c [deleted file]
arch/ppc64/kernel/machine_kexec.c [deleted file]
arch/ppc64/kernel/misc.S [deleted file]
arch/ppc64/kernel/module.c [deleted file]
arch/ppc64/kernel/pci.c [deleted file]
arch/ppc64/kernel/pci_direct_iommu.c [deleted file]
arch/ppc64/kernel/pci_dn.c [deleted file]
arch/ppc64/kernel/pci_iommu.c [deleted file]
arch/ppc64/kernel/ppc_ksyms.c [deleted file]
arch/ppc64/kernel/prom.c [deleted file]
arch/ppc64/kernel/prom_init.c [deleted file]
arch/ppc64/kernel/semaphore.c [deleted file]
arch/ppc64/kernel/vdso.c [deleted file]
arch/ppc64/kernel/vmlinux.lds.S [deleted file]
arch/ppc64/xmon/privinst.h [deleted file]
include/asm-ppc64/page.h [deleted file]

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index c523029674e6e31a86ddcc8ad9675fea14e27de9..c5c3f4213cd99076a982b8e4dd6a4df5c4232672 100644 (file)
@@ -932,6 +932,7 @@ source "arch/powerpc/oprofile/Kconfig"
 
 config KPROBES
        bool "Kprobes (EXPERIMENTAL)"
+       depends on PPC64
        help
          Kprobes allows you to trap at almost any kernel address and
          execute a callback function.  register_kprobe() establishes
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 046b4bf1f21e306f986da556bce4909f0413d34d..4970e3721a84ad0edddb960068082e0bd0bc1ada 100644 (file)
@@ -49,12 +49,23 @@ extra-y                             += vmlinux.lds
 obj-y                          += process.o init_task.o time.o \
                                   prom.o traps.o setup-common.o
 obj-$(CONFIG_PPC32)            += entry_32.o setup_32.o misc_32.o systbl.o
-obj-$(CONFIG_PPC64)            += misc_64.o
+obj-$(CONFIG_PPC64)            += misc_64.o dma_64.o iommu.o
 obj-$(CONFIG_PPC_OF)           += prom_init.o
 obj-$(CONFIG_MODULES)          += ppc_ksyms.o
 obj-$(CONFIG_BOOTX_TEXT)       += btext.o
 obj-$(CONFIG_6xx)              += idle_6xx.o
 obj-$(CONFIG_SMP)              += smp.o
+obj-$(CONFIG_KPROBES)          += kprobes.o
+
+module-$(CONFIG_PPC64)         += module_64.o
+obj-$(CONFIG_MODULES)          += $(module-y)
+
+pci64-$(CONFIG_PPC64)          += pci_64.o pci_dn.o pci_iommu.o \
+                                  pci_direct_iommu.o iomap.o
+obj-$(CONFIG_PCI)              += $(pci64-y)
+
+kexec64-$(CONFIG_PPC64)                += machine_kexec_64.o
+obj-$(CONFIG_KEXEC)            += $(kexec64-y)
 
 ifeq ($(CONFIG_PPC_ISERIES),y)
 $(obj)/head_64.o: $(obj)/lparmap.s
@@ -62,11 +73,8 @@ AFLAGS_head_64.o += -I$(obj)
 endif
 
 else
-# stuff used from here for ARCH=ppc or ARCH=ppc64
+# stuff used from here for ARCH=ppc
 smpobj-$(CONFIG_SMP)           += smp.o
-obj-$(CONFIG_PPC64)            += traps.o process.o init_task.o time.o \
-                                  setup-common.o $(smpobj-y)
-
 
 endif
 
diff --git a/arch/powerpc/kernel/dma_64.c b/arch/powerpc/kernel/dma_64.c
new file mode 100644 (file)
index 0000000..7c34196
--- /dev/null
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2004 IBM Corporation
+ *
+ * Implements the generic device dma API for ppc64. Handles
+ * the pci and vio busses
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+/* Include the busses we support */
+#include <linux/pci.h>
+#include <asm/vio.h>
+#include <asm/scatterlist.h>
+#include <asm/bug.h>
+
+static struct dma_mapping_ops *get_dma_ops(struct device *dev)
+{
+#ifdef CONFIG_PCI
+       if (dev->bus == &pci_bus_type)
+               return &pci_dma_ops;
+#endif
+#ifdef CONFIG_IBMVIO
+       if (dev->bus == &vio_bus_type)
+               return &vio_dma_ops;
+#endif
+       return NULL;
+}
+
+int dma_supported(struct device *dev, u64 mask)
+{
+       struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+       if (dma_ops)
+               return dma_ops->dma_supported(dev, mask);
+       BUG();
+       return 0;
+}
+EXPORT_SYMBOL(dma_supported);
+
+int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+#ifdef CONFIG_PCI
+       if (dev->bus == &pci_bus_type)
+               return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
+#endif
+#ifdef CONFIG_IBMVIO
+       if (dev->bus == &vio_bus_type)
+               return -EIO;
+#endif /* CONFIG_IBMVIO */
+       BUG();
+       return 0;
+}
+EXPORT_SYMBOL(dma_set_mask);
+
+void *dma_alloc_coherent(struct device *dev, size_t size,
+               dma_addr_t *dma_handle, gfp_t flag)
+{
+       struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+       if (dma_ops)
+               return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
+       BUG();
+       return NULL;
+}
+EXPORT_SYMBOL(dma_alloc_coherent);
+
+void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
+               dma_addr_t dma_handle)
+{
+       struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+       if (dma_ops)
+               dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
+       else
+               BUG();
+}
+EXPORT_SYMBOL(dma_free_coherent);
+
+dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, size_t size,
+               enum dma_data_direction direction)
+{
+       struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+       if (dma_ops)
+               return dma_ops->map_single(dev, cpu_addr, size, direction);
+       BUG();
+       return (dma_addr_t)0;
+}
+EXPORT_SYMBOL(dma_map_single);
+
+void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+               enum dma_data_direction direction)
+{
+       struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+       if (dma_ops)
+               dma_ops->unmap_single(dev, dma_addr, size, direction);
+       else
+               BUG();
+}
+EXPORT_SYMBOL(dma_unmap_single);
+
+dma_addr_t dma_map_page(struct device *dev, struct page *page,
+               unsigned long offset, size_t size,
+               enum dma_data_direction direction)
+{
+       struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+       if (dma_ops)
+               return dma_ops->map_single(dev,
+                               (page_address(page) + offset), size, direction);
+       BUG();
+       return (dma_addr_t)0;
+}
+EXPORT_SYMBOL(dma_map_page);
+
+void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
+               enum dma_data_direction direction)
+{
+       struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+       if (dma_ops)
+               dma_ops->unmap_single(dev, dma_address, size, direction);
+       else
+               BUG();
+}
+EXPORT_SYMBOL(dma_unmap_page);
+
+int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+               enum dma_data_direction direction)
+{
+       struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+       if (dma_ops)
+               return dma_ops->map_sg(dev, sg, nents, direction);
+       BUG();
+       return 0;
+}
+EXPORT_SYMBOL(dma_map_sg);
+
+void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+               enum dma_data_direction direction)
+{
+       struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+       if (dma_ops)
+               dma_ops->unmap_sg(dev, sg, nhwentries, direction);
+       else
+               BUG();
+}
+EXPORT_SYMBOL(dma_unmap_sg);
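
For reference, here is a minimal driver-side sketch (not part of this commit) of how the wrappers above are meant to be called. The device pointer, buffer and length are assumed to come from the caller, and DMA_ERROR_CODE is the ppc64 failure value also used by the iommu code below.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Illustrative only: map a kernel buffer for DMA on whichever bus the
 * device sits on (PCI or VIO), then unmap it again. */
static int example_dma_map(struct device *dev, void *buf, size_t len)
{
        dma_addr_t handle;

        if (!dma_supported(dev, 0xffffffffULL))        /* 32-bit DMA mask */
                return -EIO;

        handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (handle == DMA_ERROR_CODE)
                return -ENOMEM;

        /* ... program the device with 'handle' ... */

        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
        return 0;
}

Because get_dma_ops() selects pci_dma_ops or vio_dma_ops purely from dev->bus, the same driver code works unchanged on either bus.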
diff --git a/arch/powerpc/kernel/iomap.c b/arch/powerpc/kernel/iomap.c
new file mode 100644 (file)
index 0000000..6160c8d
--- /dev/null
@@ -0,0 +1,146 @@
+/*
+ * arch/ppc64/kernel/iomap.c
+ *
+ * ppc64 "iomap" interface implementation.
+ *
+ * (C) Copyright 2004 Linus Torvalds
+ */
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/mm.h>
+#include <asm/io.h>
+
+/*
+ * Here comes the ppc64 implementation of the IOMAP 
+ * interfaces.
+ */
+unsigned int fastcall ioread8(void __iomem *addr)
+{
+       return readb(addr);
+}
+unsigned int fastcall ioread16(void __iomem *addr)
+{
+       return readw(addr);
+}
+unsigned int fastcall ioread16be(void __iomem *addr)
+{
+       return in_be16(addr);
+}
+unsigned int fastcall ioread32(void __iomem *addr)
+{
+       return readl(addr);
+}
+unsigned int fastcall ioread32be(void __iomem *addr)
+{
+       return in_be32(addr);
+}
+EXPORT_SYMBOL(ioread8);
+EXPORT_SYMBOL(ioread16);
+EXPORT_SYMBOL(ioread16be);
+EXPORT_SYMBOL(ioread32);
+EXPORT_SYMBOL(ioread32be);
+
+void fastcall iowrite8(u8 val, void __iomem *addr)
+{
+       writeb(val, addr);
+}
+void fastcall iowrite16(u16 val, void __iomem *addr)
+{
+       writew(val, addr);
+}
+void fastcall iowrite16be(u16 val, void __iomem *addr)
+{
+       out_be16(addr, val);
+}
+void fastcall iowrite32(u32 val, void __iomem *addr)
+{
+       writel(val, addr);
+}
+void fastcall iowrite32be(u32 val, void __iomem *addr)
+{
+       out_be32(addr, val);
+}
+EXPORT_SYMBOL(iowrite8);
+EXPORT_SYMBOL(iowrite16);
+EXPORT_SYMBOL(iowrite16be);
+EXPORT_SYMBOL(iowrite32);
+EXPORT_SYMBOL(iowrite32be);
+
+/*
+ * These are the "repeat read/write" functions. Note the
+ * non-CPU byte order. We do things in "IO byteorder"
+ * here.
+ *
+ * FIXME! We could make these do EEH handling if we really
+ * wanted. Not clear if we do.
+ */
+void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
+{
+       _insb((u8 __iomem *) addr, dst, count);
+}
+void ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
+{
+       _insw_ns((u16 __iomem *) addr, dst, count);
+}
+void ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
+{
+       _insl_ns((u32 __iomem *) addr, dst, count);
+}
+EXPORT_SYMBOL(ioread8_rep);
+EXPORT_SYMBOL(ioread16_rep);
+EXPORT_SYMBOL(ioread32_rep);
+
+void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
+{
+       _outsb((u8 __iomem *) addr, src, count);
+}
+void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
+{
+       _outsw_ns((u16 __iomem *) addr, src, count);
+}
+void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
+{
+       _outsl_ns((u32 __iomem *) addr, src, count);
+}
+EXPORT_SYMBOL(iowrite8_rep);
+EXPORT_SYMBOL(iowrite16_rep);
+EXPORT_SYMBOL(iowrite32_rep);
+
+void __iomem *ioport_map(unsigned long port, unsigned int len)
+{
+       if (!_IO_IS_VALID(port))
+               return NULL;
+       return (void __iomem *) (port+pci_io_base);
+}
+
+void ioport_unmap(void __iomem *addr)
+{
+       /* Nothing to do */
+}
+EXPORT_SYMBOL(ioport_map);
+EXPORT_SYMBOL(ioport_unmap);
+
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
+{
+       unsigned long start = pci_resource_start(dev, bar);
+       unsigned long len = pci_resource_len(dev, bar);
+       unsigned long flags = pci_resource_flags(dev, bar);
+
+       if (!len)
+               return NULL;
+       if (max && len > max)
+               len = max;
+       if (flags & IORESOURCE_IO)
+               return ioport_map(start, len);
+       if (flags & IORESOURCE_MEM)
+               return ioremap(start, len);
+       /* What? */
+       return NULL;
+}
+
+void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
+{
+       /* Nothing to do */
+}
+EXPORT_SYMBOL(pci_iomap);
+EXPORT_SYMBOL(pci_iounmap);
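
For reference, a short sketch (not part of this commit) of the generic iomap usage these helpers enable; the PCI device, the choice of BAR 0 and the register offset 0x10 are assumptions made purely for illustration.

#include <linux/pci.h>
#include <linux/errno.h>
#include <asm/io.h>

/* Illustrative only: map BAR 0 and set a bit in a 32-bit register. */
static int example_toggle_reg(struct pci_dev *pdev)
{
        void __iomem *regs = pci_iomap(pdev, 0, 0);     /* max = 0: map the whole BAR */
        u32 val;

        if (!regs)
                return -ENOMEM;

        val = ioread32(regs + 0x10);
        iowrite32(val | 0x1, regs + 0x10);

        pci_iounmap(pdev, regs);
        return 0;
}

Because ioport_map() simply offsets the port into pci_io_base, the same ioread/iowrite calls work whether the BAR decodes as I/O ports or as memory.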
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
new file mode 100644 (file)
index 0000000..4d9b438
--- /dev/null
@@ -0,0 +1,572 @@
+/*
+ * arch/ppc64/kernel/iommu.c
+ * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
+ * 
+ * Rewrite, cleanup, new allocation schemes, virtual merging: 
+ * Copyright (C) 2004 Olof Johansson, IBM Corporation
+ *               and  Ben. Herrenschmidt, IBM Corporation
+ *
+ * Dynamic DMA mapping support, bus-independent parts.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ * 
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ * 
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/iommu.h>
+#include <asm/pci-bridge.h>
+#include <asm/machdep.h>
+
+#define DBG(...)
+
+#ifdef CONFIG_IOMMU_VMERGE
+static int novmerge = 0;
+#else
+static int novmerge = 1;
+#endif
+
+static int __init setup_iommu(char *str)
+{
+       if (!strcmp(str, "novmerge"))
+               novmerge = 1;
+       else if (!strcmp(str, "vmerge"))
+               novmerge = 0;
+       return 1;
+}
+
+__setup("iommu=", setup_iommu);
+
+static unsigned long iommu_range_alloc(struct iommu_table *tbl,
+                                       unsigned long npages,
+                                       unsigned long *handle,
+                                       unsigned int align_order)
+{ 
+       unsigned long n, end, i, start;
+       unsigned long limit;
+       int largealloc = npages > 15;
+       int pass = 0;
+       unsigned long align_mask;
+
+       align_mask = 0xffffffffffffffffl >> (64 - align_order);
+
+       /* This allocator was derived from x86_64's bit string search */
+
+       /* Sanity check */
+       if (unlikely(npages == 0)) {
+               if (printk_ratelimit())
+                       WARN_ON(1);
+               return DMA_ERROR_CODE;
+       }
+
+       if (handle && *handle)
+               start = *handle;
+       else
+               start = largealloc ? tbl->it_largehint : tbl->it_hint;
+
+       /* Use only half of the table for small allocs (15 pages or less) */
+       limit = largealloc ? tbl->it_size : tbl->it_halfpoint;
+
+       if (largealloc && start < tbl->it_halfpoint)
+               start = tbl->it_halfpoint;
+
+       /* The case below can happen if we have a small segment appended
+        * to a large, or when the previous alloc was at the very end of
+        * the available space. If so, go back to the initial start.
+        */
+       if (start >= limit)
+               start = largealloc ? tbl->it_largehint : tbl->it_hint;
+       
+ again:
+
+       n = find_next_zero_bit(tbl->it_map, limit, start);
+
+       /* Align allocation */
+       n = (n + align_mask) & ~align_mask;
+
+       end = n + npages;
+
+       if (unlikely(end >= limit)) {
+               if (likely(pass < 2)) {
+                       /* First failure, just rescan the half of the table.
+                        * Second failure, rescan the other half of the table.
+                        */
+                       start = (largealloc ^ pass) ? tbl->it_halfpoint : 0;
+                       limit = pass ? tbl->it_size : limit;
+                       pass++;
+                       goto again;
+               } else {
+                       /* Third failure, give up */
+                       return DMA_ERROR_CODE;
+               }
+       }
+
+       for (i = n; i < end; i++)
+               if (test_bit(i, tbl->it_map)) {
+                       start = i+1;
+                       goto again;
+               }
+
+       for (i = n; i < end; i++)
+               __set_bit(i, tbl->it_map);
+
+       /* Bump the hint to a new block for small allocs. */
+       if (largealloc) {
+               /* Don't bump to new block to avoid fragmentation */
+               tbl->it_largehint = end;
+       } else {
+               /* Overflow will be taken care of at the next allocation */
+               tbl->it_hint = (end + tbl->it_blocksize - 1) &
+                               ~(tbl->it_blocksize - 1);
+       }
+
+       /* Update handle for SG allocations */
+       if (handle)
+               *handle = end;
+
+       return n;
+}
+
+static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
+                      unsigned int npages, enum dma_data_direction direction,
+                      unsigned int align_order)
+{
+       unsigned long entry, flags;
+       dma_addr_t ret = DMA_ERROR_CODE;
+       
+       spin_lock_irqsave(&(tbl->it_lock), flags);
+
+       entry = iommu_range_alloc(tbl, npages, NULL, align_order);
+
+       if (unlikely(entry == DMA_ERROR_CODE)) {
+               spin_unlock_irqrestore(&(tbl->it_lock), flags);
+               return DMA_ERROR_CODE;
+       }
+
+       entry += tbl->it_offset;        /* Offset into real TCE table */
+       ret = entry << PAGE_SHIFT;      /* Set the return dma address */
+
+       /* Put the TCEs in the HW table */
+       ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & PAGE_MASK,
+                        direction);
+
+
+       /* Flush/invalidate TLB caches if necessary */
+       if (ppc_md.tce_flush)
+               ppc_md.tce_flush(tbl);
+
+       spin_unlock_irqrestore(&(tbl->it_lock), flags);
+
+       /* Make sure updates are seen by hardware */
+       mb();
+
+       return ret;
+}
+
+static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, 
+                        unsigned int npages)
+{
+       unsigned long entry, free_entry;
+       unsigned long i;
+
+       entry = dma_addr >> PAGE_SHIFT;
+       free_entry = entry - tbl->it_offset;
+
+       if (((free_entry + npages) > tbl->it_size) ||
+           (entry < tbl->it_offset)) {
+               if (printk_ratelimit()) {
+                       printk(KERN_INFO "iommu_free: invalid entry\n");
+                       printk(KERN_INFO "\tentry     = 0x%lx\n", entry); 
+                       printk(KERN_INFO "\tdma_addr  = 0x%lx\n", (u64)dma_addr);
+                       printk(KERN_INFO "\tTable     = 0x%lx\n", (u64)tbl);
+                       printk(KERN_INFO "\tbus#      = 0x%lx\n", (u64)tbl->it_busno);
+                       printk(KERN_INFO "\tsize      = 0x%lx\n", (u64)tbl->it_size);
+                       printk(KERN_INFO "\tstartOff  = 0x%lx\n", (u64)tbl->it_offset);
+                       printk(KERN_INFO "\tindex     = 0x%lx\n", (u64)tbl->it_index);
+                       WARN_ON(1);
+               }
+               return;
+       }
+
+       ppc_md.tce_free(tbl, entry, npages);
+       
+       for (i = 0; i < npages; i++)
+               __clear_bit(free_entry+i, tbl->it_map);
+}
+
+static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
+               unsigned int npages)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&(tbl->it_lock), flags);
+
+       __iommu_free(tbl, dma_addr, npages);
+
+       /* Make sure TLB cache is flushed if the HW needs it. We do
+        * not do an mb() here on purpose, it is not needed on any of
+        * the current platforms.
+        */
+       if (ppc_md.tce_flush)
+               ppc_md.tce_flush(tbl);
+
+       spin_unlock_irqrestore(&(tbl->it_lock), flags);
+}
+
+int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
+               struct scatterlist *sglist, int nelems,
+               enum dma_data_direction direction)
+{
+       dma_addr_t dma_next = 0, dma_addr;
+       unsigned long flags;
+       struct scatterlist *s, *outs, *segstart;
+       int outcount, incount;
+       unsigned long handle;
+
+       BUG_ON(direction == DMA_NONE);
+
+       if ((nelems == 0) || !tbl)
+               return 0;
+
+       outs = s = segstart = &sglist[0];
+       outcount = 1;
+       incount = nelems;
+       handle = 0;
+
+       /* Init first segment length for backout at failure */
+       outs->dma_length = 0;
+
+       DBG("mapping %d elements:\n", nelems);
+
+       spin_lock_irqsave(&(tbl->it_lock), flags);
+
+       for (s = outs; nelems; nelems--, s++) {
+               unsigned long vaddr, npages, entry, slen;
+
+               slen = s->length;
+               /* Sanity check */
+               if (slen == 0) {
+                       dma_next = 0;
+                       continue;
+               }
+               /* Allocate iommu entries for that segment */
+               vaddr = (unsigned long)page_address(s->page) + s->offset;
+               npages = PAGE_ALIGN(vaddr + slen) - (vaddr & PAGE_MASK);
+               npages >>= PAGE_SHIFT;
+               entry = iommu_range_alloc(tbl, npages, &handle, 0);
+
+               DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);
+
+               /* Handle failure */
+               if (unlikely(entry == DMA_ERROR_CODE)) {
+                       if (printk_ratelimit())
+                               printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %lx"
+                                      " npages %lx\n", tbl, vaddr, npages);
+                       goto failure;
+               }
+
+               /* Convert entry to a dma_addr_t */
+               entry += tbl->it_offset;
+               dma_addr = entry << PAGE_SHIFT;
+               dma_addr |= s->offset;
+
+               DBG("  - %lx pages, entry: %lx, dma_addr: %lx\n",
+                           npages, entry, dma_addr);
+
+               /* Insert into HW table */
+               ppc_md.tce_build(tbl, entry, npages, vaddr & PAGE_MASK, direction);
+
+               /* If we are in an open segment, try merging */
+               if (segstart != s) {
+                       DBG("  - trying merge...\n");
+                       /* We cannot merge if:
+                        * - allocated dma_addr isn't contiguous to previous allocation
+                        */
+                       if (novmerge || (dma_addr != dma_next)) {
+                               /* Can't merge: create a new segment */
+                               segstart = s;
+                               outcount++; outs++;
+                               DBG("    can't merge, new segment.\n");
+                       } else {
+                               outs->dma_length += s->length;
+                               DBG("    merged, new len: %lx\n", outs->dma_length);
+                       }
+               }
+
+               if (segstart == s) {
+                       /* This is a new segment, fill entries */
+                       DBG("  - filling new segment.\n");
+                       outs->dma_address = dma_addr;
+                       outs->dma_length = slen;
+               }
+
+               /* Calculate next page pointer for contiguous check */
+               dma_next = dma_addr + slen;
+
+               DBG("  - dma next is: %lx\n", dma_next);
+       }
+
+       /* Flush/invalidate TLB caches if necessary */
+       if (ppc_md.tce_flush)
+               ppc_md.tce_flush(tbl);
+
+       spin_unlock_irqrestore(&(tbl->it_lock), flags);
+
+       /* Make sure updates are seen by hardware */
+       mb();
+
+       DBG("mapped %d elements:\n", outcount);
+
+       /* For the sake of iommu_unmap_sg, we clear out the length in the
+        * next entry of the sglist if we didn't fill the list completely
+        */
+       if (outcount < incount) {
+               outs++;
+               outs->dma_address = DMA_ERROR_CODE;
+               outs->dma_length = 0;
+       }
+       return outcount;
+
+ failure:
+       for (s = &sglist[0]; s <= outs; s++) {
+               if (s->dma_length != 0) {
+                       unsigned long vaddr, npages;
+
+                       vaddr = s->dma_address & PAGE_MASK;
+                       npages = (PAGE_ALIGN(s->dma_address + s->dma_length) - vaddr)
+                               >> PAGE_SHIFT;
+                       __iommu_free(tbl, vaddr, npages);
+               }
+       }
+       spin_unlock_irqrestore(&(tbl->it_lock), flags);
+       return 0;
+}
+
+
+void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
+               int nelems, enum dma_data_direction direction)
+{
+       unsigned long flags;
+
+       BUG_ON(direction == DMA_NONE);
+
+       if (!tbl)
+               return;
+
+       spin_lock_irqsave(&(tbl->it_lock), flags);
+
+       while (nelems--) {
+               unsigned int npages;
+               dma_addr_t dma_handle = sglist->dma_address;
+
+               if (sglist->dma_length == 0)
+                       break;
+               npages = (PAGE_ALIGN(dma_handle + sglist->dma_length)
+                         - (dma_handle & PAGE_MASK)) >> PAGE_SHIFT;
+               __iommu_free(tbl, dma_handle, npages);
+               sglist++;
+       }
+
+       /* Flush/invalidate TLBs if necessary. As for iommu_free(), we
+        * do not do an mb() here, the affected platforms do not need it
+        * when freeing.
+        */
+       if (ppc_md.tce_flush)
+               ppc_md.tce_flush(tbl);
+
+       spin_unlock_irqrestore(&(tbl->it_lock), flags);
+}
+
+/*
+ * Build an iommu_table structure.  This contains a bit map which
+ * is used to manage allocation of the tce space.
+ */
+struct iommu_table *iommu_init_table(struct iommu_table *tbl)
+{
+       unsigned long sz;
+       static int welcomed = 0;
+
+       /* Set aside 1/4 of the table for large allocations. */
+       tbl->it_halfpoint = tbl->it_size * 3 / 4;
+
+       /* number of bytes needed for the bitmap */
+       sz = (tbl->it_size + 7) >> 3;
+
+       tbl->it_map = (unsigned long *)__get_free_pages(GFP_ATOMIC, get_order(sz));
+       if (!tbl->it_map)
+               panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
+
+       memset(tbl->it_map, 0, sz);
+
+       tbl->it_hint = 0;
+       tbl->it_largehint = tbl->it_halfpoint;
+       spin_lock_init(&tbl->it_lock);
+
+       /* Clear the hardware table in case firmware left allocations in it */
+       ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
+
+       if (!welcomed) {
+               printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
+                      novmerge ? "disabled" : "enabled");
+               welcomed = 1;
+       }
+
+       return tbl;
+}
+
+void iommu_free_table(struct device_node *dn)
+{
+       struct pci_dn *pdn = dn->data;
+       struct iommu_table *tbl = pdn->iommu_table;
+       unsigned long bitmap_sz, i;
+       unsigned int order;
+
+       if (!tbl || !tbl->it_map) {
+               printk(KERN_ERR "%s: expected TCE map for %s\n", __FUNCTION__,
+                               dn->full_name);
+               return;
+       }
+
+       /* verify that table contains no entries */
+       /* it_size is in entries, and we're examining 64 at a time */
+       for (i = 0; i < (tbl->it_size/64); i++) {
+               if (tbl->it_map[i] != 0) {
+                       printk(KERN_WARNING "%s: Unexpected TCEs for %s\n",
+                               __FUNCTION__, dn->full_name);
+                       break;
+               }
+       }
+
+       /* calculate bitmap size in bytes */
+       bitmap_sz = (tbl->it_size + 7) / 8;
+
+       /* free bitmap */
+       order = get_order(bitmap_sz);
+       free_pages((unsigned long) tbl->it_map, order);
+
+       /* free table */
+       kfree(tbl);
+}
+
+/* Creates TCEs for a user provided buffer.  The user buffer must be
+ * contiguous real kernel storage (not vmalloc).  The address of the buffer
+ * passed here is the kernel (virtual) address of the buffer.  The buffer
+ * need not be page aligned, the dma_addr_t returned will point to the same
+ * byte within the page as vaddr.
+ */
+dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
+               size_t size, enum dma_data_direction direction)
+{
+       dma_addr_t dma_handle = DMA_ERROR_CODE;
+       unsigned long uaddr;
+       unsigned int npages;
+
+       BUG_ON(direction == DMA_NONE);
+
+       uaddr = (unsigned long)vaddr;
+       npages = PAGE_ALIGN(uaddr + size) - (uaddr & PAGE_MASK);
+       npages >>= PAGE_SHIFT;
+
+       if (tbl) {
+               dma_handle = iommu_alloc(tbl, vaddr, npages, direction, 0);
+               if (dma_handle == DMA_ERROR_CODE) {
+                       if (printk_ratelimit())  {
+                               printk(KERN_INFO "iommu_alloc failed, "
+                                               "tbl %p vaddr %p npages %d\n",
+                                               tbl, vaddr, npages);
+                       }
+               } else
+                       dma_handle |= (uaddr & ~PAGE_MASK);
+       }
+
+       return dma_handle;
+}
+
+void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
+               size_t size, enum dma_data_direction direction)
+{
+       BUG_ON(direction == DMA_NONE);
+
+       if (tbl)
+               iommu_free(tbl, dma_handle, (PAGE_ALIGN(dma_handle + size) -
+                                       (dma_handle & PAGE_MASK)) >> PAGE_SHIFT);
+}
+
+/* Allocates a contiguous real buffer and creates mappings over it.
+ * Returns the virtual address of the buffer and sets dma_handle
+ * to the dma address (mapping) of the first page.
+ */
+void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
+               dma_addr_t *dma_handle, gfp_t flag)
+{
+       void *ret = NULL;
+       dma_addr_t mapping;
+       unsigned int npages, order;
+
+       size = PAGE_ALIGN(size);
+       npages = size >> PAGE_SHIFT;
+       order = get_order(size);
+
+       /*
+        * Client asked for way too much space.  This is checked later
+        * anyway.  It is easier to debug here for the drivers than in
+        * the tce tables.
+        */
+       if (order >= IOMAP_MAX_ORDER) {
+               printk("iommu_alloc_consistent size too large: 0x%lx\n", size);
+               return NULL;
+       }
+
+       if (!tbl)
+               return NULL;
+
+       /* Alloc enough pages (and possibly more) */
+       ret = (void *)__get_free_pages(flag, order);
+       if (!ret)
+               return NULL;
+       memset(ret, 0, size);
+
+       /* Set up tces to cover the allocated range */
+       mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL, order);
+       if (mapping == DMA_ERROR_CODE) {
+               free_pages((unsigned long)ret, order);
+               ret = NULL;
+       } else
+               *dma_handle = mapping;
+       return ret;
+}
+
+void iommu_free_coherent(struct iommu_table *tbl, size_t size,
+                        void *vaddr, dma_addr_t dma_handle)
+{
+       unsigned int npages;
+
+       if (tbl) {
+               size = PAGE_ALIGN(size);
+               npages = size >> PAGE_SHIFT;
+               iommu_free(tbl, dma_handle, npages);
+               free_pages((unsigned long)vaddr, get_order(size));
+       }
+}
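
For reference, a sketch (not part of this commit) of how a bus glue layer such as pci_iommu.c or the VIO code would drive these primitives; the iommu_table pointer is assumed to have already been set up by the platform via iommu_init_table().

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <asm/iommu.h>

/* Illustrative only: map one buffer through a device's TCE table and
 * release it again. */
static int example_tce_map(struct iommu_table *tbl, void *buf, size_t len)
{
        dma_addr_t handle = iommu_map_single(tbl, buf, len, DMA_TO_DEVICE);

        if (handle == DMA_ERROR_CODE)
                return -ENOMEM;

        /* ... hand 'handle' to the hardware ... */

        iommu_unmap_single(tbl, handle, len, DMA_TO_DEVICE);
        return 0;
}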
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
new file mode 100644 (file)
index 0000000..511af54
--- /dev/null
@@ -0,0 +1,459 @@
+/*
+ *  Kernel Probes (KProbes)
+ *  arch/ppc64/kernel/kprobes.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2002, 2004
+ *
+ * 2002-Oct    Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
+ *             Probes initial implementation ( includes contributions from
+ *             Rusty Russell).
+ * 2004-July   Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
+ *             interface to access function arguments.
+ * 2004-Nov    Ananth N Mavinakayanahalli <ananth@in.ibm.com> kprobes port
+ *             for PPC64
+ */
+
+#include <linux/config.h>
+#include <linux/kprobes.h>
+#include <linux/ptrace.h>
+#include <linux/preempt.h>
+#include <asm/cacheflush.h>
+#include <asm/kdebug.h>
+#include <asm/sstep.h>
+
+static DECLARE_MUTEX(kprobe_mutex);
+DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
+{
+       int ret = 0;
+       kprobe_opcode_t insn = *p->addr;
+
+       if ((unsigned long)p->addr & 0x03) {
+               printk("Attempt to register kprobe at an unaligned address\n");
+               ret = -EINVAL;
+       } else if (IS_MTMSRD(insn) || IS_RFID(insn)) {
+               printk("Cannot register a kprobe on rfid or mtmsrd\n");
+               ret = -EINVAL;
+       }
+
+       /* insn must be on a special executable page on ppc64 */
+       if (!ret) {
+               down(&kprobe_mutex);
+               p->ainsn.insn = get_insn_slot();
+               up(&kprobe_mutex);
+               if (!p->ainsn.insn)
+                       ret = -ENOMEM;
+       }
+       return ret;
+}
+
+void __kprobes arch_copy_kprobe(struct kprobe *p)
+{
+       memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+       p->opcode = *p->addr;
+}
+
+void __kprobes arch_arm_kprobe(struct kprobe *p)
+{
+       *p->addr = BREAKPOINT_INSTRUCTION;
+       flush_icache_range((unsigned long) p->addr,
+                          (unsigned long) p->addr + sizeof(kprobe_opcode_t));
+}
+
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
+{
+       *p->addr = p->opcode;
+       flush_icache_range((unsigned long) p->addr,
+                          (unsigned long) p->addr + sizeof(kprobe_opcode_t));
+}
+
+void __kprobes arch_remove_kprobe(struct kprobe *p)
+{
+       down(&kprobe_mutex);
+       free_insn_slot(p->ainsn.insn);
+       up(&kprobe_mutex);
+}
+
+static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+{
+       kprobe_opcode_t insn = *p->ainsn.insn;
+
+       regs->msr |= MSR_SE;
+
+       /* single step inline if it is a trap variant */
+       if (is_trap(insn))
+               regs->nip = (unsigned long)p->addr;
+       else
+               regs->nip = (unsigned long)p->ainsn.insn;
+}
+
+static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+       kcb->prev_kprobe.kp = kprobe_running();
+       kcb->prev_kprobe.status = kcb->kprobe_status;
+       kcb->prev_kprobe.saved_msr = kcb->kprobe_saved_msr;
+}
+
+static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+       __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+       kcb->kprobe_status = kcb->prev_kprobe.status;
+       kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr;
+}
+
+static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+                               struct kprobe_ctlblk *kcb)
+{
+       __get_cpu_var(current_kprobe) = p;
+       kcb->kprobe_saved_msr = regs->msr;
+}
+
+/* Called with kretprobe_lock held */
+void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
+                                     struct pt_regs *regs)
+{
+       struct kretprobe_instance *ri;
+
+       if ((ri = get_free_rp_inst(rp)) != NULL) {
+               ri->rp = rp;
+               ri->task = current;
+               ri->ret_addr = (kprobe_opcode_t *)regs->link;
+
+               /* Replace the return addr with trampoline addr */
+               regs->link = (unsigned long)kretprobe_trampoline;
+               add_rp_inst(ri);
+       } else {
+               rp->nmissed++;
+       }
+}
+
+static inline int kprobe_handler(struct pt_regs *regs)
+{
+       struct kprobe *p;
+       int ret = 0;
+       unsigned int *addr = (unsigned int *)regs->nip;
+       struct kprobe_ctlblk *kcb;
+
+       /*
+        * We don't want to be preempted for the entire
+        * duration of kprobe processing
+        */
+       preempt_disable();
+       kcb = get_kprobe_ctlblk();
+
+       /* Check we're not actually recursing */
+       if (kprobe_running()) {
+               p = get_kprobe(addr);
+               if (p) {
+                       kprobe_opcode_t insn = *p->ainsn.insn;
+                       if (kcb->kprobe_status == KPROBE_HIT_SS &&
+                                       is_trap(insn)) {
+                               regs->msr &= ~MSR_SE;
+                               regs->msr |= kcb->kprobe_saved_msr;
+                               goto no_kprobe;
+                       }
+                       /* We have reentered the kprobe_handler(), since
+                        * another probe was hit while within the handler.
+                        * We here save the original kprobes variables and
+                        * just single step on the instruction of the new probe
+                        * without calling any user handlers.
+                        */
+                       save_previous_kprobe(kcb);
+                       set_current_kprobe(p, regs, kcb);
+                       kcb->kprobe_saved_msr = regs->msr;
+                       p->nmissed++;
+                       prepare_singlestep(p, regs);
+                       kcb->kprobe_status = KPROBE_REENTER;
+                       return 1;
+               } else {
+                       p = __get_cpu_var(current_kprobe);
+                       if (p->break_handler && p->break_handler(p, regs)) {
+                               goto ss_probe;
+                       }
+               }
+               goto no_kprobe;
+       }
+
+       p = get_kprobe(addr);
+       if (!p) {
+               if (*addr != BREAKPOINT_INSTRUCTION) {
+                       /*
+                        * PowerPC has multiple variants of the "trap"
+                        * instruction. If the current instruction is a
+                        * trap variant, it could belong to someone else
+                        */
+                       kprobe_opcode_t cur_insn = *addr;
+                       if (is_trap(cur_insn))
+                               goto no_kprobe;
+                       /*
+                        * The breakpoint instruction was removed right
+                        * after we hit it.  Another cpu has removed
+                        * either a probepoint or a debugger breakpoint
+                        * at this address.  In either case, no further
+                        * handling of this interrupt is appropriate.
+                        */
+                       ret = 1;
+               }
+               /* Not one of ours: let kernel handle it */
+               goto no_kprobe;
+       }
+
+       kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+       set_current_kprobe(p, regs, kcb);
+       if (p->pre_handler && p->pre_handler(p, regs))
+               /* handler has already set things up, so skip ss setup */
+               return 1;
+
+ss_probe:
+       prepare_singlestep(p, regs);
+       kcb->kprobe_status = KPROBE_HIT_SS;
+       return 1;
+
+no_kprobe:
+       preempt_enable_no_resched();
+       return ret;
+}
+
+/*
+ * Function return probe trampoline:
+ *     - init_kprobes() establishes a probepoint here
+ *     - When the probed function returns, this probe
+ *             causes the handlers to fire
+ */
+void kretprobe_trampoline_holder(void)
+{
+       asm volatile(".global kretprobe_trampoline\n"
+                       "kretprobe_trampoline:\n"
+                       "nop\n");
+}
+
+/*
+ * Called when the probe at kretprobe trampoline is hit
+ */
+int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+{
+        struct kretprobe_instance *ri = NULL;
+        struct hlist_head *head;
+        struct hlist_node *node, *tmp;
+       unsigned long flags, orig_ret_address = 0;
+       unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
+
+       spin_lock_irqsave(&kretprobe_lock, flags);
+        head = kretprobe_inst_table_head(current);
+
+       /*
+        * It is possible to have multiple instances associated with a given
+        * task either because multiple functions in the call path have a
+        * return probe installed on them, and/or more than one return probe
+        * was registered for a target function.
+        *
+        * We can handle this because:
+        *     - instances are always inserted at the head of the list
+        *     - when multiple return probes are registered for the same
+         *       function, the first instance's ret_addr will point to the
+        *       real return address, and all the rest will point to
+        *       kretprobe_trampoline
+        */
+       hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+                if (ri->task != current)
+                       /* another task is sharing our hash bucket */
+                        continue;
+
+               if (ri->rp && ri->rp->handler)
+                       ri->rp->handler(ri, regs);
+
+               orig_ret_address = (unsigned long)ri->ret_addr;
+               recycle_rp_inst(ri);
+
+               if (orig_ret_address != trampoline_address)
+                       /*
+                        * This is the real return address. Any other
+                        * instances associated with this task are for
+                        * other calls deeper on the call stack
+                        */
+                       break;
+       }
+
+       BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
+       regs->nip = orig_ret_address;
+
+       reset_current_kprobe();
+       spin_unlock_irqrestore(&kretprobe_lock, flags);
+       preempt_enable_no_resched();
+
+        /*
+         * By returning a non-zero value, we are telling
+         * kprobe_handler() that we don't want the post_handler
+         * to run (and have re-enabled preemption)
+         */
+        return 1;
+}
+
+/*
+ * Called after single-stepping.  p->addr is the address of the
+ * instruction whose first byte has been replaced by the "breakpoint"
+ * instruction.  To avoid the SMP problems that can occur when we
+ * temporarily put back the original opcode to single-step, we
+ * single-stepped a copy of the instruction.  The address of this
+ * copy is p->ainsn.insn.
+ */
+static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
+{
+       int ret;
+       unsigned int insn = *p->ainsn.insn;
+
+       regs->nip = (unsigned long)p->addr;
+       ret = emulate_step(regs, insn);
+       if (ret == 0)
+               regs->nip = (unsigned long)p->addr + 4;
+}
+
+static inline int post_kprobe_handler(struct pt_regs *regs)
+{
+       struct kprobe *cur = kprobe_running();
+       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+       if (!cur)
+               return 0;
+
+       if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
+               kcb->kprobe_status = KPROBE_HIT_SSDONE;
+               cur->post_handler(cur, regs, 0);
+       }
+
+       resume_execution(cur, regs);
+       regs->msr |= kcb->kprobe_saved_msr;
+
+       /*Restore back the original saved kprobes variables and continue. */
+       if (kcb->kprobe_status == KPROBE_REENTER) {
+               restore_previous_kprobe(kcb);
+               goto out;
+       }
+       reset_current_kprobe();
+out:
+       preempt_enable_no_resched();
+
+       /*
+        * if somebody else is singlestepping across a probe point, msr
+        * will have SE set, in which case, continue the remaining processing
+        * of do_debug, as if this is not a probe hit.
+        */
+       if (regs->msr & MSR_SE)
+               return 0;
+
+       return 1;
+}
+
+static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+{
+       struct kprobe *cur = kprobe_running();
+       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+       if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
+               return 1;
+
+       if (kcb->kprobe_status & KPROBE_HIT_SS) {
+               resume_execution(cur, regs);
+               regs->msr &= ~MSR_SE;
+               regs->msr |= kcb->kprobe_saved_msr;
+
+               reset_current_kprobe();
+               preempt_enable_no_resched();
+       }
+       return 0;
+}
+
+/*
+ * Wrapper routine for handling exceptions.
+ */
+int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+                                      unsigned long val, void *data)
+{
+       struct die_args *args = (struct die_args *)data;
+       int ret = NOTIFY_DONE;
+
+       switch (val) {
+       case DIE_BPT:
+               if (kprobe_handler(args->regs))
+                       ret = NOTIFY_STOP;
+               break;
+       case DIE_SSTEP:
+               if (post_kprobe_handler(args->regs))
+                       ret = NOTIFY_STOP;
+               break;
+       case DIE_PAGE_FAULT:
+               /* kprobe_running() needs smp_processor_id() */
+               preempt_disable();
+               if (kprobe_running() &&
+                   kprobe_fault_handler(args->regs, args->trapnr))
+                       ret = NOTIFY_STOP;
+               preempt_enable();
+               break;
+       default:
+               break;
+       }
+       return ret;
+}
+
+int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+       struct jprobe *jp = container_of(p, struct jprobe, kp);
+       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+       memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));
+
+       /* setup return addr to the jprobe handler routine */
+       regs->nip = (unsigned long)(((func_descr_t *)jp->entry)->entry);
+       regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
+
+       return 1;
+}
+
+void __kprobes jprobe_return(void)
+{
+       asm volatile("trap" ::: "memory");
+}
+
+void __kprobes jprobe_return_end(void)
+{
+};
+
+int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+{
+       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+       /*
+        * FIXME - we should ideally be validating that we got here 'cos
+        * of the "trap" in jprobe_return() above, before restoring the
+        * saved regs...
+        */
+       memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
+       preempt_enable_no_resched();
+       return 1;
+}
+
+static struct kprobe trampoline_p = {
+       .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
+       .pre_handler = trampoline_probe_handler
+};
+
+int __init arch_init_kprobes(void)
+{
+       return register_kprobe(&trampoline_p);
+}
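
For reference, a minimal module sketch (not part of this commit) of the register_kprobe() usage described in the Kconfig help earlier in this patch. The probe address is deliberately left unset here and would normally be filled in by the caller; it is the purely hypothetical part of the example.

#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>

/* Illustrative only: print the interrupted NIP each time the probe fires.
 * Returning 0 lets the probed instruction be single-stepped as usual. */
static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
        printk(KERN_INFO "kprobe hit at nip 0x%lx\n", regs->nip);
        return 0;
}

static struct kprobe example_kp = {
        .pre_handler = example_pre,
        /* .addr must be set to an aligned kernel text address before
         * registration; arch_prepare_kprobe() above rejects unaligned
         * addresses and rfid/mtmsrd instructions. */
};

static int __init example_init(void)
{
        return register_kprobe(&example_kp);
}

static void __exit example_exit(void)
{
        unregister_kprobe(&example_kp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");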
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
new file mode 100644 (file)
index 0000000..97c51e4
--- /dev/null
@@ -0,0 +1,358 @@
+/*
+ * machine_kexec.c - handle transition of Linux booting another kernel
+ *
+ * Copyright (C) 2004-2005, IBM Corp.
+ *
+ * Created by: Milton D Miller II
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2.  See the file COPYING for more details.
+ */
+
+
+#include <linux/cpumask.h>
+#include <linux/kexec.h>
+#include <linux/smp.h>
+#include <linux/thread_info.h>
+#include <linux/errno.h>
+
+#include <asm/page.h>
+#include <asm/current.h>
+#include <asm/machdep.h>
+#include <asm/cacheflush.h>
+#include <asm/paca.h>
+#include <asm/mmu.h>
+#include <asm/sections.h>      /* _end */
+#include <asm/prom.h>
+#include <asm/smp.h>
+
+#define HASH_GROUP_SIZE 0x80   /* size of each hash group, asm/mmu.h */
+
+/* Have this around till we move it into crash specific file */
+note_buf_t crash_notes[NR_CPUS];
+
+/* Dummy for now. Not sure if we need to have a crash shutdown in here
+ * and what it would achieve. Leaving it in for now so the code compiles
+ * in the generic kexec environment.
+ */
+void machine_crash_shutdown(struct pt_regs *regs)
+{
+       /* do nothing right now */
+       /* smp_release_cpus() if we want smp on panic kernel */
+       /* cpu_irq_down to isolate us until we are ready */
+}
+
+int machine_kexec_prepare(struct kimage *image)
+{
+       int i;
+       unsigned long begin, end;       /* limits of segment */
+       unsigned long low, high;        /* limits of blocked memory range */
+       struct device_node *node;
+       unsigned long *basep;
+       unsigned int *sizep;
+
+       if (!ppc_md.hpte_clear_all)
+               return -ENOENT;
+
+       /*
+        * Since we use the kernel fault handlers and paging code to
+        * handle the virtual mode, we must make sure no destination
+        * overlaps kernel static data or bss.
+        */
+       for (i = 0; i < image->nr_segments; i++)
+               if (image->segment[i].mem < __pa(_end))
+                       return -ETXTBSY;
+
+       /*
+        * For non-LPAR, we absolutely can not overwrite the mmu hash
+        * table, since we are still using the bolted entries in it to
+        * do the copy.  Check that here.
+        *
+        * It is safe if the end is below the start of the blocked
+        * region (end <= low), or if the beginning is after the
+        * end of the blocked region (begin >= high).  Use the
+        * boolean identity !(a || b)  === (!a && !b).
+        */
+       if (htab_address) {
+               low = __pa(htab_address);
+               high = low + (htab_hash_mask + 1) * HASH_GROUP_SIZE;
+
+               for (i = 0; i < image->nr_segments; i++) {
+                       begin = image->segment[i].mem;
+                       end = begin + image->segment[i].memsz;
+
+                       if ((begin < high) && (end > low))
+                               return -ETXTBSY;
+               }
+       }
+
+       /* We also should not overwrite the tce tables */
+       for (node = of_find_node_by_type(NULL, "pci"); node != NULL;
+                       node = of_find_node_by_type(node, "pci")) {
+               basep = (unsigned long *)get_property(node, "linux,tce-base",
+                                                       NULL);
+               sizep = (unsigned int *)get_property(node, "linux,tce-size",
+                                                       NULL);
+               if (basep == NULL || sizep == NULL)
+                       continue;
+
+               low = *basep;
+               high = low + (*sizep);
+
+               for (i = 0; i < image->nr_segments; i++) {
+                       begin = image->segment[i].mem;
+                       end = begin + image->segment[i].memsz;
+
+                       if ((begin < high) && (end > low))
+                               return -ETXTBSY;
+               }
+       }
+
+       return 0;
+}
+
+void machine_kexec_cleanup(struct kimage *image)
+{
+       /* we do nothing in prepare that needs to be undone */
+}
+
+#define IND_FLAGS (IND_DESTINATION | IND_INDIRECTION | IND_DONE | IND_SOURCE)
+
+static void copy_segments(unsigned long ind)
+{
+       unsigned long entry;
+       unsigned long *ptr;
+       void *dest;
+       void *addr;
+
+       /*
+        * We rely on kexec_load to create a list that properly
+        * initializes these pointers before they are used.
+        * We will still crash if the list is wrong, but at least
+        * the compiler will be quiet.
+        */
+       ptr = NULL;
+       dest = NULL;
+
+       for (entry = ind; !(entry & IND_DONE); entry = *ptr++) {
+               addr = __va(entry & PAGE_MASK);
+
+               switch (entry & IND_FLAGS) {
+               case IND_DESTINATION:
+                       dest = addr;
+                       break;
+               case IND_INDIRECTION:
+                       ptr = addr;
+                       break;
+               case IND_SOURCE:
+                       copy_page(dest, addr);
+                       dest += PAGE_SIZE;
+               }
+       }
+}
+
+void kexec_copy_flush(struct kimage *image)
+{
+       long i, nr_segments = image->nr_segments;
+       struct  kexec_segment ranges[KEXEC_SEGMENT_MAX];
+
+       /* save the ranges on the stack to efficiently flush the icache */
+       memcpy(ranges, image->segment, sizeof(ranges));
+
+       /*
+        * After this call we may not use anything allocated in dynamic
+        * memory, including *image.
+        *
+        * Only globals and the stack are allowed.
+        */
+       copy_segments(image->head);
+
+       /*
+        * we need to clear the icache for all dest pages sometime,
+        * including ones that were in place on the original copy
+        */
+       for (i = 0; i < nr_segments; i++)
+               flush_icache_range(ranges[i].mem + KERNELBASE,
+                               ranges[i].mem + KERNELBASE +
+                               ranges[i].memsz);
+}
+
+#ifdef CONFIG_SMP
+
+/* FIXME: we should schedule this function to be called on all cpus based
+ * on calling the interrupts, but we would like to call it off irq level
+ * so that the interrupt controller is clean.
+ */
+void kexec_smp_down(void *arg)
+{
+       if (ppc_md.kexec_cpu_down)
+               ppc_md.kexec_cpu_down(0, 1);
+
+       local_irq_disable();
+       kexec_smp_wait();
+       /* NOTREACHED */
+}
+
+static void kexec_prepare_cpus(void)
+{
+       int my_cpu, i, notified=-1;
+
+       smp_call_function(kexec_smp_down, NULL, 0, /* wait */0);
+       my_cpu = get_cpu();
+
+       /* check that the other cpus are now down (via paca hw cpu id == -1) */
+       for (i=0; i < NR_CPUS; i++) {
+               if (i == my_cpu)
+                       continue;
+
+               while (paca[i].hw_cpu_id != -1) {
+                       barrier();
+                       if (!cpu_possible(i)) {
+                               printk("kexec: cpu %d hw_cpu_id %d is not"
+                                               " possible, ignoring\n",
+                                               i, paca[i].hw_cpu_id);
+                               break;
+                       }
+                       if (!cpu_online(i)) {
+                               /* Fixme: this can be spinning in
+                                * pSeries_secondary_wait with a paca
+                                * waiting for it to go online.
+                                */
+                               printk("kexec: cpu %d hw_cpu_id %d is not"
+                                               " online, ignoring\n",
+                                               i, paca[i].hw_cpu_id);
+                               break;
+                       }
+                       if (i != notified) {
+                               printk( "kexec: waiting for cpu %d (physical"
+                                               " %d) to go down\n",
+                                               i, paca[i].hw_cpu_id);
+                               notified = i;
+                       }
+               }
+       }
+
+       /* after we tell the others to go down */
+       if (ppc_md.kexec_cpu_down)
+               ppc_md.kexec_cpu_down(0, 0);
+
+       put_cpu();
+
+       local_irq_disable();
+}
+
+#else /* ! SMP */
+
+static void kexec_prepare_cpus(void)
+{
+       /*
+        * move the secondaries to us so that we can copy
+        * the new kernel 0-0x100 safely
+        *
+        * do this if kexec in setup.c ?
+        *
+        * We need to release the cpus if we are ever going from a
+        * UP to an SMP kernel.
+        */
+       smp_release_cpus();
+       if (ppc_md.kexec_cpu_down)
+               ppc_md.kexec_cpu_down(0, 0);
+       local_irq_disable();
+}
+
+#endif /* SMP */
+
+/*
+ * kexec thread structure and stack.
+ *
+ * We need to make sure that this is 16384-byte aligned due to the
+ * way process stacks are handled.  It also must be statically allocated
+ * or allocated as part of the kimage, because everything else may be
+ * overwritten when we copy the kexec image.  We piggyback on the
+ * "init_task" linker section here to statically allocate a stack.
+ *
+ * We could use a smaller stack if we don't care about anything using
+ * current, but that audit has not been performed.
+ */
+union thread_union kexec_stack
+       __attribute__((__section__(".data.init_task"))) = { };
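+/* (The linker script is expected to place .data.init_task at a 16384-byte
+ * aligned boundary; that assumption is what provides the alignment
+ * described above.) */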
+
+/* Our assembly helper, in kexec_stub.S */
+extern NORET_TYPE void kexec_sequence(void *newstack, unsigned long start,
+                                       void *image, void *control,
+                                       void (*clear_all)(void)) ATTRIB_NORET;
+
+/* too late to fail here */
+void machine_kexec(struct kimage *image)
+{
+
+       /* prepare control code if any */
+
+       /* shutdown other cpus into our wait loop and quiesce interrupts */
+       kexec_prepare_cpus();
+
+       /* switch to a statically allocated stack.  Based on irq stack code.
+        * XXX: the task struct will likely be invalid once we do the copy!
+        */
+       kexec_stack.thread_info.task = current_thread_info()->task;
+       kexec_stack.thread_info.flags = 0;
+
+       /* Some things are best done in assembly.  Finding globals with
+        * a toc is easier in C, so pass in what we can.
+        */
+       kexec_sequence(&kexec_stack, image->start, image,
+                       page_address(image->control_code_page),
+                       ppc_md.hpte_clear_all);
+       /* NOTREACHED */
+}
+
+/* Values we need to export to the second kernel via the device tree. */
+static unsigned long htab_base, htab_size, kernel_end;
+
+static struct property htab_base_prop = {
+       .name = "linux,htab-base",
+       .length = sizeof(unsigned long),
+       .value = (unsigned char *)&htab_base,
+};
+
+static struct property htab_size_prop = {
+       .name = "linux,htab-size",
+       .length = sizeof(unsigned long),
+       .value = (unsigned char *)&htab_size,
+};
+
+static struct property kernel_end_prop = {
+       .name = "linux,kernel-end",
+       .length = sizeof(unsigned long),
+       .value = (unsigned char *)&kernel_end,
+};
+
+static void __init export_htab_values(void)
+{
+       struct device_node *node;
+
+       node = of_find_node_by_path("/chosen");
+       if (!node)
+               return;
+
+       kernel_end = __pa(_end);
+       prom_add_property(node, &kernel_end_prop);
+
+       /* On machines with no htab, htab_address is NULL */
+       if (NULL == htab_address)
+               goto out;
+
+       htab_base = __pa(htab_address);
+       prom_add_property(node, &htab_base_prop);
+
+       htab_size = 1UL << ppc64_pft_size;
+       prom_add_property(node, &htab_size_prop);
+
+ out:
+       of_node_put(node);
+}
+
+void __init kexec_setup(void)
+{
+       export_htab_values();
+}
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
new file mode 100644 (file)
index 0000000..928b858
--- /dev/null
@@ -0,0 +1,455 @@
+/*  Kernel module help for PPC64.
+    Copyright (C) 2001, 2003 Rusty Russell IBM Corporation.
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+*/
+#include <linux/module.h>
+#include <linux/elf.h>
+#include <linux/moduleloader.h>
+#include <linux/err.h>
+#include <linux/vmalloc.h>
+#include <asm/module.h>
+#include <asm/uaccess.h>
+
+/* FIXME: We don't do .init separately.  To do this, we'd need to have
+   a separate r2 value in the init and core section, and stub between
+   them, too.
+
+   Using a magic allocator which places modules within 32MB solves
+   this, and makes other things simpler.  Anton?
+   --RR.  */
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(fmt , ...)
+#endif
+
+/* There's actually a third entry here, but it's unused */
+struct ppc64_opd_entry
+{
+       unsigned long funcaddr;
+       unsigned long r2;
+};
+
+/* Like PPC32, we need little trampolines to do > 24-bit jumps (into
+   the kernel itself).  But on PPC64, these need to be used for every
+   jump, actually, to reset r2 (TOC+0x8000). */
+struct ppc64_stub_entry
+{
+       /* 28 byte jump instruction sequence (7 instructions) */
+       unsigned char jump[28];
+       unsigned char unused[4];
+       /* Data for the above code */
+       struct ppc64_opd_entry opd;
+};
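+
+/* Layout note: with the 28-byte jump sequence and 4 bytes of padding,
+   opd.funcaddr sits at offset 32 and opd.r2 at offset 40 from the start of
+   the stub, which is what the "ld r11,32(r12)" and "ld r2,40(r12)"
+   instructions below rely on (r12 is made to point at the stub itself). */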
+
+/* We use a stub to fix up r2 (TOC ptr) and to jump to the (external)
+   function which may be more than 24-bits away.  We could simply
+   patch the new r2 value and function pointer into the stub, but it's
+   significantly shorter to put these values at the end of the stub
+   code, and patch the stub address (32-bits relative to the TOC ptr,
+   r2) into the stub. */
+static struct ppc64_stub_entry ppc64_stub =
+{ .jump = {
+       0x3d, 0x82, 0x00, 0x00, /* addis   r12,r2, <high> */
+       0x39, 0x8c, 0x00, 0x00, /* addi    r12,r12, <low> */
+       /* Save current r2 value in magic place on the stack. */
+       0xf8, 0x41, 0x00, 0x28, /* std     r2,40(r1) */
+       0xe9, 0x6c, 0x00, 0x20, /* ld      r11,32(r12) */
+       0xe8, 0x4c, 0x00, 0x28, /* ld      r2,40(r12) */
+       0x7d, 0x69, 0x03, 0xa6, /* mtctr   r11 */
+       0x4e, 0x80, 0x04, 0x20  /* bctr */
+} };
+
+/* Count how many different 24-bit relocations (different symbol,
+   different addend) */
+static unsigned int count_relocs(const Elf64_Rela *rela, unsigned int num)
+{
+       unsigned int i, j, ret = 0;
+
+       /* FIXME: Only count external ones --RR */
+       /* Sure, this is order(n^2), but it's usually short, and not
+           time critical */
+       for (i = 0; i < num; i++) {
+               /* Only count 24-bit relocs, others don't need stubs */
+               if (ELF64_R_TYPE(rela[i].r_info) != R_PPC_REL24)
+                       continue;
+               for (j = 0; j < i; j++) {
+                       /* If this addend appeared before, it's
+                           already been counted */
+                       if (rela[i].r_info == rela[j].r_info
+                           && rela[i].r_addend == rela[j].r_addend)
+                               break;
+               }
+               if (j == i) ret++;
+       }
+       return ret;
+}
+
+void *module_alloc(unsigned long size)
+{
+       if (size == 0)
+               return NULL;
+
+       return vmalloc_exec(size);
+}
+
+/* Free memory returned from module_alloc */
+void module_free(struct module *mod, void *module_region)
+{
+       vfree(module_region);
+       /* FIXME: If module_region == mod->init_region, trim exception
+           table entries. */
+}
+
+/* Get size of potential trampolines required. */
+static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
+                                   const Elf64_Shdr *sechdrs)
+{
+       /* One extra reloc so it's always 0-funcaddr terminated */
+       unsigned long relocs = 1;
+       unsigned i;
+
+       /* Every relocated section... */
+       for (i = 1; i < hdr->e_shnum; i++) {
+               if (sechdrs[i].sh_type == SHT_RELA) {
+                       DEBUGP("Found relocations in section %u\n", i);
+                       DEBUGP("Ptr: %p.  Number: %lu\n",
+                              (void *)sechdrs[i].sh_addr,
+                              sechdrs[i].sh_size / sizeof(Elf64_Rela));
+                       relocs += count_relocs((void *)sechdrs[i].sh_addr,
+                                              sechdrs[i].sh_size
+                                              / sizeof(Elf64_Rela));
+               }
+       }
+
+       DEBUGP("Looks like a total of %lu stubs, max\n", relocs);
+       return relocs * sizeof(struct ppc64_stub_entry);
+}
+
+static void dedotify_versions(struct modversion_info *vers,
+                             unsigned long size)
+{
+       struct modversion_info *end;
+
+       for (end = (void *)vers + size; vers < end; vers++)
+               if (vers->name[0] == '.')
+                       memmove(vers->name, vers->name+1, strlen(vers->name));
+}
+
+/* Undefined symbols which refer to .funcname, hack to funcname */
+static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
+{
+       unsigned int i;
+
+       for (i = 1; i < numsyms; i++) {
+               if (syms[i].st_shndx == SHN_UNDEF) {
+                       char *name = strtab + syms[i].st_name;
+                       if (name[0] == '.')
+                               memmove(name, name+1, strlen(name));
+               }
+       }
+}
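+
+/* Background sketch: under the 64-bit ELF ABI a function "foo" has two
+   symbols, ".foo" for the code entry point and "foo" for its function
+   descriptor.  The kernel exports the descriptor names, so undefined ".foo"
+   references in a module are renamed to "foo" above, and the R_PPC_REL24
+   calls to them are then routed through a stub which picks up the entry
+   point and TOC value from that descriptor (see stub_for_addr below). */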
+
+int module_frob_arch_sections(Elf64_Ehdr *hdr,
+                             Elf64_Shdr *sechdrs,
+                             char *secstrings,
+                             struct module *me)
+{
+       unsigned int i;
+
+       /* Find .toc and .stubs sections, symtab and strtab */
+       for (i = 1; i < hdr->e_shnum; i++) {
+               char *p;
+               if (strcmp(secstrings + sechdrs[i].sh_name, ".stubs") == 0)
+                       me->arch.stubs_section = i;
+               else if (strcmp(secstrings + sechdrs[i].sh_name, ".toc") == 0)
+                       me->arch.toc_section = i;
+               else if (strcmp(secstrings+sechdrs[i].sh_name,"__versions")==0)
+                       dedotify_versions((void *)hdr + sechdrs[i].sh_offset,
+                                         sechdrs[i].sh_size);
+
+               /* We don't handle .init for the moment: rename to _init */
+               while ((p = strstr(secstrings + sechdrs[i].sh_name, ".init")))
+                       p[0] = '_';
+
+               if (sechdrs[i].sh_type == SHT_SYMTAB)
+                       dedotify((void *)hdr + sechdrs[i].sh_offset,
+                                sechdrs[i].sh_size / sizeof(Elf64_Sym),
+                                (void *)hdr
+                                + sechdrs[sechdrs[i].sh_link].sh_offset);
+       }
+       if (!me->arch.stubs_section || !me->arch.toc_section) {
+               printk("%s: doesn't contain .toc or .stubs.\n", me->name);
+               return -ENOEXEC;
+       }
+
+       /* Override the stubs size */
+       sechdrs[me->arch.stubs_section].sh_size = get_stubs_size(hdr, sechdrs);
+       return 0;
+}
+
+int apply_relocate(Elf64_Shdr *sechdrs,
+                  const char *strtab,
+                  unsigned int symindex,
+                  unsigned int relsec,
+                  struct module *me)
+{
+       printk(KERN_ERR "%s: Non-ADD RELOCATION unsupported\n", me->name);
+       return -ENOEXEC;
+}
+
+/* r2 is the TOC pointer: it actually points 0x8000 into the TOC (this
+   gives the value maximum span in an instruction which uses a signed
+   offset) */
+static inline unsigned long my_r2(Elf64_Shdr *sechdrs, struct module *me)
+{
+       return sechdrs[me->arch.toc_section].sh_addr + 0x8000;
+}
+
+/* Both low and high 16 bits are added as SIGNED additions, so if low
+   16 bits has high bit set, high 16 bits must be adjusted.  These
+   macros do that (stolen from binutils). */
+#define PPC_LO(v) ((v) & 0xffff)
+#define PPC_HI(v) (((v) >> 16) & 0xffff)
+#define PPC_HA(v) PPC_HI ((v) + 0x8000)
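+/* For example, with v = 0x12348000: PPC_LO(v) = 0x8000, which sign-extends
+   to -0x8000, and PPC_HI(v) = 0x1234, so combining them naively would give
+   0x12338000.  PPC_HA(v) = PPC_HI(v + 0x8000) = 0x1235, and
+   0x12350000 - 0x8000 = 0x12348000 as intended. */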
+
+/* Patch stub to reference function and correct r2 value. */
+static inline int create_stub(Elf64_Shdr *sechdrs,
+                             struct ppc64_stub_entry *entry,
+                             struct ppc64_opd_entry *opd,
+                             struct module *me)
+{
+       Elf64_Half *loc1, *loc2;
+       long reladdr;
+
+       *entry = ppc64_stub;
+
+       loc1 = (Elf64_Half *)&entry->jump[2];
+       loc2 = (Elf64_Half *)&entry->jump[6];
+
+       /* Stub uses address relative to r2. */
+       reladdr = (unsigned long)entry - my_r2(sechdrs, me);
+       if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
+               printk("%s: Address %p of stub out of range of %p.\n",
+                      me->name, (void *)reladdr, (void *)my_r2(sechdrs, me));
+               return 0;
+       }
+       DEBUGP("Stub %p get data from reladdr %li\n", entry, reladdr);
+
+       *loc1 = PPC_HA(reladdr);
+       *loc2 = PPC_LO(reladdr);
+       entry->opd.funcaddr = opd->funcaddr;
+       entry->opd.r2 = opd->r2;
+       return 1;
+}
+
+/* Create stub to jump to function described in this OPD: we need the
+   stub to set up the TOC ptr (r2) for the function. */
+static unsigned long stub_for_addr(Elf64_Shdr *sechdrs,
+                                  unsigned long opdaddr,
+                                  struct module *me)
+{
+       struct ppc64_stub_entry *stubs;
+       struct ppc64_opd_entry *opd = (void *)opdaddr;
+       unsigned int i, num_stubs;
+
+       num_stubs = sechdrs[me->arch.stubs_section].sh_size / sizeof(*stubs);
+
+       /* Find this stub, or if that fails, the next avail. entry */
+       stubs = (void *)sechdrs[me->arch.stubs_section].sh_addr;
+       for (i = 0; stubs[i].opd.funcaddr; i++) {
+               BUG_ON(i >= num_stubs);
+
+               if (stubs[i].opd.funcaddr == opd->funcaddr)
+                       return (unsigned long)&stubs[i];
+       }
+
+       if (!create_stub(sechdrs, &stubs[i], opd, me))
+               return 0;
+
+       return (unsigned long)&stubs[i];
+}
+
+/* We expect a noop next: if it is, replace it with an instruction to
+   restore r2. */
+static int restore_r2(u32 *instruction, struct module *me)
+{
+       if (*instruction != 0x60000000) {
+               printk("%s: Expect noop after relocate, got %08x\n",
+                      me->name, *instruction);
+               return 0;
+       }
+       *instruction = 0xe8410028;      /* ld r2,40(r1) */
+       return 1;
+}
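+
+/* Call-site sketch: for an external call the compiler emits "bl <func>"
+   (carrying an R_PPC_REL24 reloc) followed by a nop.  apply_relocate_add()
+   below redirects the bl to the stub for that function, and restore_r2()
+   rewrites the nop into "ld r2,40(r1)" so the caller's TOC pointer, saved
+   by the stub at 40(r1), is reloaded after the call returns. */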
+
+int apply_relocate_add(Elf64_Shdr *sechdrs,
+                      const char *strtab,
+                      unsigned int symindex,
+                      unsigned int relsec,
+                      struct module *me)
+{
+       unsigned int i;
+       Elf64_Rela *rela = (void *)sechdrs[relsec].sh_addr;
+       Elf64_Sym *sym;
+       unsigned long *location;
+       unsigned long value;
+
+       DEBUGP("Applying ADD relocate section %u to %u\n", relsec,
+              sechdrs[relsec].sh_info);
+       for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
+               /* This is where to make the change */
+               location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+                       + rela[i].r_offset;
+               /* This is the symbol it is referring to */
+               sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
+                       + ELF64_R_SYM(rela[i].r_info);
+
+               DEBUGP("RELOC at %p: %li-type as %s (%lu) + %li\n",
+                      location, (long)ELF64_R_TYPE(rela[i].r_info),
+                      strtab + sym->st_name, (unsigned long)sym->st_value,
+                      (long)rela[i].r_addend);
+
+               /* `Everything is relative'. */
+               value = sym->st_value + rela[i].r_addend;
+
+               switch (ELF64_R_TYPE(rela[i].r_info)) {
+               case R_PPC64_ADDR32:
+                       /* Simply set it */
+                       *(u32 *)location = value;
+                       break;
+                       
+               case R_PPC64_ADDR64:
+                       /* Simply set it */
+                       *(unsigned long *)location = value;
+                       break;
+
+               case R_PPC64_TOC:
+                       *(unsigned long *)location = my_r2(sechdrs, me);
+                       break;
+
+               case R_PPC64_TOC16:
+                       /* Subtract TOC pointer */
+                       value -= my_r2(sechdrs, me);
+                       if (value + 0x8000 > 0xffff) {
+                               printk("%s: bad TOC16 relocation (%lu)\n",
+                                      me->name, value);
+                               return -ENOEXEC;
+                       }
+                       *((uint16_t *) location)
+                               = (*((uint16_t *) location) & ~0xffff)
+                               | (value & 0xffff);
+                       break;
+
+               case R_PPC64_TOC16_DS:
+                       /* Subtract TOC pointer */
+                       value -= my_r2(sechdrs, me);
+                       if ((value & 3) != 0 || value + 0x8000 > 0xffff) {
+                               printk("%s: bad TOC16_DS relocation (%lu)\n",
+                                      me->name, value);
+                               return -ENOEXEC;
+                       }
+                       *((uint16_t *) location)
+                               = (*((uint16_t *) location) & ~0xfffc)
+                               | (value & 0xfffc);
+                       break;
+
+               case R_PPC_REL24:
+                       /* FIXME: Handle weak symbols here --RR */
+                       if (sym->st_shndx == SHN_UNDEF) {
+                               /* External: go via stub */
+                               value = stub_for_addr(sechdrs, value, me);
+                               if (!value)
+                                       return -ENOENT;
+                               if (!restore_r2((u32 *)location + 1, me))
+                                       return -ENOEXEC;
+                       }
+
+                       /* Convert value to relative */
+                       value -= (unsigned long)location;
+                       if (value + 0x2000000 > 0x3ffffff || (value & 3) != 0){
+                               printk("%s: REL24 %li out of range!\n",
+                                      me->name, (long int)value);
+                               return -ENOEXEC;
+                       }
+
+                       /* Only replace bits 2 through 26 */
+                       *(uint32_t *)location 
+                               = (*(uint32_t *)location & ~0x03fffffc)
+                               | (value & 0x03fffffc);
+                       break;
+
+               default:
+                       printk("%s: Unknown ADD relocation: %lu\n",
+                              me->name,
+                              (unsigned long)ELF64_R_TYPE(rela[i].r_info));
+                       return -ENOEXEC;
+               }
+       }
+
+       return 0;
+}
+
+LIST_HEAD(module_bug_list);
+
+int module_finalize(const Elf_Ehdr *hdr,
+               const Elf_Shdr *sechdrs, struct module *me)
+{
+       char *secstrings;
+       unsigned int i;
+
+       me->arch.bug_table = NULL;
+       me->arch.num_bugs = 0;
+
+       /* Find the __bug_table section, if present */
+       secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+       for (i = 1; i < hdr->e_shnum; i++) {
+               if (strcmp(secstrings+sechdrs[i].sh_name, "__bug_table"))
+                       continue;
+               me->arch.bug_table = (void *) sechdrs[i].sh_addr;
+               me->arch.num_bugs = sechdrs[i].sh_size / sizeof(struct bug_entry);
+               break;
+       }
+
+       /*
+        * Strictly speaking this should have a spinlock to protect against
+        * traversals, but since we only traverse on BUG()s, a spinlock
+        * could potentially lead to deadlock and thus be counter-productive.
+        */
+       list_add(&me->arch.bug_list, &module_bug_list);
+
+       return 0;
+}
+
+void module_arch_cleanup(struct module *mod)
+{
+       list_del(&mod->arch.bug_list);
+}
+
+struct bug_entry *module_find_bug(unsigned long bugaddr)
+{
+       struct mod_arch_specific *mod;
+       unsigned int i;
+       struct bug_entry *bug;
+
+       list_for_each_entry(mod, &module_bug_list, bug_list) {
+               bug = mod->bug_table;
+               for (i = 0; i < mod->num_bugs; ++i, ++bug)
+                       if (bugaddr == bug->bug_addr)
+                               return bug;
+       }
+       return NULL;
+}
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
new file mode 100644 (file)
index 0000000..3cef1b8
--- /dev/null
@@ -0,0 +1,1319 @@
+/*
+ * Port for PPC64 David Engebretsen, IBM Corp.
+ * Contains common pci routines for ppc64 platform, pSeries and iSeries brands.
+ * 
+ * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
+ *   Rework, based on alpha PCI code.
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+#undef DEBUG
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/mm.h>
+#include <linux/list.h>
+#include <linux/syscalls.h>
+
+#include <asm/processor.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/pci-bridge.h>
+#include <asm/byteorder.h>
+#include <asm/irq.h>
+#include <asm/machdep.h>
+#include <asm/udbg.h>
+#include <asm/ppc-pci.h>
+
+#ifdef DEBUG
+#define DBG(fmt...) udbg_printf(fmt)
+#else
+#define DBG(fmt...)
+#endif
+
+unsigned long pci_probe_only = 1;
+unsigned long pci_assign_all_buses = 0;
+
+/*
+ * legal IO pages under MAX_ISA_PORT.  This is to ensure we don't touch
+ * devices we don't have access to.
+ */
+unsigned long io_page_mask;
+
+EXPORT_SYMBOL(io_page_mask);
+
+#ifdef CONFIG_PPC_MULTIPLATFORM
+static void fixup_resource(struct resource *res, struct pci_dev *dev);
+static void do_bus_setup(struct pci_bus *bus);
+#endif
+
+unsigned int pcibios_assign_all_busses(void)
+{
+       return pci_assign_all_buses;
+}
+
+/* pci_io_base -- the base address from which io bars are offsets.
+ * This is the lowest I/O base address (so bar values are always positive),
+ * and it *must* be the start of ISA space if an ISA bus exists because
+ * ISA drivers use hard coded offsets.  If no ISA bus exists a dummy
+ * page is mapped and isa_io_limit prevents access to it.
+ */
+unsigned long isa_io_base;     /* NULL if no ISA bus */
+EXPORT_SYMBOL(isa_io_base);
+unsigned long pci_io_base;
+EXPORT_SYMBOL(pci_io_base);
+
+void iSeries_pcibios_init(void);
+
+LIST_HEAD(hose_list);
+
+struct dma_mapping_ops pci_dma_ops;
+EXPORT_SYMBOL(pci_dma_ops);
+
+int global_phb_number;         /* Global phb counter */
+
+/* Cached ISA bridge dev. */
+struct pci_dev *ppc64_isabridge_dev = NULL;
+
+static void fixup_broken_pcnet32(struct pci_dev* dev)
+{
+       if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) {
+               dev->vendor = PCI_VENDOR_ID_AMD;
+               pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD);
+       }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32);
+
+void  pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
+                             struct resource *res)
+{
+       unsigned long offset = 0;
+       struct pci_controller *hose = pci_bus_to_host(dev->bus);
+
+       if (!hose)
+               return;
+
+       if (res->flags & IORESOURCE_IO)
+               offset = (unsigned long)hose->io_base_virt - pci_io_base;
+
+       if (res->flags & IORESOURCE_MEM)
+               offset = hose->pci_mem_offset;
+
+       region->start = res->start - offset;
+       region->end = res->end - offset;
+}
+
+void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
+                             struct pci_bus_region *region)
+{
+       unsigned long offset = 0;
+       struct pci_controller *hose = pci_bus_to_host(dev->bus);
+
+       if (!hose)
+               return;
+
+       if (res->flags & IORESOURCE_IO)
+               offset = (unsigned long)hose->io_base_virt - pci_io_base;
+
+       if (res->flags & IORESOURCE_MEM)
+               offset = hose->pci_mem_offset;
+
+       res->start = region->start + offset;
+       res->end = region->end + offset;
+}
+
+#ifdef CONFIG_HOTPLUG
+EXPORT_SYMBOL(pcibios_resource_to_bus);
+EXPORT_SYMBOL(pcibios_bus_to_resource);
+#endif
+
+/*
+ * We need to avoid collisions with `mirrored' VGA ports
+ * and other strange ISA hardware, so we always want the
+ * addresses to be allocated in the 0x000-0x0ff region
+ * modulo 0x400.
+ *
+ * Why? Because some silly external IO cards only decode
+ * the low 10 bits of the IO address. The 0x00-0xff region
+ * is reserved for motherboard devices that decode all 16
+ * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
+ * but we want to try to avoid allocating at 0x2900-0x2bff
+ * which might be mirrored at 0x0100-0x03ff.
+ */
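+/*
+ * For example, an I/O request at 0x2900 has bits in 0x300 set and is
+ * rounded up below to 0x2c00, which is back in the 0x000-0x0ff region
+ * modulo 0x400.
+ */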
+void pcibios_align_resource(void *data, struct resource *res,
+                           unsigned long size, unsigned long align)
+{
+       struct pci_dev *dev = data;
+       struct pci_controller *hose = pci_bus_to_host(dev->bus);
+       unsigned long start = res->start;
+       unsigned long alignto;
+
+       if (res->flags & IORESOURCE_IO) {
+               unsigned long offset = (unsigned long)hose->io_base_virt -
+                                       pci_io_base;
+               /* Make sure we start at our min on all hoses */
+               if (start - offset < PCIBIOS_MIN_IO)
+                       start = PCIBIOS_MIN_IO + offset;
+
+               /*
+                * Put everything into 0x00-0xff region modulo 0x400
+                */
+               if (start & 0x300)
+                       start = (start + 0x3ff) & ~0x3ff;
+
+       } else if (res->flags & IORESOURCE_MEM) {
+               /* Make sure we start at our min on all hoses */
+               if (start - hose->pci_mem_offset < PCIBIOS_MIN_MEM)
+                       start = PCIBIOS_MIN_MEM + hose->pci_mem_offset;
+
+               /* Align to multiple of size of minimum base.  */
+               alignto = max(0x1000UL, align);
+               start = ALIGN(start, alignto);
+       }
+
+       res->start = start;
+}
+
+static DEFINE_SPINLOCK(hose_spinlock);
+
+/*
+ * Initialize common variables of a pci_controller (phb).
+ */
+void __devinit pci_setup_pci_controller(struct pci_controller *hose)
+{
+       memset(hose, 0, sizeof(struct pci_controller));
+
+       spin_lock(&hose_spinlock);
+       hose->global_number = global_phb_number++;
+       list_add_tail(&hose->list_node, &hose_list);
+       spin_unlock(&hose_spinlock);
+}
+
+static void __init pcibios_claim_one_bus(struct pci_bus *b)
+{
+       struct pci_dev *dev;
+       struct pci_bus *child_bus;
+
+       list_for_each_entry(dev, &b->devices, bus_list) {
+               int i;
+
+               for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+                       struct resource *r = &dev->resource[i];
+
+                       if (r->parent || !r->start || !r->flags)
+                               continue;
+                       pci_claim_resource(dev, i);
+               }
+       }
+
+       list_for_each_entry(child_bus, &b->children, node)
+               pcibios_claim_one_bus(child_bus);
+}
+
+#ifndef CONFIG_PPC_ISERIES
+static void __init pcibios_claim_of_setup(void)
+{
+       struct pci_bus *b;
+
+       list_for_each_entry(b, &pci_root_buses, node)
+               pcibios_claim_one_bus(b);
+}
+#endif
+
+#ifdef CONFIG_PPC_MULTIPLATFORM
+static u32 get_int_prop(struct device_node *np, const char *name, u32 def)
+{
+       u32 *prop;
+       int len;
+
+       prop = (u32 *) get_property(np, name, &len);
+       if (prop && len >= 4)
+               return *prop;
+       return def;
+}
+
+static unsigned int pci_parse_of_flags(u32 addr0)
+{
+       unsigned int flags = 0;
+
+       if (addr0 & 0x02000000) {
+               flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY;
+               flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64;
+               flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M;
+               if (addr0 & 0x40000000)
+                       flags |= IORESOURCE_PREFETCH
+                                | PCI_BASE_ADDRESS_MEM_PREFETCH;
+       } else if (addr0 & 0x01000000)
+               flags = IORESOURCE_IO | PCI_BASE_ADDRESS_SPACE_IO;
+       return flags;
+}
+
+#define GET_64BIT(prop, i)     ((((u64) (prop)[(i)]) << 32) | (prop)[(i)+1])
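+
+/* Each "assigned-addresses" entry below is five 32-bit cells: cell 0 holds
+ * the space/flag bits (decoded by pci_parse_of_flags) plus the config
+ * register number in its low byte, cells 1-2 the 64-bit PCI address and
+ * cells 3-4 the 64-bit size, hence GET_64BIT(addrs, 1), GET_64BIT(addrs, 3)
+ * and the 20-byte stride.
+ */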
+
+static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev)
+{
+       u64 base, size;
+       unsigned int flags;
+       struct resource *res;
+       u32 *addrs, i;
+       int proplen;
+
+       addrs = (u32 *) get_property(node, "assigned-addresses", &proplen);
+       if (!addrs)
+               return;
+       for (; proplen >= 20; proplen -= 20, addrs += 5) {
+               flags = pci_parse_of_flags(addrs[0]);
+               if (!flags)
+                       continue;
+               base = GET_64BIT(addrs, 1);
+               size = GET_64BIT(addrs, 3);
+               if (!size)
+                       continue;
+               i = addrs[0] & 0xff;
+               if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
+                       res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
+               } else if (i == dev->rom_base_reg) {
+                       res = &dev->resource[PCI_ROM_RESOURCE];
+                       flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE;
+               } else {
+                       printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i);
+                       continue;
+               }
+               res->start = base;
+               res->end = base + size - 1;
+               res->flags = flags;
+               res->name = pci_name(dev);
+               fixup_resource(res, dev);
+       }
+}
+
+struct pci_dev *of_create_pci_dev(struct device_node *node,
+                                struct pci_bus *bus, int devfn)
+{
+       struct pci_dev *dev;
+       const char *type;
+
+       dev = kmalloc(sizeof(struct pci_dev), GFP_KERNEL);
+       if (!dev)
+               return NULL;
+       type = get_property(node, "device_type", NULL);
+       if (type == NULL)
+               type = "";
+
+       memset(dev, 0, sizeof(struct pci_dev));
+       dev->bus = bus;
+       dev->sysdata = node;
+       dev->dev.parent = bus->bridge;
+       dev->dev.bus = &pci_bus_type;
+       dev->devfn = devfn;
+       dev->multifunction = 0;         /* maybe a lie? */
+
+       dev->vendor = get_int_prop(node, "vendor-id", 0xffff);
+       dev->device = get_int_prop(node, "device-id", 0xffff);
+       dev->subsystem_vendor = get_int_prop(node, "subsystem-vendor-id", 0);
+       dev->subsystem_device = get_int_prop(node, "subsystem-id", 0);
+
+       dev->cfg_size = 256; /*pci_cfg_space_size(dev);*/
+
+       sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(bus),
+               dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
+       dev->class = get_int_prop(node, "class-code", 0);
+
+       dev->current_state = 4;         /* unknown power state */
+
+       if (!strcmp(type, "pci")) {
+               /* a PCI-PCI bridge */
+               dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
+               dev->rom_base_reg = PCI_ROM_ADDRESS1;
+       } else if (!strcmp(type, "cardbus")) {
+               dev->hdr_type = PCI_HEADER_TYPE_CARDBUS;
+       } else {
+               dev->hdr_type = PCI_HEADER_TYPE_NORMAL;
+               dev->rom_base_reg = PCI_ROM_ADDRESS;
+               dev->irq = NO_IRQ;
+               if (node->n_intrs > 0) {
+                       dev->irq = node->intrs[0].line;
+                       pci_write_config_byte(dev, PCI_INTERRUPT_LINE,
+                                             dev->irq);
+               }
+       }
+
+       pci_parse_of_addrs(node, dev);
+
+       pci_device_add(dev, bus);
+
+       /* XXX pci_scan_msi_device(dev); */
+
+       return dev;
+}
+EXPORT_SYMBOL(of_create_pci_dev);
+
+void __devinit of_scan_bus(struct device_node *node,
+                                 struct pci_bus *bus)
+{
+       struct device_node *child = NULL;
+       u32 *reg;
+       int reglen, devfn;
+       struct pci_dev *dev;
+
+       while ((child = of_get_next_child(node, child)) != NULL) {
+               reg = (u32 *) get_property(child, "reg", &reglen);
+               if (reg == NULL || reglen < 20)
+                       continue;
+               devfn = (reg[0] >> 8) & 0xff;
+               /* create a new pci_dev for this device */
+               dev = of_create_pci_dev(child, bus, devfn);
+               if (!dev)
+                       continue;
+               if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
+                   dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
+                       of_scan_pci_bridge(child, dev);
+       }
+
+       do_bus_setup(bus);
+}
+EXPORT_SYMBOL(of_scan_bus);
+
+void __devinit of_scan_pci_bridge(struct device_node *node,
+                               struct pci_dev *dev)
+{
+       struct pci_bus *bus;
+       u32 *busrange, *ranges;
+       int len, i, mode;
+       struct resource *res;
+       unsigned int flags;
+       u64 size;
+
+       /* parse bus-range property */
+       busrange = (u32 *) get_property(node, "bus-range", &len);
+       if (busrange == NULL || len != 8) {
+               printk(KERN_ERR "Can't get bus-range for PCI-PCI bridge %s\n",
+                      node->full_name);
+               return;
+       }
+       ranges = (u32 *) get_property(node, "ranges", &len);
+       if (ranges == NULL) {
+               printk(KERN_ERR "Can't get ranges for PCI-PCI bridge %s\n",
+                      node->full_name);
+               return;
+       }
+
+       bus = pci_add_new_bus(dev->bus, dev, busrange[0]);
+       if (!bus) {
+               printk(KERN_ERR "Failed to create pci bus for %s\n",
+                      node->full_name);
+               return;
+       }
+
+       bus->primary = dev->bus->number;
+       bus->subordinate = busrange[1];
+       bus->bridge_ctl = 0;
+       bus->sysdata = node;
+
+       /* parse ranges property */
+       /* PCI #address-cells == 3 and #size-cells == 2 always */
+       res = &dev->resource[PCI_BRIDGE_RESOURCES];
+       for (i = 0; i < PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES; ++i) {
+               res->flags = 0;
+               bus->resource[i] = res;
+               ++res;
+       }
+       i = 1;
+       for (; len >= 32; len -= 32, ranges += 8) {
+               flags = pci_parse_of_flags(ranges[0]);
+               size = GET_64BIT(ranges, 6);
+               if (flags == 0 || size == 0)
+                       continue;
+               if (flags & IORESOURCE_IO) {
+                       res = bus->resource[0];
+                       if (res->flags) {
+                               printk(KERN_ERR "PCI: ignoring extra I/O range"
+                                      " for bridge %s\n", node->full_name);
+                               continue;
+                       }
+               } else {
+                       if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) {
+                               printk(KERN_ERR "PCI: too many memory ranges"
+                                      " for bridge %s\n", node->full_name);
+                               continue;
+                       }
+                       res = bus->resource[i];
+                       ++i;
+               }
+               res->start = GET_64BIT(ranges, 1);
+               res->end = res->start + size - 1;
+               res->flags = flags;
+               fixup_resource(res, dev);
+       }
+       sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
+               bus->number);
+
+       mode = PCI_PROBE_NORMAL;
+       if (ppc_md.pci_probe_mode)
+               mode = ppc_md.pci_probe_mode(bus);
+       if (mode == PCI_PROBE_DEVTREE)
+               of_scan_bus(node, bus);
+       else if (mode == PCI_PROBE_NORMAL)
+               pci_scan_child_bus(bus);
+}
+EXPORT_SYMBOL(of_scan_pci_bridge);
+#endif /* CONFIG_PPC_MULTIPLATFORM */
+
+void __devinit scan_phb(struct pci_controller *hose)
+{
+       struct pci_bus *bus;
+       struct device_node *node = hose->arch_data;
+       int i, mode;
+       struct resource *res;
+
+       bus = pci_create_bus(NULL, hose->first_busno, hose->ops, node);
+       if (bus == NULL) {
+               printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
+                      hose->global_number);
+               return;
+       }
+       bus->secondary = hose->first_busno;
+       hose->bus = bus;
+
+       bus->resource[0] = res = &hose->io_resource;
+       if (res->flags && request_resource(&ioport_resource, res))
+               printk(KERN_ERR "Failed to request PCI IO region "
+                      "on PCI domain %04x\n", hose->global_number);
+
+       for (i = 0; i < 3; ++i) {
+               res = &hose->mem_resources[i];
+               bus->resource[i+1] = res;
+               if (res->flags && request_resource(&iomem_resource, res))
+                       printk(KERN_ERR "Failed to request PCI memory region "
+                              "on PCI domain %04x\n", hose->global_number);
+       }
+
+       mode = PCI_PROBE_NORMAL;
+#ifdef CONFIG_PPC_MULTIPLATFORM
+       if (ppc_md.pci_probe_mode)
+               mode = ppc_md.pci_probe_mode(bus);
+       if (mode == PCI_PROBE_DEVTREE) {
+               bus->subordinate = hose->last_busno;
+               of_scan_bus(node, bus);
+       }
+#endif /* CONFIG_PPC_MULTIPLATFORM */
+       if (mode == PCI_PROBE_NORMAL)
+               hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
+       pci_bus_add_devices(bus);
+}
+
+static int __init pcibios_init(void)
+{
+       struct pci_controller *hose, *tmp;
+
+       /* For now, override phys_mem_access_prot. If we need it,
+        * later, we may move that initialization to each ppc_md
+        */
+       ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot;
+
+#ifdef CONFIG_PPC_ISERIES
+       iSeries_pcibios_init(); 
+#endif
+
+       printk("PCI: Probing PCI hardware\n");
+
+       /* Scan all of the recorded PCI controllers.  */
+       list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
+               scan_phb(hose);
+
+#ifndef CONFIG_PPC_ISERIES
+       if (pci_probe_only)
+               pcibios_claim_of_setup();
+       else
+               /* FIXME: `else' will be removed when
+                  pci_assign_unassigned_resources() is able to work
+                  correctly with [partially] allocated PCI tree. */
+               pci_assign_unassigned_resources();
+#endif /* !CONFIG_PPC_ISERIES */
+
+       /* Call machine dependent final fixup */
+       if (ppc_md.pcibios_fixup)
+               ppc_md.pcibios_fixup();
+
+       /* Cache the location of the ISA bridge (if we have one) */
+       ppc64_isabridge_dev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
+       if (ppc64_isabridge_dev != NULL)
+               printk("ISA bridge at %s\n", pci_name(ppc64_isabridge_dev));
+
+#ifdef CONFIG_PPC_MULTIPLATFORM
+       /* map in PCI I/O space */
+       phbs_remap_io();
+#endif
+
+       printk("PCI: Probing PCI hardware done\n");
+
+       return 0;
+}
+
+subsys_initcall(pcibios_init);
+
+char __init *pcibios_setup(char *str)
+{
+       return str;
+}
+
+int pcibios_enable_device(struct pci_dev *dev, int mask)
+{
+       u16 cmd, oldcmd;
+       int i;
+
+       pci_read_config_word(dev, PCI_COMMAND, &cmd);
+       oldcmd = cmd;
+
+       for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+               struct resource *res = &dev->resource[i];
+
+               /* Only set up the requested stuff */
+               if (!(mask & (1<<i)))
+                       continue;
+
+               if (res->flags & IORESOURCE_IO)
+                       cmd |= PCI_COMMAND_IO;
+               if (res->flags & IORESOURCE_MEM)
+                       cmd |= PCI_COMMAND_MEMORY;
+       }
+
+       if (cmd != oldcmd) {
+               printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n",
+                      pci_name(dev), cmd);
+                /* Enable the appropriate bits in the PCI command register.  */
+               pci_write_config_word(dev, PCI_COMMAND, cmd);
+       }
+       return 0;
+}
+
+/*
+ * Return the domain number for this bus.
+ */
+int pci_domain_nr(struct pci_bus *bus)
+{
+#ifdef CONFIG_PPC_ISERIES
+       return 0;
+#else
+       struct pci_controller *hose = pci_bus_to_host(bus);
+
+       return hose->global_number;
+#endif
+}
+
+EXPORT_SYMBOL(pci_domain_nr);
+
+/* Decide whether to display the domain number in /proc */
+int pci_proc_domain(struct pci_bus *bus)
+{
+#ifdef CONFIG_PPC_ISERIES
+       return 0;
+#else
+       struct pci_controller *hose = pci_bus_to_host(bus);
+       return hose->buid;
+#endif
+}
+
+/*
+ * Platform support for /proc/bus/pci/X/Y mmap()s,
+ * modelled on the sparc64 implementation by Dave Miller.
+ *  -- paulus.
+ */
+
+/*
+ * Adjust vm_pgoff of VMA such that it is the physical page offset
+ * corresponding to the 32-bit pci bus offset for DEV requested by the user.
+ *
+ * Basically, the user finds the base address of the device they wish
+ * to mmap, reads the 32-bit value from the config space base register,
+ * adds whatever PAGE_SIZE multiple offset they wish, and feeds this into
+ * the offset parameter of mmap on /proc/bus/pci/XXX for that device.
+ *
+ * Returns negative error code on failure, zero on success.
+ */
+static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
+                                              unsigned long *offset,
+                                              enum pci_mmap_state mmap_state)
+{
+       struct pci_controller *hose = pci_bus_to_host(dev->bus);
+       unsigned long io_offset = 0;
+       int i, res_bit;
+
+       if (hose == 0)
+               return NULL;            /* should never happen */
+
+       /* If memory, add on the PCI bridge address offset */
+       if (mmap_state == pci_mmap_mem) {
+               *offset += hose->pci_mem_offset;
+               res_bit = IORESOURCE_MEM;
+       } else {
+               io_offset = (unsigned long)hose->io_base_virt - pci_io_base;
+               *offset += io_offset;
+               res_bit = IORESOURCE_IO;
+       }
+
+       /*
+        * Check that the offset requested corresponds to one of the
+        * resources of the device.
+        */
+       for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
+               struct resource *rp = &dev->resource[i];
+               int flags = rp->flags;
+
+               /* treat ROM as memory (should be already) */
+               if (i == PCI_ROM_RESOURCE)
+                       flags |= IORESOURCE_MEM;
+
+               /* Active and same type? */
+               if ((flags & res_bit) == 0)
+                       continue;
+
+               /* In the range of this resource? */
+               if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
+                       continue;
+
+               /* found it! construct the final physical address */
+               if (mmap_state == pci_mmap_io)
+                       *offset += hose->io_base_phys - io_offset;
+               return rp;
+       }
+
+       return NULL;
+}
+
+/*
+ * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
+ * device mapping.
+ */
+static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
+                                     pgprot_t protection,
+                                     enum pci_mmap_state mmap_state,
+                                     int write_combine)
+{
+       unsigned long prot = pgprot_val(protection);
+
+       /* Write combine is always 0 on non-memory space mappings. On
+        * memory space, if the user didn't pass 1, we check for a
+        * "prefetchable" resource. This is a bit hackish, but we use
+        * this to work around the inability of /sysfs to provide a write
+        * combine bit
+        */
+       if (mmap_state != pci_mmap_mem)
+               write_combine = 0;
+       else if (write_combine == 0) {
+               if (rp->flags & IORESOURCE_PREFETCH)
+                       write_combine = 1;
+       }
+
+       /* XXX would be nice to have a way to ask for write-through */
+       prot |= _PAGE_NO_CACHE;
+       if (write_combine)
+               prot &= ~_PAGE_GUARDED;
+       else
+               prot |= _PAGE_GUARDED;
+
+       printk("PCI map for %s:%lx, prot: %lx\n", pci_name(dev), rp->start,
+              prot);
+
+       return __pgprot(prot);
+}
+
+/*
+ * This one is used by /dev/mem and fbdev, which have no clue about the
+ * PCI device; it tries to find the PCI device first and then calls the
+ * above routine.
+ */
+pgprot_t pci_phys_mem_access_prot(struct file *file,
+                                 unsigned long pfn,
+                                 unsigned long size,
+                                 pgprot_t protection)
+{
+       struct pci_dev *pdev = NULL;
+       struct resource *found = NULL;
+       unsigned long prot = pgprot_val(protection);
+       unsigned long offset = pfn << PAGE_SHIFT;
+       int i;
+
+       if (page_is_ram(pfn))
+               return __pgprot(prot);
+
+       prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
+
+       for_each_pci_dev(pdev) {
+               for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
+                       struct resource *rp = &pdev->resource[i];
+                       int flags = rp->flags;
+
+                       /* Active and same type? */
+                       if ((flags & IORESOURCE_MEM) == 0)
+                               continue;
+                       /* In the range of this resource? */
+                       if (offset < (rp->start & PAGE_MASK) ||
+                           offset > rp->end)
+                               continue;
+                       found = rp;
+                       break;
+               }
+               if (found)
+                       break;
+       }
+       if (found) {
+               if (found->flags & IORESOURCE_PREFETCH)
+                       prot &= ~_PAGE_GUARDED;
+               pci_dev_put(pdev);
+       }
+
+       DBG("non-PCI map for %lx, prot: %lx\n", offset, prot);
+
+       return __pgprot(prot);
+}
+
+
+/*
+ * Perform the actual remap of the pages for a PCI device mapping, as
+ * appropriate for this architecture.  The region in the process to map
+ * is described by vm_start and vm_end members of VMA, the base physical
+ * address is found in vm_pgoff.
+ * The pci device structure is provided so that architectures may make mapping
+ * decisions on a per-device or per-bus basis.
+ *
+ * Returns a negative error code on failure, zero on success.
+ */
+int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+                       enum pci_mmap_state mmap_state,
+                       int write_combine)
+{
+       unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+       struct resource *rp;
+       int ret;
+
+       rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
+       if (rp == NULL)
+               return -EINVAL;
+
+       vma->vm_pgoff = offset >> PAGE_SHIFT;
+       vma->vm_flags |= VM_SHM | VM_LOCKED | VM_IO;
+       vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
+                                                 vma->vm_page_prot,
+                                                 mmap_state, write_combine);
+
+       ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+                              vma->vm_end - vma->vm_start, vma->vm_page_prot);
+
+       return ret;
+}
+
+#ifdef CONFIG_PPC_MULTIPLATFORM
+static ssize_t pci_show_devspec(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct pci_dev *pdev;
+       struct device_node *np;
+
+       pdev = to_pci_dev (dev);
+       np = pci_device_to_OF_node(pdev);
+       if (np == NULL || np->full_name == NULL)
+               return 0;
+       return sprintf(buf, "%s", np->full_name);
+}
+static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
+#endif /* CONFIG_PPC_MULTIPLATFORM */
+
+void pcibios_add_platform_entries(struct pci_dev *pdev)
+{
+#ifdef CONFIG_PPC_MULTIPLATFORM
+       device_create_file(&pdev->dev, &dev_attr_devspec);
+#endif /* CONFIG_PPC_MULTIPLATFORM */
+}
+
+#ifdef CONFIG_PPC_MULTIPLATFORM
+
+#define ISA_SPACE_MASK 0x1
+#define ISA_SPACE_IO 0x1
+
+static void __devinit pci_process_ISA_OF_ranges(struct device_node *isa_node,
+                                     unsigned long phb_io_base_phys,
+                                     void __iomem * phb_io_base_virt)
+{
+       struct isa_range *range;
+       unsigned long pci_addr;
+       unsigned int isa_addr;
+       unsigned int size;
+       int rlen = 0;
+
+       range = (struct isa_range *) get_property(isa_node, "ranges", &rlen);
+       if (range == NULL || (rlen < sizeof(struct isa_range))) {
+               printk(KERN_ERR "no ISA ranges or unexpected isa range size, "
+                      "mapping 64k\n");
+               __ioremap_explicit(phb_io_base_phys,
+                                  (unsigned long)phb_io_base_virt,
+                                  0x10000, _PAGE_NO_CACHE | _PAGE_GUARDED);
+               return; 
+       }
+       
+       /* From "ISA Binding to 1275"
+        * The ranges property is laid out as an array of elements,
+        * each of which comprises:
+        *   cells 0 - 1:       an ISA address
+        *   cells 2 - 4:       a PCI address 
+        *                      (size depending on dev->n_addr_cells)
+        *   cell 5:            the size of the range
+        */
+       if ((range->isa_addr.a_hi & ISA_SPACE_MASK) == ISA_SPACE_IO) {
+               isa_addr = range->isa_addr.a_lo;
+               pci_addr = (unsigned long) range->pci_addr.a_mid << 32 | 
+                       range->pci_addr.a_lo;
+
+               /* Assume these are both zero */
+               if ((pci_addr != 0) || (isa_addr != 0)) {
+                       printk(KERN_ERR "unexpected isa to pci mapping: %s\n",
+                                       __FUNCTION__);
+                       return;
+               }
+               
+               size = PAGE_ALIGN(range->size);
+
+               __ioremap_explicit(phb_io_base_phys, 
+                                  (unsigned long) phb_io_base_virt, 
+                                  size, _PAGE_NO_CACHE | _PAGE_GUARDED);
+       }
+}
+
+void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
+                                           struct device_node *dev, int prim)
+{
+       unsigned int *ranges, pci_space;
+       unsigned long size;
+       int rlen = 0;
+       int memno = 0;
+       struct resource *res;
+       int np, na = prom_n_addr_cells(dev);
+       unsigned long pci_addr, cpu_phys_addr;
+
+       np = na + 5;
+
+       /* From "PCI Binding to 1275"
+        * The ranges property is laid out as an array of elements,
+        * each of which comprises:
+        *   cells 0 - 2:       a PCI address
+        *   cells 3 or 3+4:    a CPU physical address
+        *                      (size depending on dev->n_addr_cells)
+        *   cells 4+5 or 5+6:  the size of the range
+        */
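+       /* For example, with na == 2 each entry is np == 7 cells: cell 0
+        * carries the space code in bits 24-25 (1 = I/O, 2 = 32-bit memory,
+        * 3 = 64-bit memory), cells 1-2 hold the 64-bit PCI address, cells
+        * 3-4 the 64-bit CPU physical address and cells 5-6 the 64-bit size,
+        * which is how the loop below walks ranges[].
+        */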
+       rlen = 0;
+       hose->io_base_phys = 0;
+       ranges = (unsigned int *) get_property(dev, "ranges", &rlen);
+       while ((rlen -= np * sizeof(unsigned int)) >= 0) {
+               res = NULL;
+               pci_space = ranges[0];
+               pci_addr = ((unsigned long)ranges[1] << 32) | ranges[2];
+
+               cpu_phys_addr = ranges[3];
+               if (na >= 2)
+                       cpu_phys_addr = (cpu_phys_addr << 32) | ranges[4];
+
+               size = ((unsigned long)ranges[na+3] << 32) | ranges[na+4];
+               ranges += np;
+               if (size == 0)
+                       continue;
+
+               /* Now consume following elements while they are contiguous */
+               while (rlen >= np * sizeof(unsigned int)) {
+                       unsigned long addr, phys;
+
+                       if (ranges[0] != pci_space)
+                               break;
+                       addr = ((unsigned long)ranges[1] << 32) | ranges[2];
+                       phys = ranges[3];
+                       if (na >= 2)
+                               phys = (phys << 32) | ranges[4];
+                       if (addr != pci_addr + size ||
+                           phys != cpu_phys_addr + size)
+                               break;
+
+                       size += ((unsigned long)ranges[na+3] << 32)
+                               | ranges[na+4];
+                       ranges += np;
+                       rlen -= np * sizeof(unsigned int);
+               }
+
+               switch ((pci_space >> 24) & 0x3) {
+               case 1:         /* I/O space */
+                       hose->io_base_phys = cpu_phys_addr;
+                       hose->pci_io_size = size;
+
+                       res = &hose->io_resource;
+                       res->flags = IORESOURCE_IO;
+                       res->start = pci_addr;
+                       DBG("phb%d: IO 0x%lx -> 0x%lx\n", hose->global_number,
+                                   res->start, res->start + size - 1);
+                       break;
+               case 2:         /* memory space */
+                       memno = 0;
+                       while (memno < 3 && hose->mem_resources[memno].flags)
+                               ++memno;
+
+                       if (memno == 0)
+                               hose->pci_mem_offset = cpu_phys_addr - pci_addr;
+                       if (memno < 3) {
+                               res = &hose->mem_resources[memno];
+                               res->flags = IORESOURCE_MEM;
+                               res->start = cpu_phys_addr;
+                               DBG("phb%d: MEM 0x%lx -> 0x%lx\n", hose->global_number,
+                                           res->start, res->start + size - 1);
+                       }
+                       break;
+               }
+               if (res != NULL) {
+                       res->name = dev->full_name;
+                       res->end = res->start + size - 1;
+                       res->parent = NULL;
+                       res->sibling = NULL;
+                       res->child = NULL;
+               }
+       }
+}
+
+void __init pci_setup_phb_io(struct pci_controller *hose, int primary)
+{
+       unsigned long size = hose->pci_io_size;
+       unsigned long io_virt_offset;
+       struct resource *res;
+       struct device_node *isa_dn;
+
+       hose->io_base_virt = reserve_phb_iospace(size);
+       DBG("phb%d io_base_phys 0x%lx io_base_virt 0x%lx\n",
+               hose->global_number, hose->io_base_phys,
+               (unsigned long) hose->io_base_virt);
+
+       if (primary) {
+               pci_io_base = (unsigned long)hose->io_base_virt;
+               isa_dn = of_find_node_by_type(NULL, "isa");
+               if (isa_dn) {
+                       isa_io_base = pci_io_base;
+                       pci_process_ISA_OF_ranges(isa_dn, hose->io_base_phys,
+                                               hose->io_base_virt);
+                       of_node_put(isa_dn);
+                       /* Allow all IO */
+                       io_page_mask = -1;
+               }
+       }
+
+       io_virt_offset = (unsigned long)hose->io_base_virt - pci_io_base;
+       res = &hose->io_resource;
+       res->start += io_virt_offset;
+       res->end += io_virt_offset;
+}
+
+void __devinit pci_setup_phb_io_dynamic(struct pci_controller *hose,
+                                       int primary)
+{
+       unsigned long size = hose->pci_io_size;
+       unsigned long io_virt_offset;
+       struct resource *res;
+
+       hose->io_base_virt = __ioremap(hose->io_base_phys, size,
+                                       _PAGE_NO_CACHE | _PAGE_GUARDED);
+       DBG("phb%d io_base_phys 0x%lx io_base_virt 0x%lx\n",
+               hose->global_number, hose->io_base_phys,
+               (unsigned long) hose->io_base_virt);
+
+       if (primary)
+               pci_io_base = (unsigned long)hose->io_base_virt;
+
+       io_virt_offset = (unsigned long)hose->io_base_virt - pci_io_base;
+       res = &hose->io_resource;
+       res->start += io_virt_offset;
+       res->end += io_virt_offset;
+}
+
+
+static int get_bus_io_range(struct pci_bus *bus, unsigned long *start_phys,
+                               unsigned long *start_virt, unsigned long *size)
+{
+       struct pci_controller *hose = pci_bus_to_host(bus);
+       struct pci_bus_region region;
+       struct resource *res;
+
+       if (bus->self) {
+               res = bus->resource[0];
+               pcibios_resource_to_bus(bus->self, &region, res);
+               *start_phys = hose->io_base_phys + region.start;
+               *start_virt = (unsigned long) hose->io_base_virt + 
+                               region.start;
+               if (region.end > region.start) 
+                       *size = region.end - region.start + 1;
+               else {
+                       printk("%s(): unexpected region 0x%lx->0x%lx\n", 
+                                       __FUNCTION__, region.start, region.end);
+                       return 1;
+               }
+               
+       } else {
+               /* Root Bus */
+               res = &hose->io_resource;
+               *start_phys = hose->io_base_phys;
+               *start_virt = (unsigned long) hose->io_base_virt;
+               if (res->end > res->start)
+                       *size = res->end - res->start + 1;
+               else {
+                       printk("%s(): unexpected region 0x%lx->0x%lx\n", 
+                                       __FUNCTION__, res->start, res->end);
+                       return 1;
+               }
+       }
+
+       return 0;
+}
+
+int unmap_bus_range(struct pci_bus *bus)
+{
+       unsigned long start_phys;
+       unsigned long start_virt;
+       unsigned long size;
+
+       if (!bus) {
+               printk(KERN_ERR "%s() expected bus\n", __FUNCTION__);
+               return 1;
+       }
+       
+       if (get_bus_io_range(bus, &start_phys, &start_virt, &size))
+               return 1;
+       if (iounmap_explicit((void __iomem *) start_virt, size))
+               return 1;
+
+       return 0;
+}
+EXPORT_SYMBOL(unmap_bus_range);
+
+int remap_bus_range(struct pci_bus *bus)
+{
+       unsigned long start_phys;
+       unsigned long start_virt;
+       unsigned long size;
+
+       if (!bus) {
+               printk(KERN_ERR "%s() expected bus\n", __FUNCTION__);
+               return 1;
+       }
+       
+       
+       if (get_bus_io_range(bus, &start_phys, &start_virt, &size))
+               return 1;
+       printk("mapping IO %lx -> %lx, size: %lx\n", start_phys, start_virt, size);
+       if (__ioremap_explicit(start_phys, start_virt, size,
+                              _PAGE_NO_CACHE | _PAGE_GUARDED))
+               return 1;
+
+       return 0;
+}
+EXPORT_SYMBOL(remap_bus_range);
+
+void phbs_remap_io(void)
+{
+       struct pci_controller *hose, *tmp;
+
+       list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
+               remap_bus_range(hose->bus);
+}
+
+/*
+ * ppc64 can have multifunction devices that do not respond to function 0.
+ * In this case we must scan all functions.
+ * XXX this can go now, we use the OF device tree in all the
+ * cases that caused problems. -- paulus
+ */
+int pcibios_scan_all_fns(struct pci_bus *bus, int devfn)
+{
+       return 0;
+}
+
+static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev)
+{
+       struct pci_controller *hose = pci_bus_to_host(dev->bus);
+       unsigned long start, end, mask, offset;
+
+       if (res->flags & IORESOURCE_IO) {
+               offset = (unsigned long)hose->io_base_virt - pci_io_base;
+
+               start = res->start += offset;
+               end = res->end += offset;
+
+               /* Need to allow IO access to pages that are in the
+                  ISA range */
+               if (start < MAX_ISA_PORT) {
+                       if (end > MAX_ISA_PORT)
+                               end = MAX_ISA_PORT;
+
+                       start >>= PAGE_SHIFT;
+                       end >>= PAGE_SHIFT;
+
+                       /* get the range of pages for the map */
+                       mask = ((1 << (end+1)) - 1) ^ ((1 << start) - 1);
+                       io_page_mask |= mask;
+               }
+       } else if (res->flags & IORESOURCE_MEM) {
+               res->start += hose->pci_mem_offset;
+               res->end += hose->pci_mem_offset;
+       }
+}
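To make the io_page_mask arithmetic in fixup_resource() concrete, here is a small worked example (the BAR values are hypothetical; PAGE_SHIFT, MAX_ISA_PORT and io_page_mask are the ones this file already uses):

/*
 * Hypothetical example: an IO BAR covering 0x2000..0x2fff with 4K pages.
 *
 *   start >> PAGE_SHIFT = 2,  end >> PAGE_SHIFT = 2
 *   mask = ((1 << (2 + 1)) - 1) ^ ((1 << 2) - 1) = 0x7 ^ 0x3 = 0x4
 *
 * so only bit 2 (the page containing the BAR) gets set in io_page_mask.
 */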
+
+void __devinit pcibios_fixup_device_resources(struct pci_dev *dev,
+                                             struct pci_bus *bus)
+{
+       /* Update device resources.  */
+       int i;
+
+       for (i = 0; i < PCI_NUM_RESOURCES; i++)
+               if (dev->resource[i].flags)
+                       fixup_resource(&dev->resource[i], dev);
+}
+EXPORT_SYMBOL(pcibios_fixup_device_resources);
+
+static void __devinit do_bus_setup(struct pci_bus *bus)
+{
+       struct pci_dev *dev;
+
+       ppc_md.iommu_bus_setup(bus);
+
+       list_for_each_entry(dev, &bus->devices, bus_list)
+               ppc_md.iommu_dev_setup(dev);
+
+       if (ppc_md.irq_bus_setup)
+               ppc_md.irq_bus_setup(bus);
+}
+
+void __devinit pcibios_fixup_bus(struct pci_bus *bus)
+{
+       struct pci_dev *dev = bus->self;
+
+       if (dev && pci_probe_only &&
+           (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
+               /* This is a subordinate bridge */
+
+               pci_read_bridge_bases(bus);
+               pcibios_fixup_device_resources(dev, bus);
+       }
+
+       do_bus_setup(bus);
+
+       if (!pci_probe_only)
+               return;
+
+       list_for_each_entry(dev, &bus->devices, bus_list)
+               if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
+                       pcibios_fixup_device_resources(dev, bus);
+}
+EXPORT_SYMBOL(pcibios_fixup_bus);
+
+/*
+ * Reads the interrupt pin to determine whether the card uses an interrupt.
+ * If it does, fetch the interrupt line from the Open Firmware device tree
+ * and set it both in the pci_dev and in the PCI config-space interrupt line.
+ */
+int pci_read_irq_line(struct pci_dev *pci_dev)
+{
+       u8 intpin;
+       struct device_node *node;
+
+       pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &intpin);
+       if (intpin == 0)
+               return 0;
+
+       node = pci_device_to_OF_node(pci_dev);
+       if (node == NULL)
+               return -1;
+
+       if (node->n_intrs == 0)
+               return -1;
+
+       pci_dev->irq = node->intrs[0].line;
+
+       pci_write_config_byte(pci_dev, PCI_INTERRUPT_LINE, pci_dev->irq);
+
+       return 0;
+}
+EXPORT_SYMBOL(pci_read_irq_line);
+
+void pci_resource_to_user(const struct pci_dev *dev, int bar,
+                         const struct resource *rsrc,
+                         u64 *start, u64 *end)
+{
+       struct pci_controller *hose = pci_bus_to_host(dev->bus);
+       unsigned long offset = 0;
+
+       if (hose == NULL)
+               return;
+
+       if (rsrc->flags & IORESOURCE_IO)
+               offset = pci_io_base - (unsigned long)hose->io_base_virt +
+                       hose->io_base_phys;
+
+       *start = rsrc->start + offset;
+       *end = rsrc->end + offset;
+}
+
+#endif /* CONFIG_PPC_MULTIPLATFORM */
+
+
+#define IOBASE_BRIDGE_NUMBER   0
+#define IOBASE_MEMORY          1
+#define IOBASE_IO              2
+#define IOBASE_ISA_IO          3
+#define IOBASE_ISA_MEM         4
+
+long sys_pciconfig_iobase(long which, unsigned long in_bus,
+                         unsigned long in_devfn)
+{
+       struct pci_controller* hose;
+       struct list_head *ln;
+       struct pci_bus *bus = NULL;
+       struct device_node *hose_node;
+
+       /* Argh ! Please forgive me for that hack, but that's the
+        * simplest way to get existing XFree to not lock up on some
+        * G5 machines... So when something asks for bus 0 io base
+        * (bus 0 is HT root), we return the AGP one instead.
+        */
+       if (machine_is_compatible("MacRISC4"))
+               if (in_bus == 0)
+                       in_bus = 0xf0;
+
+       /* That syscall isn't quite compatible with PCI domains, but it's
+        * used on pre-domain setups, so we return the first match.
+        */
+
+       for (ln = pci_root_buses.next; ln != &pci_root_buses; ln = ln->next) {
+               bus = pci_bus_b(ln);
+               if (in_bus >= bus->number && in_bus < (bus->number + bus->subordinate))
+                       break;
+               bus = NULL;
+       }
+       if (bus == NULL || bus->sysdata == NULL)
+               return -ENODEV;
+
+       hose_node = (struct device_node *)bus->sysdata;
+       hose = PCI_DN(hose_node)->phb;
+
+       switch (which) {
+       case IOBASE_BRIDGE_NUMBER:
+               return (long)hose->first_busno;
+       case IOBASE_MEMORY:
+               return (long)hose->pci_mem_offset;
+       case IOBASE_IO:
+               return (long)hose->io_base_phys;
+       case IOBASE_ISA_IO:
+               return (long)isa_io_base;
+       case IOBASE_ISA_MEM:
+               return -EINVAL;
+       }
+
+       return -EOPNOTSUPP;
+}
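For context, sys_pciconfig_iobase() is reached from user space through the powerpc pciconfig_iobase system call. The sketch below is a hypothetical caller, not part of this patch; it assumes the toolchain headers provide __NR_pciconfig_iobase, and it uses the literal 3 to match the IOBASE_ISA_IO selector defined above:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

/* Hypothetical user-space probe: ask for the ISA IO base of bus 0. */
int main(void)
{
	long base = syscall(__NR_pciconfig_iobase, 3 /* IOBASE_ISA_IO */, 0, 0);

	if (base < 0)
		perror("pciconfig_iobase");
	else
		printf("ISA IO base: 0x%lx\n", base);
	return 0;
}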
diff --git a/arch/powerpc/kernel/pci_direct_iommu.c b/arch/powerpc/kernel/pci_direct_iommu.c
new file mode 100644 (file)
index 0000000..e1a32f8
--- /dev/null
@@ -0,0 +1,94 @@
+/*
+ * Support for DMA from PCI devices to main memory on
+ * machines without an iommu or with directly addressable
+ * RAM (typically a pmac with 2GB of RAM or less)
+ *
+ * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/sections.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/pci-bridge.h>
+#include <asm/machdep.h>
+#include <asm/pmac_feature.h>
+#include <asm/abs_addr.h>
+#include <asm/ppc-pci.h>
+
+static void *pci_direct_alloc_coherent(struct device *hwdev, size_t size,
+                                  dma_addr_t *dma_handle, gfp_t flag)
+{
+       void *ret;
+
+       ret = (void *)__get_free_pages(flag, get_order(size));
+       if (ret != NULL) {
+               memset(ret, 0, size);
+               *dma_handle = virt_to_abs(ret);
+       }
+       return ret;
+}
+
+static void pci_direct_free_coherent(struct device *hwdev, size_t size,
+                                void *vaddr, dma_addr_t dma_handle)
+{
+       free_pages((unsigned long)vaddr, get_order(size));
+}
+
+static dma_addr_t pci_direct_map_single(struct device *hwdev, void *ptr,
+               size_t size, enum dma_data_direction direction)
+{
+       return virt_to_abs(ptr);
+}
+
+static void pci_direct_unmap_single(struct device *hwdev, dma_addr_t dma_addr,
+               size_t size, enum dma_data_direction direction)
+{
+}
+
+static int pci_direct_map_sg(struct device *hwdev, struct scatterlist *sg,
+               int nents, enum dma_data_direction direction)
+{
+       int i;
+
+       for (i = 0; i < nents; i++, sg++) {
+               sg->dma_address = page_to_phys(sg->page) + sg->offset;
+               sg->dma_length = sg->length;
+       }
+
+       return nents;
+}
+
+static void pci_direct_unmap_sg(struct device *hwdev, struct scatterlist *sg,
+               int nents, enum dma_data_direction direction)
+{
+}
+
+static int pci_direct_dma_supported(struct device *dev, u64 mask)
+{
+       return mask < 0x100000000ull;
+}
+
+void __init pci_direct_iommu_init(void)
+{
+       pci_dma_ops.alloc_coherent = pci_direct_alloc_coherent;
+       pci_dma_ops.free_coherent = pci_direct_free_coherent;
+       pci_dma_ops.map_single = pci_direct_map_single;
+       pci_dma_ops.unmap_single = pci_direct_unmap_single;
+       pci_dma_ops.map_sg = pci_direct_map_sg;
+       pci_dma_ops.unmap_sg = pci_direct_unmap_sg;
+       pci_dma_ops.dma_supported = pci_direct_dma_supported;
+}
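A minimal sketch of what the direct ops amount to for a driver, assuming a hypothetical struct pci_dev *pdev and the ops installed by pci_direct_iommu_init() above (pci_direct_example is a made-up name, not part of this patch):

static void pci_direct_example(struct pci_dev *pdev)
{
	void *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	dma_addr_t handle;

	if (!buf)
		return;

	/* With the direct ops, the handle is simply the absolute (physical)
	 * address of the buffer, and unmapping is a no-op. */
	handle = pci_dma_ops.map_single(&pdev->dev, buf, PAGE_SIZE,
					DMA_TO_DEVICE);
	pci_dma_ops.unmap_single(&pdev->dev, handle, PAGE_SIZE, DMA_TO_DEVICE);
	kfree(buf);
}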
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
new file mode 100644 (file)
index 0000000..12c4c9e
--- /dev/null
@@ -0,0 +1,230 @@
+/*
+ * pci_dn.c
+ *
+ * Copyright (C) 2001 Todd Inglett, IBM Corporation
+ *
+ * PCI manipulation via device_nodes.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *    
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ * 
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/bootmem.h>
+
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/pci-bridge.h>
+#include <asm/pSeries_reconfig.h>
+#include <asm/ppc-pci.h>
+
+/*
+ * Traverse_func that initialises the PCI fields of the device node.
+ * NOTE: this *must* be done before any config read/write to the device.
+ */
+static void * __devinit update_dn_pci_info(struct device_node *dn, void *data)
+{
+       struct pci_controller *phb = data;
+       int *type = (int *)get_property(dn, "ibm,pci-config-space-type", NULL);
+       u32 *regs;
+       struct pci_dn *pdn;
+
+       if (mem_init_done)
+               pdn = kmalloc(sizeof(*pdn), GFP_KERNEL);
+       else
+               pdn = alloc_bootmem(sizeof(*pdn));
+       if (pdn == NULL)
+               return NULL;
+       memset(pdn, 0, sizeof(*pdn));
+       dn->data = pdn;
+       pdn->node = dn;
+       pdn->phb = phb;
+       regs = (u32 *)get_property(dn, "reg", NULL);
+       if (regs) {
+               /* First register entry is addr (00BBSS00)  */
+               pdn->busno = (regs[0] >> 16) & 0xff;
+               pdn->devfn = (regs[0] >> 8) & 0xff;
+       }
+
+       pdn->pci_ext_config_space = (type && *type == 1);
+       return NULL;
+}
+
+/*
+ * Traverse a device tree, stopping at each PCI device in the tree.
+ * This is done depth first.  As each node is processed, a "pre"
+ * function is called and the children are processed recursively.
+ *
+ * The "pre" func returns a value.  If it returns a non-NULL value,
+ * the traversal stops and that value is returned.  This return
+ * value is useful when using the traversal as a way of finding a
+ * device.
+ *
+ * NOTE: we do not run the func for devices that do not appear to
+ * be PCI, except for the start node, which we assume is PCI (this
+ * is good because the start node is often a phb which may be
+ * missing PCI properties).
+ * We use the class-code as the indicator.  If we run into one of
+ * these non-PCI nodes we also assume its siblings are non-PCI, for
+ * performance.
+ */
+void *traverse_pci_devices(struct device_node *start, traverse_func pre,
+               void *data)
+{
+       struct device_node *dn, *nextdn;
+       void *ret;
+
+       /* We started with a phb, iterate over all its children */
+       for (dn = start->child; dn; dn = nextdn) {
+               u32 *classp, class;
+
+               nextdn = NULL;
+               classp = (u32 *)get_property(dn, "class-code", NULL);
+               class = classp ? *classp : 0;
+
+               if (pre && ((ret = pre(dn, data)) != NULL))
+                       return ret;
+
+               /* If we are a PCI bridge, go down */
+               if (dn->child && ((class >> 8) == PCI_CLASS_BRIDGE_PCI ||
+                                 (class >> 8) == PCI_CLASS_BRIDGE_CARDBUS))
+                       /* Depth first...do children */
+                       nextdn = dn->child;
+               else if (dn->sibling)
+                       /* ok, try next sibling instead. */
+                       nextdn = dn->sibling;
+               if (!nextdn) {
+                       /* Walk up to next valid sibling. */
+                       do {
+                               dn = dn->parent;
+                               if (dn == start)
+                                       return NULL;
+                       } while (dn->sibling == NULL);
+                       nextdn = dn->sibling;
+               }
+       }
+       return NULL;
+}
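As a usage illustration of the traversal API documented above, here is a hypothetical "pre" callback that just counts the PCI device nodes under a PHB (count_node and count_pci_nodes are made-up names for illustration, not part of this patch):

/* Returning NULL keeps the traversal going; any non-NULL value stops it. */
static void *count_node(struct device_node *dn, void *data)
{
	(*(int *)data)++;
	return NULL;
}

static int count_pci_nodes(struct pci_controller *phb)
{
	int n = 0;

	traverse_pci_devices((struct device_node *)phb->arch_data,
			     count_node, &n);
	return n;
}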
+
+/** 
+ * pci_devs_phb_init_dynamic - setup pci devices under this PHB
+ * @phb: pci-to-host bridge (top-level bridge connecting to the cpu)
+ *
+ * This routine is called both during boot (before the memory
+ * subsystem is set up, before kmalloc is valid) and during the
+ * dynamic LPAR operation of adding a PHB to a running system.
+ */
+void __devinit pci_devs_phb_init_dynamic(struct pci_controller *phb)
+{
+       struct device_node * dn = (struct device_node *) phb->arch_data;
+       struct pci_dn *pdn;
+
+       /* PHB nodes themselves must not match */
+       update_dn_pci_info(dn, phb);
+       pdn = dn->data;
+       if (pdn) {
+               pdn->devfn = pdn->busno = -1;
+               pdn->phb = phb;
+       }
+
+       /* Update dn->phb ptrs for new phb and children devices */
+       traverse_pci_devices(dn, update_dn_pci_info, phb);
+}
+
+/*
+ * Traversal func that looks for a <busno,devfn> value.
+ * If found, the device_node is returned (thus terminating the traversal).
+ */
+static void *is_devfn_node(struct device_node *dn, void *data)
+{
+       int busno = ((unsigned long)data >> 8) & 0xff;
+       int devfn = ((unsigned long)data) & 0xff;
+       struct pci_dn *pci = dn->data;
+
+       if (pci && (devfn == pci->devfn) && (busno == pci->busno))
+               return dn;
+       return NULL;
+}
+
+/*
+ * This is the "slow" path for looking up a device_node from a
+ * pci_dev.  It will hunt for the device under its parent's
+ * phb and then update sysdata for a future fastpath.
+ *
+ * It may also do fixups on the actual device since this happens
+ * on the first read/write.
+ *
+ * Note that it also must deal with devices that don't exist.
+ * In this case it may probe for real hardware ("just in case")
+ * and add a device_node to the device tree if necessary.
+ *
+ */
+struct device_node *fetch_dev_dn(struct pci_dev *dev)
+{
+       struct device_node *orig_dn = dev->sysdata;
+       struct device_node *dn;
+       unsigned long searchval = (dev->bus->number << 8) | dev->devfn;
+
+       dn = traverse_pci_devices(orig_dn, is_devfn_node, (void *)searchval);
+       if (dn)
+               dev->sysdata = dn;
+       return dn;
+}
+EXPORT_SYMBOL(fetch_dev_dn);
+
+static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *node)
+{
+       struct device_node *np = node;
+       struct pci_dn *pci = NULL;
+       int err = NOTIFY_OK;
+
+       switch (action) {
+       case PSERIES_RECONFIG_ADD:
+               pci = np->parent->data;
+               if (pci)
+                       update_dn_pci_info(np, pci->phb);
+               break;
+       default:
+               err = NOTIFY_DONE;
+               break;
+       }
+       return err;
+}
+
+static struct notifier_block pci_dn_reconfig_nb = {
+       .notifier_call = pci_dn_reconfig_notifier,
+};
+
+/** 
+ * pci_devs_phb_init - Initialize phbs and pci devs under them.
+ * 
+ * This routine walks over all phb's (pci-host bridges) on the
+ * system, and sets up assorted pci-related structures 
+ * (including pci info in the device node structs) for each
+ * pci device found underneath.  This routine runs once,
+ * early in the boot sequence.
+ */
+void __init pci_devs_phb_init(void)
+{
+       struct pci_controller *phb, *tmp;
+
+       /* This must be done first so the device nodes have valid pci info! */
+       list_for_each_entry_safe(phb, tmp, &hose_list, list_node)
+               pci_devs_phb_init_dynamic(phb);
+
+       pSeries_reconfig_notifier_register(&pci_dn_reconfig_nb);
+}
diff --git a/arch/powerpc/kernel/pci_iommu.c b/arch/powerpc/kernel/pci_iommu.c
new file mode 100644 (file)
index 0000000..bdf15db
--- /dev/null
@@ -0,0 +1,128 @@
+/*
+ * arch/ppc64/kernel/pci_iommu.c
+ * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
+ *
+ * Rewrite, cleanup, new allocation schemes:
+ * Copyright (C) 2004 Olof Johansson, IBM Corporation
+ *
+ * Dynamic DMA mapping support, platform-independent parts.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/iommu.h>
+#include <asm/pci-bridge.h>
+#include <asm/machdep.h>
+#include <asm/ppc-pci.h>
+
+/*
+ * We can use ->sysdata directly and avoid the extra work in
+ * pci_device_to_OF_node since ->sysdata will have been initialised
+ * in the iommu init code for all devices.
+ */
+#define PCI_GET_DN(dev) ((struct device_node *)((dev)->sysdata))
+
+static inline struct iommu_table *devnode_table(struct device *dev)
+{
+       struct pci_dev *pdev;
+
+       if (!dev) {
+               pdev = ppc64_isabridge_dev;
+               if (!pdev)
+                       return NULL;
+       } else
+               pdev = to_pci_dev(dev);
+
+       return PCI_DN(PCI_GET_DN(pdev))->iommu_table;
+}
+
+
+/* Allocates a contiguous real buffer and creates mappings over it.
+ * Returns the virtual address of the buffer and sets dma_handle
+ * to the dma address (mapping) of the first page.
+ */
+static void *pci_iommu_alloc_coherent(struct device *hwdev, size_t size,
+                          dma_addr_t *dma_handle, gfp_t flag)
+{
+       return iommu_alloc_coherent(devnode_table(hwdev), size, dma_handle,
+                       flag);
+}
+
+static void pci_iommu_free_coherent(struct device *hwdev, size_t size,
+                        void *vaddr, dma_addr_t dma_handle)
+{
+       iommu_free_coherent(devnode_table(hwdev), size, vaddr, dma_handle);
+}
+
+/* Creates TCEs for a user provided buffer.  The user buffer must be 
+ * contiguous real kernel storage (not vmalloc).  The address of the buffer
+ * passed here is the kernel (virtual) address of the buffer.  The buffer
+ * need not be page aligned, the dma_addr_t returned will point to the same
+ * byte within the page as vaddr.
+ */
+static dma_addr_t pci_iommu_map_single(struct device *hwdev, void *vaddr,
+               size_t size, enum dma_data_direction direction)
+{
+       return iommu_map_single(devnode_table(hwdev), vaddr, size, direction);
+}
+
+
+static void pci_iommu_unmap_single(struct device *hwdev, dma_addr_t dma_handle,
+               size_t size, enum dma_data_direction direction)
+{
+       iommu_unmap_single(devnode_table(hwdev), dma_handle, size, direction);
+}
+
+
+static int pci_iommu_map_sg(struct device *pdev, struct scatterlist *sglist,
+               int nelems, enum dma_data_direction direction)
+{
+       return iommu_map_sg(pdev, devnode_table(pdev), sglist,
+                       nelems, direction);
+}
+
+static void pci_iommu_unmap_sg(struct device *pdev, struct scatterlist *sglist,
+               int nelems, enum dma_data_direction direction)
+{
+       iommu_unmap_sg(devnode_table(pdev), sglist, nelems, direction);
+}
+
+/* We support DMA to/from any memory page via the iommu */
+static int pci_iommu_dma_supported(struct device *dev, u64 mask)
+{
+       return 1;
+}
+
+void pci_iommu_init(void)
+{
+       pci_dma_ops.alloc_coherent = pci_iommu_alloc_coherent;
+       pci_dma_ops.free_coherent = pci_iommu_free_coherent;
+       pci_dma_ops.map_single = pci_iommu_map_single;
+       pci_dma_ops.unmap_single = pci_iommu_unmap_single;
+       pci_dma_ops.map_sg = pci_iommu_map_sg;
+       pci_dma_ops.unmap_sg = pci_iommu_unmap_sg;
+       pci_dma_ops.dma_supported = pci_iommu_dma_supported;
+}
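The seven pointers filled in above are consumed by the generic DMA API wrappers provided elsewhere in the arch code (dma_64.c in this series). The following is a simplified, assumed sketch of that dispatch, shown only to make the indirection clear; it is not part of this file:

dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
			  enum dma_data_direction direction)
{
	/* Whichever *_init() ran (iommu or direct) decides what happens. */
	return pci_dma_ops.map_single(dev, ptr, size, direction);
}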
index e7ca5b1f591e2977dd5d2162747086400b82a4f1..06d5ef501218c8d5f1a638a0906d859a5cc083f8 100644 (file)
@@ -4,4 +4,7 @@ obj-$(CONFIG_SMP)       += smp.o
 obj-$(CONFIG_IBMVIO)   += vio.o
 obj-$(CONFIG_XICS)     += xics.o
 obj-$(CONFIG_SCANLOG)  += scanlog.o
-obj-$(CONFIG_EEH)    += eeh.o eeh_event.o
+obj-$(CONFIG_EEH)      += eeh.o eeh_event.o
+
+obj-$(CONFIG_HVC_CONSOLE)      += hvconsole.o
+obj-$(CONFIG_HVCS)             += hvcserver.o
diff --git a/arch/powerpc/platforms/pseries/hvconsole.c b/arch/powerpc/platforms/pseries/hvconsole.c
new file mode 100644 (file)
index 0000000..138e128
--- /dev/null
@@ -0,0 +1,74 @@
+/*
+ * hvconsole.c
+ * Copyright (C) 2004 Hollis Blanchard, IBM Corporation
+ * Copyright (C) 2004 IBM Corporation
+ *
+ * Additional Author(s):
+ *  Ryan S. Arnold <rsa@us.ibm.com>
+ *
+ * LPAR console support.
+ * 
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ * 
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ * 
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/hvcall.h>
+#include <asm/hvconsole.h>
+
+/**
+ * hvc_get_chars - retrieve characters from firmware for the denoted vterm adapter
+ * @vtermno: The vtermno or unit_address of the adapter from which to fetch the
+ *     data.
+ * @buf: The character buffer into which to put the character data fetched from
+ *     firmware.
+ * @count: not used?
+ */
+int hvc_get_chars(uint32_t vtermno, char *buf, int count)
+{
+       unsigned long got;
+
+       if (plpar_hcall(H_GET_TERM_CHAR, vtermno, 0, 0, 0, &got,
+               (unsigned long *)buf, (unsigned long *)buf+1) == H_Success)
+               return got;
+       return 0;
+}
+
+EXPORT_SYMBOL(hvc_get_chars);
+
+
+/**
+ * hvc_put_chars - send characters to firmware for the denoted vterm adapter
+ * @vtermno: The vtermno or unit_address of the adapter from which the data
+ *     originated.
+ * @buf: The character buffer that contains the character data to send to
+ *     firmware.
+ * @count: Send this number of characters.
+ */
+int hvc_put_chars(uint32_t vtermno, const char *buf, int count)
+{
+       unsigned long *lbuf = (unsigned long *) buf;
+       long ret;
+
+       ret = plpar_hcall_norets(H_PUT_TERM_CHAR, vtermno, count, lbuf[0],
+                                lbuf[1]);
+       if (ret == H_Success)
+               return count;
+       if (ret == H_Busy)
+               return 0;
+       return -EIO;
+}
+
+EXPORT_SYMBOL(hvc_put_chars);
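A hypothetical polled writer built on hvc_put_chars(), to show the return-value convention (count on success, 0 when the hypervisor is busy, -EIO on hard error). The helper name and the 16-byte staging buffer are assumptions for illustration; the staging matters because the hcall consumes two longs regardless of count:

static void hv_console_write(uint32_t vtermno, const char *s, int n)
{
	char chunk[16];
	int len, sent;

	while (n > 0) {
		len = n > 16 ? 16 : n;
		memset(chunk, 0, sizeof(chunk));
		memcpy(chunk, s, len);
		sent = hvc_put_chars(vtermno, chunk, len);
		if (sent < 0)
			break;		/* hard error (-EIO), give up */
		if (sent == 0)
			continue;	/* hypervisor busy, just retry */
		s += sent;
		n -= sent;
	}
}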
diff --git a/arch/powerpc/platforms/pseries/hvcserver.c b/arch/powerpc/platforms/pseries/hvcserver.c
new file mode 100644 (file)
index 0000000..4d58417
--- /dev/null
@@ -0,0 +1,251 @@
+/*
+ * hvcserver.c
+ * Copyright (C) 2004 Ryan S Arnold, IBM Corporation
+ *
+ * PPC64 virtual I/O console server support.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <asm/hvcall.h>
+#include <asm/hvcserver.h>
+#include <asm/io.h>
+
+#define HVCS_ARCH_VERSION "1.0.0"
+
+MODULE_AUTHOR("Ryan S. Arnold <rsa@us.ibm.com>");
+MODULE_DESCRIPTION("IBM hvcs ppc64 API");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(HVCS_ARCH_VERSION);
+
+/*
+ * Convert arch specific return codes into relevant errnos.  The hvcs
+ * functions aren't performance sensitive, so this conversion isn't an
+ * issue.
+ */
+int hvcs_convert(long to_convert)
+{
+       switch (to_convert) {
+               case H_Success:
+                       return 0;
+               case H_Parameter:
+                       return -EINVAL;
+               case H_Hardware:
+                       return -EIO;
+               case H_Busy:
+               case H_LongBusyOrder1msec:
+               case H_LongBusyOrder10msec:
+               case H_LongBusyOrder100msec:
+               case H_LongBusyOrder1sec:
+               case H_LongBusyOrder10sec:
+               case H_LongBusyOrder100sec:
+                       return -EBUSY;
+               case H_Function: /* fall through */
+               default:
+                       return -EPERM;
+       }
+}
+
+/**
+ * hvcs_free_partner_info - free pi allocated by hvcs_get_partner_info
+ * @head: list_head pointer for an allocated list of partner info structs to
+ *     free.
+ *
+ * This function is used to free the partner info list that was returned by
+ * calling hvcs_get_partner_info().
+ */
+int hvcs_free_partner_info(struct list_head *head)
+{
+       struct hvcs_partner_info *pi;
+       struct list_head *element;
+
+       if (!head)
+               return -EINVAL;
+
+       while (!list_empty(head)) {
+               element = head->next;
+               pi = list_entry(element, struct hvcs_partner_info, node);
+               list_del(element);
+               kfree(pi);
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(hvcs_free_partner_info);
+
+/* Helper function for hvcs_get_partner_info */
+int hvcs_next_partner(uint32_t unit_address,
+               unsigned long last_p_partition_ID,
+               unsigned long last_p_unit_address, unsigned long *pi_buff)
+
+{
+       long retval;
+       retval = plpar_hcall_norets(H_VTERM_PARTNER_INFO, unit_address,
+                       last_p_partition_ID,
+                               last_p_unit_address, virt_to_phys(pi_buff));
+       return hvcs_convert(retval);
+}
+
+/**
+ * hvcs_get_partner_info - Get all of the partner info for a vty-server adapter
+ * @unit_address: The unit_address of the vty-server adapter for which this
+ *     function is fetching partner info.
+ * @head: An initialized list_head pointer to an empty list to use to return the
+ *     list of partner info fetched from the hypervisor to the caller.
+ * @pi_buff: A page sized buffer pre-allocated prior to calling this function
+ *     that is to be used by firmware as an iterator to keep track
+ *     of the partner info retrieval.
+ *
+ * This function returns zero on success, or when there is no partner info;
+ * it returns a negative errno on failure.
+ *
+ * The pi_buff is pre-allocated prior to calling this function because this
+ * function may be called with a spin_lock held, and a page sized kmalloc
+ * with GFP_ATOMIC is not recommended.
+ *
+ * The first long of this buffer is used to store a partner unit address.  The
+ * second long is used to store a partner partition ID and starting at
+ * pi_buff[2] is the 79 character Converged Location Code (a different size
+ * than the unsigned longs, hence the casting mumbo jumbo you see later).
+ *
+ * Invocation of this function should always be followed by an invocation of
+ * hvcs_free_partner_info() using a pointer to the SAME list head instance
+ * that was passed as a parameter to this function.
+ */
+int hvcs_get_partner_info(uint32_t unit_address, struct list_head *head,
+               unsigned long *pi_buff)
+{
+       /*
+        * Dealt with as longs because of the hcall interface even though the
+        * values are uint32_t.
+        */
+       unsigned long   last_p_partition_ID;
+       unsigned long   last_p_unit_address;
+       struct hvcs_partner_info *next_partner_info = NULL;
+       int more = 1;
+       int retval;
+
+       memset(pi_buff, 0x00, PAGE_SIZE);
+       /* invalid parameters */
+       if (!head || !pi_buff)
+               return -EINVAL;
+
+       last_p_partition_ID = last_p_unit_address = ~0UL;
+       INIT_LIST_HEAD(head);
+
+       do {
+               retval = hvcs_next_partner(unit_address, last_p_partition_ID,
+                               last_p_unit_address, pi_buff);
+               if (retval) {
+                       /*
+                        * Don't indicate that we've failed if we have
+                        * any list elements.
+                        */
+                       if (!list_empty(head))
+                               return 0;
+                       return retval;
+               }
+
+               last_p_partition_ID = pi_buff[0];
+               last_p_unit_address = pi_buff[1];
+
+               /* This indicates that there are no further partners */
+               if (last_p_partition_ID == ~0UL
+                               && last_p_unit_address == ~0UL)
+                       break;
+
+               /* This is a very small struct and will be freed soon in
+                * hvcs_free_partner_info(). */
+               next_partner_info = kmalloc(sizeof(struct hvcs_partner_info),
+                               GFP_ATOMIC);
+
+               if (!next_partner_info) {
+                       printk(KERN_WARNING "HVCONSOLE: kmalloc() failed to"
+                               " allocate partner info struct.\n");
+                       hvcs_free_partner_info(head);
+                       return -ENOMEM;
+               }
+
+               next_partner_info->unit_address
+                       = (unsigned int)last_p_unit_address;
+               next_partner_info->partition_ID
+                       = (unsigned int)last_p_partition_ID;
+
+               /* copy the Null-term char too */
+               strncpy(&next_partner_info->location_code[0],
+                       (char *)&pi_buff[2],
+                       strlen((char *)&pi_buff[2]) + 1);
+
+               list_add_tail(&(next_partner_info->node), head);
+               next_partner_info = NULL;
+
+       } while (more);
+
+       return 0;
+}
+EXPORT_SYMBOL(hvcs_get_partner_info);
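A hypothetical caller of the two functions above, showing the expected allocate / fetch / walk / free sequence. The helper name and printk text are illustrative only; the struct hvcs_partner_info fields are the ones filled in by this file:

static int show_partners(uint32_t unit_address)
{
	unsigned long *pi_buff = (unsigned long *)__get_free_page(GFP_KERNEL);
	struct hvcs_partner_info *pi;
	LIST_HEAD(head);
	int ret;

	if (!pi_buff)
		return -ENOMEM;

	ret = hvcs_get_partner_info(unit_address, &head, pi_buff);
	if (!ret)
		list_for_each_entry(pi, &head, node)
			printk(KERN_INFO "partner %u:%u at %s\n",
			       pi->partition_ID, pi->unit_address,
			       pi->location_code);

	hvcs_free_partner_info(&head);
	free_page((unsigned long)pi_buff);
	return ret;
}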
+
+/**
+ * hvcs_register_connection - establish a connection between this vty-server and
+ *     a vty.
+ * @unit_address: The unit address of the vty-server adapter that is to
+ *     establish the connection.
+ * @p_partition_ID: The partition ID of the vty adapter that is to be connected.
+ * @p_unit_address: The unit address of the vty adapter to which the vty-server
+ *     is to be connected.
+ *
+ * If this function is called once and -EINVAL is returned it may
+ * indicate that the partner info needs to be refreshed for the
+ * target unit address at which point the caller must invoke
+ * hvcs_get_partner_info() and then call this function again.  If,
+ * for a second time, -EINVAL is returned then it indicates that
+ * there is probably already a partner connection registered to a
+ * different vty-server adapter.  It is also possible that a second
+ * -EINVAL may indicate that one of the parms is not valid, for
+ * instance if the link was removed between the vty-server adapter
+ * and the vty adapter that you are trying to open.  Don't shoot the
+ * messenger.  Firmware implemented it this way.
+ */
+int hvcs_register_connection( uint32_t unit_address,
+               uint32_t p_partition_ID, uint32_t p_unit_address)
+{
+       long retval;
+       retval = plpar_hcall_norets(H_REGISTER_VTERM, unit_address,
+                               p_partition_ID, p_unit_address);
+       return hvcs_convert(retval);
+}
+EXPORT_SYMBOL(hvcs_register_connection);
+
+/**
+ * hvcs_free_connection - free the connection between a vty-server and vty
+ * @unit_address: The unit address of the vty-server that is to have its
+ *     connection severed.
+ *
+ * This function is used to free the partner connection between a vty-server
+ * adapter and a vty adapter.
+ *
+ * If -EBUSY is returned continue to call this function until 0 is returned.
+ */
+int hvcs_free_connection(uint32_t unit_address)
+{
+       long retval;
+       retval = plpar_hcall_norets(H_FREE_VTERM, unit_address);
+       return hvcs_convert(retval);
+}
+EXPORT_SYMBOL(hvcs_free_connection);
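To make the -EINVAL and -EBUSY conventions described above concrete, here is a hypothetical connect-and-disconnect helper. The names and the single-retry policy are assumptions for illustration, not part of this patch:

static int hvcs_connect_partner(uint32_t unit, uint32_t p_partition_ID,
				uint32_t p_unit, struct list_head *head,
				unsigned long *pi_buff)
{
	int ret = hvcs_register_connection(unit, p_partition_ID, p_unit);

	if (ret == -EINVAL) {
		/* Partner info may be stale: refresh it and retry once. */
		hvcs_get_partner_info(unit, head, pi_buff);
		ret = hvcs_register_connection(unit, p_partition_ID, p_unit);
	}
	return ret;
}

static void hvcs_disconnect_partner(uint32_t unit)
{
	/* Per the comment above, keep calling until 0 is returned. */
	while (hvcs_free_connection(unit) == -EBUSY)
		cpu_relax();
}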
diff --git a/arch/ppc64/Kconfig b/arch/ppc64/Kconfig
deleted file mode 100644 (file)
index 9d10c12..0000000
+++ /dev/null
@@ -1,520 +0,0 @@
-#
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-#
-
-config 64BIT
-       def_bool y
-
-config MMU
-       bool
-       default y
-
-config PPC_STD_MMU
-       def_bool y
-
-config UID16
-       bool
-
-config RWSEM_GENERIC_SPINLOCK
-       bool
-
-config RWSEM_XCHGADD_ALGORITHM
-       bool
-       default y
-
-config GENERIC_CALIBRATE_DELAY
-       bool
-       default y
-
-config GENERIC_ISA_DMA
-       bool
-       default y
-
-config EARLY_PRINTK
-       bool
-       default y
-
-config COMPAT
-       bool
-       default y
-
-config SCHED_NO_NO_OMIT_FRAME_POINTER
-       bool
-       default y
-
-config ARCH_MAY_HAVE_PC_FDC
-       bool
-       default y
-
-config PPC_STD_MMU
-       bool
-       default y
-
-# We optimistically allocate largepages from the VM, so make the limit
-# large enough (16MB). This badly named config option is actually
-# max order + 1
-config FORCE_MAX_ZONEORDER
-       int
-       default "9" if PPC_64K_PAGES
-       default "13"
-
-source "init/Kconfig"
-
-config SYSVIPC_COMPAT
-       bool
-       depends on COMPAT && SYSVIPC
-       default y
-
-menu "Platform support"
-
-choice
-       prompt "Platform Type"
-       default PPC_MULTIPLATFORM
-
-config PPC_ISERIES
-       bool "IBM Legacy iSeries"
-
-config PPC_MULTIPLATFORM
-       bool "Generic"
-
-endchoice
-
-config PPC_PSERIES
-       depends on PPC_MULTIPLATFORM
-       bool "  IBM pSeries & new iSeries"
-       default y
-
-config PPC_BPA
-       bool "  Broadband Processor Architecture"
-       depends on PPC_MULTIPLATFORM
-
-config PPC_PMAC
-       depends on PPC_MULTIPLATFORM
-       bool "  Apple G5 based machines"
-       default y
-       select U3_DART
-       select GENERIC_TBSYNC
-
-config PPC_MAPLE
-       depends on PPC_MULTIPLATFORM
-       bool "  Maple 970FX Evaluation Board"
-       select U3_DART
-       select MPIC_BROKEN_U3
-       select GENERIC_TBSYNC
-       default n
-       help
-          This option enables support for the Maple 970FX Evaluation Board.
-         For more information, refer to <http://www.970eval.com>
-
-config PPC
-       bool
-       default y
-
-config PPC64
-       bool
-       default y
-
-config PPC_OF
-       depends on PPC_MULTIPLATFORM
-       bool
-       default y
-
-config XICS
-       depends on PPC_PSERIES
-       bool
-       default y
-
-config MPIC
-       depends on PPC_PSERIES || PPC_PMAC || PPC_MAPLE
-       bool
-       default y
-
-config PPC_I8259
-       depends on PPC_PSERIES
-       bool
-       default y
-
-config BPA_IIC
-       depends on PPC_BPA
-       bool
-       default y
-
-# VMX is pSeries only for now until somebody writes the iSeries
-# exception vectors for it
-config ALTIVEC
-       bool "Support for VMX (Altivec) vector unit"
-       depends on PPC_MULTIPLATFORM
-       default y
-
-config PPC_SPLPAR
-       depends on PPC_PSERIES
-       bool "Support for shared-processor logical partitions"
-       default n
-       help
-         Enabling this option will make the kernel run more efficiently
-         on logically-partitioned pSeries systems which use shared
-         processors, that is, which share physical processors between
-         two or more partitions.
-
-config KEXEC
-       bool "kexec system call (EXPERIMENTAL)"
-       depends on PPC_MULTIPLATFORM && EXPERIMENTAL
-       help
-         kexec is a system call that implements the ability to shutdown your
-         current kernel, and to start another kernel.  It is like a reboot
-         but it is independent of the system firmware.  And like a reboot
-         you can start any kernel with it, not just Linux.
-
-         The name comes from the similarity to the exec system call.
-
-         It is an ongoing process to be certain the hardware in a machine
-         is properly shutdown, so do not be surprised if this code does not
-         initially work for you.  It may help to enable device hotplugging
-         support.  As of this writing the exact hardware interface is
-         strongly in flux, so no good recommendation can be made.
-
-source "drivers/cpufreq/Kconfig"
-
-config CPU_FREQ_PMAC64
-       bool "Support for some Apple G5s"
-       depends on CPU_FREQ && PMAC_SMU && PPC64
-       select CPU_FREQ_TABLE
-       help
-         This adds support for frequency switching on Apple iMac G5,
-         and some of the more recent desktop G5 machines as well.
-
-config IBMVIO
-       depends on PPC_PSERIES || PPC_ISERIES
-       bool
-       default y
-
-config U3_DART
-       bool 
-       depends on PPC_MULTIPLATFORM
-       default n
-
-config MPIC_BROKEN_U3
-       bool
-       depends on PPC_MAPLE
-       default y
-
-config GENERIC_TBSYNC
-       def_bool n
-
-config PPC_PMAC64
-       bool
-       depends on PPC_PMAC
-       default y
-
-config BOOTX_TEXT
-       bool "Support for early boot text console"
-       depends PPC_OF
-       help
-         Say Y here to see progress messages from the boot firmware in text
-         mode. Requires an Open Firmware compatible video card.
-
-config POWER4
-       def_bool y
-
-config PPC_FPU
-       def_bool y
-
-config POWER4_ONLY
-       bool "Optimize for POWER4"
-       default n
-       ---help---
-         Cause the compiler to optimize for POWER4 processors. The resulting
-         binary will not work on POWER3 or RS64 processors when compiled with
-         binutils 2.15 or later.
-
-config IOMMU_VMERGE
-       bool "Enable IOMMU virtual merging (EXPERIMENTAL)"
-       depends on EXPERIMENTAL
-       default n
-       help
-         Cause IO segments sent to a device for DMA to be merged virtually
-         by the IOMMU when they happen to have been allocated contiguously.
-         This doesn't add pressure to the IOMMU allocator. However, some
-         drivers don't support getting large merged segments coming back
-         from *_map_sg(). Say Y if you know the drivers you are using are
-         properly handling this case.
-
-config SMP
-       bool "Symmetric multi-processing support"
-       ---help---
-         This enables support for systems with more than one CPU. If you have
-         a system with only one CPU, say N. If you have a system with more
-         than one CPU, say Y.
-
-         If you say N here, the kernel will run on single and multiprocessor
-         machines, but will use only one CPU of a multiprocessor machine. If
-         you say Y here, the kernel will run on single-processor machines.
-         On a single-processor machine, the kernel will run faster if you say
-         N here.
-
-         If you don't know what to do here, say Y.
-
-config NR_CPUS
-       int "Maximum number of CPUs (2-128)"
-       range 2 128
-       depends on SMP
-       default "32"
-
-config HMT
-       bool "Hardware multithreading"
-       depends on SMP && PPC_PSERIES && BROKEN
-       help
-         This option enables hardware multithreading on RS64 cpus.
-         pSeries systems p620 and p660 have such a cpu type.
-
-config NUMA
-       bool "NUMA support"
-       default y if SMP && PPC_PSERIES
-
-config ARCH_SELECT_MEMORY_MODEL
-       def_bool y
-
-config ARCH_FLATMEM_ENABLE
-       def_bool y
-       depends on !NUMA
-
-config ARCH_SPARSEMEM_ENABLE
-       def_bool y
-
-config ARCH_SPARSEMEM_DEFAULT
-       def_bool y
-       depends on NUMA
-
-source "mm/Kconfig"
-
-config HAVE_ARCH_EARLY_PFN_TO_NID
-       def_bool y
-       depends on NEED_MULTIPLE_NODES
-
-config ARCH_MEMORY_PROBE
-       def_bool y
-       depends on MEMORY_HOTPLUG
-
-# Some NUMA nodes have memory ranges that span
-# other nodes.  Even though a pfn is valid and
-# between a node's start and end pfns, it may not
-# reside on that node.
-#
-# This is a relatively temporary hack that should
-# be able to go away when sparsemem is fully in
-# place
-config NODES_SPAN_OTHER_NODES
-       def_bool y
-       depends on NEED_MULTIPLE_NODES
-
-config PPC_64K_PAGES
-       bool "64k page size"
-       help
-         This option changes the kernel logical page size to 64k. On machines
-          without processor support for 64k pages, the kernel will simulate
-          them by loading each individual 4k page on demand transparently,
-          while on hardware with such support, it will be used to map
-          normal application pages.
-
-config SCHED_SMT
-       bool "SMT (Hyperthreading) scheduler support"
-       depends on SMP
-       default off
-       help
-         SMT scheduler support improves the CPU scheduler's decision making
-         when dealing with POWER5 cpus at a cost of slightly increased
-         overhead in some places. If unsure say N here.
-
-source "kernel/Kconfig.preempt"
-source kernel/Kconfig.hz
-
-config EEH
-       bool "PCI Extended Error Handling (EEH)" if EMBEDDED
-       depends on PPC_PSERIES
-       default y if !EMBEDDED
-
-#
-# Use the generic interrupt handling code in kernel/irq/:
-#
-config GENERIC_HARDIRQS
-       bool
-       default y
-
-config PPC_RTAS
-       bool
-       depends on PPC_PSERIES || PPC_BPA
-       default y
-
-config RTAS_ERROR_LOGGING
-       bool
-       depends on PPC_RTAS
-       default y
-
-config RTAS_PROC
-       bool "Proc interface to RTAS"
-       depends on PPC_RTAS
-       default y
-
-config RTAS_FLASH
-       tristate "Firmware flash interface"
-       depends on RTAS_PROC
-
-config SCANLOG
-       tristate "Scanlog dump interface"
-       depends on RTAS_PROC && PPC_PSERIES
-
-config LPARCFG
-       tristate "LPAR Configuration Data"
-       depends on PPC_PSERIES || PPC_ISERIES
-       help
-       Provide system capacity information via human readable
-       <key word>=<value> pairs through a /proc/ppc64/lparcfg interface.
-
-config SECCOMP
-       bool "Enable seccomp to safely compute untrusted bytecode"
-       depends on PROC_FS
-       default y
-       help
-         This kernel feature is useful for number crunching applications
-         that may need to compute untrusted bytecode during their
-         execution. By using pipes or other transports made available to
-         the process as file descriptors supporting the read/write
-         syscalls, it's possible to isolate those applications in
-         their own address space using seccomp. Once seccomp is
-         enabled via /proc/<pid>/seccomp, it cannot be disabled
-         and the task is only allowed to execute a few safe syscalls
-         defined by each seccomp mode.
-
-         If unsure, say Y. Only embedded should say N here.
-
-source "fs/Kconfig.binfmt"
-
-config HOTPLUG_CPU
-       bool "Support for hot-pluggable CPUs"
-       depends on SMP && EXPERIMENTAL && (PPC_PSERIES || PPC_PMAC)
-       select HOTPLUG
-       ---help---
-         Say Y here to be able to turn CPUs off and on.
-
-         Say N if you are unsure.
-
-config PROC_DEVICETREE
-       bool "Support for Open Firmware device tree in /proc"
-       help
-         This option adds a device-tree directory under /proc which contains
-         an image of the device tree that the kernel copies from Open
-         Firmware. If unsure, say Y here.
-
-config CMDLINE_BOOL
-       bool "Default bootloader kernel arguments"
-       depends on !PPC_ISERIES
-
-config CMDLINE
-       string "Initial kernel command string"
-       depends on CMDLINE_BOOL
-       default "console=ttyS0,9600 console=tty0 root=/dev/sda2"
-       help
-         On some platforms, there is currently no way for the boot loader to
-         pass arguments to the kernel. For these platforms, you can supply
-         some command-line options at build time by entering them here.  In
-         most cases you will need to specify the root device here.
-
-endmenu
-
-config ISA_DMA_API
-       bool
-       default y
-
-menu "Bus Options"
-
-config ISA
-       bool
-       help
-         Find out whether you have ISA slots on your motherboard.  ISA is the
-         name of a bus system, i.e. the way the CPU talks to the other stuff
-         inside your box.  If you have an Apple machine, say N here; if you
-         have an IBM RS/6000 or pSeries machine or a PReP machine, say Y.  If
-         you have an embedded board, consult your board documentation.
-
-config SBUS
-       bool
-
-config MCA
-       bool
-
-config EISA
-       bool
-
-config PCI
-       bool "support for PCI devices" if (EMBEDDED && PPC_ISERIES)
-       default y
-       help
-         Find out whether your system includes a PCI bus. PCI is the name of
-         a bus system, i.e. the way the CPU talks to the other stuff inside
-         your box.  If you say Y here, the kernel will include drivers and
-         infrastructure code to support PCI bus devices.
-
-config PCI_DOMAINS
-       bool
-       default PCI
-
-source "drivers/pci/Kconfig"
-
-source "drivers/pcmcia/Kconfig"
-
-source "drivers/pci/hotplug/Kconfig"
-
-endmenu
-
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
-source "fs/Kconfig"
-
-menu "iSeries device drivers"
-       depends on PPC_ISERIES
-
-config VIOCONS
-       tristate "iSeries Virtual Console Support"
-
-config VIODASD
-       tristate "iSeries Virtual I/O disk support"
-       help
-         If you are running on an iSeries system and you want to use
-         virtual disks created and managed by OS/400, say Y.
-
-config VIOCD
-       tristate "iSeries Virtual I/O CD support"
-       help
-         If you are running Linux on an IBM iSeries system and you want to
-         read a CD drive owned by OS/400, say Y here.
-
-config VIOTAPE
-       tristate "iSeries Virtual Tape Support"
-       help
-         If you are running Linux on an iSeries system and you want Linux
-         to read and/or write a tape drive owned by OS/400, say Y here.
-
-endmenu
-
-config VIOPATH
-       bool
-       depends on VIOCONS || VIODASD || VIOCD || VIOTAPE || VETH
-       default y
-
-source "arch/powerpc/oprofile/Kconfig"
-
-source "arch/ppc64/Kconfig.debug"
-
-source "security/Kconfig"
-
-config KEYS_COMPAT
-       bool
-       depends on COMPAT && KEYS
-       default y
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
index d0edea503c49e78af5cd849bcce5a24493ce18b9..e876c213f5ce5a1f59da331db1eee68db9b5c5dc 100644 (file)
@@ -2,44 +2,6 @@
 # Makefile for the linux ppc64 kernel.
 #
 
-ifneq ($(CONFIG_PPC_MERGE),y)
-
-EXTRA_CFLAGS   += -mno-minimal-toc
-extra-y                := head.o vmlinux.lds
-
-obj-y               := misc.o prom.o
-
-endif
-
-obj-y               += idle.o dma.o \
-                       align.o \
-                       iommu.o
-
-pci-obj-$(CONFIG_PPC_MULTIPLATFORM)    += pci_dn.o pci_direct_iommu.o
-
-obj-$(CONFIG_PCI)      += pci.o pci_iommu.o iomap.o $(pci-obj-y)
+obj-y               += idle.o align.o
 
 obj-$(CONFIG_PPC_MULTIPLATFORM) += nvram.o
-ifneq ($(CONFIG_PPC_MERGE),y)
-obj-$(CONFIG_PPC_MULTIPLATFORM) += prom_init.o
-endif
-
-obj-$(CONFIG_KEXEC)            += machine_kexec.o
-obj-$(CONFIG_MODULES)          += module.o
-ifneq ($(CONFIG_PPC_MERGE),y)
-obj-$(CONFIG_MODULES)          += ppc_ksyms.o
-endif
-obj-$(CONFIG_HVC_CONSOLE)      += hvconsole.o
-ifneq ($(CONFIG_PPC_MERGE),y)
-obj-$(CONFIG_BOOTX_TEXT)       += btext.o
-endif
-obj-$(CONFIG_HVCS)             += hvcserver.o
-
-obj-$(CONFIG_KPROBES)          += kprobes.o
-
-ifneq ($(CONFIG_PPC_MERGE),y)
-ifeq ($(CONFIG_PPC_ISERIES),y)
-arch/ppc64/kernel/head.o: arch/powerpc/kernel/lparmap.s
-AFLAGS_head.o += -Iarch/powerpc/kernel
-endif
-endif
diff --git a/arch/ppc64/kernel/asm-offsets.c b/arch/ppc64/kernel/asm-offsets.c
deleted file mode 100644 (file)
index 84ab5c1..0000000
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
- * This program is used to generate definitions needed by
- * assembly language modules.
- *
- * We use the technique used in the OSF Mach kernel code:
- * generate asm statements containing #defines,
- * compile this file to assembler, and then extract the
- * #defines from the assembly-language output.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/config.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/time.h>
-#include <linux/hardirq.h>
-#include <asm/io.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-
-#include <asm/paca.h>
-#include <asm/lppaca.h>
-#include <asm/iseries/hv_lp_event.h>
-#include <asm/rtas.h>
-#include <asm/cputable.h>
-#include <asm/cache.h>
-#include <asm/systemcfg.h>
-#include <asm/compat.h>
-
-#define DEFINE(sym, val) \
-       asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : : )
-
-int main(void)
-{
-       /* thread struct on stack */
-       DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
-       DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
-       DEFINE(TI_SC_NOERR, offsetof(struct thread_info, syscall_noerror));
-
-       /* task_struct->thread */
-       DEFINE(THREAD, offsetof(struct task_struct, thread));
-       DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
-       DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode));
-       DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0]));
-       DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr));
-       DEFINE(KSP, offsetof(struct thread_struct, ksp));
-       DEFINE(KSP_VSID, offsetof(struct thread_struct, ksp_vsid));
-
-#ifdef CONFIG_ALTIVEC
-       DEFINE(THREAD_VR0, offsetof(struct thread_struct, vr[0]));
-       DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave));
-       DEFINE(THREAD_VSCR, offsetof(struct thread_struct, vscr));
-       DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr));
-#endif /* CONFIG_ALTIVEC */
-       DEFINE(MM, offsetof(struct task_struct, mm));
-       DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context));
-
-       DEFINE(DCACHEL1LINESIZE, offsetof(struct ppc64_caches, dline_size));
-       DEFINE(DCACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_dline_size));
-       DEFINE(DCACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, dlines_per_page));
-       DEFINE(ICACHEL1LINESIZE, offsetof(struct ppc64_caches, iline_size));
-       DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size));
-       DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
-       DEFINE(PLATFORM_LPAR, PLATFORM_LPAR);
-
-       /* paca */
-        DEFINE(PACA_SIZE, sizeof(struct paca_struct));
-        DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index));
-        DEFINE(PACAPROCSTART, offsetof(struct paca_struct, cpu_start));
-        DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack));
-       DEFINE(PACACURRENT, offsetof(struct paca_struct, __current));
-        DEFINE(PACASAVEDMSR, offsetof(struct paca_struct, saved_msr));
-        DEFINE(PACASTABREAL, offsetof(struct paca_struct, stab_real));
-        DEFINE(PACASTABVIRT, offsetof(struct paca_struct, stab_addr));
-       DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr));
-        DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1));
-       DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc));
-       DEFINE(PACAPROCENABLED, offsetof(struct paca_struct, proc_enabled));
-       DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
-       DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
-       DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
-#ifdef CONFIG_PPC_64K_PAGES
-       DEFINE(PACAPGDIR, offsetof(struct paca_struct, pgdir));
-#endif
-#ifdef CONFIG_HUGETLB_PAGE
-       DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas));
-       DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas));
-#endif /* CONFIG_HUGETLB_PAGE */
-       DEFINE(PACADEFAULTDECR, offsetof(struct paca_struct, default_decr));
-        DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen));
-        DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc));
-        DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb));
-        DEFINE(PACA_EXDSI, offsetof(struct paca_struct, exdsi));
-        DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
-       DEFINE(PACALPPACA, offsetof(struct paca_struct, lppaca));
-       DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
-       DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0));
-       DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1));
-       DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int));
-       DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int));
-
-       /* RTAS */
-       DEFINE(RTASBASE, offsetof(struct rtas_t, base));
-       DEFINE(RTASENTRY, offsetof(struct rtas_t, entry));
-
-       /* Interrupt register frame */
-       DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD);
-
-       DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
-
-       /* 288 = # of volatile regs, int & fp, for leaf routines */
-       /* which do not stack a frame.  See the PPC64 ABI.       */
-       DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 288);
-       /* Create extra stack space for SRR0 and SRR1 when calling prom/rtas. */
-       DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
-       DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
-       DEFINE(GPR0, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[0]));
-       DEFINE(GPR1, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[1]));
-       DEFINE(GPR2, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[2]));
-       DEFINE(GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[3]));
-       DEFINE(GPR4, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[4]));
-       DEFINE(GPR5, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[5]));
-       DEFINE(GPR6, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[6]));
-       DEFINE(GPR7, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[7]));
-       DEFINE(GPR8, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[8]));
-       DEFINE(GPR9, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[9]));
-       DEFINE(GPR10, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[10]));
-       DEFINE(GPR11, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[11]));
-       DEFINE(GPR12, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[12]));
-       DEFINE(GPR13, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[13]));
-       /*
-        * Note: these symbols include _ because they overlap with special
-        * register names
-        */
-       DEFINE(_NIP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, nip));
-       DEFINE(_MSR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, msr));
-       DEFINE(_CTR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ctr));
-       DEFINE(_LINK, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, link));
-       DEFINE(_CCR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ccr));
-       DEFINE(_XER, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, xer));
-       DEFINE(_DAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
-       DEFINE(_DSISR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
-       DEFINE(ORIG_GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, orig_gpr3));
-       DEFINE(RESULT, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, result));
-       DEFINE(_TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap));
-       DEFINE(SOFTE, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, softe));
-
-       /* These _only_ to be used with {PROM,RTAS}_FRAME_SIZE!!! */
-       DEFINE(_SRR0, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs));
-       DEFINE(_SRR1, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)+8);
-
-       DEFINE(CLONE_VM, CLONE_VM);
-       DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);
-
-       /* About the CPU features table */
-       DEFINE(CPU_SPEC_ENTRY_SIZE, sizeof(struct cpu_spec));
-       DEFINE(CPU_SPEC_PVR_MASK, offsetof(struct cpu_spec, pvr_mask));
-       DEFINE(CPU_SPEC_PVR_VALUE, offsetof(struct cpu_spec, pvr_value));
-       DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features));
-       DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup));
-
-       /* systemcfg offsets for use by vdso */
-       DEFINE(CFG_TB_ORIG_STAMP, offsetof(struct systemcfg, tb_orig_stamp));
-       DEFINE(CFG_TB_TICKS_PER_SEC, offsetof(struct systemcfg, tb_ticks_per_sec));
-       DEFINE(CFG_TB_TO_XS, offsetof(struct systemcfg, tb_to_xs));
-       DEFINE(CFG_STAMP_XSEC, offsetof(struct systemcfg, stamp_xsec));
-       DEFINE(CFG_TB_UPDATE_COUNT, offsetof(struct systemcfg, tb_update_count));
-       DEFINE(CFG_TZ_MINUTEWEST, offsetof(struct systemcfg, tz_minuteswest));
-       DEFINE(CFG_TZ_DSTTIME, offsetof(struct systemcfg, tz_dsttime));
-       DEFINE(CFG_SYSCALL_MAP32, offsetof(struct systemcfg, syscall_map_32));
-       DEFINE(CFG_SYSCALL_MAP64, offsetof(struct systemcfg, syscall_map_64));
-
-       /* timeval/timezone offsets for use by vdso */
-       DEFINE(TVAL64_TV_SEC, offsetof(struct timeval, tv_sec));
-       DEFINE(TVAL64_TV_USEC, offsetof(struct timeval, tv_usec));
-       DEFINE(TVAL32_TV_SEC, offsetof(struct compat_timeval, tv_sec));
-       DEFINE(TVAL32_TV_USEC, offsetof(struct compat_timeval, tv_usec));
-       DEFINE(TZONE_TZ_MINWEST, offsetof(struct timezone, tz_minuteswest));
-       DEFINE(TZONE_TZ_DSTTIME, offsetof(struct timezone, tz_dsttime));
-
-       return 0;
-}
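The deleted asm-offsets.c above relies on the offset-extraction trick its header comment describes: each DEFINE() expands to an asm statement whose template carries a "->SYMBOL value" marker, the file is compiled only to assembly (never assembled), and the build rewrites the marker lines of the generated .s into plain #defines in a generated asm-offsets.h, which head.S then includes. A minimal standalone sketch of the same technique, using a hypothetical demo_regs structure rather than any kernel type:

/* offsets_demo.c - sketch of the asm-offsets technique.
 * "gcc -S offsets_demo.c" leaves marker lines such as
 *     ->DEMO_MSR 8 offsetof(struct demo_regs, msr)
 * in offsets_demo.s (the exact operand spelling varies by target);
 * a small sed pass in the build then turns each marker into
 * "#define DEMO_MSR 8" in a header that assembly code can include. */
#include <stddef.h>

struct demo_regs {                      /* hypothetical, for illustration */
        unsigned long nip;
        unsigned long msr;
        unsigned long gpr[32];
};

#define DEFINE(sym, val) \
        asm volatile("\n->" #sym " %0 " #val : : "i" (val))

int main(void)
{
        DEFINE(DEMO_NIP, offsetof(struct demo_regs, nip));
        DEFINE(DEMO_MSR, offsetof(struct demo_regs, msr));
        DEFINE(DEMO_GPR0, offsetof(struct demo_regs, gpr[0]));
        return 0;
}

The indirection exists because assembly sources cannot evaluate offsetof(), so the structure offsets are computed once by the C compiler and exported as numeric constants.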
diff --git a/arch/ppc64/kernel/btext.c b/arch/ppc64/kernel/btext.c
deleted file mode 100644 (file)
index 506a378..0000000
+++ /dev/null
@@ -1,792 +0,0 @@
-/*
- * Procedures for drawing on the screen early on in the boot process.
- *
- * Benjamin Herrenschmidt <benh@kernel.crashing.org>
- */
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/init.h>
-
-#include <asm/sections.h>
-#include <asm/prom.h>
-#include <asm/btext.h>
-#include <asm/prom.h>
-#include <asm/page.h>
-#include <asm/mmu.h>
-#include <asm/pgtable.h>
-#include <asm/io.h>
-#include <asm/lmb.h>
-#include <asm/processor.h>
-#include <asm/udbg.h>
-
-#undef NO_SCROLL
-
-#ifndef NO_SCROLL
-static void scrollscreen(void);
-#endif
-
-static void draw_byte(unsigned char c, long locX, long locY);
-static void draw_byte_32(unsigned char *bits, unsigned int *base, int rb);
-static void draw_byte_16(unsigned char *bits, unsigned int *base, int rb);
-static void draw_byte_8(unsigned char *bits, unsigned int *base, int rb);
-
-static int g_loc_X;
-static int g_loc_Y;
-static int g_max_loc_X;
-static int g_max_loc_Y;
-
-static int dispDeviceRowBytes;
-static int dispDeviceDepth;
-static int dispDeviceRect[4];
-static unsigned char *dispDeviceBase, *logicalDisplayBase;
-
-unsigned long disp_BAT[2] __initdata = {0, 0};
-
-#define cmapsz (16*256)
-
-static unsigned char vga_font[cmapsz];
-
-int boot_text_mapped;
-int force_printk_to_btext = 0;
-
-
-/* Here's a small text engine to use during early boot
- * or for debugging purposes
- *
- * todo:
- *
- *  - build some kind of vgacon with it to enable early printk
- *  - move to a separate file
- *  - add a few video driver hooks to keep in sync with display
- *    changes.
- */
-
-void map_boot_text(void)
-{
-       unsigned long base, offset, size;
-       unsigned char *vbase;
-
-       /* By default, we are no longer mapped */
-       boot_text_mapped = 0;
-       if (dispDeviceBase == 0)
-               return;
-       base = ((unsigned long) dispDeviceBase) & 0xFFFFF000UL;
-       offset = ((unsigned long) dispDeviceBase) - base;
-       size = dispDeviceRowBytes * dispDeviceRect[3] + offset
-               + dispDeviceRect[0];
-       vbase = __ioremap(base, size, _PAGE_NO_CACHE);
-       if (vbase == 0)
-               return;
-       logicalDisplayBase = vbase + offset;
-       boot_text_mapped = 1;
-}
-
-int btext_initialize(struct device_node *np)
-{
-       unsigned int width, height, depth, pitch;
-       unsigned long address = 0;
-       u32 *prop;
-
-       prop = (u32 *)get_property(np, "width", NULL);
-       if (prop == NULL)
-               return -EINVAL;
-       width = *prop;
-       prop = (u32 *)get_property(np, "height", NULL);
-       if (prop == NULL)
-               return -EINVAL;
-       height = *prop;
-       prop = (u32 *)get_property(np, "depth", NULL);
-       if (prop == NULL)
-               return -EINVAL;
-       depth = *prop;
-       pitch = width * ((depth + 7) / 8);
-       prop = (u32 *)get_property(np, "linebytes", NULL);
-       if (prop)
-               pitch = *prop;
-       if (pitch == 1)
-               pitch = 0x1000;
-       prop = (u32 *)get_property(np, "address", NULL);
-       if (prop)
-               address = *prop;
-
-       /* FIXME: Add support for PCI reg properties */
-
-       if (address == 0)
-               return -EINVAL;
-
-       g_loc_X = 0;
-       g_loc_Y = 0;
-       g_max_loc_X = width / 8;
-       g_max_loc_Y = height / 16;
-       logicalDisplayBase = (unsigned char *)address;
-       dispDeviceBase = (unsigned char *)address;
-       dispDeviceRowBytes = pitch;
-       dispDeviceDepth = depth;
-       dispDeviceRect[0] = dispDeviceRect[1] = 0;
-       dispDeviceRect[2] = width;
-       dispDeviceRect[3] = height;
-
-       map_boot_text();
-
-       return 0;
-}
-
-static void btext_putc(unsigned char c)
-{
-       btext_drawchar(c);
-}
-
-void __init init_boot_display(void)
-{
-       char *name;
-       struct device_node *np = NULL; 
-       int rc = -ENODEV;
-
-       printk("trying to initialize btext ...\n");
-
-       name = (char *)get_property(of_chosen, "linux,stdout-path", NULL);
-       if (name != NULL) {
-               np = of_find_node_by_path(name);
-               if (np != NULL) {
-                       if (strcmp(np->type, "display") != 0) {
-                               printk("boot stdout isn't a display !\n");
-                               of_node_put(np);
-                               np = NULL;
-                       }
-               }
-       }
-       if (np)
-               rc = btext_initialize(np);
-       if (rc) {
-               for (np = NULL; (np = of_find_node_by_type(np, "display"));) {
-                       if (get_property(np, "linux,opened", NULL)) {
-                               printk("trying %s ...\n", np->full_name);
-                               rc = btext_initialize(np);
-                               printk("result: %d\n", rc);
-                       }
-                       if (rc == 0)
-                               break;
-               }
-       }
-       if (rc == 0 && udbg_putc == NULL)
-               udbg_putc = btext_putc;
-}
-
-
-/* Calc the base address of a given point (x,y) */
-static unsigned char * calc_base(int x, int y)
-{
-       unsigned char *base;
-
-       base = logicalDisplayBase;
-       if (base == 0)
-               base = dispDeviceBase;
-       base += (x + dispDeviceRect[0]) * (dispDeviceDepth >> 3);
-       base += (y + dispDeviceRect[1]) * dispDeviceRowBytes;
-       return base;
-}
-
-/* Adjust the display to a new resolution */
-void btext_update_display(unsigned long phys, int width, int height,
-                         int depth, int pitch)
-{
-       if (dispDeviceBase == 0)
-               return;
-
-       /* check it's the same frame buffer (within 256MB) */
-       if ((phys ^ (unsigned long)dispDeviceBase) & 0xf0000000)
-               return;
-
-       dispDeviceBase = (__u8 *) phys;
-       dispDeviceRect[0] = 0;
-       dispDeviceRect[1] = 0;
-       dispDeviceRect[2] = width;
-       dispDeviceRect[3] = height;
-       dispDeviceDepth = depth;
-       dispDeviceRowBytes = pitch;
-       if (boot_text_mapped) {
-               iounmap(logicalDisplayBase);
-               boot_text_mapped = 0;
-       }
-       map_boot_text();
-       g_loc_X = 0;
-       g_loc_Y = 0;
-       g_max_loc_X = width / 8;
-       g_max_loc_Y = height / 16;
-}
-
-void btext_clearscreen(void)
-{
-       unsigned long *base     = (unsigned long *)calc_base(0, 0);
-       unsigned long width     = ((dispDeviceRect[2] - dispDeviceRect[0]) *
-                                       (dispDeviceDepth >> 3)) >> 3;
-       int i,j;
-
-       for (i=0; i<(dispDeviceRect[3] - dispDeviceRect[1]); i++)
-       {
-               unsigned long *ptr = base;
-               for(j=width; j; --j)
-                       *(ptr++) = 0;
-               base += (dispDeviceRowBytes >> 3);
-       }
-}
-
-#ifndef NO_SCROLL
-static void scrollscreen(void)
-{
-       unsigned long *src      = (unsigned long *)calc_base(0,16);
-       unsigned long *dst      = (unsigned long *)calc_base(0,0);
-       unsigned long width     = ((dispDeviceRect[2] - dispDeviceRect[0]) *
-                                  (dispDeviceDepth >> 3)) >> 3;
-       int i,j;
-
-       for (i=0; i<(dispDeviceRect[3] - dispDeviceRect[1] - 16); i++)
-       {
-               unsigned long *src_ptr = src;
-               unsigned long *dst_ptr = dst;
-               for(j=width; j; --j)
-                       *(dst_ptr++) = *(src_ptr++);
-               src += (dispDeviceRowBytes >> 3);
-               dst += (dispDeviceRowBytes >> 3);
-       }
-       for (i=0; i<16; i++)
-       {
-               unsigned long *dst_ptr = dst;
-               for(j=width; j; --j)
-                       *(dst_ptr++) = 0;
-               dst += (dispDeviceRowBytes >> 3);
-       }
-}
-#endif /* ndef NO_SCROLL */
-
-void btext_drawchar(char c)
-{
-       int cline = 0;
-#ifdef NO_SCROLL
-       int x;
-#endif
-       if (!boot_text_mapped)
-               return;
-
-       switch (c) {
-       case '\b':
-               if (g_loc_X > 0)
-                       --g_loc_X;
-               break;
-       case '\t':
-               g_loc_X = (g_loc_X & -8) + 8;
-               break;
-       case '\r':
-               g_loc_X = 0;
-               break;
-       case '\n':
-               g_loc_X = 0;
-               g_loc_Y++;
-               cline = 1;
-               break;
-       default:
-               draw_byte(c, g_loc_X++, g_loc_Y);
-       }
-       if (g_loc_X >= g_max_loc_X) {
-               g_loc_X = 0;
-               g_loc_Y++;
-               cline = 1;
-       }
-#ifndef NO_SCROLL
-       while (g_loc_Y >= g_max_loc_Y) {
-               scrollscreen();
-               g_loc_Y--;
-       }
-#else
-       /* wrap around from bottom to top of screen so we don't
-          waste time scrolling each line.  -- paulus. */
-       if (g_loc_Y >= g_max_loc_Y)
-               g_loc_Y = 0;
-       if (cline) {
-               for (x = 0; x < g_max_loc_X; ++x)
-                       draw_byte(' ', x, g_loc_Y);
-       }
-#endif
-}
-
-void btext_drawstring(const char *c)
-{
-       if (!boot_text_mapped)
-               return;
-       while (*c)
-               btext_drawchar(*c++);
-}
-
-void btext_drawhex(unsigned long v)
-{
-       char *hex_table = "0123456789abcdef";
-
-       if (!boot_text_mapped)
-               return;
-       btext_drawchar(hex_table[(v >> 60) & 0x0000000FUL]);
-       btext_drawchar(hex_table[(v >> 56) & 0x0000000FUL]);
-       btext_drawchar(hex_table[(v >> 52) & 0x0000000FUL]);
-       btext_drawchar(hex_table[(v >> 48) & 0x0000000FUL]);
-       btext_drawchar(hex_table[(v >> 44) & 0x0000000FUL]);
-       btext_drawchar(hex_table[(v >> 40) & 0x0000000FUL]);
-       btext_drawchar(hex_table[(v >> 36) & 0x0000000FUL]);
-       btext_drawchar(hex_table[(v >> 32) & 0x0000000FUL]);
-       btext_drawchar(hex_table[(v >> 28) & 0x0000000FUL]);
-       btext_drawchar(hex_table[(v >> 24) & 0x0000000FUL]);
-       btext_drawchar(hex_table[(v >> 20) & 0x0000000FUL]);
-       btext_drawchar(hex_table[(v >> 16) & 0x0000000FUL]);
-       btext_drawchar(hex_table[(v >> 12) & 0x0000000FUL]);
-       btext_drawchar(hex_table[(v >>  8) & 0x0000000FUL]);
-       btext_drawchar(hex_table[(v >>  4) & 0x0000000FUL]);
-       btext_drawchar(hex_table[(v >>  0) & 0x0000000FUL]);
-       btext_drawchar(' ');
-}
-
-static void draw_byte(unsigned char c, long locX, long locY)
-{
-       unsigned char *base     = calc_base(locX << 3, locY << 4);
-       unsigned char *font     = &vga_font[((unsigned int)c) * 16];
-       int rb                  = dispDeviceRowBytes;
-
-       switch(dispDeviceDepth) {
-       case 24:
-       case 32:
-               draw_byte_32(font, (unsigned int *)base, rb);
-               break;
-       case 15:
-       case 16:
-               draw_byte_16(font, (unsigned int *)base, rb);
-               break;
-       case 8:
-               draw_byte_8(font, (unsigned int *)base, rb);
-               break;
-       }
-}
-
-static unsigned int expand_bits_8[16] = {
-       0x00000000,
-       0x000000ff,
-       0x0000ff00,
-       0x0000ffff,
-       0x00ff0000,
-       0x00ff00ff,
-       0x00ffff00,
-       0x00ffffff,
-       0xff000000,
-       0xff0000ff,
-       0xff00ff00,
-       0xff00ffff,
-       0xffff0000,
-       0xffff00ff,
-       0xffffff00,
-       0xffffffff
-};
-
-static unsigned int expand_bits_16[4] = {
-       0x00000000,
-       0x0000ffff,
-       0xffff0000,
-       0xffffffff
-};
-
-
-static void draw_byte_32(unsigned char *font, unsigned int *base, int rb)
-{
-       int l, bits;
-       int fg = 0xFFFFFFFFUL;
-       int bg = 0x00000000UL;
-
-       for (l = 0; l < 16; ++l)
-       {
-               bits = *font++;
-               base[0] = (-(bits >> 7) & fg) ^ bg;
-               base[1] = (-((bits >> 6) & 1) & fg) ^ bg;
-               base[2] = (-((bits >> 5) & 1) & fg) ^ bg;
-               base[3] = (-((bits >> 4) & 1) & fg) ^ bg;
-               base[4] = (-((bits >> 3) & 1) & fg) ^ bg;
-               base[5] = (-((bits >> 2) & 1) & fg) ^ bg;
-               base[6] = (-((bits >> 1) & 1) & fg) ^ bg;
-               base[7] = (-(bits & 1) & fg) ^ bg;
-               base = (unsigned int *) ((char *)base + rb);
-       }
-}
-
-static void draw_byte_16(unsigned char *font, unsigned int *base, int rb)
-{
-       int l, bits;
-       int fg = 0xFFFFFFFFUL;
-       int bg = 0x00000000UL;
-       unsigned int *eb = (int *)expand_bits_16;
-
-       for (l = 0; l < 16; ++l)
-       {
-               bits = *font++;
-               base[0] = (eb[bits >> 6] & fg) ^ bg;
-               base[1] = (eb[(bits >> 4) & 3] & fg) ^ bg;
-               base[2] = (eb[(bits >> 2) & 3] & fg) ^ bg;
-               base[3] = (eb[bits & 3] & fg) ^ bg;
-               base = (unsigned int *) ((char *)base + rb);
-       }
-}
-
-static void draw_byte_8(unsigned char *font, unsigned int *base, int rb)
-{
-       int l, bits;
-       int fg = 0x0F0F0F0FUL;
-       int bg = 0x00000000UL;
-       unsigned int *eb = (int *)expand_bits_8;
-
-       for (l = 0; l < 16; ++l)
-       {
-               bits = *font++;
-               base[0] = (eb[bits >> 4] & fg) ^ bg;
-               base[1] = (eb[bits & 0xf] & fg) ^ bg;
-               base = (unsigned int *) ((char *)base + rb);
-       }
-}
-
-static unsigned char vga_font[cmapsz] = {
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x81, 0xa5, 0x81, 0x81, 0xbd,
-0x99, 0x81, 0x81, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xff,
-0xdb, 0xff, 0xff, 0xc3, 0xe7, 0xff, 0xff, 0x7e, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x6c, 0xfe, 0xfe, 0xfe, 0xfe, 0x7c, 0x38, 0x10,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x7c, 0xfe,
-0x7c, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18,
-0x3c, 0x3c, 0xe7, 0xe7, 0xe7, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x18, 0x3c, 0x7e, 0xff, 0xff, 0x7e, 0x18, 0x18, 0x3c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c,
-0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xe7, 0xc3, 0xc3, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x42, 0x42, 0x66, 0x3c, 0x00,
-0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc3, 0x99, 0xbd,
-0xbd, 0x99, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x1e, 0x0e,
-0x1a, 0x32, 0x78, 0xcc, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x3c, 0x66, 0x66, 0x66, 0x66, 0x3c, 0x18, 0x7e, 0x18, 0x18,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x33, 0x3f, 0x30, 0x30, 0x30,
-0x30, 0x70, 0xf0, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0x63,
-0x7f, 0x63, 0x63, 0x63, 0x63, 0x67, 0xe7, 0xe6, 0xc0, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x18, 0x18, 0xdb, 0x3c, 0xe7, 0x3c, 0xdb, 0x18, 0x18,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfe, 0xf8,
-0xf0, 0xe0, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x06, 0x0e,
-0x1e, 0x3e, 0xfe, 0x3e, 0x1e, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
-0x66, 0x00, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xdb,
-0xdb, 0xdb, 0x7b, 0x1b, 0x1b, 0x1b, 0x1b, 0x1b, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x7c, 0xc6, 0x60, 0x38, 0x6c, 0xc6, 0xc6, 0x6c, 0x38, 0x0c, 0xc6,
-0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0xfe, 0xfe, 0xfe, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c,
-0x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x7e, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x18, 0x0c, 0xfe, 0x0c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x60, 0xfe, 0x60, 0x30, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0xc0,
-0xc0, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x24, 0x66, 0xff, 0x66, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x38, 0x7c, 0x7c, 0xfe, 0xfe, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xfe, 0x7c, 0x7c,
-0x38, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x24, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6c,
-0x6c, 0xfe, 0x6c, 0x6c, 0x6c, 0xfe, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00,
-0x18, 0x18, 0x7c, 0xc6, 0xc2, 0xc0, 0x7c, 0x06, 0x06, 0x86, 0xc6, 0x7c,
-0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc2, 0xc6, 0x0c, 0x18,
-0x30, 0x60, 0xc6, 0x86, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c,
-0x6c, 0x38, 0x76, 0xdc, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x30, 0x30, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x30, 0x30, 0x30,
-0x30, 0x30, 0x18, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x18,
-0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x3c, 0xff, 0x3c, 0x66, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e,
-0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x02, 0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xce, 0xde, 0xf6, 0xe6, 0xc6, 0xc6, 0x7c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x38, 0x78, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
-0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x7c, 0xc6, 0x06, 0x06, 0x3c, 0x06, 0x06, 0x06, 0xc6, 0x7c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x1c, 0x3c, 0x6c, 0xcc, 0xfe,
-0x0c, 0x0c, 0x0c, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0,
-0xc0, 0xc0, 0xfc, 0x06, 0x06, 0x06, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x38, 0x60, 0xc0, 0xc0, 0xfc, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0x06, 0x06, 0x0c, 0x18,
-0x30, 0x30, 0x30, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
-0xc6, 0xc6, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x06, 0x06, 0x0c, 0x78,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00,
-0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x18, 0x18, 0x00, 0x00, 0x00, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x06,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00,
-0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60,
-0x30, 0x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x7c, 0xc6, 0xc6, 0x0c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xde, 0xde,
-0xde, 0xdc, 0xc0, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38,
-0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x66, 0x66, 0x66, 0x66, 0xfc,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0xc2, 0xc0, 0xc0, 0xc0,
-0xc0, 0xc2, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x6c,
-0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x6c, 0xf8, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68, 0x60, 0x62, 0x66, 0xfe,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68,
-0x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66,
-0xc2, 0xc0, 0xc0, 0xde, 0xc6, 0xc6, 0x66, 0x3a, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x0c,
-0x0c, 0x0c, 0x0c, 0x0c, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0xe6, 0x66, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0x66, 0xe6,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x60, 0x60, 0x60, 0x60, 0x60,
-0x60, 0x62, 0x66, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xe7,
-0xff, 0xff, 0xdb, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6, 0xc6,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
-0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66,
-0x66, 0x66, 0x7c, 0x60, 0x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xd6, 0xde, 0x7c,
-0x0c, 0x0e, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x6c,
-0x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
-0xc6, 0x60, 0x38, 0x0c, 0x06, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0xff, 0xdb, 0x99, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
-0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3,
-0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x66,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x18,
-0x3c, 0x66, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3,
-0xc3, 0x66, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0xff, 0xc3, 0x86, 0x0c, 0x18, 0x30, 0x60, 0xc1, 0xc3, 0xff,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x30, 0x30, 0x30, 0x30, 0x30,
-0x30, 0x30, 0x30, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
-0xc0, 0xe0, 0x70, 0x38, 0x1c, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x3c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x3c,
-0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00,
-0x30, 0x30, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x78, 0x0c, 0x7c,
-0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x60,
-0x60, 0x78, 0x6c, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc0, 0xc0, 0xc0, 0xc6, 0x7c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x0c, 0x0c, 0x3c, 0x6c, 0xcc,
-0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xf0,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xcc, 0xcc,
-0xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0xcc, 0x78, 0x00, 0x00, 0x00, 0xe0, 0x60,
-0x60, 0x6c, 0x76, 0x66, 0x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x18, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x06, 0x00, 0x0e, 0x06, 0x06,
-0x06, 0x06, 0x06, 0x06, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0xe0, 0x60,
-0x60, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe6, 0xff, 0xdb,
-0xdb, 0xdb, 0xdb, 0xdb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x66, 0x66,
-0x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x76, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0x0c, 0x1e, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x76, 0x66, 0x60, 0x60, 0x60, 0xf0,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0x60,
-0x38, 0x0c, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x30,
-0x30, 0xfc, 0x30, 0x30, 0x30, 0x30, 0x36, 0x1c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0xc3,
-0xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0x3c, 0x66, 0xc3,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6,
-0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0xfe, 0xcc, 0x18, 0x30, 0x60, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x0e, 0x18, 0x18, 0x18, 0x70, 0x18, 0x18, 0x18, 0x18, 0x0e,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x00, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x18,
-0x18, 0x18, 0x0e, 0x18, 0x18, 0x18, 0x18, 0x70, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6,
-0xc6, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66,
-0xc2, 0xc0, 0xc0, 0xc0, 0xc2, 0x66, 0x3c, 0x0c, 0x06, 0x7c, 0x00, 0x00,
-0x00, 0x00, 0xcc, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x00, 0x7c, 0xc6, 0xfe,
-0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c,
-0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0xcc, 0x00, 0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0x78, 0x0c, 0x7c,
-0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38,
-0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x60, 0x60, 0x66, 0x3c, 0x0c, 0x06,
-0x3c, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xfe,
-0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00,
-0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x60, 0x30, 0x18, 0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x00, 0x00, 0x38, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c, 0x66,
-0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x60, 0x30, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0xc6,
-0xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38, 0x00,
-0x38, 0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
-0x18, 0x30, 0x60, 0x00, 0xfe, 0x66, 0x60, 0x7c, 0x60, 0x60, 0x66, 0xfe,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6e, 0x3b, 0x1b,
-0x7e, 0xd8, 0xdc, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x6c,
-0xcc, 0xcc, 0xfe, 0xcc, 0xcc, 0xcc, 0xcc, 0xce, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x00, 0x7c, 0xc6, 0xc6,
-0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18,
-0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x30, 0x78, 0xcc, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0xcc, 0xcc, 0xcc,
-0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00,
-0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0x78, 0x00,
-0x00, 0xc6, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
-0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e,
-0xc3, 0xc0, 0xc0, 0xc0, 0xc3, 0x7e, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xe6, 0xfc,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0xff, 0x18,
-0xff, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66,
-0x7c, 0x62, 0x66, 0x6f, 0x66, 0x66, 0x66, 0xf3, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x0e, 0x1b, 0x18, 0x18, 0x18, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18,
-0xd8, 0x70, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0x78, 0x0c, 0x7c,
-0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30,
-0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x18, 0x30, 0x60, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0xcc, 0xcc, 0xcc,
-0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc,
-0x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00,
-0x76, 0xdc, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x6c, 0x6c, 0x3e, 0x00, 0x7e, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c,
-0x38, 0x00, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x30, 0x30, 0x00, 0x30, 0x30, 0x60, 0xc0, 0xc6, 0xc6, 0x7c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0,
-0xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0xfe, 0x06, 0x06, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30, 0x60, 0xce, 0x9b, 0x06,
-0x0c, 0x1f, 0x00, 0x00, 0x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30,
-0x66, 0xce, 0x96, 0x3e, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18,
-0x00, 0x18, 0x18, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x6c, 0xd8, 0x6c, 0x36, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0x6c, 0x36,
-0x6c, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x44, 0x11, 0x44,
-0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44,
-0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa,
-0x55, 0xaa, 0x55, 0xaa, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77,
-0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8,
-0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36,
-0x36, 0x36, 0x36, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x36, 0x36, 0x36, 0x36,
-0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x18, 0xf8,
-0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36,
-0x36, 0xf6, 0x06, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
-0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
-0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x06, 0xf6,
-0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
-0x36, 0xf6, 0x06, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xfe, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0xf8, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xff,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x37,
-0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
-0x36, 0x37, 0x30, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36,
-0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xf7, 0x00, 0xff,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0xff, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
-0x36, 0x36, 0x36, 0x36, 0x36, 0x37, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36,
-0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0xff,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36,
-0x36, 0xf7, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
-0x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xff,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0xff, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x36, 0x36, 0x36, 0x36,
-0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x3f,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x1f, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f,
-0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
-0x36, 0x36, 0x36, 0xff, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
-0x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x1f, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xf0, 0xf0, 0xf0,
-0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0,
-0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
-0x0f, 0x0f, 0x0f, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x76, 0xdc, 0xd8, 0xd8, 0xd8, 0xdc, 0x76, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x78, 0xcc, 0xcc, 0xcc, 0xd8, 0xcc, 0xc6, 0xc6, 0xc6, 0xcc,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0xc6, 0xc0, 0xc0, 0xc0,
-0xc0, 0xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0xfe, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0xfe, 0xc6, 0x60, 0x30, 0x18, 0x30, 0x60, 0xc6, 0xfe,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xd8, 0xd8,
-0xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x66, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xc0, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x18, 0x3c, 0x66, 0x66,
-0x66, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38,
-0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0x6c, 0x38, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x38, 0x6c, 0xc6, 0xc6, 0xc6, 0x6c, 0x6c, 0x6c, 0x6c, 0xee,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x30, 0x18, 0x0c, 0x3e, 0x66,
-0x66, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x7e, 0xdb, 0xdb, 0xdb, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x03, 0x06, 0x7e, 0xdb, 0xdb, 0xf3, 0x7e, 0x60, 0xc0,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x30, 0x60, 0x60, 0x7c, 0x60,
-0x60, 0x60, 0x30, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c,
-0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e, 0x18,
-0x18, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30,
-0x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x00, 0x7e,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x1b, 0x1b, 0x1b, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x7e, 0x00, 0x18, 0x18, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x00,
-0x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c,
-0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x0c, 0x0c,
-0x0c, 0x0c, 0x0c, 0xec, 0x6c, 0x6c, 0x3c, 0x1c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0xd8, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0xd8, 0x30, 0x60, 0xc8, 0xf8, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00,
-};
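calc_base() in the deleted btext.c above is the heart of the early text engine: a pixel's byte address is the mapped frame-buffer base plus x times the bytes per pixel plus y times the row pitch, and draw_byte() feeds it the top-left pixel of an 8x16 character cell. A small self-contained sketch of that addressing arithmetic, with a hypothetical fbmode structure standing in for the globals the kernel file uses:

/* Sketch of linear-framebuffer addressing as done by calc_base():
 * byte offset = x * bytes_per_pixel + y * pitch, and the glyph for
 * text cell (col, row) starts at pixel (col * 8, row * 16).
 * struct fbmode is illustrative only. */
#include <stdio.h>
#include <stddef.h>

struct fbmode {
        unsigned pitch;         /* bytes from one scan line to the next */
        unsigned depth;         /* bits per pixel: 8, 16 or 32          */
};

static size_t pixel_offset(const struct fbmode *m, int x, int y)
{
        return (size_t)x * (m->depth >> 3) + (size_t)y * m->pitch;
}

static size_t glyph_offset(const struct fbmode *m, int col, int row)
{
        /* cf. draw_byte()'s calc_base(locX << 3, locY << 4) */
        return pixel_offset(m, col * 8, row * 16);
}

int main(void)
{
        struct fbmode m = { 4096, 32 };   /* e.g. a 32bpp mode with 4 KB pitch */

        printf("glyph (10,2) starts %zu bytes into the frame buffer\n",
               glyph_offset(&m, 10, 2));
        return 0;
}

The expand_bits_8[] and expand_bits_16[] tables above serve the inner loop of the same engine: they fan 4 (or 2) font bits out into full 8-bit (or 16-bit) pixels so each glyph row can be stored a whole 32-bit word at a time.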
diff --git a/arch/ppc64/kernel/dma.c b/arch/ppc64/kernel/dma.c
deleted file mode 100644 (file)
index 7c34196..0000000
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * Copyright (C) 2004 IBM Corporation
- *
- * Implements the generic device dma API for ppc64. Handles
- * the pci and vio busses
- */
-
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-/* Include the busses we support */
-#include <linux/pci.h>
-#include <asm/vio.h>
-#include <asm/scatterlist.h>
-#include <asm/bug.h>
-
-static struct dma_mapping_ops *get_dma_ops(struct device *dev)
-{
-#ifdef CONFIG_PCI
-       if (dev->bus == &pci_bus_type)
-               return &pci_dma_ops;
-#endif
-#ifdef CONFIG_IBMVIO
-       if (dev->bus == &vio_bus_type)
-               return &vio_dma_ops;
-#endif
-       return NULL;
-}
-
-int dma_supported(struct device *dev, u64 mask)
-{
-       struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-       if (dma_ops)
-               return dma_ops->dma_supported(dev, mask);
-       BUG();
-       return 0;
-}
-EXPORT_SYMBOL(dma_supported);
-
-int dma_set_mask(struct device *dev, u64 dma_mask)
-{
-#ifdef CONFIG_PCI
-       if (dev->bus == &pci_bus_type)
-               return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
-#endif
-#ifdef CONFIG_IBMVIO
-       if (dev->bus == &vio_bus_type)
-               return -EIO;
-#endif /* CONFIG_IBMVIO */
-       BUG();
-       return 0;
-}
-EXPORT_SYMBOL(dma_set_mask);
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
-               dma_addr_t *dma_handle, gfp_t flag)
-{
-       struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-       if (dma_ops)
-               return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
-       BUG();
-       return NULL;
-}
-EXPORT_SYMBOL(dma_alloc_coherent);
-
-void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
-               dma_addr_t dma_handle)
-{
-       struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-       if (dma_ops)
-               dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
-       else
-               BUG();
-}
-EXPORT_SYMBOL(dma_free_coherent);
-
-dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, size_t size,
-               enum dma_data_direction direction)
-{
-       struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-       if (dma_ops)
-               return dma_ops->map_single(dev, cpu_addr, size, direction);
-       BUG();
-       return (dma_addr_t)0;
-}
-EXPORT_SYMBOL(dma_map_single);
-
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-               enum dma_data_direction direction)
-{
-       struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-       if (dma_ops)
-               dma_ops->unmap_single(dev, dma_addr, size, direction);
-       else
-               BUG();
-}
-EXPORT_SYMBOL(dma_unmap_single);
-
-dma_addr_t dma_map_page(struct device *dev, struct page *page,
-               unsigned long offset, size_t size,
-               enum dma_data_direction direction)
-{
-       struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-       if (dma_ops)
-               return dma_ops->map_single(dev,
-                               (page_address(page) + offset), size, direction);
-       BUG();
-       return (dma_addr_t)0;
-}
-EXPORT_SYMBOL(dma_map_page);
-
-void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-               enum dma_data_direction direction)
-{
-       struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-       if (dma_ops)
-               dma_ops->unmap_single(dev, dma_address, size, direction);
-       else
-               BUG();
-}
-EXPORT_SYMBOL(dma_unmap_page);
-
-int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-               enum dma_data_direction direction)
-{
-       struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-       if (dma_ops)
-               return dma_ops->map_sg(dev, sg, nents, direction);
-       BUG();
-       return 0;
-}
-EXPORT_SYMBOL(dma_map_sg);
-
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-               enum dma_data_direction direction)
-{
-       struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-       if (dma_ops)
-               dma_ops->unmap_sg(dev, sg, nhwentries, direction);
-       else
-               BUG();
-}
-EXPORT_SYMBOL(dma_unmap_sg);
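Every exported entry point in the deleted dma.c above follows the same shape: look up a struct dma_mapping_ops for the device's bus (PCI or iSeries/pSeries VIO), forward the call through that ops table, and BUG() if the device belongs to neither bus. A condensed, self-contained sketch of the dispatch pattern; the names below are illustrative stand-ins, not kernel APIs:

/* Per-bus operations dispatch: one generic entry point, a table of
 * function pointers chosen by the device's bus type. */
#include <stdio.h>

struct bus_type { const char *name; };
struct mapping_ops { int (*supported)(unsigned long long mask); };

static struct bus_type pci_bus = { "pci" };
static struct bus_type vio_bus = { "vio" };

static int pci_supported(unsigned long long mask) { return mask >= 0xffffffffULL; }
static int vio_supported(unsigned long long mask) { (void)mask; return 1; }

static struct mapping_ops pci_ops = { pci_supported };
static struct mapping_ops vio_ops = { vio_supported };

struct device { struct bus_type *bus; };

static struct mapping_ops *get_ops(struct device *dev)
{
        if (dev->bus == &pci_bus)
                return &pci_ops;        /* PCI devices take the PCI mapping path */
        if (dev->bus == &vio_bus)
                return &vio_ops;        /* virtual I/O devices take the VIO path */
        return NULL;                    /* unknown bus: the real code BUG()s     */
}

int generic_supported(struct device *dev, unsigned long long mask)
{
        struct mapping_ops *ops = get_ops(dev);

        return ops ? ops->supported(mask) : 0;
}

int main(void)
{
        struct device d = { &pci_bus };

        printf("64-bit DMA %s\n", generic_supported(&d, ~0ULL) ? "ok" : "no");
        return 0;
}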
diff --git a/arch/ppc64/kernel/head.S b/arch/ppc64/kernel/head.S
deleted file mode 100644 (file)
index 1c869ea..0000000
+++ /dev/null
@@ -1,2007 +0,0 @@
-/*
- *  arch/ppc64/kernel/head.S
- *
- *  PowerPC version
- *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
- *
- *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
- *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
- *  Adapted for Power Macintosh by Paul Mackerras.
- *  Low-level exception handlers and MMU support
- *  rewritten by Paul Mackerras.
- *    Copyright (C) 1996 Paul Mackerras.
- *
- *  Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
- *    Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
- *
- *  This file contains the low-level support and setup for the
- *  PowerPC-64 platform, including trap and interrupt dispatch.
- *
- *  This program is free software; you can redistribute it and/or
- *  modify it under the terms of the GNU General Public License
- *  as published by the Free Software Foundation; either version
- *  2 of the License, or (at your option) any later version.
- */
-
-#include <linux/config.h>
-#include <linux/threads.h>
-#include <asm/processor.h>
-#include <asm/page.h>
-#include <asm/mmu.h>
-#include <asm/ppc_asm.h>
-#include <asm/asm-offsets.h>
-#include <asm/bug.h>
-#include <asm/cputable.h>
-#include <asm/setup.h>
-#include <asm/hvcall.h>
-#include <asm/iseries/lpar_map.h>
-#include <asm/thread_info.h>
-
-#ifdef CONFIG_PPC_ISERIES
-#define DO_SOFT_DISABLE
-#endif
-
-/*
- * We layout physical memory as follows:
- * 0x0000 - 0x00ff : Secondary processor spin code
- * 0x0100 - 0x2fff : pSeries Interrupt prologs
- * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
- * 0x6000 - 0x6fff : Initial (CPU0) segment table
- * 0x7000 - 0x7fff : FWNMI data area
- * 0x8000 -        : Early init and support code
- */
-
-/*
- *   SPRG Usage
- *
- *   Register  Definition
- *
- *   SPRG0     reserved for hypervisor
- *   SPRG1     temp - used to save gpr
- *   SPRG2     temp - used to save gpr
- *   SPRG3     virt addr of paca
- */
-
-/*
- * Entering into this code we make the following assumptions:
- *  For pSeries:
- *   1. The MMU is off & open firmware is running in real mode.
- *   2. The kernel is entered at __start
- *
- *  For iSeries:
- *   1. The MMU is on (as it always is for iSeries)
- *   2. The kernel is entered at system_reset_iSeries
- */
-
-       .text
-       .globl  _stext
-_stext:
-#ifdef CONFIG_PPC_MULTIPLATFORM
-_GLOBAL(__start)
-       /* NOP this out unconditionally */
-BEGIN_FTR_SECTION
-       b       .__start_initialization_multiplatform
-END_FTR_SECTION(0, 1)
-#endif /* CONFIG_PPC_MULTIPLATFORM */
-
-       /* Catch branch to 0 in real mode */
-       trap
-
-#ifdef CONFIG_PPC_ISERIES
-       /*
-        * At offset 0x20, there is a pointer to iSeries LPAR data.
-        * This is required by the hypervisor
-        */
-       . = 0x20
-       .llong hvReleaseData-KERNELBASE
-
-       /*
-        * At offset 0x28 and 0x30 are offsets to the mschunks_map
-        * array (used by the iSeries LPAR debugger to do translation
-        * between physical addresses and absolute addresses) and
-        * to the pidhash table (also used by the debugger)
-        */
-       .llong mschunks_map-KERNELBASE
-       .llong 0        /* pidhash-KERNELBASE SFRXXX */
-
-       /* Offset 0x38 - Pointer to start of embedded System.map */
-       .globl  embedded_sysmap_start
-embedded_sysmap_start:
-       .llong  0
-       /* Offset 0x40 - Pointer to end of embedded System.map */
-       .globl  embedded_sysmap_end
-embedded_sysmap_end:
-       .llong  0
-
-#endif /* CONFIG_PPC_ISERIES */
-
-       /* Secondary processors spin on this value until it goes to 1. */
-       .globl  __secondary_hold_spinloop
-__secondary_hold_spinloop:
-       .llong  0x0
-
-       /* Secondary processors write this value with their cpu # */
-       /* after they enter the spin loop immediately below.      */
-       .globl  __secondary_hold_acknowledge
-__secondary_hold_acknowledge:
-       .llong  0x0
-
-       . = 0x60
-/*
- * The following code is used on pSeries to hold secondary processors
- * in a spin loop after they have been freed from OpenFirmware, but
- * before the bulk of the kernel has been relocated.  This code
- * is relocated to physical address 0x60 before prom_init is run.
- * All of it must fit below the first exception vector at 0x100.
- */
-_GLOBAL(__secondary_hold)
-       mfmsr   r24
-       ori     r24,r24,MSR_RI
-       mtmsrd  r24                     /* RI on */
-
-       /* Grab our linux cpu number */
-       mr      r24,r3
-
-       /* Tell the master cpu we're here */
-       /* Relocation is off & we are located at an address less */
-       /* than 0x100, so only need to grab low order offset.    */
-       std     r24,__secondary_hold_acknowledge@l(0)
-       sync
-
-       /* All secondary cpus wait here until told to start. */
-100:   ld      r4,__secondary_hold_spinloop@l(0)
-       cmpdi   0,r4,1
-       bne     100b
-
-#ifdef CONFIG_HMT
-       b       .hmt_init
-#else
-#ifdef CONFIG_SMP
-       mr      r3,r24
-       b       .pSeries_secondary_smp_init
-#else
-       BUG_OPCODE
-#endif
-#endif
-
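The spin loop just above is the hold pen for secondary CPUs: each one writes its Linux CPU number into __secondary_hold_acknowledge, executes a sync, and then polls __secondary_hold_spinloop until the boot CPU releases it by storing 1. A C rendering of that handshake, for illustration only (the kernel does this in assembly, as shown above, with real memory barriers):

/* Mirrors the control flow of __secondary_hold: publish our CPU
 * number, then spin until the master sets the release word to 1. */
static volatile unsigned long secondary_hold_spinloop;
static volatile unsigned long secondary_hold_acknowledge;

void secondary_hold(unsigned long cpu)
{
        secondary_hold_acknowledge = cpu;   /* tell the master CPU we arrived */
        __sync_synchronize();               /* stands in for the "sync" above */

        while (secondary_hold_spinloop != 1)
                ;                           /* wait until released            */

        /* ...then fall through into the secondary SMP init path. */
}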
-/* This value is used to mark exception frames on the stack. */
-       .section ".toc","aw"
-exception_marker:
-       .tc     ID_72656773_68657265[TC],0x7265677368657265
-       .text
-
-/*
- * The following macros define the code that appears as
- * the prologue to each of the exception handlers.  They
- * are split into two parts to allow a single kernel binary
- * to be used for pSeries and iSeries.
- * LOL.  One day... - paulus
- */
-
-/*
- * We make as much of the exception code common between native
- * exception handlers (including pSeries LPAR) and iSeries LPAR
- * implementations as possible.
- */
-
-/*
- * This is the start of the interrupt handlers for pSeries
- * This code runs with relocation off.
- */
-#define EX_R9          0
-#define EX_R10         8
-#define EX_R11         16
-#define EX_R12         24
-#define EX_R13         32
-#define EX_SRR0                40
-#define EX_DAR         48
-#define EX_DSISR       56
-#define EX_CCR         60
-#define EX_R3          64
-#define EX_LR          72
-
-#define EXCEPTION_PROLOG_PSERIES(area, label)                          \
-       mfspr   r13,SPRN_SPRG3;         /* get paca address into r13 */ \
-       std     r9,area+EX_R9(r13);     /* save r9 - r12 */             \
-       std     r10,area+EX_R10(r13);                                   \
-       std     r11,area+EX_R11(r13);                                   \
-       std     r12,area+EX_R12(r13);                                   \
-       mfspr   r9,SPRN_SPRG1;                                          \
-       std     r9,area+EX_R13(r13);                                    \
-       mfcr    r9;                                                     \
-       clrrdi  r12,r13,32;             /* get high part of &label */   \
-       mfmsr   r10;                                                    \
-       mfspr   r11,SPRN_SRR0;          /* save SRR0 */                 \
-       ori     r12,r12,(label)@l;      /* virt addr of handler */      \
-       ori     r10,r10,MSR_IR|MSR_DR|MSR_RI;                           \
-       mtspr   SPRN_SRR0,r12;                                          \
-       mfspr   r12,SPRN_SRR1;          /* and SRR1 */                  \
-       mtspr   SPRN_SRR1,r10;                                          \
-       rfid;                                                           \
-       b       .       /* prevent speculative execution */
-
-/*
- * This is the start of the interrupt handlers for iSeries
- * This code runs with relocation on.
- */
-#define EXCEPTION_PROLOG_ISERIES_1(area)                               \
-       mfspr   r13,SPRN_SPRG3;         /* get paca address into r13 */ \
-       std     r9,area+EX_R9(r13);     /* save r9 - r12 */             \
-       std     r10,area+EX_R10(r13);                                   \
-       std     r11,area+EX_R11(r13);                                   \
-       std     r12,area+EX_R12(r13);                                   \
-       mfspr   r9,SPRN_SPRG1;                                          \
-       std     r9,area+EX_R13(r13);                                    \
-       mfcr    r9
-
-#define EXCEPTION_PROLOG_ISERIES_2                                     \
-       mfmsr   r10;                                                    \
-       ld      r11,PACALPPACA+LPPACASRR0(r13);                         \
-       ld      r12,PACALPPACA+LPPACASRR1(r13);                         \
-       ori     r10,r10,MSR_RI;                                         \
-       mtmsrd  r10,1
-
-/*
- * The common exception prolog is used for all except a few exceptions
- * such as a segment miss on a kernel address.  We have to be prepared
- * to take another exception from the point where we first touch the
- * kernel stack onwards.
- *
- * On entry r13 points to the paca, r9-r13 are saved in the paca,
- * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
- * SRR1, and relocation is on.
- */
-#define EXCEPTION_PROLOG_COMMON(n, area)                                  \
-       andi.   r10,r12,MSR_PR;         /* See if coming from user      */ \
-       mr      r10,r1;                 /* Save r1                      */ \
-       subi    r1,r1,INT_FRAME_SIZE;   /* alloc frame on kernel stack  */ \
-       beq-    1f;                                                        \
-       ld      r1,PACAKSAVE(r13);      /* kernel stack to use          */ \
-1:     cmpdi   cr1,r1,0;               /* check if r1 is in userspace  */ \
-       bge-    cr1,bad_stack;          /* abort if it is               */ \
-       std     r9,_CCR(r1);            /* save CR in stackframe        */ \
-       std     r11,_NIP(r1);           /* save SRR0 in stackframe      */ \
-       std     r12,_MSR(r1);           /* save SRR1 in stackframe      */ \
-       std     r10,0(r1);              /* make stack chain pointer     */ \
-       std     r0,GPR0(r1);            /* save r0 in stackframe        */ \
-       std     r10,GPR1(r1);           /* save r1 in stackframe        */ \
-       std     r2,GPR2(r1);            /* save r2 in stackframe        */ \
-       SAVE_4GPRS(3, r1);              /* save r3 - r6 in stackframe   */ \
-       SAVE_2GPRS(7, r1);              /* save r7, r8 in stackframe    */ \
-       ld      r9,area+EX_R9(r13);     /* move r9, r10 to stackframe   */ \
-       ld      r10,area+EX_R10(r13);                                      \
-       std     r9,GPR9(r1);                                               \
-       std     r10,GPR10(r1);                                             \
-       ld      r9,area+EX_R11(r13);    /* move r11 - r13 to stackframe */ \
-       ld      r10,area+EX_R12(r13);                                      \
-       ld      r11,area+EX_R13(r13);                                      \
-       std     r9,GPR11(r1);                                              \
-       std     r10,GPR12(r1);                                             \
-       std     r11,GPR13(r1);                                             \
-       ld      r2,PACATOC(r13);        /* get kernel TOC into r2       */ \
-       mflr    r9;                     /* save LR in stackframe        */ \
-       std     r9,_LINK(r1);                                              \
-       mfctr   r10;                    /* save CTR in stackframe       */ \
-       std     r10,_CTR(r1);                                              \
-       mfspr   r11,SPRN_XER;           /* save XER in stackframe       */ \
-       std     r11,_XER(r1);                                              \
-       li      r9,(n)+1;                                                  \
-       std     r9,_TRAP(r1);           /* set trap number              */ \
-       li      r10,0;                                                     \
-       ld      r11,exception_marker@toc(r2);                              \
-       std     r10,RESULT(r1);         /* clear regs->result           */ \
-       std     r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame      */
-
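-/*
- * On exit from EXCEPTION_PROLOG_COMMON, r1 points to a freshly allocated
- * INT_FRAME_SIZE exception frame on the kernel stack holding GPR0-GPR13,
- * NIP (SRR0), MSR (SRR1), CR, LR, CTR, XER, the trap number and the
- * exception marker, and r2 holds the kernel TOC.
- */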
-/*
- * Exception vectors.
- */
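-/*
- * STD_EXCEPTION_PSERIES pins each handler at its architected vector
- * offset with ". = n" and its prolog rfid's into label##_common with
- * relocation on; the iSeries variants branch to label##_common
- * directly, since iSeries already runs with relocation on.
- */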
-#define STD_EXCEPTION_PSERIES(n, label)                        \
-       . = n;                                          \
-       .globl label##_pSeries;                         \
-label##_pSeries:                                       \
-       HMT_MEDIUM;                                     \
-       mtspr   SPRN_SPRG1,r13;         /* save r13 */  \
-       RUNLATCH_ON(r13);                               \
-       EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
-
-#define STD_EXCEPTION_ISERIES(n, label, area)          \
-       .globl label##_iSeries;                         \
-label##_iSeries:                                       \
-       HMT_MEDIUM;                                     \
-       mtspr   SPRN_SPRG1,r13;         /* save r13 */  \
-       RUNLATCH_ON(r13);                               \
-       EXCEPTION_PROLOG_ISERIES_1(area);               \
-       EXCEPTION_PROLOG_ISERIES_2;                     \
-       b       label##_common
-
-#define MASKABLE_EXCEPTION_ISERIES(n, label)                           \
-       .globl label##_iSeries;                                         \
-label##_iSeries:                                                       \
-       HMT_MEDIUM;                                                     \
-       mtspr   SPRN_SPRG1,r13;         /* save r13 */                  \
-       RUNLATCH_ON(r13);                                               \
-       EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN);                         \
-       lbz     r10,PACAPROCENABLED(r13);                               \
-       cmpwi   0,r10,0;                                                \
-       beq-    label##_iSeries_masked;                                 \
-       EXCEPTION_PROLOG_ISERIES_2;                                     \
-       b       label##_common;                                         \
-
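-/*
- * With DO_SOFT_DISABLE (iSeries), DISABLE_INTS records the previous
- * soft-enable state in the SOFTE slot of the frame, clears
- * PACAPROCENABLED and hard-enables MSR_EE; ENABLE_INTS does the same
- * without clearing the soft-enable flag.  In the hard enable/disable
- * case, DISABLE_INTS is a no-op and ENABLE_INTS copies the MSR_EE bit
- * from the interrupted context's saved MSR.
- */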
-#ifdef DO_SOFT_DISABLE
-#define DISABLE_INTS                           \
-       lbz     r10,PACAPROCENABLED(r13);       \
-       li      r11,0;                          \
-       std     r10,SOFTE(r1);                  \
-       mfmsr   r10;                            \
-       stb     r11,PACAPROCENABLED(r13);       \
-       ori     r10,r10,MSR_EE;                 \
-       mtmsrd  r10,1
-
-#define ENABLE_INTS                            \
-       lbz     r10,PACAPROCENABLED(r13);       \
-       mfmsr   r11;                            \
-       std     r10,SOFTE(r1);                  \
-       ori     r11,r11,MSR_EE;                 \
-       mtmsrd  r11,1
-
-#else  /* hard enable/disable interrupts */
-#define DISABLE_INTS
-
-#define ENABLE_INTS                            \
-       ld      r12,_MSR(r1);                   \
-       mfmsr   r11;                            \
-       rlwimi  r11,r12,0,MSR_EE;               \
-       mtmsrd  r11,1
-
-#endif
-
-#define STD_EXCEPTION_COMMON(trap, label, hdlr)                \
-       .align  7;                                      \
-       .globl label##_common;                          \
-label##_common:                                                \
-       EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);      \
-       DISABLE_INTS;                                   \
-       bl      .save_nvgprs;                           \
-       addi    r3,r1,STACK_FRAME_OVERHEAD;             \
-       bl      hdlr;                                   \
-       b       .ret_from_except
-
-#define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr)   \
-       .align  7;                                      \
-       .globl label##_common;                          \
-label##_common:                                                \
-       EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);      \
-       DISABLE_INTS;                                   \
-       addi    r3,r1,STACK_FRAME_OVERHEAD;             \
-       bl      hdlr;                                   \
-       b       .ret_from_except_lite
-
-/*
- * Start of pSeries system interrupt routines
- */
-       . = 0x100
-       .globl __start_interrupts
-__start_interrupts:
-
-       STD_EXCEPTION_PSERIES(0x100, system_reset)
-
-       . = 0x200
-_machine_check_pSeries:
-       HMT_MEDIUM
-       mtspr   SPRN_SPRG1,r13          /* save r13 */
-       RUNLATCH_ON(r13)
-       EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
-
-       . = 0x300
-       .globl data_access_pSeries
-data_access_pSeries:
-       HMT_MEDIUM
-       mtspr   SPRN_SPRG1,r13
-BEGIN_FTR_SECTION
-       mtspr   SPRN_SPRG2,r12
-       mfspr   r13,SPRN_DAR
-       mfspr   r12,SPRN_DSISR
-       srdi    r13,r13,60
-       rlwimi  r13,r12,16,0x20
-       mfcr    r12
-       cmpwi   r13,0x2c
-       beq     .do_stab_bolted_pSeries
-       mtcrf   0x80,r12
-       mfspr   r12,SPRN_SPRG2
-END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
-       EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
-
-       . = 0x380
-       .globl data_access_slb_pSeries
-data_access_slb_pSeries:
-       HMT_MEDIUM
-       mtspr   SPRN_SPRG1,r13
-       RUNLATCH_ON(r13)
-       mfspr   r13,SPRN_SPRG3          /* get paca address into r13 */
-       std     r3,PACA_EXSLB+EX_R3(r13)
-       mfspr   r3,SPRN_DAR
-       std     r9,PACA_EXSLB+EX_R9(r13)        /* save r9 - r12 */
-       mfcr    r9
-#ifdef __DISABLED__
-       /* Keep that around for when we re-implement dynamic VSIDs */
-       cmpdi   r3,0
-       bge     slb_miss_user_pseries
-#endif /* __DISABLED__ */
-       std     r10,PACA_EXSLB+EX_R10(r13)
-       std     r11,PACA_EXSLB+EX_R11(r13)
-       std     r12,PACA_EXSLB+EX_R12(r13)
-       mfspr   r10,SPRN_SPRG1
-       std     r10,PACA_EXSLB+EX_R13(r13)
-       mfspr   r12,SPRN_SRR1           /* and SRR1 */
-       b       .slb_miss_realmode      /* Rel. branch works in real mode */
-
-       STD_EXCEPTION_PSERIES(0x400, instruction_access)
-
-       . = 0x480
-       .globl instruction_access_slb_pSeries
-instruction_access_slb_pSeries:
-       HMT_MEDIUM
-       mtspr   SPRN_SPRG1,r13
-       RUNLATCH_ON(r13)
-       mfspr   r13,SPRN_SPRG3          /* get paca address into r13 */
-       std     r3,PACA_EXSLB+EX_R3(r13)
-       mfspr   r3,SPRN_SRR0            /* SRR0 is faulting address */
-       std     r9,PACA_EXSLB+EX_R9(r13)        /* save r9 - r12 */
-       mfcr    r9
-#ifdef __DISABLED__
-       /* Keep that around for when we re-implement dynamic VSIDs */
-       cmpdi   r3,0
-       bge     slb_miss_user_pseries
-#endif /* __DISABLED__ */
-       std     r10,PACA_EXSLB+EX_R10(r13)
-       std     r11,PACA_EXSLB+EX_R11(r13)
-       std     r12,PACA_EXSLB+EX_R12(r13)
-       mfspr   r10,SPRN_SPRG1
-       std     r10,PACA_EXSLB+EX_R13(r13)
-       mfspr   r12,SPRN_SRR1           /* and SRR1 */
-       b       .slb_miss_realmode      /* Rel. branch works in real mode */
-
-       STD_EXCEPTION_PSERIES(0x500, hardware_interrupt)
-       STD_EXCEPTION_PSERIES(0x600, alignment)
-       STD_EXCEPTION_PSERIES(0x700, program_check)
-       STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
-       STD_EXCEPTION_PSERIES(0x900, decrementer)
-       STD_EXCEPTION_PSERIES(0xa00, trap_0a)
-       STD_EXCEPTION_PSERIES(0xb00, trap_0b)
-
-       . = 0xc00
-       .globl  system_call_pSeries
-system_call_pSeries:
-       HMT_MEDIUM
-       RUNLATCH_ON(r9)
-       mr      r9,r13
-       mfmsr   r10
-       mfspr   r13,SPRN_SPRG3
-       mfspr   r11,SPRN_SRR0
-       clrrdi  r12,r13,32
-       oris    r12,r12,system_call_common@h
-       ori     r12,r12,system_call_common@l
-       mtspr   SPRN_SRR0,r12
-       ori     r10,r10,MSR_IR|MSR_DR|MSR_RI
-       mfspr   r12,SPRN_SRR1
-       mtspr   SPRN_SRR1,r10
-       rfid
-       b       .       /* prevent speculative execution */
-
-       STD_EXCEPTION_PSERIES(0xd00, single_step)
-       STD_EXCEPTION_PSERIES(0xe00, trap_0e)
-
-       /* We need to deal with the Altivec unavailable exception
-        * here which is at 0xf20, thus in the middle of the
-        * prolog code of the PerformanceMonitor one. A little
-        * trickery is thus necessary
-        */
-       . = 0xf00
-       b       performance_monitor_pSeries
-
-       STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable)
-
-       STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
-       STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
-
-       . = 0x3000
-
-/*** pSeries interrupt support ***/
-
-       /* moved from 0xf00 */
-       STD_EXCEPTION_PSERIES(., performance_monitor)
-
-       .align  7
-_GLOBAL(do_stab_bolted_pSeries)
-       mtcrf   0x80,r12
-       mfspr   r12,SPRN_SPRG2
-       EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
-
-/*
- * We have some room here; we use it to put
- * the pSeries SLB miss user trampoline code so it's reasonably
- * far away from slb_miss_user_common to avoid problems with rfid.
- *
- * This is used when the SLB miss handler has to go virtual,
- * which doesn't happen at the moment but will once we re-implement
- * dynamic VSIDs for shared page tables.
- */
-#ifdef __DISABLED__
-slb_miss_user_pseries:
-       std     r10,PACA_EXGEN+EX_R10(r13)
-       std     r11,PACA_EXGEN+EX_R11(r13)
-       std     r12,PACA_EXGEN+EX_R12(r13)
-       mfspr   r10,SPRG1
-       ld      r11,PACA_EXSLB+EX_R9(r13)
-       ld      r12,PACA_EXSLB+EX_R3(r13)
-       std     r10,PACA_EXGEN+EX_R13(r13)
-       std     r11,PACA_EXGEN+EX_R9(r13)
-       std     r12,PACA_EXGEN+EX_R3(r13)
-       clrrdi  r12,r13,32
-       mfmsr   r10
-       mfspr   r11,SRR0                        /* save SRR0 */
-       ori     r12,r12,slb_miss_user_common@l  /* virt addr of handler */
-       ori     r10,r10,MSR_IR|MSR_DR|MSR_RI
-       mtspr   SRR0,r12
-       mfspr   r12,SRR1                        /* and SRR1 */
-       mtspr   SRR1,r10
-       rfid
-       b       .                               /* prevent spec. execution */
-#endif /* __DISABLED__ */
-
-/*
- * Vectors for the FWNMI option.  Share common code.
- */
-       .globl system_reset_fwnmi
-system_reset_fwnmi:
-       HMT_MEDIUM
-       mtspr   SPRN_SPRG1,r13          /* save r13 */
-       RUNLATCH_ON(r13)
-       EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
-
-       .globl machine_check_fwnmi
-machine_check_fwnmi:
-       HMT_MEDIUM
-       mtspr   SPRN_SPRG1,r13          /* save r13 */
-       RUNLATCH_ON(r13)
-       EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
-
-#ifdef CONFIG_PPC_ISERIES
-/***  ISeries-LPAR interrupt handlers ***/
-
-       STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC)
-
-       .globl data_access_iSeries
-data_access_iSeries:
-       mtspr   SPRN_SPRG1,r13
-BEGIN_FTR_SECTION
-       mtspr   SPRN_SPRG2,r12
-       mfspr   r13,SPRN_DAR
-       mfspr   r12,SPRN_DSISR
-       srdi    r13,r13,60
-       rlwimi  r13,r12,16,0x20
-       mfcr    r12
-       cmpwi   r13,0x2c
-       beq     .do_stab_bolted_iSeries
-       mtcrf   0x80,r12
-       mfspr   r12,SPRN_SPRG2
-END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
-       EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
-       EXCEPTION_PROLOG_ISERIES_2
-       b       data_access_common
-
-.do_stab_bolted_iSeries:
-       mtcrf   0x80,r12
-       mfspr   r12,SPRN_SPRG2
-       EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
-       EXCEPTION_PROLOG_ISERIES_2
-       b       .do_stab_bolted
-
-       .globl  data_access_slb_iSeries
-data_access_slb_iSeries:
-       mtspr   SPRN_SPRG1,r13          /* save r13 */
-       mfspr   r13,SPRN_SPRG3          /* get paca address into r13 */
-       std     r3,PACA_EXSLB+EX_R3(r13)
-       mfspr   r3,SPRN_DAR
-       std     r9,PACA_EXSLB+EX_R9(r13)
-       mfcr    r9
-#ifdef __DISABLED__
-       cmpdi   r3,0
-       bge     slb_miss_user_iseries
-#endif
-       std     r10,PACA_EXSLB+EX_R10(r13)
-       std     r11,PACA_EXSLB+EX_R11(r13)
-       std     r12,PACA_EXSLB+EX_R12(r13)
-       mfspr   r10,SPRN_SPRG1
-       std     r10,PACA_EXSLB+EX_R13(r13)
-       ld      r12,PACALPPACA+LPPACASRR1(r13);
-       b       .slb_miss_realmode
-
-       STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)
-
-       .globl  instruction_access_slb_iSeries
-instruction_access_slb_iSeries:
-       mtspr   SPRN_SPRG1,r13          /* save r13 */
-       mfspr   r13,SPRN_SPRG3          /* get paca address into r13 */
-       std     r3,PACA_EXSLB+EX_R3(r13)
-       ld      r3,PACALPPACA+LPPACASRR0(r13)   /* get SRR0 value */
-       std     r9,PACA_EXSLB+EX_R9(r13)
-       mfcr    r9
-#ifdef __DISABLED__
-       cmpdi   r3,0
-       bge     .slb_miss_user_iseries
-#endif
-       std     r10,PACA_EXSLB+EX_R10(r13)
-       std     r11,PACA_EXSLB+EX_R11(r13)
-       std     r12,PACA_EXSLB+EX_R12(r13)
-       mfspr   r10,SPRN_SPRG1
-       std     r10,PACA_EXSLB+EX_R13(r13)
-       ld      r12,PACALPPACA+LPPACASRR1(r13);
-       b       .slb_miss_realmode
-
-#ifdef __DISABLED__
-slb_miss_user_iseries:
-       std     r10,PACA_EXGEN+EX_R10(r13)
-       std     r11,PACA_EXGEN+EX_R11(r13)
-       std     r12,PACA_EXGEN+EX_R12(r13)
-       mfspr   r10,SPRG1
-       ld      r11,PACA_EXSLB+EX_R9(r13)
-       ld      r12,PACA_EXSLB+EX_R3(r13)
-       std     r10,PACA_EXGEN+EX_R13(r13)
-       std     r11,PACA_EXGEN+EX_R9(r13)
-       std     r12,PACA_EXGEN+EX_R3(r13)
-       EXCEPTION_PROLOG_ISERIES_2
-       b       slb_miss_user_common
-#endif
-
-       MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt)
-       STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN)
-       STD_EXCEPTION_ISERIES(0x700, program_check, PACA_EXGEN)
-       STD_EXCEPTION_ISERIES(0x800, fp_unavailable, PACA_EXGEN)
-       MASKABLE_EXCEPTION_ISERIES(0x900, decrementer)
-       STD_EXCEPTION_ISERIES(0xa00, trap_0a, PACA_EXGEN)
-       STD_EXCEPTION_ISERIES(0xb00, trap_0b, PACA_EXGEN)
-
-       .globl  system_call_iSeries
-system_call_iSeries:
-       mr      r9,r13
-       mfspr   r13,SPRN_SPRG3
-       EXCEPTION_PROLOG_ISERIES_2
-       b       system_call_common
-
-       STD_EXCEPTION_ISERIES( 0xd00, single_step, PACA_EXGEN)
-       STD_EXCEPTION_ISERIES( 0xe00, trap_0e, PACA_EXGEN)
-       STD_EXCEPTION_ISERIES( 0xf00, performance_monitor, PACA_EXGEN)
-
-       .globl system_reset_iSeries
-system_reset_iSeries:
-       mfspr   r13,SPRN_SPRG3          /* Get paca address */
-       mfmsr   r24
-       ori     r24,r24,MSR_RI
-       mtmsrd  r24                     /* RI on */
-       lhz     r24,PACAPACAINDEX(r13)  /* Get processor # */
-       cmpwi   0,r24,0                 /* Are we processor 0? */
-       beq     .__start_initialization_iSeries /* Start up the first processor */
-       mfspr   r4,SPRN_CTRLF
-       li      r5,CTRL_RUNLATCH        /* Turn off the run light */
-       andc    r4,r4,r5
-       mtspr   SPRN_CTRLT,r4
-
-1:
-       HMT_LOW
-#ifdef CONFIG_SMP
-       lbz     r23,PACAPROCSTART(r13)  /* Test if this processor
-                                        * should start */
-       sync
-       LOADADDR(r3,current_set)
-       sldi    r28,r24,3               /* get current_set[cpu#] */
-       ldx     r3,r3,r28
-       addi    r1,r3,THREAD_SIZE
-       subi    r1,r1,STACK_FRAME_OVERHEAD
-
-       cmpwi   0,r23,0
-       beq     iSeries_secondary_smp_loop      /* Loop until told to go */
-       bne     .__secondary_start              /* Told to go, so start up */
-iSeries_secondary_smp_loop:
-       /* Let the Hypervisor know we are alive */
-       /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
-       lis     r3,0x8002
-       rldicr  r3,r3,32,15             /* r3 = (r3 << 32) & 0xffff000000000000 */
-#else /* CONFIG_SMP */
-       /* Yield the processor.  This is required for non-SMP kernels
-               which are running on multi-threaded machines. */
-       lis     r3,0x8000
-       rldicr  r3,r3,32,15             /* r3 = (r3 << 32) & 0xffff000000000000 */
-       addi    r3,r3,18                /* r3 = 0x8000000000000012 which is "yield" */
-       li      r4,0                    /* "yield timed" */
-       li      r5,-1                   /* "yield forever" */
-#endif /* CONFIG_SMP */
-       li      r0,-1                   /* r0=-1 indicates a Hypervisor call */
-       sc                              /* Invoke the hypervisor via a system call */
-       mfspr   r13,SPRN_SPRG3          /* Put r13 back ???? */
-       b       1b                      /* If SMP not configured, secondaries
-                                        * loop forever */
-
-       .globl decrementer_iSeries_masked
-decrementer_iSeries_masked:
-       li      r11,1
-       stb     r11,PACALPPACA+LPPACADECRINT(r13)
-       lwz     r12,PACADEFAULTDECR(r13)
-       mtspr   SPRN_DEC,r12
-       /* fall through */
-
-       .globl hardware_interrupt_iSeries_masked
-hardware_interrupt_iSeries_masked:
-       mtcrf   0x80,r9         /* Restore regs */
-       ld      r11,PACALPPACA+LPPACASRR0(r13)
-       ld      r12,PACALPPACA+LPPACASRR1(r13)
-       mtspr   SPRN_SRR0,r11
-       mtspr   SPRN_SRR1,r12
-       ld      r9,PACA_EXGEN+EX_R9(r13)
-       ld      r10,PACA_EXGEN+EX_R10(r13)
-       ld      r11,PACA_EXGEN+EX_R11(r13)
-       ld      r12,PACA_EXGEN+EX_R12(r13)
-       ld      r13,PACA_EXGEN+EX_R13(r13)
-       rfid
-       b       .       /* prevent speculative execution */
-#endif /* CONFIG_PPC_ISERIES */
-
-/*** Common interrupt handlers ***/
-
-       STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
-
-       /*
-        * Machine check is different because we use a different
-        * save area: PACA_EXMC instead of PACA_EXGEN.
-        */
-       .align  7
-       .globl machine_check_common
-machine_check_common:
-       EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
-       DISABLE_INTS
-       bl      .save_nvgprs
-       addi    r3,r1,STACK_FRAME_OVERHEAD
-       bl      .machine_check_exception
-       b       .ret_from_except
-
-       STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
-       STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
-       STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
-       STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
-       STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
-       STD_EXCEPTION_COMMON(0xf00, performance_monitor, .performance_monitor_exception)
-       STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
-#ifdef CONFIG_ALTIVEC
-       STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
-#else
-       STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
-#endif
-
-/*
- * Here we have detected that the kernel stack pointer is bad.
- * R9 contains the saved CR, r13 points to the paca,
- * r10 contains the (bad) kernel stack pointer,
- * r11 and r12 contain the saved SRR0 and SRR1.
- * We switch to using an emergency stack, save the registers there,
- * and call kernel_bad_stack(), which panics.
- */
-bad_stack:
-       ld      r1,PACAEMERGSP(r13)
-       subi    r1,r1,64+INT_FRAME_SIZE
-       std     r9,_CCR(r1)
-       std     r10,GPR1(r1)
-       std     r11,_NIP(r1)
-       std     r12,_MSR(r1)
-       mfspr   r11,SPRN_DAR
-       mfspr   r12,SPRN_DSISR
-       std     r11,_DAR(r1)
-       std     r12,_DSISR(r1)
-       mflr    r10
-       mfctr   r11
-       mfxer   r12
-       std     r10,_LINK(r1)
-       std     r11,_CTR(r1)
-       std     r12,_XER(r1)
-       SAVE_GPR(0,r1)
-       SAVE_GPR(2,r1)
-       SAVE_4GPRS(3,r1)
-       SAVE_2GPRS(7,r1)
-       SAVE_10GPRS(12,r1)
-       SAVE_10GPRS(22,r1)
-       addi    r11,r1,INT_FRAME_SIZE
-       std     r11,0(r1)
-       li      r12,0
-       std     r12,0(r11)
-       ld      r2,PACATOC(r13)
-1:     addi    r3,r1,STACK_FRAME_OVERHEAD
-       bl      .kernel_bad_stack
-       b       1b
-
-/*
- * Return from an exception with minimal checks.
- * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
- * If interrupts have been enabled, or anything has been
- * done that might have changed the scheduling status of
- * any task or sent any task a signal, you should use
- * ret_from_except or ret_from_except_lite instead of this.
- */
-       .globl  fast_exception_return
-fast_exception_return:
-       ld      r12,_MSR(r1)
-       ld      r11,_NIP(r1)
-       andi.   r3,r12,MSR_RI           /* check if RI is set */
-       beq-    unrecov_fer
-       ld      r3,_CCR(r1)
-       ld      r4,_LINK(r1)
-       ld      r5,_CTR(r1)
-       ld      r6,_XER(r1)
-       mtcr    r3
-       mtlr    r4
-       mtctr   r5
-       mtxer   r6
-       REST_GPR(0, r1)
-       REST_8GPRS(2, r1)
-
-       mfmsr   r10
-       clrrdi  r10,r10,2               /* clear RI (LE is 0 already) */
-       mtmsrd  r10,1
-
-       mtspr   SPRN_SRR1,r12
-       mtspr   SPRN_SRR0,r11
-       REST_4GPRS(10, r1)
-       ld      r1,GPR1(r1)
-       rfid
-       b       .       /* prevent speculative execution */
-
-unrecov_fer:
-       bl      .save_nvgprs
-1:     addi    r3,r1,STACK_FRAME_OVERHEAD
-       bl      .unrecoverable_exception
-       b       1b
-
-/*
- * Here r13 points to the paca, r9 contains the saved CR,
- * SRR0 and SRR1 are saved in r11 and r12,
- * r9 - r13 are saved in paca->exgen.
- */
-       .align  7
-       .globl data_access_common
-data_access_common:
-       RUNLATCH_ON(r10)                /* It won't fit in the 0x300 handler */
-       mfspr   r10,SPRN_DAR
-       std     r10,PACA_EXGEN+EX_DAR(r13)
-       mfspr   r10,SPRN_DSISR
-       stw     r10,PACA_EXGEN+EX_DSISR(r13)
-       EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
-       ld      r3,PACA_EXGEN+EX_DAR(r13)
-       lwz     r4,PACA_EXGEN+EX_DSISR(r13)
-       li      r5,0x300
-       b       .do_hash_page           /* Try to handle as hpte fault */
-
-       .align  7
-       .globl instruction_access_common
-instruction_access_common:
-       EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
-       ld      r3,_NIP(r1)
-       andis.  r4,r12,0x5820
-       li      r5,0x400
-       b       .do_hash_page           /* Try to handle as hpte fault */
-
-/*
- * Here is the common SLB miss user handler that is used when going to
- * virtual mode for SLB misses; it is currently not used.
- */
-#ifdef __DISABLED__
-       .align  7
-       .globl  slb_miss_user_common
-slb_miss_user_common:
-       mflr    r10
-       std     r3,PACA_EXGEN+EX_DAR(r13)
-       stw     r9,PACA_EXGEN+EX_CCR(r13)
-       std     r10,PACA_EXGEN+EX_LR(r13)
-       std     r11,PACA_EXGEN+EX_SRR0(r13)
-       bl      .slb_allocate_user
-
-       ld      r10,PACA_EXGEN+EX_LR(r13)
-       ld      r3,PACA_EXGEN+EX_R3(r13)
-       lwz     r9,PACA_EXGEN+EX_CCR(r13)
-       ld      r11,PACA_EXGEN+EX_SRR0(r13)
-       mtlr    r10
-       beq-    slb_miss_fault
-
-       andi.   r10,r12,MSR_RI          /* check for unrecoverable exception */
-       beq-    unrecov_user_slb
-       mfmsr   r10
-
-.machine push
-.machine "power4"
-       mtcrf   0x80,r9
-.machine pop
-
-       clrrdi  r10,r10,2               /* clear RI before setting SRR0/1 */
-       mtmsrd  r10,1
-
-       mtspr   SRR0,r11
-       mtspr   SRR1,r12
-
-       ld      r9,PACA_EXGEN+EX_R9(r13)
-       ld      r10,PACA_EXGEN+EX_R10(r13)
-       ld      r11,PACA_EXGEN+EX_R11(r13)
-       ld      r12,PACA_EXGEN+EX_R12(r13)
-       ld      r13,PACA_EXGEN+EX_R13(r13)
-       rfid
-       b       .
-
-slb_miss_fault:
-       EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
-       ld      r4,PACA_EXGEN+EX_DAR(r13)
-       li      r5,0
-       std     r4,_DAR(r1)
-       std     r5,_DSISR(r1)
-       b       .handle_page_fault
-
-unrecov_user_slb:
-       EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
-       DISABLE_INTS
-       bl      .save_nvgprs
-1:     addi    r3,r1,STACK_FRAME_OVERHEAD
-       bl      .unrecoverable_exception
-       b       1b
-
-#endif /* __DISABLED__ */
-
-
-/*
- * r13 points to the PACA, r9 contains the saved CR,
- * r12 contains the saved SRR1, SRR0 is still ready for return
- * r3 has the faulting address
- * r9 - r13 are saved in paca->exslb.
- * r3 is saved in paca->slb_r3
- * We assume we aren't going to take any exceptions during this procedure.
- */
-_GLOBAL(slb_miss_realmode)
-       mflr    r10
-
-       stw     r9,PACA_EXSLB+EX_CCR(r13)       /* save CR in exc. frame */
-       std     r10,PACA_EXSLB+EX_LR(r13)       /* save LR */
-
-       bl      .slb_allocate_realmode
-
-       /* All done -- return from exception. */
-
-       ld      r10,PACA_EXSLB+EX_LR(r13)
-       ld      r3,PACA_EXSLB+EX_R3(r13)
-       lwz     r9,PACA_EXSLB+EX_CCR(r13)       /* get saved CR */
-#ifdef CONFIG_PPC_ISERIES
-       ld      r11,PACALPPACA+LPPACASRR0(r13)  /* get SRR0 value */
-#endif /* CONFIG_PPC_ISERIES */
-
-       mtlr    r10
-
-       andi.   r10,r12,MSR_RI  /* check for unrecoverable exception */
-       beq-    unrecov_slb
-
-.machine       push
-.machine       "power4"
-       mtcrf   0x80,r9
-       mtcrf   0x01,r9         /* slb_allocate uses cr0 and cr7 */
-.machine       pop
-
-#ifdef CONFIG_PPC_ISERIES
-       mtspr   SPRN_SRR0,r11
-       mtspr   SPRN_SRR1,r12
-#endif /* CONFIG_PPC_ISERIES */
-       ld      r9,PACA_EXSLB+EX_R9(r13)
-       ld      r10,PACA_EXSLB+EX_R10(r13)
-       ld      r11,PACA_EXSLB+EX_R11(r13)
-       ld      r12,PACA_EXSLB+EX_R12(r13)
-       ld      r13,PACA_EXSLB+EX_R13(r13)
-       rfid
-       b       .       /* prevent speculative execution */
-
-unrecov_slb:
-       EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
-       DISABLE_INTS
-       bl      .save_nvgprs
-1:     addi    r3,r1,STACK_FRAME_OVERHEAD
-       bl      .unrecoverable_exception
-       b       1b
-
-       .align  7
-       .globl hardware_interrupt_common
-       .globl hardware_interrupt_entry
-hardware_interrupt_common:
-       EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
-hardware_interrupt_entry:
-       DISABLE_INTS
-       addi    r3,r1,STACK_FRAME_OVERHEAD
-       bl      .do_IRQ
-       b       .ret_from_except_lite
-
-       .align  7
-       .globl alignment_common
-alignment_common:
-       mfspr   r10,SPRN_DAR
-       std     r10,PACA_EXGEN+EX_DAR(r13)
-       mfspr   r10,SPRN_DSISR
-       stw     r10,PACA_EXGEN+EX_DSISR(r13)
-       EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
-       ld      r3,PACA_EXGEN+EX_DAR(r13)
-       lwz     r4,PACA_EXGEN+EX_DSISR(r13)
-       std     r3,_DAR(r1)
-       std     r4,_DSISR(r1)
-       bl      .save_nvgprs
-       addi    r3,r1,STACK_FRAME_OVERHEAD
-       ENABLE_INTS
-       bl      .alignment_exception
-       b       .ret_from_except
-
-       .align  7
-       .globl program_check_common
-program_check_common:
-       EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
-       bl      .save_nvgprs
-       addi    r3,r1,STACK_FRAME_OVERHEAD
-       ENABLE_INTS
-       bl      .program_check_exception
-       b       .ret_from_except
-
-       .align  7
-       .globl fp_unavailable_common
-fp_unavailable_common:
-       EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
-       bne     .load_up_fpu            /* if from user, just load it up */
-       bl      .save_nvgprs
-       addi    r3,r1,STACK_FRAME_OVERHEAD
-       ENABLE_INTS
-       bl      .kernel_fp_unavailable_exception
-       BUG_OPCODE
-
-       .align  7
-       .globl altivec_unavailable_common
-altivec_unavailable_common:
-       EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
-#ifdef CONFIG_ALTIVEC
-BEGIN_FTR_SECTION
-       bne     .load_up_altivec        /* if from user, just load it up */
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-#endif
-       bl      .save_nvgprs
-       addi    r3,r1,STACK_FRAME_OVERHEAD
-       ENABLE_INTS
-       bl      .altivec_unavailable_exception
-       b       .ret_from_except
-
-#ifdef CONFIG_ALTIVEC
-/*
- * load_up_altivec(unused, unused, tsk)
- * Disable VMX for the task which had it previously,
- * and save its vector registers in its thread_struct.
- * Enables the VMX for use in the kernel on return.
- * On SMP we know the VMX is free, since we give it up every
- * switch (ie, no lazy save of the vector registers).
- * On entry: r13 == 'current' && last_task_used_altivec != 'current'
- */
-_STATIC(load_up_altivec)
-       mfmsr   r5                      /* grab the current MSR */
-       oris    r5,r5,MSR_VEC@h
-       mtmsrd  r5                      /* enable use of VMX now */
-       isync
-
-/*
- * For SMP, we don't do lazy VMX switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another.  Instead we call giveup_altivec in switch_to.
- * VRSAVE isn't dealt with here, that is done in the normal context
- * switch code. Note that we could rely on vrsave value to eventually
- * avoid saving all of the VREGs here...
- */
-#ifndef CONFIG_SMP
-       ld      r3,last_task_used_altivec@got(r2)
-       ld      r4,0(r3)
-       cmpdi   0,r4,0
-       beq     1f
-       /* Save VMX state to last_task_used_altivec's THREAD struct */
-       addi    r4,r4,THREAD
-       SAVE_32VRS(0,r5,r4)
-       mfvscr  vr0
-       li      r10,THREAD_VSCR
-       stvx    vr0,r10,r4
-       /* Disable VMX for last_task_used_altivec */
-       ld      r5,PT_REGS(r4)
-       ld      r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-       lis     r6,MSR_VEC@h
-       andc    r4,r4,r6
-       std     r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
-       /* Hack: if we get an altivec unavailable trap with VRSAVE
-        * set to all zeros, we assume this is a broken application
-        * that fails to set it properly, and thus we switch it to
-        * all 1's
-        */
-       mfspr   r4,SPRN_VRSAVE
-       cmpdi   0,r4,0
-       bne+    1f
-       li      r4,-1
-       mtspr   SPRN_VRSAVE,r4
-1:
-       /* enable use of VMX after return */
-       ld      r4,PACACURRENT(r13)
-       addi    r5,r4,THREAD            /* Get THREAD */
-       oris    r12,r12,MSR_VEC@h
-       std     r12,_MSR(r1)
-       li      r4,1
-       li      r10,THREAD_VSCR
-       stw     r4,THREAD_USED_VR(r5)
-       lvx     vr0,r10,r5
-       mtvscr  vr0
-       REST_32VRS(0,r4,r5)
-#ifndef CONFIG_SMP
-       /* Update last_task_used_altivec to 'current' */
-       subi    r4,r5,THREAD            /* Back to 'current' */
-       std     r4,0(r3)
-#endif /* CONFIG_SMP */
-       /* restore registers and return */
-       b       fast_exception_return
-#endif /* CONFIG_ALTIVEC */
-
-/*
- * Hash table stuff
- */
-       .align  7
-_GLOBAL(do_hash_page)
-       std     r3,_DAR(r1)
-       std     r4,_DSISR(r1)
-
-       andis.  r0,r4,0xa450            /* weird error? */
-       bne-    .handle_page_fault      /* if not, try to insert a HPTE */
-BEGIN_FTR_SECTION
-       andis.  r0,r4,0x0020            /* Is it a segment table fault? */
-       bne-    .do_ste_alloc           /* If so handle it */
-END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
-
-       /*
-        * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
-        * accessing a userspace segment (even from the kernel). We assume
-        * kernel addresses always have the high bit set.
-        */
-       rlwinm  r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */
-       rotldi  r0,r3,15                /* Move high bit into MSR_PR posn */
-       orc     r0,r12,r0               /* MSR_PR | ~high_bit */
-       rlwimi  r4,r0,32-13,30,30       /* becomes _PAGE_USER access bit */
-       ori     r4,r4,1                 /* add _PAGE_PRESENT */
-       rlwimi  r4,r5,22+2,31-2,31-2    /* Set _PAGE_EXEC if trap is 0x400 */
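-       /* r4 now holds the access mask hash_page expects: _PAGE_PRESENT,
-        * plus _PAGE_RW for stores, _PAGE_USER for user-mode or
-        * user-segment accesses, and _PAGE_EXEC for 0x400 (ISI) traps.
-        */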
-
-       /*
-        * On iSeries, we soft-disable interrupts here, then
-        * hard-enable interrupts so that the hash_page code can spin on
-        * the hash_table_lock without problems on a shared processor.
-        */
-       DISABLE_INTS
-
-       /*
-        * r3 contains the faulting address
-        * r4 contains the required access permissions
-        * r5 contains the trap number
-        *
-        * at return r3 = 0 for success
-        */
-       bl      .hash_page              /* build HPTE if possible */
-       cmpdi   r3,0                    /* see if hash_page succeeded */
-
-#ifdef DO_SOFT_DISABLE
-       /*
-        * If we had interrupts soft-enabled at the point where the
-        * DSI/ISI occurred, and an interrupt came in during hash_page,
-        * handle it now.
-        * We jump to ret_from_except_lite rather than fast_exception_return
-        * because ret_from_except_lite will check for and handle pending
-        * interrupts if necessary.
-        */
-       beq     .ret_from_except_lite
-       /* For a hash failure, we don't bother re-enabling interrupts */
-       ble-    12f
-
-       /*
-        * hash_page couldn't handle it, set soft interrupt enable back
-        * to what it was before the trap.  Note that .local_irq_restore
-        * handles any interrupts pending at this point.
-        */
-       ld      r3,SOFTE(r1)
-       bl      .local_irq_restore
-       b       11f
-#else
-       beq     fast_exception_return   /* Return from exception on success */
-       ble-    12f                     /* Failure return from hash_page */
-
-       /* fall through */
-#endif
-
-/* Here we have a page fault that hash_page can't handle. */
-_GLOBAL(handle_page_fault)
-       ENABLE_INTS
-11:    ld      r4,_DAR(r1)
-       ld      r5,_DSISR(r1)
-       addi    r3,r1,STACK_FRAME_OVERHEAD
-       bl      .do_page_fault
-       cmpdi   r3,0
-       beq+    .ret_from_except_lite
-       bl      .save_nvgprs
-       mr      r5,r3
-       addi    r3,r1,STACK_FRAME_OVERHEAD
-       lwz     r4,_DAR(r1)
-       bl      .bad_page_fault
-       b       .ret_from_except
-
-/* We have a page fault that hash_page could handle but HV refused
- * the PTE insertion
- */
-12:    bl      .save_nvgprs
-       addi    r3,r1,STACK_FRAME_OVERHEAD
-       lwz     r4,_DAR(r1)
-       bl      .low_hash_fault
-       b       .ret_from_except
-
-       /* here we have a segment miss */
-_GLOBAL(do_ste_alloc)
-       bl      .ste_allocate           /* try to insert stab entry */
-       cmpdi   r3,0
-       beq+    fast_exception_return
-       b       .handle_page_fault
-
-/*
- * r13 points to the PACA, r9 contains the saved CR,
- * r11 and r12 contain the saved SRR0 and SRR1.
- * r9 - r13 are saved in paca->exslb.
- * We assume we aren't going to take any exceptions during this procedure.
- * We assume (DAR >> 60) == 0xc.
- */
-       .align  7
-_GLOBAL(do_stab_bolted)
-       stw     r9,PACA_EXSLB+EX_CCR(r13)       /* save CR in exc. frame */
-       std     r11,PACA_EXSLB+EX_SRR0(r13)     /* save SRR0 in exc. frame */
-
-       /* Hash to the primary group */
-       ld      r10,PACASTABVIRT(r13)
-       mfspr   r11,SPRN_DAR
-       srdi    r11,r11,28
-       rldimi  r10,r11,7,52    /* r10 = first ste of the group */
-
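-       /* r10 now points at the first of the eight 16-byte STEs in the
-        * group selected by the low 5 bits of the ESID (32 groups of
-        * 128 bytes each, matching the 4096-byte stab at initial_stab).
-        */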
-       /* Calculate VSID */
-       /* This is a kernel address, so protovsid = ESID */
-       ASM_VSID_SCRAMBLE(r11, r9)
-       rldic   r9,r11,12,16    /* r9 = vsid << 12 */
-
-       /* Search the primary group for a free entry */
-1:     ld      r11,0(r10)      /* Test valid bit of the current ste    */
-       andi.   r11,r11,0x80
-       beq     2f
-       addi    r10,r10,16
-       andi.   r11,r10,0x70
-       bne     1b
-
-       /* Stick to searching only the primary group for now.           */
-       /* At least for now, we use a very simple random castout scheme */
-       /* Use the TB as a random number ;  OR in 1 to avoid entry 0    */
-       mftb    r11
-       rldic   r11,r11,4,57    /* r11 = (r11 << 4) & 0x70 */
-       ori     r11,r11,0x10
-
-       /* r10 currently points to an ste one past the group of interest */
-       /* make it point to the randomly selected entry                 */
-       subi    r10,r10,128
-       or      r10,r10,r11     /* r10 is the entry to invalidate       */
-
-       isync                   /* mark the entry invalid               */
-       ld      r11,0(r10)
-       rldicl  r11,r11,56,1    /* clear the valid bit */
-       rotldi  r11,r11,8
-       std     r11,0(r10)
-       sync
-
-       clrrdi  r11,r11,28      /* Get the esid part of the ste         */
-       slbie   r11
-
-2:     std     r9,8(r10)       /* Store the vsid part of the ste       */
-       eieio
-
-       mfspr   r11,SPRN_DAR            /* Get the new esid                     */
-       clrrdi  r11,r11,28      /* Permits a full 32b of ESID           */
-       ori     r11,r11,0x90    /* Turn on valid and kp                 */
-       std     r11,0(r10)      /* Put new entry back into the stab     */
-
-       sync
-
-       /* All done -- return from exception. */
-       lwz     r9,PACA_EXSLB+EX_CCR(r13)       /* get saved CR */
-       ld      r11,PACA_EXSLB+EX_SRR0(r13)     /* get saved SRR0 */
-
-       andi.   r10,r12,MSR_RI
-       beq-    unrecov_slb
-
-       mtcrf   0x80,r9                 /* restore CR */
-
-       mfmsr   r10
-       clrrdi  r10,r10,2
-       mtmsrd  r10,1
-
-       mtspr   SPRN_SRR0,r11
-       mtspr   SPRN_SRR1,r12
-       ld      r9,PACA_EXSLB+EX_R9(r13)
-       ld      r10,PACA_EXSLB+EX_R10(r13)
-       ld      r11,PACA_EXSLB+EX_R11(r13)
-       ld      r12,PACA_EXSLB+EX_R12(r13)
-       ld      r13,PACA_EXSLB+EX_R13(r13)
-       rfid
-       b       .       /* prevent speculative execution */
-
-/*
- * Space for CPU0's segment table.
- *
- * On iSeries, the hypervisor must fill in at least one entry before
- * we get control (with relocation on).  The address is given to the hv
- * as a page number (see xLparMap in lpardata.c), so this must be at a
- * fixed address (the linker can't compute (u64)&initial_stab >>
- * PAGE_SHIFT).
- */
-       . = STAB0_PHYS_ADDR     /* 0x6000 */
-       .globl initial_stab
-initial_stab:
-       .space  4096
-
-/*
- * Data area reserved for FWNMI option.
- * This address (0x7000) is fixed by the RPA.
- */
-       . = 0x7000
-       .globl fwnmi_data_area
-fwnmi_data_area:
-
-       /* iSeries does not use the FWNMI stuff, so it is safe to put
-        * this here, even if we later allow kernels that will boot on
-        * both pSeries and iSeries */
-#ifdef CONFIG_PPC_ISERIES
-        . = LPARMAP_PHYS
-#include "lparmap.s"
-/*
- * This ".text" is here for old compilers that generate a trailing
- * .note section when compiling .c files to .s
- */
-       .text
-#endif /* CONFIG_PPC_ISERIES */
-
-        . = 0x8000
-
-/*
- * On pSeries, secondary processors spin in the following code.
- * At entry, r3 = this processor's number (physical cpu id)
- */
-_GLOBAL(pSeries_secondary_smp_init)
-       mr      r24,r3
-       
-       /* turn on 64-bit mode */
-       bl      .enable_64b_mode
-       isync
-
-       /* Copy some CPU settings from CPU 0 */
-       bl      .__restore_cpu_setup
-
-       /* Set up a paca value for this processor. Since we have the
-        * physical cpu id in r24, we need to search the pacas to find
-        * which logical id maps to our physical one.
-        */
-       LOADADDR(r13, paca)             /* Get base vaddr of paca array  */
-       li      r5,0                    /* logical cpu id                */
-1:     lhz     r6,PACAHWCPUID(r13)     /* Load HW procid from paca      */
-       cmpw    r6,r24                  /* Compare to our id             */
-       beq     2f
-       addi    r13,r13,PACA_SIZE       /* Loop to next PACA on miss     */
-       addi    r5,r5,1
-       cmpwi   r5,NR_CPUS
-       blt     1b
-
-       mr      r3,r24                  /* not found, copy phys to r3    */
-       b       .kexec_wait             /* next kernel might do better   */
-
-2:     mtspr   SPRN_SPRG3,r13          /* Save vaddr of paca in SPRG3   */
-       /* From now on, r24 is expected to be logical cpuid */
-       mr      r24,r5
-3:     HMT_LOW
-       lbz     r23,PACAPROCSTART(r13)  /* Test if this processor should */
-                                       /* start.                        */
-       sync
-
-       /* Create a temp kernel stack for use before relocation is on.  */
-       ld      r1,PACAEMERGSP(r13)
-       subi    r1,r1,STACK_FRAME_OVERHEAD
-
-       cmpwi   0,r23,0
-#ifdef CONFIG_SMP
-       bne     .__secondary_start
-#endif
-       b       3b                      /* Loop until told to go         */
-
-#ifdef CONFIG_PPC_ISERIES
-_STATIC(__start_initialization_iSeries)
-       /* Clear out the BSS */
-       LOADADDR(r11,__bss_stop)
-       LOADADDR(r8,__bss_start)
-       sub     r11,r11,r8              /* bss size                     */
-       addi    r11,r11,7               /* round up to an even double word */
-       rldicl. r11,r11,61,3            /* shift right by 3             */
-       beq     4f
-       addi    r8,r8,-8
-       li      r0,0
-       mtctr   r11                     /* zero this many doublewords   */
-3:     stdu    r0,8(r8)
-       bdnz    3b
-4:
-       LOADADDR(r1,init_thread_union)
-       addi    r1,r1,THREAD_SIZE
-       li      r0,0
-       stdu    r0,-STACK_FRAME_OVERHEAD(r1)
-
-       LOADADDR(r3,cpu_specs)
-       LOADADDR(r4,cur_cpu_spec)
-       li      r5,0
-       bl      .identify_cpu
-
-       LOADADDR(r2,__toc_start)
-       addi    r2,r2,0x4000
-       addi    r2,r2,0x4000
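-       /* r2 = __toc_start + 0x8000, the conventional TOC pointer; the
-        * offset is added in two 0x4000 steps because addi only takes a
-        * signed 16-bit immediate.
-        */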
-
-       bl      .iSeries_early_setup
-       bl      .early_setup
-
-       /* relocation is on at this point */
-
-       b       .start_here_common
-#endif /* CONFIG_PPC_ISERIES */
-
-#ifdef CONFIG_PPC_MULTIPLATFORM
-
-_STATIC(__mmu_off)
-       mfmsr   r3
-       andi.   r0,r3,MSR_IR|MSR_DR
-       beqlr
-       andc    r3,r3,r0
-       mtspr   SPRN_SRR0,r4
-       mtspr   SPRN_SRR1,r3
-       sync
-       rfid
-       b       .       /* prevent speculative execution */
-
-
-/*
- * Here is our main kernel entry point.  We currently support two kinds of entries,
- * depending on the value of r5.
- *
- *   r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
- *                 in r3...r7
- *   
- *   r5 == NULL -> kexec style entry. r3 is a physical pointer to the
- *                 DT block, r4 is a physical pointer to the kernel itself
- *
- */
-_GLOBAL(__start_initialization_multiplatform)
-       /*
-        * Are we booted from a PROM OF-type client interface?
-        */
-       cmpldi  cr0,r5,0
-       bne     .__boot_from_prom               /* yes -> prom */
-
-       /* Save parameters */
-       mr      r31,r3
-       mr      r30,r4
-
-       /* Make sure we are running in 64-bit mode */
-       bl      .enable_64b_mode
-
-       /* Setup some critical 970 SPRs before switching MMU off */
-       bl      .__970_cpu_preinit
-
-       /* cpu # */
-       li      r24,0
-
-       /* Switch off MMU if not already */
-       LOADADDR(r4, .__after_prom_start - KERNELBASE)
-       add     r4,r4,r30
-       bl      .__mmu_off
-       b       .__after_prom_start
-
-_STATIC(__boot_from_prom)
-       /* Save parameters */
-       mr      r31,r3
-       mr      r30,r4
-       mr      r29,r5
-       mr      r28,r6
-       mr      r27,r7
-
-       /* Make sure we are running in 64-bit mode */
-       bl      .enable_64b_mode
-
-       /* put a relocation offset into r3 */
-       bl      .reloc_offset
-
-       LOADADDR(r2,__toc_start)
-       addi    r2,r2,0x4000
-       addi    r2,r2,0x4000
-
-       /* Relocate the TOC from a virt addr to a real addr */
-       sub     r2,r2,r3
-
-       /* Restore parameters */
-       mr      r3,r31
-       mr      r4,r30
-       mr      r5,r29
-       mr      r6,r28
-       mr      r7,r27
-
-       /* Do all of the interaction with OF client interface */
-       bl      .prom_init
-       /* We never return */
-       trap
-
-/*
- * At this point, r3 contains the physical address we are running at,
- * returned by prom_init()
- */
-_STATIC(__after_prom_start)
-
-/*
- * We need to run with __start at physical address 0.
- * This will leave some code in the first 256B of
- * real memory, which are reserved for software use.
- * The remainder of the first page is loaded with the fixed
- * interrupt vectors.  The next two pages are filled with
- * unknown exception placeholders.
- *
- * Note: This process overwrites the OF exception vectors.
- *     r26 == relocation offset
- *     r27 == KERNELBASE
- */
-       bl      .reloc_offset
-       mr      r26,r3
-       SET_REG_TO_CONST(r27,KERNELBASE)
-
-       li      r3,0                    /* target addr */
-
-       // XXX FIXME: Use phys returned by OF (r30)
-       sub     r4,r27,r26              /* source addr                   */
-                                       /* current address of _start     */
-                                       /*   i.e. where we are running   */
-                                       /*      the source addr          */
-
-       LOADADDR(r5,copy_to_here)       /* # bytes of memory to copy     */
-       sub     r5,r5,r27
-
-       li      r6,0x100                /* Start offset, the first 0x100 */
-                                       /* bytes were copied earlier.    */
-
-       bl      .copy_and_flush         /* copy the first n bytes        */
-                                       /* this includes the code being  */
-                                       /* executed here.                */
-
-       LOADADDR(r0, 4f)                /* Jump to the copy of this code */
-       mtctr   r0                      /* that we just made/relocated   */
-       bctr
-
-4:     LOADADDR(r5,klimit)
-       sub     r5,r5,r26
-       ld      r5,0(r5)                /* get the value of klimit */
-       sub     r5,r5,r27
-       bl      .copy_and_flush         /* copy the rest */
-       b       .start_here_multiplatform
-
-#endif /* CONFIG_PPC_MULTIPLATFORM */
-
-/*
- * Copy routine used to copy the kernel to start at physical address 0
- * and flush and invalidate the caches as needed.
- * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
- * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
- *
- * Note: this routine *only* clobbers r0, r6 and lr
- */
-_GLOBAL(copy_and_flush)
-       addi    r5,r5,-8
-       addi    r6,r6,-8
-4:     li      r0,16                   /* Use the least common         */
-                                       /* denominator cache line       */
-                                       /* size.  This results in       */
-                                       /* extra cache line flushes     */
-                                       /* but operation is correct.    */
-                                       /* Can't get cache line size    */
-                                       /* from NACA as it is being     */
-                                       /* moved too.                   */
-
-       mtctr   r0                      /* put # words/line in ctr      */
-3:     addi    r6,r6,8                 /* copy a cache line            */
-       ldx     r0,r6,r4
-       stdx    r0,r6,r3
-       bdnz    3b
-       dcbst   r6,r3                   /* write it to memory           */
-       sync
-       icbi    r6,r3                   /* flush the icache line        */
-       cmpld   0,r6,r5
-       blt     4b
-       sync
-       addi    r5,r5,8
-       addi    r6,r6,8
-       blr
-
-.align 8
-copy_to_here:
-
-#ifdef CONFIG_SMP
-#ifdef CONFIG_PPC_PMAC
-/*
- * On PowerMac, secondary processors start from the reset vector, which
- * is temporarily turned into a call to one of the functions below.
- */
-       .section ".text";
-       .align 2 ;
-
-       .globl  __secondary_start_pmac_0
-__secondary_start_pmac_0:
-       /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
-       li      r24,0
-       b       1f
-       li      r24,1
-       b       1f
-       li      r24,2
-       b       1f
-       li      r24,3
-1:
-       
-_GLOBAL(pmac_secondary_start)
-       /* turn on 64-bit mode */
-       bl      .enable_64b_mode
-       isync
-
-       /* Copy some CPU settings from CPU 0 */
-       bl      .__restore_cpu_setup
-
-       /* pSeries does this early, though I don't think we really need it */
-       mfmsr   r3
-       ori     r3,r3,MSR_RI
-       mtmsrd  r3                      /* RI on */
-
-       /* Set up a paca value for this processor. */
-       LOADADDR(r4, paca)               /* Get base vaddr of paca array        */
-       mulli   r13,r24,PACA_SIZE        /* Calculate vaddr of right paca */
-       add     r13,r13,r4              /* for this processor.          */
-       mtspr   SPRN_SPRG3,r13           /* Save vaddr of paca in SPRG3 */
-
-       /* Create a temp kernel stack for use before relocation is on.  */
-       ld      r1,PACAEMERGSP(r13)
-       subi    r1,r1,STACK_FRAME_OVERHEAD
-
-       b       .__secondary_start
-
-#endif /* CONFIG_PPC_PMAC */
-
-/*
- * This function is called after the master CPU has released the
- * secondary processors.  The execution environment is relocation off.
- * The paca for this processor has the following fields initialized at
- * this point:
- *   1. Processor number
- *   2. Segment table pointer (virtual address)
- * On entry the following are set:
- *   r1        = stack pointer.  vaddr for iSeries, raddr (temp stack) for pSeries
- *   r24   = cpu# (in Linux terms)
- *   r13   = paca virtual address
- *   SPRG3 = paca virtual address
- */
-_GLOBAL(__secondary_start)
-
-       HMT_MEDIUM                      /* Set thread priority to MEDIUM */
-
-       ld      r2,PACATOC(r13)
-
-       /* Do early setup for that CPU */
-       bl      .early_setup_secondary
-
-       /* Initialize the kernel stack.  Just a repeat for iSeries.      */
-       LOADADDR(r3,current_set)
-       sldi    r28,r24,3               /* get current_set[cpu#]         */
-       ldx     r1,r3,r28
-       addi    r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
-       std     r1,PACAKSAVE(r13)
-
-       li      r7,0
-       mtlr    r7
-
-       /* enable MMU and jump to start_secondary */
-       LOADADDR(r3,.start_secondary_prolog)
-       SET_REG_TO_CONST(r4, MSR_KERNEL)
-#ifdef DO_SOFT_DISABLE
-       ori     r4,r4,MSR_EE
-#endif
-       mtspr   SPRN_SRR0,r3
-       mtspr   SPRN_SRR1,r4
-       rfid
-       b       .       /* prevent speculative execution */
-
-/* 
- * Running with relocation on at this point.  All we want to do is
- * zero the stack back-chain pointer before going into C code.
- */
-_GLOBAL(start_secondary_prolog)
-       li      r3,0
-       std     r3,0(r1)                /* Zero the stack frame pointer */
-       bl      .start_secondary
-#endif
-
-/*
- * This subroutine clobbers r11 and r12
- */
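-/* It sets the MSR SF bit (64-bit mode) and the ISF bit (interrupts taken in 64-bit mode). */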
-_GLOBAL(enable_64b_mode)
-       mfmsr   r11                     /* grab the current MSR */
-       li      r12,1
-       rldicr  r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
-       or      r11,r11,r12
-       li      r12,1
-       rldicr  r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
-       or      r11,r11,r12
-       mtmsrd  r11
-       isync
-       blr
-
-#ifdef CONFIG_PPC_MULTIPLATFORM
-/*
- * This is where the main kernel code starts.
- */
-_STATIC(start_here_multiplatform)
-       /* get a new offset, now that the kernel has moved. */
-       bl      .reloc_offset
-       mr      r26,r3
-
-       /* Clear out the BSS.  It may have been done in prom_init
-        * already, but that's irrelevant since prom_init will soon
-        * be detached from the kernel completely. Besides, we need
-        * to clear it now for kexec-style entry.
-        */
-       LOADADDR(r11,__bss_stop)
-       LOADADDR(r8,__bss_start)
-       sub     r11,r11,r8              /* bss size                     */
-       addi    r11,r11,7               /* round up to an even double word */
-       rldicl. r11,r11,61,3            /* shift right by 3             */
-       beq     4f
-       addi    r8,r8,-8
-       li      r0,0
-       mtctr   r11                     /* zero this many doublewords   */
-3:     stdu    r0,8(r8)
-       bdnz    3b
-4:
-
-       mfmsr   r6
-       ori     r6,r6,MSR_RI
-       mtmsrd  r6                      /* RI on */
-
-#ifdef CONFIG_HMT
-       /* Start up the second thread on cpu 0 */
-       mfspr   r3,SPRN_PVR
-       srwi    r3,r3,16
-       cmpwi   r3,0x34                 /* Pulsar  */
-       beq     90f
-       cmpwi   r3,0x36                 /* Icestar */
-       beq     90f
-       cmpwi   r3,0x37                 /* SStar   */
-       beq     90f
-       b       91f                     /* HMT not supported */
-90:    li      r3,0
-       bl      .hmt_start_secondary
-91:
-#endif
-
-       /* The following gets the stack and TOC set up with the regs */
-       /* pointing to the real addr of the kernel stack.  This is   */
-       /* all done to support the C function call below which sets  */
-       /* up the htab.  This is done because we have relocated the  */
-       /* kernel but are still running in real mode. */
-
-       LOADADDR(r3,init_thread_union)
-       sub     r3,r3,r26
-
-       /* set up a stack pointer (physical address) */
-       addi    r1,r3,THREAD_SIZE
-       li      r0,0
-       stdu    r0,-STACK_FRAME_OVERHEAD(r1)
-
-       /* set up the TOC (physical address) */
-       LOADADDR(r2,__toc_start)
-       addi    r2,r2,0x4000
-       addi    r2,r2,0x4000
-       sub     r2,r2,r26
-
-       LOADADDR(r3,cpu_specs)
-       sub     r3,r3,r26
-       LOADADDR(r4,cur_cpu_spec)
-       sub     r4,r4,r26
-       mr      r5,r26
-       bl      .identify_cpu
-
-       /* Save some low level config HIDs of CPU0 to be copied to
-        * other CPUs later on, or used for suspend/resume
-        */
-       bl      .__save_cpu_setup
-       sync
-
-       /* Setup a valid physical PACA pointer in SPRG3 for early_setup
-        * note that boot_cpuid can always be 0 nowadays since there is
-        * nowhere it can be initialized differently before we reach this
-        * code
-        */
-       LOADADDR(r27, boot_cpuid)
-       sub     r27,r27,r26
-       lwz     r27,0(r27)
-
-       LOADADDR(r24, paca)             /* Get base vaddr of paca array  */
-       mulli   r13,r27,PACA_SIZE       /* Calculate vaddr of right paca */
-       add     r13,r13,r24             /* for this processor.           */
-       sub     r13,r13,r26             /* convert to physical addr      */
-       mtspr   SPRN_SPRG3,r13          /* PPPBBB: Temp... -Peter */
-       
-       /* Do very early kernel initializations, including initial hash table,
-        * stab and slb setup before we turn on relocation.     */
-
-       /* Restore parameters passed from prom_init/kexec */
-       mr      r3,r31
-       bl      .early_setup
-
-       LOADADDR(r3,.start_here_common)
-       SET_REG_TO_CONST(r4, MSR_KERNEL)
-       mtspr   SPRN_SRR0,r3
-       mtspr   SPRN_SRR1,r4
-       rfid
-       b       .       /* prevent speculative execution */
-#endif /* CONFIG_PPC_MULTIPLATFORM */
-       
-       /* This is where all platforms converge execution */
-_STATIC(start_here_common)
-       /* relocation is on at this point */
-
-       /* The following code sets up the SP and TOC now that we are */
-       /* running with translation enabled. */
-
-       LOADADDR(r3,init_thread_union)
-
-       /* set up the stack */
-       addi    r1,r3,THREAD_SIZE
-       li      r0,0
-       stdu    r0,-STACK_FRAME_OVERHEAD(r1)
-
-       /* Apply the CPU-specific fixups (nop out sections not relevant
-        * to this CPU)
-        */
-       li      r3,0
-       bl      .do_cpu_ftr_fixups
-
-       LOADADDR(r26, boot_cpuid)
-       lwz     r26,0(r26)
-
-       LOADADDR(r24, paca)             /* Get base vaddr of paca array  */
-       mulli   r13,r26,PACA_SIZE       /* Calculate vaddr of right paca */
-       add     r13,r13,r24             /* for this processor.           */
-       mtspr   SPRN_SPRG3,r13
-
-       /* ptr to current */
-       LOADADDR(r4,init_task)
-       std     r4,PACACURRENT(r13)
-
-       /* Load the TOC */
-       ld      r2,PACATOC(r13)
-       std     r1,PACAKSAVE(r13)
-
-       bl      .setup_system
-
-       /* Load up the kernel context */
-5:
-#ifdef DO_SOFT_DISABLE
-       li      r5,0
-       stb     r5,PACAPROCENABLED(r13) /* Soft Disabled */
-       mfmsr   r5
-       ori     r5,r5,MSR_EE            /* Hard Enabled */
-       mtmsrd  r5
-#endif
-
-       bl .start_kernel
-
-_GLOBAL(hmt_init)
-#ifdef CONFIG_HMT
-       LOADADDR(r5, hmt_thread_data)
-       mfspr   r7,SPRN_PVR
-       srwi    r7,r7,16
-       cmpwi   r7,0x34                 /* Pulsar  */
-       beq     90f
-       cmpwi   r7,0x36                 /* Icestar */
-       beq     91f
-       cmpwi   r7,0x37                 /* SStar   */
-       beq     91f
-       b       101f
-90:    mfspr   r6,SPRN_PIR
-       andi.   r6,r6,0x1f
-       b       92f
-91:    mfspr   r6,SPRN_PIR
-       andi.   r6,r6,0x3ff
-92:    sldi    r4,r24,3
-       stwx    r6,r5,r4
-       bl      .hmt_start_secondary
-       b       101f
-
-__hmt_secondary_hold:
-       LOADADDR(r5, hmt_thread_data)
-       clrldi  r5,r5,4
-       li      r7,0
-       mfspr   r6,SPRN_PIR
-       mfspr   r8,SPRN_PVR
-       srwi    r8,r8,16
-       cmpwi   r8,0x34
-       bne     93f
-       andi.   r6,r6,0x1f
-       b       103f
-93:    andi.   r6,r6,0x3f
-
-103:   lwzx    r8,r5,r7
-       cmpw    r8,r6
-       beq     104f
-       addi    r7,r7,8
-       b       103b
-
-104:   addi    r7,r7,4
-       lwzx    r9,r5,r7
-       mr      r24,r9
-101:
-#endif
-       mr      r3,r24
-       b       .pSeries_secondary_smp_init
-
-#ifdef CONFIG_HMT
-_GLOBAL(hmt_start_secondary)
-       LOADADDR(r4,__hmt_secondary_hold)
-       clrldi  r4,r4,4
-       mtspr   SPRN_NIADORM, r4
-       mfspr   r4, SPRN_MSRDORM
-       li      r5, -65
-       and     r4, r4, r5
-       mtspr   SPRN_MSRDORM, r4
-       lis     r4,0xffef
-       ori     r4,r4,0x7403
-       mtspr   SPRN_TSC, r4
-       li      r4,0x1f4
-       mtspr   SPRN_TST, r4
-       mfspr   r4, SPRN_HID0
-       ori     r4, r4, 0x1
-       mtspr   SPRN_HID0, r4
-       mfspr   r4, SPRN_CTRLF
-       oris    r4, r4, 0x40
-       mtspr   SPRN_CTRLT, r4
-       blr
-#endif
-
-/*
- * We put a few things here that have to be page-aligned.
- * This stuff goes at the beginning of the bss, which is page-aligned.
- */
-       .section ".bss"
-
-       .align  PAGE_SHIFT
-
-       .globl  empty_zero_page
-empty_zero_page:
-       .space  PAGE_SIZE
-
-       .globl  swapper_pg_dir
-swapper_pg_dir:
-       .space  PAGE_SIZE
-
-/*
- * This space gets a copy of optional info passed to us by the bootstrap.
- * It is used to pass parameters into the kernel like root=/dev/sda1, etc.
- */
-       .globl  cmd_line
-cmd_line:
-       .space  COMMAND_LINE_SIZE
diff --git a/arch/ppc64/kernel/hvconsole.c b/arch/ppc64/kernel/hvconsole.c
deleted file mode 100644 (file)
index 138e128..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * hvconsole.c
- * Copyright (C) 2004 Hollis Blanchard, IBM Corporation
- * Copyright (C) 2004 IBM Corporation
- *
- * Additional Author(s):
- *  Ryan S. Arnold <rsa@us.ibm.com>
- *
- * LPAR console support.
- * 
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- * 
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- * 
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <asm/hvcall.h>
-#include <asm/hvconsole.h>
-
-/**
- * hvc_get_chars - retrieve characters from firmware for the denoted vterm adapter
- * @vtermno: The vtermno or unit_address of the adapter from which to fetch the
- *     data.
- * @buf: The character buffer into which to put the character data fetched from
- *     firmware.
- * @count: not used by this implementation; the hcall may write up to 16 bytes into @buf.
- */
-int hvc_get_chars(uint32_t vtermno, char *buf, int count)
-{
-       unsigned long got;
-
-       if (plpar_hcall(H_GET_TERM_CHAR, vtermno, 0, 0, 0, &got,
-               (unsigned long *)buf, (unsigned long *)buf+1) == H_Success)
-               return got;
-       return 0;
-}
-
-EXPORT_SYMBOL(hvc_get_chars);
-
-
-/**
- * hvc_put_chars - send characters to firmware for the denoted vterm adapter
- * @vtermno: The vtermno or unit_address of the adapter from which the data
- *     originated.
- * @buf: The character buffer that contains the character data to send to
- *     firmware.
- * @count: Send this number of characters.
- */
-int hvc_put_chars(uint32_t vtermno, const char *buf, int count)
-{
-       unsigned long *lbuf = (unsigned long *) buf;
-       long ret;
-
-       ret = plpar_hcall_norets(H_PUT_TERM_CHAR, vtermno, count, lbuf[0],
-                                lbuf[1]);
-       if (ret == H_Success)
-               return count;
-       if (ret == H_Busy)
-               return 0;
-       return -EIO;
-}
-
-EXPORT_SYMBOL(hvc_put_chars);
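
A minimal sketch of how a console driver might call the two exported helpers above, assuming vterm number 0 and a 16-byte scratch buffer (both values are illustrative, not taken from this file):

	/* Illustrative only: poll one vterm and echo anything read back out. */
	static void example_echo_once(void)
	{
		char buf[16];
		int got = hvc_get_chars(0, buf, sizeof(buf));

		if (got > 0)
			hvc_put_chars(0, buf, got); /* returns count, 0 (busy) or -EIO */
	}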
diff --git a/arch/ppc64/kernel/hvcserver.c b/arch/ppc64/kernel/hvcserver.c
deleted file mode 100644 (file)
index 4d58417..0000000
+++ /dev/null
@@ -1,251 +0,0 @@
-/*
- * hvcserver.c
- * Copyright (C) 2004 Ryan S Arnold, IBM Corporation
- *
- * PPC64 virtual I/O console server support.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
- */
-
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-
-#include <asm/hvcall.h>
-#include <asm/hvcserver.h>
-#include <asm/io.h>
-
-#define HVCS_ARCH_VERSION "1.0.0"
-
-MODULE_AUTHOR("Ryan S. Arnold <rsa@us.ibm.com>");
-MODULE_DESCRIPTION("IBM hvcs ppc64 API");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(HVCS_ARCH_VERSION);
-
-/*
- * Convert arch specific return codes into relevant errnos.  The hvcs
- * functions aren't performance sensitive, so this conversion isn't an
- * issue.
- */
-int hvcs_convert(long to_convert)
-{
-       switch (to_convert) {
-               case H_Success:
-                       return 0;
-               case H_Parameter:
-                       return -EINVAL;
-               case H_Hardware:
-                       return -EIO;
-               case H_Busy:
-               case H_LongBusyOrder1msec:
-               case H_LongBusyOrder10msec:
-               case H_LongBusyOrder100msec:
-               case H_LongBusyOrder1sec:
-               case H_LongBusyOrder10sec:
-               case H_LongBusyOrder100sec:
-                       return -EBUSY;
-               case H_Function: /* fall through */
-               default:
-                       return -EPERM;
-       }
-}
-
-/**
- * hvcs_free_partner_info - free pi allocated by hvcs_get_partner_info
- * @head: list_head pointer for an allocated list of partner info structs to
- *     free.
- *
- * This function is used to free the partner info list that was returned by
- * calling hvcs_get_partner_info().
- */
-int hvcs_free_partner_info(struct list_head *head)
-{
-       struct hvcs_partner_info *pi;
-       struct list_head *element;
-
-       if (!head)
-               return -EINVAL;
-
-       while (!list_empty(head)) {
-               element = head->next;
-               pi = list_entry(element, struct hvcs_partner_info, node);
-               list_del(element);
-               kfree(pi);
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL(hvcs_free_partner_info);
-
-/* Helper function for hvcs_get_partner_info */
-int hvcs_next_partner(uint32_t unit_address,
-               unsigned long last_p_partition_ID,
-               unsigned long last_p_unit_address, unsigned long *pi_buff)
-
-{
-       long retval;
-       retval = plpar_hcall_norets(H_VTERM_PARTNER_INFO, unit_address,
-                       last_p_partition_ID,
-                               last_p_unit_address, virt_to_phys(pi_buff));
-       return hvcs_convert(retval);
-}
-
-/**
- * hvcs_get_partner_info - Get all of the partner info for a vty-server adapter
- * @unit_address: The unit_address of the vty-server adapter for which this
- *     function is fetching partner info.
- * @head: An initialized list_head pointer to an empty list to use to return the
- *     list of partner info fetched from the hypervisor to the caller.
- * @pi_buff: A page-sized buffer pre-allocated prior to calling this function
- *     that is to be used by firmware as an iterator to keep track
- *     of the partner info retrieval.
- *
- * This function returns zero on success (including when there is no partner
- * info), or a negative errno on failure.
- *
- * The pi_buff is pre-allocated prior to calling this function because this
- * function may be called with a spin_lock held and kmalloc of a page is not
- * recommended as GFP_ATOMIC.
- *
- * The first long of this buffer is used to store a partner unit address.  The
- * second long is used to store a partner partition ID, and starting at
- * pi_buff[2] is the 79-character Converged Location Code (a different size
- * than the unsigned longs, hence the casting mumbo jumbo you see later).
- *
- * Invocation of this function should always be followed by an invocation of
- * hvcs_free_partner_info() using a pointer to the SAME list head instance
- * that was passed as a parameter to this function.
- */
-int hvcs_get_partner_info(uint32_t unit_address, struct list_head *head,
-               unsigned long *pi_buff)
-{
-       /*
-        * Dealt with as longs because of the hcall interface even though the
-        * values are uint32_t.
-        */
-       unsigned long   last_p_partition_ID;
-       unsigned long   last_p_unit_address;
-       struct hvcs_partner_info *next_partner_info = NULL;
-       int more = 1;
-       int retval;
-
-       memset(pi_buff, 0x00, PAGE_SIZE);
-       /* invalid parameters */
-       if (!head || !pi_buff)
-               return -EINVAL;
-
-       last_p_partition_ID = last_p_unit_address = ~0UL;
-       INIT_LIST_HEAD(head);
-
-       do {
-               retval = hvcs_next_partner(unit_address, last_p_partition_ID,
-                               last_p_unit_address, pi_buff);
-               if (retval) {
-                       /*
-                        * Don't indicate that we've failed if we have
-                        * any list elements.
-                        */
-                       if (!list_empty(head))
-                               return 0;
-                       return retval;
-               }
-
-               last_p_partition_ID = pi_buff[0];
-               last_p_unit_address = pi_buff[1];
-
-               /* This indicates that there are no further partners */
-               if (last_p_partition_ID == ~0UL
-                               && last_p_unit_address == ~0UL)
-                       break;
-
-               /* This is a very small struct and will be freed soon in
-                * hvcs_free_partner_info(). */
-               next_partner_info = kmalloc(sizeof(struct hvcs_partner_info),
-                               GFP_ATOMIC);
-
-               if (!next_partner_info) {
-                       printk(KERN_WARNING "HVCONSOLE: kmalloc() failed to"
-                               " allocate partner info struct.\n");
-                       hvcs_free_partner_info(head);
-                       return -ENOMEM;
-               }
-
-               next_partner_info->unit_address
-                       = (unsigned int)last_p_unit_address;
-               next_partner_info->partition_ID
-                       = (unsigned int)last_p_partition_ID;
-
-               /* copy the Null-term char too */
-               strncpy(&next_partner_info->location_code[0],
-                       (char *)&pi_buff[2],
-                       strlen((char *)&pi_buff[2]) + 1);
-
-               list_add_tail(&(next_partner_info->node), head);
-               next_partner_info = NULL;
-
-       } while (more);
-
-       return 0;
-}
-EXPORT_SYMBOL(hvcs_get_partner_info);
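
A hedged sketch of the calling pattern described in the comment above: pi_buff is a page allocated before the call, and the same list head is later handed to hvcs_free_partner_info(). The unit address argument and the printk are illustrative assumptions:

	static int example_dump_partners(uint32_t unit_address)
	{
		LIST_HEAD(head);
		struct hvcs_partner_info *pi;
		unsigned long *pi_buff = (unsigned long *)__get_free_page(GFP_KERNEL);
		int ret;

		if (!pi_buff)
			return -ENOMEM;

		ret = hvcs_get_partner_info(unit_address, &head, pi_buff);
		if (!ret)
			list_for_each_entry(pi, &head, node)
				printk(KERN_INFO "partner %x:%x at %s\n",
				       pi->partition_ID, pi->unit_address,
				       pi->location_code);

		/* always free with the SAME list head that was passed in */
		hvcs_free_partner_info(&head);
		free_page((unsigned long)pi_buff);
		return ret;
	}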
-
-/**
- * hvcs_register_connection - establish a connection between this vty-server and
- *     a vty.
- * @unit_address: The unit address of the vty-server adapter that is to
- *     establish a connection.
- * @p_partition_ID: The partition ID of the vty adapter that is to be connected.
- * @p_unit_address: The unit address of the vty adapter to which the vty-server
- *     is to be connected.
- *
- * If this function is called once and -EINVAL is returned it may
- * indicate that the partner info needs to be refreshed for the
- * target unit address at which point the caller must invoke
- * hvcs_get_partner_info() and then call this function again.  If,
- * for a second time, -EINVAL is returned then it indicates that
- * there is probably already a partner connection registered to a
- * different vty-server adapter.  It is also possible that a second
- * -EINVAL may indicate that one of the parms is not valid, for
- * instance if the link was removed between the vty-server adapter
- * and the vty adapter that you are trying to open.  Don't shoot the
- * messenger.  Firmware implemented it this way.
- */
-int hvcs_register_connection( uint32_t unit_address,
-               uint32_t p_partition_ID, uint32_t p_unit_address)
-{
-       long retval;
-       retval = plpar_hcall_norets(H_REGISTER_VTERM, unit_address,
-                               p_partition_ID, p_unit_address);
-       return hvcs_convert(retval);
-}
-EXPORT_SYMBOL(hvcs_register_connection);
-
-/**
- * hvcs_free_connection - free the connection between a vty-server and vty
- * @unit_address: The unit address of the vty-server that is to have its
- *     connection severed.
- *
- * This function is used to free the partner connection between a vty-server
- * adapter and a vty adapter.
- *
- * If -EBUSY is returned continue to call this function until 0 is returned.
- */
-int hvcs_free_connection(uint32_t unit_address)
-{
-       long retval;
-       retval = plpar_hcall_norets(H_FREE_VTERM, unit_address);
-       return hvcs_convert(retval);
-}
-EXPORT_SYMBOL(hvcs_free_connection);
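
A sketch of the connect/use/disconnect sequence the two comments above describe; the helper name and the busy-wait loop are illustrative assumptions (a real caller would refresh partner info and retry once after the first -EINVAL):

	static int example_open_and_close_vterm(uint32_t unit_address,
			uint32_t p_partition_ID, uint32_t p_unit_address)
	{
		int rc = hvcs_register_connection(unit_address, p_partition_ID,
						  p_unit_address);
		if (rc)
			return rc;	/* possibly stale partner info, see comment above */

		/* ... terminal traffic flows while the connection is registered ... */

		/* per the comment above, keep calling until it stops returning -EBUSY */
		while (hvcs_free_connection(unit_address) == -EBUSY)
			cpu_relax();
		return 0;
	}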
diff --git a/arch/ppc64/kernel/iomap.c b/arch/ppc64/kernel/iomap.c
deleted file mode 100644 (file)
index 6160c8d..0000000
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * arch/ppc64/kernel/iomap.c
- *
- * ppc64 "iomap" interface implementation.
- *
- * (C) Copyright 2004 Linus Torvalds
- */
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/mm.h>
-#include <asm/io.h>
-
-/*
- * Here comes the ppc64 implementation of the IOMAP 
- * interfaces.
- */
-unsigned int fastcall ioread8(void __iomem *addr)
-{
-       return readb(addr);
-}
-unsigned int fastcall ioread16(void __iomem *addr)
-{
-       return readw(addr);
-}
-unsigned int fastcall ioread16be(void __iomem *addr)
-{
-       return in_be16(addr);
-}
-unsigned int fastcall ioread32(void __iomem *addr)
-{
-       return readl(addr);
-}
-unsigned int fastcall ioread32be(void __iomem *addr)
-{
-       return in_be32(addr);
-}
-EXPORT_SYMBOL(ioread8);
-EXPORT_SYMBOL(ioread16);
-EXPORT_SYMBOL(ioread16be);
-EXPORT_SYMBOL(ioread32);
-EXPORT_SYMBOL(ioread32be);
-
-void fastcall iowrite8(u8 val, void __iomem *addr)
-{
-       writeb(val, addr);
-}
-void fastcall iowrite16(u16 val, void __iomem *addr)
-{
-       writew(val, addr);
-}
-void fastcall iowrite16be(u16 val, void __iomem *addr)
-{
-       out_be16(addr, val);
-}
-void fastcall iowrite32(u32 val, void __iomem *addr)
-{
-       writel(val, addr);
-}
-void fastcall iowrite32be(u32 val, void __iomem *addr)
-{
-       out_be32(addr, val);
-}
-EXPORT_SYMBOL(iowrite8);
-EXPORT_SYMBOL(iowrite16);
-EXPORT_SYMBOL(iowrite16be);
-EXPORT_SYMBOL(iowrite32);
-EXPORT_SYMBOL(iowrite32be);
-
-/*
- * These are the "repeat read/write" functions. Note the
- * non-CPU byte order. We do things in "IO byteorder"
- * here.
- *
- * FIXME! We could make these do EEH handling if we really
- * wanted. Not clear if we do.
- */
-void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
-{
-       _insb((u8 __iomem *) addr, dst, count);
-}
-void ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
-{
-       _insw_ns((u16 __iomem *) addr, dst, count);
-}
-void ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
-{
-       _insl_ns((u32 __iomem *) addr, dst, count);
-}
-EXPORT_SYMBOL(ioread8_rep);
-EXPORT_SYMBOL(ioread16_rep);
-EXPORT_SYMBOL(ioread32_rep);
-
-void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
-{
-       _outsb((u8 __iomem *) addr, src, count);
-}
-void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
-{
-       _outsw_ns((u16 __iomem *) addr, src, count);
-}
-void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
-{
-       _outsl_ns((u32 __iomem *) addr, src, count);
-}
-EXPORT_SYMBOL(iowrite8_rep);
-EXPORT_SYMBOL(iowrite16_rep);
-EXPORT_SYMBOL(iowrite32_rep);
-
-void __iomem *ioport_map(unsigned long port, unsigned int len)
-{
-       if (!_IO_IS_VALID(port))
-               return NULL;
-       return (void __iomem *) (port+pci_io_base);
-}
-
-void ioport_unmap(void __iomem *addr)
-{
-       /* Nothing to do */
-}
-EXPORT_SYMBOL(ioport_map);
-EXPORT_SYMBOL(ioport_unmap);
-
-void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
-{
-       unsigned long start = pci_resource_start(dev, bar);
-       unsigned long len = pci_resource_len(dev, bar);
-       unsigned long flags = pci_resource_flags(dev, bar);
-
-       if (!len)
-               return NULL;
-       if (max && len > max)
-               len = max;
-       if (flags & IORESOURCE_IO)
-               return ioport_map(start, len);
-       if (flags & IORESOURCE_MEM)
-               return ioremap(start, len);
-       /* What? */
-       return NULL;
-}
-
-void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
-{
-       /* Nothing to do */
-}
-EXPORT_SYMBOL(pci_iomap);
-EXPORT_SYMBOL(pci_iounmap);
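
A minimal sketch of a PCI driver using the iomap interface implemented above; BAR 0 and the 0x10 register offset are arbitrary assumptions:

	static int example_toggle_reg(struct pci_dev *dev)
	{
		void __iomem *regs = pci_iomap(dev, 0, 0);	/* 0 = map the whole BAR */
		u32 val;

		if (!regs)
			return -ENOMEM;

		val = ioread32(regs + 0x10);	/* little-endian read, via readl() */
		iowrite32(val | 0x1, regs + 0x10);

		pci_iounmap(dev, regs);
		return 0;
	}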
diff --git a/arch/ppc64/kernel/iommu.c b/arch/ppc64/kernel/iommu.c
deleted file mode 100644 (file)
index 4d9b438..0000000
+++ /dev/null
@@ -1,572 +0,0 @@
-/*
- * arch/ppc64/kernel/iommu.c
- * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
- * 
- * Rewrite, cleanup, new allocation schemes, virtual merging: 
- * Copyright (C) 2004 Olof Johansson, IBM Corporation
- *               and  Ben. Herrenschmidt, IBM Corporation
- *
- * Dynamic DMA mapping support, bus-independent parts.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- * 
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- * 
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
- */
-
-
-#include <linux/config.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/spinlock.h>
-#include <linux/string.h>
-#include <linux/dma-mapping.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <asm/io.h>
-#include <asm/prom.h>
-#include <asm/iommu.h>
-#include <asm/pci-bridge.h>
-#include <asm/machdep.h>
-
-#define DBG(...)
-
-#ifdef CONFIG_IOMMU_VMERGE
-static int novmerge = 0;
-#else
-static int novmerge = 1;
-#endif
-
-static int __init setup_iommu(char *str)
-{
-       if (!strcmp(str, "novmerge"))
-               novmerge = 1;
-       else if (!strcmp(str, "vmerge"))
-               novmerge = 0;
-       return 1;
-}
-
-__setup("iommu=", setup_iommu);
-
-static unsigned long iommu_range_alloc(struct iommu_table *tbl,
-                                       unsigned long npages,
-                                       unsigned long *handle,
-                                       unsigned int align_order)
-{ 
-       unsigned long n, end, i, start;
-       unsigned long limit;
-       int largealloc = npages > 15;
-       int pass = 0;
-       unsigned long align_mask;
-
-       align_mask = 0xffffffffffffffffl >> (64 - align_order);
-
-       /* This allocator was derived from x86_64's bit string search */
-
-       /* Sanity check */
-       if (unlikely(npages == 0)) {
-               if (printk_ratelimit())
-                       WARN_ON(1);
-               return DMA_ERROR_CODE;
-       }
-
-       if (handle && *handle)
-               start = *handle;
-       else
-               start = largealloc ? tbl->it_largehint : tbl->it_hint;
-
-       /* Use only half of the table for small allocs (15 pages or less) */
-       limit = largealloc ? tbl->it_size : tbl->it_halfpoint;
-
-       if (largealloc && start < tbl->it_halfpoint)
-               start = tbl->it_halfpoint;
-
-       /* The case below can happen if we have a small segment appended
-        * to a large, or when the previous alloc was at the very end of
-        * the available space. If so, go back to the initial start.
-        */
-       if (start >= limit)
-               start = largealloc ? tbl->it_largehint : tbl->it_hint;
-       
- again:
-
-       n = find_next_zero_bit(tbl->it_map, limit, start);
-
-       /* Align allocation */
-       n = (n + align_mask) & ~align_mask;
-
-       end = n + npages;
-
-       if (unlikely(end >= limit)) {
-               if (likely(pass < 2)) {
-                       /* First failure, just rescan the half of the table.
-                        * Second failure, rescan the other half of the table.
-                        */
-                       start = (largealloc ^ pass) ? tbl->it_halfpoint : 0;
-                       limit = pass ? tbl->it_size : limit;
-                       pass++;
-                       goto again;
-               } else {
-                       /* Third failure, give up */
-                       return DMA_ERROR_CODE;
-               }
-       }
-
-       for (i = n; i < end; i++)
-               if (test_bit(i, tbl->it_map)) {
-                       start = i+1;
-                       goto again;
-               }
-
-       for (i = n; i < end; i++)
-               __set_bit(i, tbl->it_map);
-
-       /* Bump the hint to a new block for small allocs. */
-       if (largealloc) {
-               /* Don't bump to new block to avoid fragmentation */
-               tbl->it_largehint = end;
-       } else {
-               /* Overflow will be taken care of at the next allocation */
-               tbl->it_hint = (end + tbl->it_blocksize - 1) &
-                               ~(tbl->it_blocksize - 1);
-       }
-
-       /* Update handle for SG allocations */
-       if (handle)
-               *handle = end;
-
-       return n;
-}
-
-static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
-                      unsigned int npages, enum dma_data_direction direction,
-                      unsigned int align_order)
-{
-       unsigned long entry, flags;
-       dma_addr_t ret = DMA_ERROR_CODE;
-       
-       spin_lock_irqsave(&(tbl->it_lock), flags);
-
-       entry = iommu_range_alloc(tbl, npages, NULL, align_order);
-
-       if (unlikely(entry == DMA_ERROR_CODE)) {
-               spin_unlock_irqrestore(&(tbl->it_lock), flags);
-               return DMA_ERROR_CODE;
-       }
-
-       entry += tbl->it_offset;        /* Offset into real TCE table */
-       ret = entry << PAGE_SHIFT;      /* Set the return dma address */
-
-       /* Put the TCEs in the HW table */
-       ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & PAGE_MASK,
-                        direction);
-
-
-       /* Flush/invalidate TLB caches if necessary */
-       if (ppc_md.tce_flush)
-               ppc_md.tce_flush(tbl);
-
-       spin_unlock_irqrestore(&(tbl->it_lock), flags);
-
-       /* Make sure updates are seen by hardware */
-       mb();
-
-       return ret;
-}
-
-static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, 
-                        unsigned int npages)
-{
-       unsigned long entry, free_entry;
-       unsigned long i;
-
-       entry = dma_addr >> PAGE_SHIFT;
-       free_entry = entry - tbl->it_offset;
-
-       if (((free_entry + npages) > tbl->it_size) ||
-           (entry < tbl->it_offset)) {
-               if (printk_ratelimit()) {
-                       printk(KERN_INFO "iommu_free: invalid entry\n");
-                       printk(KERN_INFO "\tentry     = 0x%lx\n", entry); 
-                       printk(KERN_INFO "\tdma_addr  = 0x%lx\n", (u64)dma_addr);
-                       printk(KERN_INFO "\tTable     = 0x%lx\n", (u64)tbl);
-                       printk(KERN_INFO "\tbus#      = 0x%lx\n", (u64)tbl->it_busno);
-                       printk(KERN_INFO "\tsize      = 0x%lx\n", (u64)tbl->it_size);
-                       printk(KERN_INFO "\tstartOff  = 0x%lx\n", (u64)tbl->it_offset);
-                       printk(KERN_INFO "\tindex     = 0x%lx\n", (u64)tbl->it_index);
-                       WARN_ON(1);
-               }
-               return;
-       }
-
-       ppc_md.tce_free(tbl, entry, npages);
-       
-       for (i = 0; i < npages; i++)
-               __clear_bit(free_entry+i, tbl->it_map);
-}
-
-static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
-               unsigned int npages)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&(tbl->it_lock), flags);
-
-       __iommu_free(tbl, dma_addr, npages);
-
-       /* Make sure TLB cache is flushed if the HW needs it. We do
-        * not do an mb() here on purpose, it is not needed on any of
-        * the current platforms.
-        */
-       if (ppc_md.tce_flush)
-               ppc_md.tce_flush(tbl);
-
-       spin_unlock_irqrestore(&(tbl->it_lock), flags);
-}
-
-int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
-               struct scatterlist *sglist, int nelems,
-               enum dma_data_direction direction)
-{
-       dma_addr_t dma_next = 0, dma_addr;
-       unsigned long flags;
-       struct scatterlist *s, *outs, *segstart;
-       int outcount, incount;
-       unsigned long handle;
-
-       BUG_ON(direction == DMA_NONE);
-
-       if ((nelems == 0) || !tbl)
-               return 0;
-
-       outs = s = segstart = &sglist[0];
-       outcount = 1;
-       incount = nelems;
-       handle = 0;
-
-       /* Init first segment length for backout at failure */
-       outs->dma_length = 0;
-
-       DBG("mapping %d elements:\n", nelems);
-
-       spin_lock_irqsave(&(tbl->it_lock), flags);
-
-       for (s = outs; nelems; nelems--, s++) {
-               unsigned long vaddr, npages, entry, slen;
-
-               slen = s->length;
-               /* Sanity check */
-               if (slen == 0) {
-                       dma_next = 0;
-                       continue;
-               }
-               /* Allocate iommu entries for that segment */
-               vaddr = (unsigned long)page_address(s->page) + s->offset;
-               npages = PAGE_ALIGN(vaddr + slen) - (vaddr & PAGE_MASK);
-               npages >>= PAGE_SHIFT;
-               entry = iommu_range_alloc(tbl, npages, &handle, 0);
-
-               DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);
-
-               /* Handle failure */
-               if (unlikely(entry == DMA_ERROR_CODE)) {
-                       if (printk_ratelimit())
-                               printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %lx"
-                                      " npages %lx\n", tbl, vaddr, npages);
-                       goto failure;
-               }
-
-               /* Convert entry to a dma_addr_t */
-               entry += tbl->it_offset;
-               dma_addr = entry << PAGE_SHIFT;
-               dma_addr |= s->offset;
-
-               DBG("  - %lx pages, entry: %lx, dma_addr: %lx\n",
-                           npages, entry, dma_addr);
-
-               /* Insert into HW table */
-               ppc_md.tce_build(tbl, entry, npages, vaddr & PAGE_MASK, direction);
-
-               /* If we are in an open segment, try merging */
-               if (segstart != s) {
-                       DBG("  - trying merge...\n");
-                       /* We cannot merge if:
-                        * - allocated dma_addr isn't contiguous to previous allocation
-                        */
-                       if (novmerge || (dma_addr != dma_next)) {
-                               /* Can't merge: create a new segment */
-                               segstart = s;
-                               outcount++; outs++;
-                               DBG("    can't merge, new segment.\n");
-                       } else {
-                               outs->dma_length += s->length;
-                               DBG("    merged, new len: %lx\n", outs->dma_length);
-                       }
-               }
-
-               if (segstart == s) {
-                       /* This is a new segment, fill entries */
-                       DBG("  - filling new segment.\n");
-                       outs->dma_address = dma_addr;
-                       outs->dma_length = slen;
-               }
-
-               /* Calculate next page pointer for contiguous check */
-               dma_next = dma_addr + slen;
-
-               DBG("  - dma next is: %lx\n", dma_next);
-       }
-
-       /* Flush/invalidate TLB caches if necessary */
-       if (ppc_md.tce_flush)
-               ppc_md.tce_flush(tbl);
-
-       spin_unlock_irqrestore(&(tbl->it_lock), flags);
-
-       /* Make sure updates are seen by hardware */
-       mb();
-
-       DBG("mapped %d elements:\n", outcount);
-
-       /* For the sake of iommu_unmap_sg, we clear out the length in the
-        * next entry of the sglist if we didn't fill the list completely
-        */
-       if (outcount < incount) {
-               outs++;
-               outs->dma_address = DMA_ERROR_CODE;
-               outs->dma_length = 0;
-       }
-       return outcount;
-
- failure:
-       for (s = &sglist[0]; s <= outs; s++) {
-               if (s->dma_length != 0) {
-                       unsigned long vaddr, npages;
-
-                       vaddr = s->dma_address & PAGE_MASK;
-                       npages = (PAGE_ALIGN(s->dma_address + s->dma_length) - vaddr)
-                               >> PAGE_SHIFT;
-                       __iommu_free(tbl, vaddr, npages);
-               }
-       }
-       spin_unlock_irqrestore(&(tbl->it_lock), flags);
-       return 0;
-}
-
-
-void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
-               int nelems, enum dma_data_direction direction)
-{
-       unsigned long flags;
-
-       BUG_ON(direction == DMA_NONE);
-
-       if (!tbl)
-               return;
-
-       spin_lock_irqsave(&(tbl->it_lock), flags);
-
-       while (nelems--) {
-               unsigned int npages;
-               dma_addr_t dma_handle = sglist->dma_address;
-
-               if (sglist->dma_length == 0)
-                       break;
-               npages = (PAGE_ALIGN(dma_handle + sglist->dma_length)
-                         - (dma_handle & PAGE_MASK)) >> PAGE_SHIFT;
-               __iommu_free(tbl, dma_handle, npages);
-               sglist++;
-       }
-
-       /* Flush/invalidate TLBs if necessary. As for iommu_free(), we
-        * do not do an mb() here, the affected platforms do not need it
-        * when freeing.
-        */
-       if (ppc_md.tce_flush)
-               ppc_md.tce_flush(tbl);
-
-       spin_unlock_irqrestore(&(tbl->it_lock), flags);
-}
-
-/*
- * Build a iommu_table structure.  This contains a bit map which
- * is used to manage allocation of the tce space.
- */
-struct iommu_table *iommu_init_table(struct iommu_table *tbl)
-{
-       unsigned long sz;
-       static int welcomed = 0;
-
-       /* Set aside 1/4 of the table for large allocations. */
-       tbl->it_halfpoint = tbl->it_size * 3 / 4;
-
-       /* number of bytes needed for the bitmap */
-       sz = (tbl->it_size + 7) >> 3;
-
-       tbl->it_map = (unsigned long *)__get_free_pages(GFP_ATOMIC, get_order(sz));
-       if (!tbl->it_map)
-               panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
-
-       memset(tbl->it_map, 0, sz);
-
-       tbl->it_hint = 0;
-       tbl->it_largehint = tbl->it_halfpoint;
-       spin_lock_init(&tbl->it_lock);
-
-       /* Clear the hardware table in case firmware left allocations in it */
-       ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
-
-       if (!welcomed) {
-               printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
-                      novmerge ? "disabled" : "enabled");
-               welcomed = 1;
-       }
-
-       return tbl;
-}
-
-void iommu_free_table(struct device_node *dn)
-{
-       struct pci_dn *pdn = dn->data;
-       struct iommu_table *tbl = pdn->iommu_table;
-       unsigned long bitmap_sz, i;
-       unsigned int order;
-
-       if (!tbl || !tbl->it_map) {
-               printk(KERN_ERR "%s: expected TCE map for %s\n", __FUNCTION__,
-                               dn->full_name);
-               return;
-       }
-
-       /* verify that table contains no entries */
-       /* it_size is in entries, and we're examining 64 at a time */
-       for (i = 0; i < (tbl->it_size/64); i++) {
-               if (tbl->it_map[i] != 0) {
-                       printk(KERN_WARNING "%s: Unexpected TCEs for %s\n",
-                               __FUNCTION__, dn->full_name);
-                       break;
-               }
-       }
-
-       /* calculate bitmap size in bytes */
-       bitmap_sz = (tbl->it_size + 7) / 8;
-
-       /* free bitmap */
-       order = get_order(bitmap_sz);
-       free_pages((unsigned long) tbl->it_map, order);
-
-       /* free table */
-       kfree(tbl);
-}
-
-/* Creates TCEs for a user provided buffer.  The user buffer must be
- * contiguous real kernel storage (not vmalloc).  The address of the buffer
- * passed here is the kernel (virtual) address of the buffer.  The buffer
- * need not be page aligned, the dma_addr_t returned will point to the same
- * byte within the page as vaddr.
- */
-dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
-               size_t size, enum dma_data_direction direction)
-{
-       dma_addr_t dma_handle = DMA_ERROR_CODE;
-       unsigned long uaddr;
-       unsigned int npages;
-
-       BUG_ON(direction == DMA_NONE);
-
-       uaddr = (unsigned long)vaddr;
-       npages = PAGE_ALIGN(uaddr + size) - (uaddr & PAGE_MASK);
-       npages >>= PAGE_SHIFT;
-
-       if (tbl) {
-               dma_handle = iommu_alloc(tbl, vaddr, npages, direction, 0);
-               if (dma_handle == DMA_ERROR_CODE) {
-                       if (printk_ratelimit())  {
-                               printk(KERN_INFO "iommu_alloc failed, "
-                                               "tbl %p vaddr %p npages %d\n",
-                                               tbl, vaddr, npages);
-                       }
-               } else
-                       dma_handle |= (uaddr & ~PAGE_MASK);
-       }
-
-       return dma_handle;
-}
-
-void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
-               size_t size, enum dma_data_direction direction)
-{
-       BUG_ON(direction == DMA_NONE);
-
-       if (tbl)
-               iommu_free(tbl, dma_handle, (PAGE_ALIGN(dma_handle + size) -
-                                       (dma_handle & PAGE_MASK)) >> PAGE_SHIFT);
-}
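
A hedged sketch of the map/unmap pairing for a physically contiguous (kmalloc'ed) buffer, as described in the comment above iommu_map_single(); in real code the iommu_table pointer would come from the device's PCI data, so it is simply a parameter here:

	static void example_dma_to_device(struct iommu_table *tbl, void *buf, size_t len)
	{
		dma_addr_t handle = iommu_map_single(tbl, buf, len, DMA_TO_DEVICE);

		if (handle == DMA_ERROR_CODE)
			return;		/* no TCEs were allocated, nothing to undo */

		/* ... hand `handle' to the device and wait for the DMA to finish ... */

		iommu_unmap_single(tbl, handle, len, DMA_TO_DEVICE);
	}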
-
-/* Allocates a contiguous real buffer and creates mappings over it.
- * Returns the virtual address of the buffer and sets dma_handle
- * to the dma address (mapping) of the first page.
- */
-void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
-               dma_addr_t *dma_handle, gfp_t flag)
-{
-       void *ret = NULL;
-       dma_addr_t mapping;
-       unsigned int npages, order;
-
-       size = PAGE_ALIGN(size);
-       npages = size >> PAGE_SHIFT;
-       order = get_order(size);
-
-       /*
-        * Client asked for way too much space.  This is checked later
-        * anyway.  It is easier to debug here for the drivers than in
-        * the tce tables.
-        */
-       if (order >= IOMAP_MAX_ORDER) {
-               printk("iommu_alloc_consistent size too large: 0x%lx\n", size);
-               return NULL;
-       }
-
-       if (!tbl)
-               return NULL;
-
-       /* Alloc enough pages (and possibly more) */
-       ret = (void *)__get_free_pages(flag, order);
-       if (!ret)
-               return NULL;
-       memset(ret, 0, size);
-
-       /* Set up tces to cover the allocated range */
-       mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL, order);
-       if (mapping == DMA_ERROR_CODE) {
-               free_pages((unsigned long)ret, order);
-               ret = NULL;
-       } else
-               *dma_handle = mapping;
-       return ret;
-}
-
-void iommu_free_coherent(struct iommu_table *tbl, size_t size,
-                        void *vaddr, dma_addr_t dma_handle)
-{
-       unsigned int npages;
-
-       if (tbl) {
-               size = PAGE_ALIGN(size);
-               npages = size >> PAGE_SHIFT;
-               iommu_free(tbl, dma_handle, npages);
-               free_pages((unsigned long)vaddr, get_order(size));
-       }
-}
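
Similarly, a sketch pairing the coherent alloc/free helpers above; the PAGE_SIZE request and the table pointer are placeholders:

	static void example_coherent(struct iommu_table *tbl)
	{
		dma_addr_t handle;
		void *va = iommu_alloc_coherent(tbl, PAGE_SIZE, &handle, GFP_KERNEL);

		if (!va)
			return;

		/* device DMAs to/from `handle' while the CPU uses `va' */

		iommu_free_coherent(tbl, PAGE_SIZE, va, handle);
	}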
diff --git a/arch/ppc64/kernel/kprobes.c b/arch/ppc64/kernel/kprobes.c
deleted file mode 100644 (file)
index 511af54..0000000
+++ /dev/null
@@ -1,459 +0,0 @@
-/*
- *  Kernel Probes (KProbes)
- *  arch/ppc64/kernel/kprobes.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (C) IBM Corporation, 2002, 2004
- *
- * 2002-Oct    Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
- *             Probes initial implementation ( includes contributions from
- *             Rusty Russell).
- * 2004-July   Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
- *             interface to access function arguments.
- * 2004-Nov    Ananth N Mavinakayanahalli <ananth@in.ibm.com> kprobes port
- *             for PPC64
- */
-
-#include <linux/config.h>
-#include <linux/kprobes.h>
-#include <linux/ptrace.h>
-#include <linux/preempt.h>
-#include <asm/cacheflush.h>
-#include <asm/kdebug.h>
-#include <asm/sstep.h>
-
-static DECLARE_MUTEX(kprobe_mutex);
-DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
-DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
-
-int __kprobes arch_prepare_kprobe(struct kprobe *p)
-{
-       int ret = 0;
-       kprobe_opcode_t insn = *p->addr;
-
-       if ((unsigned long)p->addr & 0x03) {
-               printk("Attempt to register kprobe at an unaligned address\n");
-               ret = -EINVAL;
-       } else if (IS_MTMSRD(insn) || IS_RFID(insn)) {
-               printk("Cannot register a kprobe on rfid or mtmsrd\n");
-               ret = -EINVAL;
-       }
-
-       /* insn must be on a special executable page on ppc64 */
-       if (!ret) {
-               down(&kprobe_mutex);
-               p->ainsn.insn = get_insn_slot();
-               up(&kprobe_mutex);
-               if (!p->ainsn.insn)
-                       ret = -ENOMEM;
-       }
-       return ret;
-}
-
-void __kprobes arch_copy_kprobe(struct kprobe *p)
-{
-       memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
-       p->opcode = *p->addr;
-}
-
-void __kprobes arch_arm_kprobe(struct kprobe *p)
-{
-       *p->addr = BREAKPOINT_INSTRUCTION;
-       flush_icache_range((unsigned long) p->addr,
-                          (unsigned long) p->addr + sizeof(kprobe_opcode_t));
-}
-
-void __kprobes arch_disarm_kprobe(struct kprobe *p)
-{
-       *p->addr = p->opcode;
-       flush_icache_range((unsigned long) p->addr,
-                          (unsigned long) p->addr + sizeof(kprobe_opcode_t));
-}
-
-void __kprobes arch_remove_kprobe(struct kprobe *p)
-{
-       down(&kprobe_mutex);
-       free_insn_slot(p->ainsn.insn);
-       up(&kprobe_mutex);
-}
-
-static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
-{
-       kprobe_opcode_t insn = *p->ainsn.insn;
-
-       regs->msr |= MSR_SE;
-
-       /* single step inline if it is a trap variant */
-       if (is_trap(insn))
-               regs->nip = (unsigned long)p->addr;
-       else
-               regs->nip = (unsigned long)p->ainsn.insn;
-}
-
-static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
-{
-       kcb->prev_kprobe.kp = kprobe_running();
-       kcb->prev_kprobe.status = kcb->kprobe_status;
-       kcb->prev_kprobe.saved_msr = kcb->kprobe_saved_msr;
-}
-
-static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
-{
-       __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
-       kcb->kprobe_status = kcb->prev_kprobe.status;
-       kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr;
-}
-
-static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
-                               struct kprobe_ctlblk *kcb)
-{
-       __get_cpu_var(current_kprobe) = p;
-       kcb->kprobe_saved_msr = regs->msr;
-}
-
-/* Called with kretprobe_lock held */
-void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
-                                     struct pt_regs *regs)
-{
-       struct kretprobe_instance *ri;
-
-       if ((ri = get_free_rp_inst(rp)) != NULL) {
-               ri->rp = rp;
-               ri->task = current;
-               ri->ret_addr = (kprobe_opcode_t *)regs->link;
-
-               /* Replace the return addr with trampoline addr */
-               regs->link = (unsigned long)kretprobe_trampoline;
-               add_rp_inst(ri);
-       } else {
-               rp->nmissed++;
-       }
-}
-
-static inline int kprobe_handler(struct pt_regs *regs)
-{
-       struct kprobe *p;
-       int ret = 0;
-       unsigned int *addr = (unsigned int *)regs->nip;
-       struct kprobe_ctlblk *kcb;
-
-       /*
-        * We don't want to be preempted for the entire
-        * duration of kprobe processing
-        */
-       preempt_disable();
-       kcb = get_kprobe_ctlblk();
-
-       /* Check we're not actually recursing */
-       if (kprobe_running()) {
-               p = get_kprobe(addr);
-               if (p) {
-                       kprobe_opcode_t insn = *p->ainsn.insn;
-                       if (kcb->kprobe_status == KPROBE_HIT_SS &&
-                                       is_trap(insn)) {
-                               regs->msr &= ~MSR_SE;
-                               regs->msr |= kcb->kprobe_saved_msr;
-                               goto no_kprobe;
-                       }
-                       /* We have reentered the kprobe_handler(), since
-                        * another probe was hit while within the handler.
-                        * We here save the original kprobes variables and
-                        * just single step on the instruction of the new probe
-                        * without calling any user handlers.
-                        */
-                       save_previous_kprobe(kcb);
-                       set_current_kprobe(p, regs, kcb);
-                       kcb->kprobe_saved_msr = regs->msr;
-                       p->nmissed++;
-                       prepare_singlestep(p, regs);
-                       kcb->kprobe_status = KPROBE_REENTER;
-                       return 1;
-               } else {
-                       p = __get_cpu_var(current_kprobe);
-                       if (p->break_handler && p->break_handler(p, regs)) {
-                               goto ss_probe;
-                       }
-               }
-               goto no_kprobe;
-       }
-
-       p = get_kprobe(addr);
-       if (!p) {
-               if (*addr != BREAKPOINT_INSTRUCTION) {
-                       /*
-                        * PowerPC has multiple variants of the "trap"
-                        * instruction. If the current instruction is a
-                        * trap variant, it could belong to someone else
-                        */
-                       kprobe_opcode_t cur_insn = *addr;
-                       if (is_trap(cur_insn))
-                               goto no_kprobe;
-                       /*
-                        * The breakpoint instruction was removed right
-                        * after we hit it.  Another cpu has removed
-                        * either a probepoint or a debugger breakpoint
-                        * at this address.  In either case, no further
-                        * handling of this interrupt is appropriate.
-                        */
-                       ret = 1;
-               }
-               /* Not one of ours: let kernel handle it */
-               goto no_kprobe;
-       }
-
-       kcb->kprobe_status = KPROBE_HIT_ACTIVE;
-       set_current_kprobe(p, regs, kcb);
-       if (p->pre_handler && p->pre_handler(p, regs))
-               /* handler has already set things up, so skip ss setup */
-               return 1;
-
-ss_probe:
-       prepare_singlestep(p, regs);
-       kcb->kprobe_status = KPROBE_HIT_SS;
-       return 1;
-
-no_kprobe:
-       preempt_enable_no_resched();
-       return ret;
-}
-
-/*
- * Function return probe trampoline:
- *     - init_kprobes() establishes a probepoint here
- *     - When the probed function returns, this probe
- *             causes the handlers to fire
- */
-void kretprobe_trampoline_holder(void)
-{
-       asm volatile(".global kretprobe_trampoline\n"
-                       "kretprobe_trampoline:\n"
-                       "nop\n");
-}
-
-/*
- * Called when the probe at kretprobe trampoline is hit
- */
-int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
-{
-        struct kretprobe_instance *ri = NULL;
-        struct hlist_head *head;
-        struct hlist_node *node, *tmp;
-       unsigned long flags, orig_ret_address = 0;
-       unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
-
-       spin_lock_irqsave(&kretprobe_lock, flags);
-        head = kretprobe_inst_table_head(current);
-
-       /*
-        * It is possible to have multiple instances associated with a given
-        * task either because multiple functions in the call path
-        * have a return probe installed on them, and/or more than one
-        * return probe was registered for a target function.
-        *
-        * We can handle this because:
-        *     - instances are always inserted at the head of the list
-        *     - when multiple return probes are registered for the same
-        *       function, the first instance's ret_addr will point to the
-        *       real return address, and all the rest will point to
-        *       kretprobe_trampoline
-        */
-       hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
-                if (ri->task != current)
-                       /* another task is sharing our hash bucket */
-                        continue;
-
-               if (ri->rp && ri->rp->handler)
-                       ri->rp->handler(ri, regs);
-
-               orig_ret_address = (unsigned long)ri->ret_addr;
-               recycle_rp_inst(ri);
-
-               if (orig_ret_address != trampoline_address)
-                       /*
-                        * This is the real return address. Any other
-                        * instances associated with this task are for
-                        * other calls deeper on the call stack
-                        */
-                       break;
-       }
-
-       BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
-       regs->nip = orig_ret_address;
-
-       reset_current_kprobe();
-       spin_unlock_irqrestore(&kretprobe_lock, flags);
-       preempt_enable_no_resched();
-
-        /*
-         * By returning a non-zero value, we are telling
-         * kprobe_handler() that we don't want the post_handler
-         * to run (and have re-enabled preemption)
-         */
-        return 1;
-}
-
-/*
- * Called after single-stepping.  p->addr is the address of the
- * instruction that has been replaced by the "breakpoint"
- * instruction.  To avoid the SMP problems that can occur when we
- * temporarily put back the original opcode to single-step, we
- * single-stepped a copy of the instruction.  The address of this
- * copy is p->ainsn.insn.
- */
-static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
-{
-       int ret;
-       unsigned int insn = *p->ainsn.insn;
-
-       regs->nip = (unsigned long)p->addr;
-       ret = emulate_step(regs, insn);
-       if (ret == 0)
-               regs->nip = (unsigned long)p->addr + 4;
-}
-
-static inline int post_kprobe_handler(struct pt_regs *regs)
-{
-       struct kprobe *cur = kprobe_running();
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-       if (!cur)
-               return 0;
-
-       if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
-               kcb->kprobe_status = KPROBE_HIT_SSDONE;
-               cur->post_handler(cur, regs, 0);
-       }
-
-       resume_execution(cur, regs);
-       regs->msr |= kcb->kprobe_saved_msr;
-
-       /* Restore the original saved kprobe variables and continue. */
-       if (kcb->kprobe_status == KPROBE_REENTER) {
-               restore_previous_kprobe(kcb);
-               goto out;
-       }
-       reset_current_kprobe();
-out:
-       preempt_enable_no_resched();
-
-       /*
-        * if somebody else is singlestepping across a probe point, msr
-        * will have SE set, in which case, continue the remaining processing
-        * of do_debug, as if this is not a probe hit.
-        */
-       if (regs->msr & MSR_SE)
-               return 0;
-
-       return 1;
-}
-
-static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
-{
-       struct kprobe *cur = kprobe_running();
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-       if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
-               return 1;
-
-       if (kcb->kprobe_status & KPROBE_HIT_SS) {
-               resume_execution(cur, regs);
-               regs->msr &= ~MSR_SE;
-               regs->msr |= kcb->kprobe_saved_msr;
-
-               reset_current_kprobe();
-               preempt_enable_no_resched();
-       }
-       return 0;
-}
-
-/*
- * Wrapper routine for handling exceptions.
- */
-int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
-                                      unsigned long val, void *data)
-{
-       struct die_args *args = (struct die_args *)data;
-       int ret = NOTIFY_DONE;
-
-       switch (val) {
-       case DIE_BPT:
-               if (kprobe_handler(args->regs))
-                       ret = NOTIFY_STOP;
-               break;
-       case DIE_SSTEP:
-               if (post_kprobe_handler(args->regs))
-                       ret = NOTIFY_STOP;
-               break;
-       case DIE_PAGE_FAULT:
-               /* kprobe_running() needs smp_processor_id() */
-               preempt_disable();
-               if (kprobe_running() &&
-                   kprobe_fault_handler(args->regs, args->trapnr))
-                       ret = NOTIFY_STOP;
-               preempt_enable();
-               break;
-       default:
-               break;
-       }
-       return ret;
-}
-
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct jprobe *jp = container_of(p, struct jprobe, kp);
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-       memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));
-
-       /* setup return addr to the jprobe handler routine */
-       regs->nip = (unsigned long)(((func_descr_t *)jp->entry)->entry);
-       regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
-
-       return 1;
-}
-
-void __kprobes jprobe_return(void)
-{
-       asm volatile("trap" ::: "memory");
-}
-
-void __kprobes jprobe_return_end(void)
-{
-};
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-       /*
-        * FIXME - we should ideally be validating that we got here because
-        * of the "trap" in jprobe_return() above, before restoring the
-        * saved regs...
-        */
-       memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
-       preempt_enable_no_resched();
-       return 1;
-}
-
-static struct kprobe trampoline_p = {
-       .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
-       .pre_handler = trampoline_probe_handler
-};
-
-int __init arch_init_kprobes(void)
-{
-       return register_kprobe(&trampoline_p);
-}
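
For orientation, arch_init_kprobes() above plugs the kretprobe trampoline into the same register_kprobe() interface that ordinary probe modules use. A hypothetical minimal probe module of roughly this kernel vintage might look like the sketch below; the module name, the idea of passing the probe address in as a module parameter (for example taken from System.map), and the handler body are illustrative assumptions, not part of this commit.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>

/* Illustrative: the probe address is supplied at insmod time so the sketch
 * does not assume any particular symbol-lookup helper is available. */
static unsigned long addr;
module_param(addr, ulong, 0);

static struct kprobe kp;

/* Runs when the breakpoint at kp.addr is hit, before single-stepping. */
static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	printk(KERN_INFO "kprobe hit at %p, nip=0x%lx\n", p->addr, regs->nip);
	return 0;	/* 0 lets kprobe_handler() go on to single-step */
}

static int __init kprobe_example_init(void)
{
	if (!addr)
		return -EINVAL;
	kp.addr = (kprobe_opcode_t *) addr;
	kp.pre_handler = handler_pre;
	return register_kprobe(&kp);
}

static void __exit kprobe_example_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(kprobe_example_init);
module_exit(kprobe_example_exit);
MODULE_LICENSE("GPL");

Returning 0 from the pre-handler hands control back to kprobe_handler(), which then single-steps the copied instruction that resume_execution() above fixes up afterwards.
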
diff --git a/arch/ppc64/kernel/machine_kexec.c b/arch/ppc64/kernel/machine_kexec.c
deleted file mode 100644 (file)
index 97c51e4..0000000
+++ /dev/null
@@ -1,358 +0,0 @@
-/*
- * machine_kexec.c - handle transition of Linux booting another kernel
- *
- * Copyright (C) 2004-2005, IBM Corp.
- *
- * Created by: Milton D Miller II
- *
- * This source code is licensed under the GNU General Public License,
- * Version 2.  See the file COPYING for more details.
- */
-
-
-#include <linux/cpumask.h>
-#include <linux/kexec.h>
-#include <linux/smp.h>
-#include <linux/thread_info.h>
-#include <linux/errno.h>
-
-#include <asm/page.h>
-#include <asm/current.h>
-#include <asm/machdep.h>
-#include <asm/cacheflush.h>
-#include <asm/paca.h>
-#include <asm/mmu.h>
-#include <asm/sections.h>      /* _end */
-#include <asm/prom.h>
-#include <asm/smp.h>
-
-#define HASH_GROUP_SIZE 0x80   /* size of each hash group, asm/mmu.h */
-
-/* Have this around till we move it into crash specific file */
-note_buf_t crash_notes[NR_CPUS];
-
-/* Dummy for now. Not sure if we need a crash shutdown in here
- * and what it would achieve. Leaving it here for now so the code compiles
- * in a generic kexec environment.
- */
-void machine_crash_shutdown(struct pt_regs *regs)
-{
-       /* do nothing right now */
-       /* smp_release_cpus() if we want smp on panic kernel */
-       /* cpu_irq_down to isolate us until we are ready */
-}
-
-int machine_kexec_prepare(struct kimage *image)
-{
-       int i;
-       unsigned long begin, end;       /* limits of segment */
-       unsigned long low, high;        /* limits of blocked memory range */
-       struct device_node *node;
-       unsigned long *basep;
-       unsigned int *sizep;
-
-       if (!ppc_md.hpte_clear_all)
-               return -ENOENT;
-
-       /*
-        * Since we use the kernel fault handlers and paging code to
-        * handle the virtual mode, we must make sure no destination
-        * overlaps kernel static data or bss.
-        */
-       for (i = 0; i < image->nr_segments; i++)
-               if (image->segment[i].mem < __pa(_end))
-                       return -ETXTBSY;
-
-       /*
-        * For non-LPAR, we absolutely can not overwrite the mmu hash
-        * table, since we are still using the bolted entries in it to
-        * do the copy.  Check that here.
-        *
-        * It is safe if the end is below the start of the blocked
-        * region (end <= low), or if the beginning is after the
-        * end of the blocked region (begin >= high).  Use the
-        * boolean identity !(a || b)  === (!a && !b).
-        */
-       if (htab_address) {
-               low = __pa(htab_address);
-               high = low + (htab_hash_mask + 1) * HASH_GROUP_SIZE;
-
-               for (i = 0; i < image->nr_segments; i++) {
-                       begin = image->segment[i].mem;
-                       end = begin + image->segment[i].memsz;
-
-                       if ((begin < high) && (end > low))
-                               return -ETXTBSY;
-               }
-       }
-
-       /* We also should not overwrite the tce tables */
-       for (node = of_find_node_by_type(NULL, "pci"); node != NULL;
-                       node = of_find_node_by_type(node, "pci")) {
-               basep = (unsigned long *)get_property(node, "linux,tce-base",
-                                                       NULL);
-               sizep = (unsigned int *)get_property(node, "linux,tce-size",
-                                                       NULL);
-               if (basep == NULL || sizep == NULL)
-                       continue;
-
-               low = *basep;
-               high = low + (*sizep);
-
-               for (i = 0; i < image->nr_segments; i++) {
-                       begin = image->segment[i].mem;
-                       end = begin + image->segment[i].memsz;
-
-                       if ((begin < high) && (end > low))
-                               return -ETXTBSY;
-               }
-       }
-
-       return 0;
-}
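
The hash-table check above (and the TCE-table check that follows it) is the standard half-open interval-overlap test the comment derives: a segment [begin, end) collides with a blocked range [low, high) exactly when begin < high and end > low. A tiny self-contained C sketch of that predicate, with illustrative numbers:

#include <assert.h>

/* Half-open intervals [begin, end) and [low, high) overlap iff neither one
 * lies entirely before the other:
 *   !(end <= low || begin >= high)  ==  (end > low) && (begin < high)   */
static int ranges_overlap(unsigned long begin, unsigned long end,
			  unsigned long low, unsigned long high)
{
	return (begin < high) && (end > low);
}

int main(void)
{
	assert(!ranges_overlap(0x0000, 0x1000, 0x1000, 0x2000)); /* touching: no overlap */
	assert( ranges_overlap(0x0800, 0x1800, 0x1000, 0x2000)); /* partial overlap */
	assert(!ranges_overlap(0x3000, 0x4000, 0x1000, 0x2000)); /* entirely above */
	return 0;
}
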
-
-void machine_kexec_cleanup(struct kimage *image)
-{
-       /* we do nothing in prepare that needs to be undone */
-}
-
-#define IND_FLAGS (IND_DESTINATION | IND_INDIRECTION | IND_DONE | IND_SOURCE)
-
-static void copy_segments(unsigned long ind)
-{
-       unsigned long entry;
-       unsigned long *ptr;
-       void *dest;
-       void *addr;
-
-       /*
-        * We rely on kexec_load to create a list that properly
-        * initializes these pointers before they are used.
-        * We will still crash if the list is wrong, but at least
-        * the compiler will be quiet.
-        */
-       ptr = NULL;
-       dest = NULL;
-
-       for (entry = ind; !(entry & IND_DONE); entry = *ptr++) {
-               addr = __va(entry & PAGE_MASK);
-
-               switch (entry & IND_FLAGS) {
-               case IND_DESTINATION:
-                       dest = addr;
-                       break;
-               case IND_INDIRECTION:
-                       ptr = addr;
-                       break;
-               case IND_SOURCE:
-                       copy_page(dest, addr);
-                       dest += PAGE_SIZE;
-               }
-       }
-}
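
The walk above is driven entirely by flag bits stored in the low bits of each list entry: a DESTINATION entry resets the copy cursor, an INDIRECTION entry redirects the walk to another page of entries, a SOURCE entry copies one page, and DONE terminates. A userspace-only model of the same walk is sketched below; the flag values, page size and buffer names are stand-ins, not the kernel's definitions.

#include <assert.h>
#include <string.h>

/* Stand-in flag values and a 64-byte "page"; real kexec uses PAGE_SIZE
 * frames and the IND_* constants. */
#define MDL_DESTINATION	0x1UL
#define MDL_INDIRECTION	0x2UL
#define MDL_DONE	0x4UL
#define MDL_SOURCE	0x8UL
#define MDL_FLAG_MASK	0xfUL
#define MDL_PAGE	64

static void copy_segments_model(unsigned long ind)
{
	unsigned long entry, *ptr = NULL;
	unsigned char *dest = NULL;

	for (entry = ind; !(entry & MDL_DONE); entry = *ptr++) {
		void *addr = (void *)(entry & ~MDL_FLAG_MASK);

		switch (entry & MDL_FLAG_MASK) {
		case MDL_DESTINATION:
			dest = addr;		/* where the next pages land */
			break;
		case MDL_INDIRECTION:
			ptr = addr;		/* continue in another entry page */
			break;
		case MDL_SOURCE:
			memcpy(dest, addr, MDL_PAGE);
			dest += MDL_PAGE;
			break;
		}
	}
}

int main(void)
{
	static unsigned char src[2][MDL_PAGE] __attribute__((aligned(16)));
	static unsigned char dst[2][MDL_PAGE] __attribute__((aligned(16)));
	static unsigned long list[4] __attribute__((aligned(16)));

	memset(src, 0xab, sizeof(src));
	list[0] = (unsigned long)dst    | MDL_DESTINATION;
	list[1] = (unsigned long)src[0] | MDL_SOURCE;
	list[2] = (unsigned long)src[1] | MDL_SOURCE;
	list[3] = MDL_DONE;

	copy_segments_model((unsigned long)list | MDL_INDIRECTION);
	assert(memcmp(dst, src, sizeof(src)) == 0);
	return 0;
}
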
-
-void kexec_copy_flush(struct kimage *image)
-{
-       long i, nr_segments = image->nr_segments;
-       struct  kexec_segment ranges[KEXEC_SEGMENT_MAX];
-
-       /* save the ranges on the stack to efficiently flush the icache */
-       memcpy(ranges, image->segment, sizeof(ranges));
-
-       /*
-        * After this call we may not use anything allocated in dynamic
-        * memory, including *image.
-        *
-        * Only globals and the stack are allowed.
-        */
-       copy_segments(image->head);
-
-       /*
-        * we need to clear the icache for all dest pages sometime,
-        * including ones that were in place on the original copy
-        */
-       for (i = 0; i < nr_segments; i++)
-               flush_icache_range(ranges[i].mem + KERNELBASE,
-                               ranges[i].mem + KERNELBASE +
-                               ranges[i].memsz);
-}
-
-#ifdef CONFIG_SMP
-
-/* FIXME: we should schedule this function to be called on all cpus by
- * interrupting them, but we would like to call it outside irq level
- * so that the interrupt controller is clean.
- */
-void kexec_smp_down(void *arg)
-{
-       if (ppc_md.kexec_cpu_down)
-               ppc_md.kexec_cpu_down(0, 1);
-
-       local_irq_disable();
-       kexec_smp_wait();
-       /* NOTREACHED */
-}
-
-static void kexec_prepare_cpus(void)
-{
-       int my_cpu, i, notified=-1;
-
-       smp_call_function(kexec_smp_down, NULL, 0, /* wait */0);
-       my_cpu = get_cpu();
-
-       /* check that the other cpus are now down (via paca hw cpu id == -1) */
-       for (i=0; i < NR_CPUS; i++) {
-               if (i == my_cpu)
-                       continue;
-
-               while (paca[i].hw_cpu_id != -1) {
-                       barrier();
-                       if (!cpu_possible(i)) {
-                               printk("kexec: cpu %d hw_cpu_id %d is not"
-                                               " possible, ignoring\n",
-                                               i, paca[i].hw_cpu_id);
-                               break;
-                       }
-                       if (!cpu_online(i)) {
-                               /* Fixme: this can be spinning in
-                                * pSeries_secondary_wait with a paca
-                                * waiting for it to go online.
-                                */
-                               printk("kexec: cpu %d hw_cpu_id %d is not"
-                                               " online, ignoring\n",
-                                               i, paca[i].hw_cpu_id);
-                               break;
-                       }
-                       if (i != notified) {
-                               printk( "kexec: waiting for cpu %d (physical"
-                                               " %d) to go down\n",
-                                               i, paca[i].hw_cpu_id);
-                               notified = i;
-                       }
-               }
-       }
-
-       /* after we tell the others to go down */
-       if (ppc_md.kexec_cpu_down)
-               ppc_md.kexec_cpu_down(0, 0);
-
-       put_cpu();
-
-       local_irq_disable();
-}
-
-#else /* ! SMP */
-
-static void kexec_prepare_cpus(void)
-{
-       /*
-        * move the secondaries to us so that we can copy
-        * the new kernel 0-0x100 safely
-        *
-        * do this if kexec in setup.c ?
-        *
-        * We need to release the cpus if we are ever going from a
-        * UP kernel to an SMP kernel.
-        */
-       smp_release_cpus();
-       if (ppc_md.kexec_cpu_down)
-               ppc_md.kexec_cpu_down(0, 0);
-       local_irq_disable();
-}
-
-#endif /* SMP */
-
-/*
- * kexec thread structure and stack.
- *
- * We need to make sure that this is 16384-byte aligned due to the
- * way process stacks are handled.  It also must be statically allocated
- * or allocated as part of the kimage, because everything else may be
- * overwritten when we copy the kexec image.  We piggyback on the
- * "init_task" linker section here to statically allocate a stack.
- *
- * We could use a smaller stack if we don't care about anything using
- * current, but that audit has not been performed.
- */
-union thread_union kexec_stack
-       __attribute__((__section__(".data.init_task"))) = { };
-
-/* Our assembly helper, in kexec_stub.S */
-extern NORET_TYPE void kexec_sequence(void *newstack, unsigned long start,
-                                       void *image, void *control,
-                                       void (*clear_all)(void)) ATTRIB_NORET;
-
-/* too late to fail here */
-void machine_kexec(struct kimage *image)
-{
-
-       /* prepare control code if any */
-
-       /* shutdown other cpus into our wait loop and quiesce interrupts */
-       kexec_prepare_cpus();
-
-       /* switch to a statically allocated stack.  Based on irq stack code.
-        * XXX: the task struct will likely be invalid once we do the copy!
-        */
-       kexec_stack.thread_info.task = current_thread_info()->task;
-       kexec_stack.thread_info.flags = 0;
-
-       /* Some things are best done in assembly.  Finding globals with
-        * a toc is easier in C, so pass in what we can.
-        */
-       kexec_sequence(&kexec_stack, image->start, image,
-                       page_address(image->control_code_page),
-                       ppc_md.hpte_clear_all);
-       /* NOTREACHED */
-}
-
-/* Values we need to export to the second kernel via the device tree. */
-static unsigned long htab_base, htab_size, kernel_end;
-
-static struct property htab_base_prop = {
-       .name = "linux,htab-base",
-       .length = sizeof(unsigned long),
-       .value = (unsigned char *)&htab_base,
-};
-
-static struct property htab_size_prop = {
-       .name = "linux,htab-size",
-       .length = sizeof(unsigned long),
-       .value = (unsigned char *)&htab_size,
-};
-
-static struct property kernel_end_prop = {
-       .name = "linux,kernel-end",
-       .length = sizeof(unsigned long),
-       .value = (unsigned char *)&kernel_end,
-};
-
-static void __init export_htab_values(void)
-{
-       struct device_node *node;
-
-       node = of_find_node_by_path("/chosen");
-       if (!node)
-               return;
-
-       kernel_end = __pa(_end);
-       prom_add_property(node, &kernel_end_prop);
-
-       /* On machines with no htab htab_address is NULL */
-       if (NULL == htab_address)
-               goto out;
-
-       htab_base = __pa(htab_address);
-       prom_add_property(node, &htab_base_prop);
-
-       htab_size = 1UL << ppc64_pft_size;
-       prom_add_property(node, &htab_size_prop);
-
- out:
-       of_node_put(node);
-}
-
-void __init kexec_setup(void)
-{
-       export_htab_values();
-}
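
The properties exported above end up under the /chosen node of the device tree, where the kexec'd kernel can pick them up to locate the existing hash table and the end of the old kernel image. Purely as an illustration, here is a userspace sketch that reads one of them back, assuming the running kernel exposes the flattened tree under /proc/device-tree (that path and the property's presence are assumptions, not something this commit guarantees):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const char *path = "/proc/device-tree/chosen/linux,kernel-end";
	unsigned char buf[8];
	uint64_t val = 0;
	size_t i;
	FILE *f = fopen(path, "rb");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fread(buf, 1, sizeof(buf), f) != sizeof(buf)) {
		fprintf(stderr, "short read from %s\n", path);
		fclose(f);
		return 1;
	}
	fclose(f);

	for (i = 0; i < sizeof(buf); i++)	/* the property is stored big-endian */
		val = (val << 8) | buf[i];
	printf("linux,kernel-end = 0x%llx\n", (unsigned long long)val);
	return 0;
}
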
diff --git a/arch/ppc64/kernel/misc.S b/arch/ppc64/kernel/misc.S
deleted file mode 100644 (file)
index 5e089de..0000000
+++ /dev/null
@@ -1,940 +0,0 @@
-/*
- *  arch/ppc/kernel/misc.S
- *
- *  
- *
- * This file contains miscellaneous low-level functions.
- *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
- *
- * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
- * and Paul Mackerras.
- * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
- * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com) 
- * 
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- */
-
-#include <linux/config.h>
-#include <linux/sys.h>
-#include <asm/unistd.h>
-#include <asm/errno.h>
-#include <asm/processor.h>
-#include <asm/page.h>
-#include <asm/cache.h>
-#include <asm/ppc_asm.h>
-#include <asm/asm-offsets.h>
-#include <asm/cputable.h>
-#include <asm/thread_info.h>
-
-       .text
-
-/*
- * Returns (address we were linked at) - (address we are running at)
- * for use before the text and data are mapped to KERNELBASE.
- */
-
-_GLOBAL(reloc_offset)
-       mflr    r0
-       bl      1f
-1:     mflr    r3
-       LOADADDR(r4,1b)
-       sub     r3,r4,r3
-       mtlr    r0
-       blr
-
-_GLOBAL(get_msr)
-       mfmsr   r3
-       blr
-
-_GLOBAL(get_dar)
-       mfdar   r3
-       blr
-
-_GLOBAL(get_srr0)
-       mfsrr0  r3
-       blr
-
-_GLOBAL(get_srr1)
-       mfsrr1  r3
-       blr
-       
-_GLOBAL(get_sp)
-       mr      r3,r1
-       blr
-
-#ifdef CONFIG_IRQSTACKS
-_GLOBAL(call_do_softirq)
-       mflr    r0
-       std     r0,16(r1)
-       stdu    r1,THREAD_SIZE-112(r3)
-       mr      r1,r3
-       bl      .__do_softirq
-       ld      r1,0(r1)
-       ld      r0,16(r1)
-       mtlr    r0
-       blr
-
-_GLOBAL(call___do_IRQ)
-       mflr    r0
-       std     r0,16(r1)
-       stdu    r1,THREAD_SIZE-112(r5)
-       mr      r1,r5
-       bl      .__do_IRQ
-       ld      r1,0(r1)
-       ld      r0,16(r1)
-       mtlr    r0
-       blr
-#endif /* CONFIG_IRQSTACKS */
-
-/*
- * To be called by C code which needs to do some operations with MMU
- * disabled. Note that interrupts have to be disabled by the caller
- * prior to calling us. The code called _MUST_ be in the RMO of course
- * and part of the linear mapping as we don't attempt to translate the
- * stack pointer at all. The function is called with the stack switched
- * to this CPU emergency stack
- *
- * prototype is void *call_with_mmu_off(void *func, void *data);
- *
- * the called function is expected to be of the form
- *
- * void *called(void *data); 
- */
-_GLOBAL(call_with_mmu_off)
-       mflr    r0                      /* get link, save it on stackframe */
-       std     r0,16(r1)
-       mr      r1,r5                   /* save old stack ptr */
-       ld      r1,PACAEMERGSP(r13)     /* get emerg. stack */
-       subi    r1,r1,STACK_FRAME_OVERHEAD
-       std     r0,16(r1)               /* save link on emerg. stack */
-       std     r5,0(r1)                /* save old stack ptr in backchain */
-       ld      r3,0(r3)                /* get to real function ptr (assume same TOC) */
-       bl      2f                      /* we need LR to return, continue at label 2 */
-
-       ld      r0,16(r1)               /* we return here from the call, get LR and */
-       ld      r1,0(r1)                /* .. old stack ptr */
-       mtspr   SPRN_SRR0,r0            /* and get back to virtual mode with these */
-       mfmsr   r4
-       ori     r4,r4,MSR_IR|MSR_DR
-       mtspr   SPRN_SRR1,r4
-       rfid
-
-2:     mtspr   SPRN_SRR0,r3            /* coming from above, enter real mode */
-       mr      r3,r4                   /* get parameter */
-       mfmsr   r0
-       ori     r0,r0,MSR_IR|MSR_DR
-       xori    r0,r0,MSR_IR|MSR_DR
-       mtspr   SPRN_SRR1,r0
-       rfid
-
-
-       .section        ".toc","aw"
-PPC64_CACHES:
-       .tc             ppc64_caches[TC],ppc64_caches
-       .section        ".text"
-
-/*
- * Write any modified data cache blocks out to memory
- * and invalidate the corresponding instruction cache blocks.
- *
- * flush_icache_range(unsigned long start, unsigned long stop)
- *
- *   flush all bytes from start through stop-1 inclusive
- */
-
-_KPROBE(__flush_icache_range)
-
-/*
- * Flush the data cache to memory 
- * 
- * Different systems have different cache line sizes
- * and in some cases i-cache and d-cache line sizes differ from
- * each other.
- */
-       ld      r10,PPC64_CACHES@toc(r2)
-       lwz     r7,DCACHEL1LINESIZE(r10)/* Get cache line size */
-       addi    r5,r7,-1
-       andc    r6,r3,r5                /* round low to line bdy */
-       subf    r8,r6,r4                /* compute length */
-       add     r8,r8,r5                /* ensure we get enough */
-       lwz     r9,DCACHEL1LOGLINESIZE(r10)     /* Get log-2 of cache line size */
-       srw.    r8,r8,r9                /* compute line count */
-       beqlr                           /* nothing to do? */
-       mtctr   r8
-1:     dcbst   0,r6
-       add     r6,r6,r7
-       bdnz    1b
-       sync
-
-/* Now invalidate the instruction cache */
-       
-       lwz     r7,ICACHEL1LINESIZE(r10)        /* Get Icache line size */
-       addi    r5,r7,-1
-       andc    r6,r3,r5                /* round low to line bdy */
-       subf    r8,r6,r4                /* compute length */
-       add     r8,r8,r5
-       lwz     r9,ICACHEL1LOGLINESIZE(r10)     /* Get log-2 of Icache line size */
-       srw.    r8,r8,r9                /* compute line count */
-       beqlr                           /* nothing to do? */
-       mtctr   r8
-2:     icbi    0,r6
-       add     r6,r6,r7
-       bdnz    2b
-       isync
-       blr
-
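
Both loops above follow the same recipe: round the start address down to a cache-line boundary, pad the length up by line-size minus one, and shift right by log2(line size) to get the number of dcbst/icbi iterations. A minimal userspace C sketch of just that arithmetic (function and variable names are illustrative):

#include <assert.h>
#include <stdint.h>

/* Number of cache lines touched by [start, stop), mirroring the
 * andc/subf/add/srw sequence in __flush_icache_range above.
 * line_size must be a power of two and log2_line == log2(line_size). */
static unsigned long cache_line_count(uintptr_t start, uintptr_t stop,
				      unsigned int line_size,
				      unsigned int log2_line)
{
	uintptr_t aligned = start & ~(uintptr_t)(line_size - 1);
	uintptr_t padded  = (stop - aligned) + (line_size - 1);
	return padded >> log2_line;
}

int main(void)
{
	assert(cache_line_count(0x1000, 0x1000, 128, 7) == 0); /* empty range */
	assert(cache_line_count(0x1000, 0x1001, 128, 7) == 1); /* single byte */
	assert(cache_line_count(0x107f, 0x1081, 128, 7) == 2); /* crosses a line */
	return 0;
}

The i-cache pass repeats the computation with the i-cache line size because, as the comment notes, the two line sizes can differ.
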
-       .text
-/*
- * Like above, but only do the D-cache.
- *
- * flush_dcache_range(unsigned long start, unsigned long stop)
- *
- *    flush all bytes from start to stop-1 inclusive
- */
-_GLOBAL(flush_dcache_range)
-
-/*
- * Flush the data cache to memory 
- * 
- * Different systems have different cache line sizes
- */
-       ld      r10,PPC64_CACHES@toc(r2)
-       lwz     r7,DCACHEL1LINESIZE(r10)        /* Get dcache line size */
-       addi    r5,r7,-1
-       andc    r6,r3,r5                /* round low to line bdy */
-       subf    r8,r6,r4                /* compute length */
-       add     r8,r8,r5                /* ensure we get enough */
-       lwz     r9,DCACHEL1LOGLINESIZE(r10)     /* Get log-2 of dcache line size */
-       srw.    r8,r8,r9                /* compute line count */
-       beqlr                           /* nothing to do? */
-       mtctr   r8
-0:     dcbst   0,r6
-       add     r6,r6,r7
-       bdnz    0b
-       sync
-       blr
-
-/*
- * Like above, but works on non-mapped physical addresses.
- * Use only for non-LPAR setups ! It also assumes real mode
- * is cacheable. Used for flushing out the DART before using
- * it as uncacheable memory 
- *
- * flush_dcache_phys_range(unsigned long start, unsigned long stop)
- *
- *    flush all bytes from start to stop-1 inclusive
- */
-_GLOBAL(flush_dcache_phys_range)
-       ld      r10,PPC64_CACHES@toc(r2)
-       lwz     r7,DCACHEL1LINESIZE(r10)        /* Get dcache line size */
-       addi    r5,r7,-1
-       andc    r6,r3,r5                /* round low to line bdy */
-       subf    r8,r6,r4                /* compute length */
-       add     r8,r8,r5                /* ensure we get enough */
-       lwz     r9,DCACHEL1LOGLINESIZE(r10)     /* Get log-2 of dcache line size */
-       srw.    r8,r8,r9                /* compute line count */
-       beqlr                           /* nothing to do? */
-       mfmsr   r5                      /* Disable MMU Data Relocation */
-       ori     r0,r5,MSR_DR
-       xori    r0,r0,MSR_DR
-       sync
-       mtmsr   r0
-       sync
-       isync
-       mtctr   r8
-0:     dcbst   0,r6
-       add     r6,r6,r7
-       bdnz    0b
-       sync
-       isync
-       mtmsr   r5                      /* Re-enable MMU Data Relocation */
-       sync
-       isync
-       blr
-
-_GLOBAL(flush_inval_dcache_range)
-       ld      r10,PPC64_CACHES@toc(r2)
-       lwz     r7,DCACHEL1LINESIZE(r10)        /* Get dcache line size */
-       addi    r5,r7,-1
-       andc    r6,r3,r5                /* round low to line bdy */
-       subf    r8,r6,r4                /* compute length */
-       add     r8,r8,r5                /* ensure we get enough */
-       lwz     r9,DCACHEL1LOGLINESIZE(r10)/* Get log-2 of dcache line size */
-       srw.    r8,r8,r9                /* compute line count */
-       beqlr                           /* nothing to do? */
-       sync
-       isync
-       mtctr   r8
-0:     dcbf    0,r6
-       add     r6,r6,r7
-       bdnz    0b
-       sync
-       isync
-       blr
-
-
-/*
- * Flush a particular page from the data cache to RAM.
- * Note: this is necessary because the instruction cache does *not*
- * snoop from the data cache.
- *
- *     void __flush_dcache_icache(void *page)
- */
-_GLOBAL(__flush_dcache_icache)
-/*
- * Flush the data cache to memory 
- * 
- * Different systems have different cache line sizes
- */
-
-/* Flush the dcache */
-       ld      r7,PPC64_CACHES@toc(r2)
-       clrrdi  r3,r3,PAGE_SHIFT                    /* Page align */
-       lwz     r4,DCACHEL1LINESPERPAGE(r7)     /* Get # dcache lines per page */
-       lwz     r5,DCACHEL1LINESIZE(r7)         /* Get dcache line size */
-       mr      r6,r3
-       mtctr   r4
-0:     dcbst   0,r6
-       add     r6,r6,r5
-       bdnz    0b
-       sync
-
-/* Now invalidate the icache */        
-
-       lwz     r4,ICACHEL1LINESPERPAGE(r7)     /* Get # icache lines per page */
-       lwz     r5,ICACHEL1LINESIZE(r7)         /* Get icache line size */
-       mtctr   r4
-1:     icbi    0,r3
-       add     r3,r3,r5
-       bdnz    1b
-       isync
-       blr
-       
-/*
- * I/O string operations
- *
- * insb(port, buf, len)
- * outsb(port, buf, len)
- * insw(port, buf, len)
- * outsw(port, buf, len)
- * insl(port, buf, len)
- * outsl(port, buf, len)
- * insw_ns(port, buf, len)
- * outsw_ns(port, buf, len)
- * insl_ns(port, buf, len)
- * outsl_ns(port, buf, len)
- *
- * The *_ns versions don't do byte-swapping.
- */
-_GLOBAL(_insb)
-       cmpwi   0,r5,0
-       mtctr   r5
-       subi    r4,r4,1
-       blelr-
-00:    lbz     r5,0(r3)
-       eieio
-       stbu    r5,1(r4)
-       bdnz    00b
-       twi     0,r5,0
-       isync
-       blr
-
-_GLOBAL(_outsb)
-       cmpwi   0,r5,0
-       mtctr   r5
-       subi    r4,r4,1
-       blelr-
-00:    lbzu    r5,1(r4)
-       stb     r5,0(r3)
-       bdnz    00b
-       sync
-       blr     
-
-_GLOBAL(_insw)
-       cmpwi   0,r5,0
-       mtctr   r5
-       subi    r4,r4,2
-       blelr-
-00:    lhbrx   r5,0,r3
-       eieio
-       sthu    r5,2(r4)
-       bdnz    00b
-       twi     0,r5,0
-       isync
-       blr
-
-_GLOBAL(_outsw)
-       cmpwi   0,r5,0
-       mtctr   r5
-       subi    r4,r4,2
-       blelr-
-00:    lhzu    r5,2(r4)
-       sthbrx  r5,0,r3 
-       bdnz    00b
-       sync
-       blr     
-
-_GLOBAL(_insl)
-       cmpwi   0,r5,0
-       mtctr   r5
-       subi    r4,r4,4
-       blelr-
-00:    lwbrx   r5,0,r3
-       eieio
-       stwu    r5,4(r4)
-       bdnz    00b
-       twi     0,r5,0
-       isync
-       blr
-
-_GLOBAL(_outsl)
-       cmpwi   0,r5,0
-       mtctr   r5
-       subi    r4,r4,4
-       blelr-
-00:    lwzu    r5,4(r4)
-       stwbrx  r5,0,r3
-       bdnz    00b
-       sync
-       blr     
-
-/* _GLOBAL(ide_insw) now in drivers/ide/ide-iops.c */
-_GLOBAL(_insw_ns)
-       cmpwi   0,r5,0
-       mtctr   r5
-       subi    r4,r4,2
-       blelr-
-00:    lhz     r5,0(r3)
-       eieio
-       sthu    r5,2(r4)
-       bdnz    00b
-       twi     0,r5,0
-       isync
-       blr
-
-/* _GLOBAL(ide_outsw) now in drivers/ide/ide-iops.c */
-_GLOBAL(_outsw_ns)
-       cmpwi   0,r5,0
-       mtctr   r5
-       subi    r4,r4,2
-       blelr-
-00:    lhzu    r5,2(r4)
-       sth     r5,0(r3)
-       bdnz    00b
-       sync
-       blr     
-
-_GLOBAL(_insl_ns)
-       cmpwi   0,r5,0
-       mtctr   r5
-       subi    r4,r4,4
-       blelr-
-00:    lwz     r5,0(r3)
-       eieio
-       stwu    r5,4(r4)
-       bdnz    00b
-       twi     0,r5,0
-       isync
-       blr
-
-_GLOBAL(_outsl_ns)
-       cmpwi   0,r5,0
-       mtctr   r5
-       subi    r4,r4,4
-       blelr-
-00:    lwzu    r5,4(r4)
-       stw     r5,0(r3)
-       bdnz    00b
-       sync
-       blr     
-
-/*
- * identify_cpu: identifies the CPU and calls setup_cpu
- * In: r3 = base of the cpu_specs array
- *     r4 = address of cur_cpu_spec
- *     r5 = relocation offset
- */
-_GLOBAL(identify_cpu)
-       mfpvr   r7
-1:
-       lwz     r8,CPU_SPEC_PVR_MASK(r3)
-       and     r8,r8,r7
-       lwz     r9,CPU_SPEC_PVR_VALUE(r3)
-       cmplw   0,r9,r8
-       beq     1f
-       addi    r3,r3,CPU_SPEC_ENTRY_SIZE
-       b       1b
-1:
-       add     r0,r3,r5
-       std     r0,0(r4)
-       ld      r4,CPU_SPEC_SETUP(r3)
-       sub     r4,r4,r5
-       ld      r4,0(r4)
-       sub     r4,r4,r5
-       mtctr   r4
-       /* Calling convention for cpu setup is r3=offset, r4=cur_cpu_spec */
-       mr      r4,r3
-       mr      r3,r5
-       bctr
-
-/*
- * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
- * and writes nops over sections of code that don't apply to this cpu.
- * r3 = data offset (not changed)
- */
-_GLOBAL(do_cpu_ftr_fixups)
-       /* Get CPU 0 features */
-       LOADADDR(r6,cur_cpu_spec)
-       sub     r6,r6,r3
-       ld      r4,0(r6)
-       sub     r4,r4,r3
-       ld      r4,CPU_SPEC_FEATURES(r4)
-       /* Get the fixup table */
-       LOADADDR(r6,__start___ftr_fixup)
-       sub     r6,r6,r3
-       LOADADDR(r7,__stop___ftr_fixup)
-       sub     r7,r7,r3
-       /* Do the fixup */
-1:     cmpld   r6,r7
-       bgelr
-       addi    r6,r6,32
-       ld      r8,-32(r6)      /* mask */
-       and     r8,r8,r4
-       ld      r9,-24(r6)      /* value */
-       cmpld   r8,r9
-       beq     1b
-       ld      r8,-16(r6)      /* section begin */
-       ld      r9,-8(r6)       /* section end */
-       subf.   r9,r8,r9
-       beq     1b
-       /* write nops over the section of code */
-       /* todo: if large section, add a branch at the start of it */
-       srwi    r9,r9,2
-       mtctr   r9
-       sub     r8,r8,r3
-       lis     r0,0x60000000@h /* nop */
-3:     stw     r0,0(r8)
-       andi.   r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
-       beq     2f
-       dcbst   0,r8            /* suboptimal, but simpler */
-       sync
-       icbi    0,r8
-2:     addi    r8,r8,4
-       bdnz    3b
-       sync                    /* additional sync needed on g4 */
-       isync
-       b       1b
-
-#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
-/*
- * Do an IO access in real mode
- */
-_GLOBAL(real_readb)
-       mfmsr   r7
-       ori     r0,r7,MSR_DR
-       xori    r0,r0,MSR_DR
-       sync
-       mtmsrd  r0
-       sync
-       isync
-       mfspr   r6,SPRN_HID4
-       rldicl  r5,r6,32,0
-       ori     r5,r5,0x100
-       rldicl  r5,r5,32,0
-       sync
-       mtspr   SPRN_HID4,r5
-       isync
-       slbia
-       isync
-       lbz     r3,0(r3)
-       sync
-       mtspr   SPRN_HID4,r6
-       isync
-       slbia
-       isync
-       mtmsrd  r7
-       sync
-       isync
-       blr
-
-/*
- * Do an IO access in real mode
- */
-_GLOBAL(real_writeb)
-       mfmsr   r7
-       ori     r0,r7,MSR_DR
-       xori    r0,r0,MSR_DR
-       sync
-       mtmsrd  r0
-       sync
-       isync
-       mfspr   r6,SPRN_HID4
-       rldicl  r5,r6,32,0
-       ori     r5,r5,0x100
-       rldicl  r5,r5,32,0
-       sync
-       mtspr   SPRN_HID4,r5
-       isync
-       slbia
-       isync
-       stb     r3,0(r4)
-       sync
-       mtspr   SPRN_HID4,r6
-       isync
-       slbia
-       isync
-       mtmsrd  r7
-       sync
-       isync
-       blr
-#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */
-
-/*
- * SCOM access functions for 970 (FX only for now)
- *
- * unsigned long scom970_read(unsigned int address);
- * void scom970_write(unsigned int address, unsigned long value);
- *
- * The address passed in is the 24 bits register address. This code
- * is 970 specific and will not check the status bits, so you should
- * know what you are doing.
- */
-_GLOBAL(scom970_read)
-       /* interrupts off */
-       mfmsr   r4
-       ori     r0,r4,MSR_EE
-       xori    r0,r0,MSR_EE
-       mtmsrd  r0,1
-
-       /* rotate the 24-bit SCOM address 8 bits left and mask out its low 8 bits
-        * (including parity). On current CPUs they must be 0'd,
-        * and finally OR in the RW bit
-        */
-       rlwinm  r3,r3,8,0,15
-       ori     r3,r3,0x8000
-
-       /* do the actual scom read */
-       sync
-       mtspr   SPRN_SCOMC,r3
-       isync
-       mfspr   r3,SPRN_SCOMD
-       isync
-       mfspr   r0,SPRN_SCOMC
-       isync
-
-       /* XXX: fixup result on some buggy 970's (ouch ! we lost a bit, bah
-        * that's the best we can do). Not implemented yet as we don't use
-        * the scom on any of the bogus CPUs yet, but may have to be done
-        * ultimately
-        */
-
-       /* restore interrupts */
-       mtmsrd  r4,1
-       blr
-
-
-_GLOBAL(scom970_write)
-       /* interrupts off */
-       mfmsr   r5
-       ori     r0,r5,MSR_EE
-       xori    r0,r0,MSR_EE
-       mtmsrd  r0,1
-
-       /* rotate the 24-bit SCOM address 8 bits left and mask out its low 8 bits
-        * (including parity). On current CPUs they must be 0'd.
-        */
-
-       rlwinm  r3,r3,8,0,15
-
-       sync
-       mtspr   SPRN_SCOMD,r4      /* write data */
-       isync
-       mtspr   SPRN_SCOMC,r3      /* write command */
-       isync
-       mfspr   3,SPRN_SCOMC
-       isync
-
-       /* restore interrupts */
-       mtmsrd  r5,1
-       blr
-
-
-/*
- * Create a kernel thread
- *   kernel_thread(fn, arg, flags)
- */
-_GLOBAL(kernel_thread)
-       std     r29,-24(r1)
-       std     r30,-16(r1)
-       stdu    r1,-STACK_FRAME_OVERHEAD(r1)
-       mr      r29,r3
-       mr      r30,r4
-       ori     r3,r5,CLONE_VM  /* flags */
-       oris    r3,r3,(CLONE_UNTRACED>>16)
-       li      r4,0            /* new sp (unused) */
-       li      r0,__NR_clone
-       sc
-       cmpdi   0,r3,0          /* parent or child? */
-       bne     1f              /* return if parent */
-       li      r0,0
-       stdu    r0,-STACK_FRAME_OVERHEAD(r1)
-       ld      r2,8(r29)
-       ld      r29,0(r29)
-       mtlr    r29              /* fn addr in lr */
-       mr      r3,r30          /* load arg and call fn */
-       blrl
-       li      r0,__NR_exit    /* exit after child exits */
-        li     r3,0
-       sc
-1:     addi    r1,r1,STACK_FRAME_OVERHEAD      
-       ld      r29,-24(r1)
-       ld      r30,-16(r1)
-       blr
-
-/*
- * disable_kernel_fp()
- * Disable the FPU.
- */
-_GLOBAL(disable_kernel_fp)
-       mfmsr   r3
-       rldicl  r0,r3,(63-MSR_FP_LG),1
-       rldicl  r3,r0,(MSR_FP_LG+1),0
-       mtmsrd  r3                      /* disable use of fpu now */
-       isync
-       blr
-
-#ifdef CONFIG_ALTIVEC
-
-#if 0 /* this has no callers for now */
-/*
- * disable_kernel_altivec()
- * Disable the VMX.
- */
-_GLOBAL(disable_kernel_altivec)
-       mfmsr   r3
-       rldicl  r0,r3,(63-MSR_VEC_LG),1
-       rldicl  r3,r0,(MSR_VEC_LG+1),0
-       mtmsrd  r3                      /* disable use of VMX now */
-       isync
-       blr
-#endif /* 0 */
-
-/*
- * giveup_altivec(tsk)
- * Disable VMX for the task given as the argument,
- * and save the vector registers in its thread_struct.
- * Enables the VMX for use in the kernel on return.
- */
-_GLOBAL(giveup_altivec)
-       mfmsr   r5
-       oris    r5,r5,MSR_VEC@h
-       mtmsrd  r5                      /* enable use of VMX now */
-       isync
-       cmpdi   0,r3,0
-       beqlr-                          /* if no previous owner, done */
-       addi    r3,r3,THREAD            /* want THREAD of task */
-       ld      r5,PT_REGS(r3)
-       cmpdi   0,r5,0
-       SAVE_32VRS(0,r4,r3)
-       mfvscr  vr0
-       li      r4,THREAD_VSCR
-       stvx    vr0,r4,r3
-       beq     1f
-       ld      r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-       lis     r3,MSR_VEC@h
-       andc    r4,r4,r3                /* disable FP for previous task */
-       std     r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#ifndef CONFIG_SMP
-       li      r5,0
-       ld      r4,last_task_used_altivec@got(r2)
-       std     r5,0(r4)
-#endif /* CONFIG_SMP */
-       blr
-
-#endif /* CONFIG_ALTIVEC */
-
-_GLOBAL(__setup_cpu_power3)
-       blr
-
-_GLOBAL(execve)
-       li      r0,__NR_execve
-       sc
-       bnslr
-       neg     r3,r3
-       blr
-
-/* kexec_wait(phys_cpu)
- *
- * wait for the flag to change, indicating this kernel is going away but
- * the slave code for the next one is at addresses 0 to 0x100.
- *
- * This is used by all slaves.
- *
- * Physical (hardware) cpu id should be in r3.
- */
-_GLOBAL(kexec_wait)
-       bl      1f
-1:     mflr    r5
-       addi    r5,r5,kexec_flag-1b
-
-99:    HMT_LOW
-#ifdef CONFIG_KEXEC            /* use no memory without kexec */
-       lwz     r4,0(r5)
-       cmpwi   0,r4,0
-       bnea    0x60
-#endif
-       b       99b
-
-/* this can be in text because we won't change it until we are
- * running in real mode anyway
- */
-kexec_flag:
-       .long   0
-
-
-#ifdef CONFIG_KEXEC
-
-/* kexec_smp_wait(void)
- *
- * call with interrupts off
- * note: this is a terminal routine, it does not save lr
- *
- * get phys id from paca
- * set paca id to -1 to say we got here
- * switch to real mode
- * join other cpus in kexec_wait(phys_id)
- */
-_GLOBAL(kexec_smp_wait)
-       lhz     r3,PACAHWCPUID(r13)
-       li      r4,-1
-       sth     r4,PACAHWCPUID(r13)     /* let others know we left */
-       bl      real_mode
-       b       .kexec_wait
-
-/*
- * switch to real mode (turn mmu off)
- * we use the early kernel trick that the hardware ignores bits
- * 0 and 1 (big endian) of the effective address in real mode
- *
- * don't overwrite r3 here, it is live for kexec_wait above.
- */
-real_mode:     /* assume normal blr return */
-1:     li      r9,MSR_RI
-       li      r10,MSR_DR|MSR_IR
-       mflr    r11             /* return address to SRR0 */
-       mfmsr   r12
-       andc    r9,r12,r9
-       andc    r10,r12,r10
-
-       mtmsrd  r9,1
-       mtspr   SPRN_SRR1,r10
-       mtspr   SPRN_SRR0,r11
-       rfid
-
-
-/*
- * kexec_sequence(newstack, start, image, control, clear_all())
- *
- * does the grungy work with stack switching and real mode switches
- * also does simple calls to other code
- */
-
-_GLOBAL(kexec_sequence)
-       mflr    r0
-       std     r0,16(r1)
-
-       /* switch stacks to newstack -- &kexec_stack.stack */
-       stdu    r1,THREAD_SIZE-112(r3)
-       mr      r1,r3
-
-       li      r0,0
-       std     r0,16(r1)
-
-       /* save regs for local vars on new stack.
-        * yes, we won't go back, but ...
-        */
-       std     r31,-8(r1)
-       std     r30,-16(r1)
-       std     r29,-24(r1)
-       std     r28,-32(r1)
-       std     r27,-40(r1)
-       std     r26,-48(r1)
-       std     r25,-56(r1)
-
-       stdu    r1,-112-64(r1)
-
-       /* save args into preserved regs */
-       mr      r31,r3                  /* newstack (both) */
-       mr      r30,r4                  /* start (real) */
-       mr      r29,r5                  /* image (virt) */
-       mr      r28,r6                  /* control, unused */
-       mr      r27,r7                  /* clear_all() fn desc */
-       mr      r26,r8                  /* spare */
-       lhz     r25,PACAHWCPUID(r13)    /* get our phys cpu from paca */
-
-       /* disable interrupts, we are overwriting kernel data next */
-       mfmsr   r3
-       rlwinm  r3,r3,0,17,15
-       mtmsrd  r3,1
-
-       /* copy dest pages, flush whole dest image */
-       mr      r3,r29
-       bl      .kexec_copy_flush       /* (image) */
-
-       /* turn off mmu */
-       bl      real_mode
-
-       /* clear out hardware hash page table and tlb */
-       ld      r5,0(r27)               /* deref function descriptor */
-       mtctr   r5
-       bctrl                           /* ppc_md.hash_clear_all(void); */
-
-/*
- *   kexec image calling is:
- *      the first 0x100 bytes of the entry point are copied to 0
- *
- *      all slaves branch to slave = 0x60 (absolute)
- *              slave(phys_cpu_id);
- *
- *      master goes to start = entry point
- *              start(phys_cpu_id, start, 0);
- *
- *
- *   a wrapper is needed to call existing kernels, here is an approximate
- *   description of one method:
- *
- * v2: (2.6.10)
- *   start will be near the boot_block (maybe 0x100 bytes before it?)
- *   it will have a 0x60, which will b to boot_block, where it will wait
- *   and 0 will store phys into struct boot-block and load r3 from there,
- *   copy kernel 0-0x100 and tell slaves to back down to 0x60 again
- *
- * v1: (2.6.9)
- *    boot block will have all cpus scanning device tree to see if they
- *    are the boot cpu ?????
- *    other device tree differences (prop sizes, va vs pa, etc)...
- */
-
-       /* copy  0x100 bytes starting at start to 0 */
-       li      r3,0
-       mr      r4,r30
-       li      r5,0x100
-       li      r6,0
-       bl      .copy_and_flush /* (dest, src, copy limit, start offset) */
-1:     /* assume normal blr return */
-
-       /* release other cpus to the new kernel secondary start at 0x60 */
-       mflr    r5
-       li      r6,1
-       stw     r6,kexec_flag-1b(5)
-       mr      r3,r25  # my phys cpu
-       mr      r4,r30  # start, aka phys mem offset
-       mtlr    4
-       li      r5,0
-       blr     /* image->start(physid, image->start, 0); */
-#endif /* CONFIG_KEXEC */
diff --git a/arch/ppc64/kernel/module.c b/arch/ppc64/kernel/module.c
deleted file mode 100644 (file)
index 928b858..0000000
+++ /dev/null
@@ -1,455 +0,0 @@
-/*  Kernel module help for PPC64.
-    Copyright (C) 2001, 2003 Rusty Russell IBM Corporation.
-
-    This program is free software; you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation; either version 2 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-*/
-#include <linux/module.h>
-#include <linux/elf.h>
-#include <linux/moduleloader.h>
-#include <linux/err.h>
-#include <linux/vmalloc.h>
-#include <asm/module.h>
-#include <asm/uaccess.h>
-
-/* FIXME: We don't do .init separately.  To do this, we'd need to have
-   a separate r2 value in the init and core section, and stub between
-   them, too.
-
-   Using a magic allocator which places modules within 32MB solves
-   this, and makes other things simpler.  Anton?
-   --RR.  */
-#if 0
-#define DEBUGP printk
-#else
-#define DEBUGP(fmt , ...)
-#endif
-
-/* There's actually a third entry here, but it's unused */
-struct ppc64_opd_entry
-{
-       unsigned long funcaddr;
-       unsigned long r2;
-};
-
-/* Like PPC32, we need little trampolines to do > 24-bit jumps (into
-   the kernel itself).  But on PPC64, these need to be used for every
-   jump, actually, to reset r2 (TOC+0x8000). */
-struct ppc64_stub_entry
-{
-       /* 28 byte jump instruction sequence (7 instructions) */
-       unsigned char jump[28];
-       unsigned char unused[4];
-       /* Data for the above code */
-       struct ppc64_opd_entry opd;
-};
-
-/* We use a stub to fix up r2 (TOC ptr) and to jump to the (external)
-   function which may be more than 24-bits away.  We could simply
-   patch the new r2 value and function pointer into the stub, but it's
-   significantly shorter to put these values at the end of the stub
-   code, and patch the stub address (32-bits relative to the TOC ptr,
-   r2) into the stub. */
-static struct ppc64_stub_entry ppc64_stub =
-{ .jump = {
-       0x3d, 0x82, 0x00, 0x00, /* addis   r12,r2, <high> */
-       0x39, 0x8c, 0x00, 0x00, /* addi    r12,r12, <low> */
-       /* Save current r2 value in magic place on the stack. */
-       0xf8, 0x41, 0x00, 0x28, /* std     r2,40(r1) */
-       0xe9, 0x6c, 0x00, 0x20, /* ld      r11,32(r12) */
-       0xe8, 0x4c, 0x00, 0x28, /* ld      r2,40(r12) */
-       0x7d, 0x69, 0x03, 0xa6, /* mtctr   r11 */
-       0x4e, 0x80, 0x04, 0x20  /* bctr */
-} };
-
-/* Count how many different 24-bit relocations (different symbol,
-   different addend) */
-static unsigned int count_relocs(const Elf64_Rela *rela, unsigned int num)
-{
-       unsigned int i, j, ret = 0;
-
-       /* FIXME: Only count external ones --RR */
-       /* Sure, this is order(n^2), but it's usually short, and not
-           time critical */
-       for (i = 0; i < num; i++) {
-               /* Only count 24-bit relocs, others don't need stubs */
-               if (ELF64_R_TYPE(rela[i].r_info) != R_PPC_REL24)
-                       continue;
-               for (j = 0; j < i; j++) {
-                       /* If this addend appeared before, it's
-                           already been counted */
-                       if (rela[i].r_info == rela[j].r_info
-                           && rela[i].r_addend == rela[j].r_addend)
-                               break;
-               }
-               if (j == i) ret++;
-       }
-       return ret;
-}
-
-void *module_alloc(unsigned long size)
-{
-       if (size == 0)
-               return NULL;
-
-       return vmalloc_exec(size);
-}
-
-/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
-{
-       vfree(module_region);
-       /* FIXME: If module_region == mod->init_region, trim exception
-           table entries. */
-}
-
-/* Get size of potential trampolines required. */
-static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
-                                   const Elf64_Shdr *sechdrs)
-{
-       /* One extra reloc so it's always 0-funcaddr terminated */
-       unsigned long relocs = 1;
-       unsigned i;
-
-       /* Every relocated section... */
-       for (i = 1; i < hdr->e_shnum; i++) {
-               if (sechdrs[i].sh_type == SHT_RELA) {
-                       DEBUGP("Found relocations in section %u\n", i);
-                       DEBUGP("Ptr: %p.  Number: %lu\n",
-                              (void *)sechdrs[i].sh_addr,
-                              sechdrs[i].sh_size / sizeof(Elf64_Rela));
-                       relocs += count_relocs((void *)sechdrs[i].sh_addr,
-                                              sechdrs[i].sh_size
-                                              / sizeof(Elf64_Rela));
-               }
-       }
-
-       DEBUGP("Looks like a total of %lu stubs, max\n", relocs);
-       return relocs * sizeof(struct ppc64_stub_entry);
-}
-
-static void dedotify_versions(struct modversion_info *vers,
-                             unsigned long size)
-{
-       struct modversion_info *end;
-
-       for (end = (void *)vers + size; vers < end; vers++)
-               if (vers->name[0] == '.')
-                       memmove(vers->name, vers->name+1, strlen(vers->name));
-}
-
-/* Undefined symbols which refer to .funcname, hack to funcname */
-static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
-{
-       unsigned int i;
-
-       for (i = 1; i < numsyms; i++) {
-               if (syms[i].st_shndx == SHN_UNDEF) {
-                       char *name = strtab + syms[i].st_name;
-                       if (name[0] == '.')
-                               memmove(name, name+1, strlen(name));
-               }
-       }
-}
-
-int module_frob_arch_sections(Elf64_Ehdr *hdr,
-                             Elf64_Shdr *sechdrs,
-                             char *secstrings,
-                             struct module *me)
-{
-       unsigned int i;
-
-       /* Find .toc and .stubs sections, symtab and strtab */
-       for (i = 1; i < hdr->e_shnum; i++) {
-               char *p;
-               if (strcmp(secstrings + sechdrs[i].sh_name, ".stubs") == 0)
-                       me->arch.stubs_section = i;
-               else if (strcmp(secstrings + sechdrs[i].sh_name, ".toc") == 0)
-                       me->arch.toc_section = i;
-               else if (strcmp(secstrings+sechdrs[i].sh_name,"__versions")==0)
-                       dedotify_versions((void *)hdr + sechdrs[i].sh_offset,
-                                         sechdrs[i].sh_size);
-
-               /* We don't handle .init for the moment: rename to _init */
-               while ((p = strstr(secstrings + sechdrs[i].sh_name, ".init")))
-                       p[0] = '_';
-
-               if (sechdrs[i].sh_type == SHT_SYMTAB)
-                       dedotify((void *)hdr + sechdrs[i].sh_offset,
-                                sechdrs[i].sh_size / sizeof(Elf64_Sym),
-                                (void *)hdr
-                                + sechdrs[sechdrs[i].sh_link].sh_offset);
-       }
-       if (!me->arch.stubs_section || !me->arch.toc_section) {
-               printk("%s: doesn't contain .toc or .stubs.\n", me->name);
-               return -ENOEXEC;
-       }
-
-       /* Override the stubs size */
-       sechdrs[me->arch.stubs_section].sh_size = get_stubs_size(hdr, sechdrs);
-       return 0;
-}
-
-int apply_relocate(Elf64_Shdr *sechdrs,
-                  const char *strtab,
-                  unsigned int symindex,
-                  unsigned int relsec,
-                  struct module *me)
-{
-       printk(KERN_ERR "%s: Non-ADD RELOCATION unsupported\n", me->name);
-       return -ENOEXEC;
-}
-
-/* r2 is the TOC pointer: it actually points 0x8000 into the TOC (this
-   gives the maximum span for an instruction which uses a signed
-   offset) */
-static inline unsigned long my_r2(Elf64_Shdr *sechdrs, struct module *me)
-{
-       return sechdrs[me->arch.toc_section].sh_addr + 0x8000;
-}
-
-/* Both low and high 16 bits are added as SIGNED additions, so if low
-   16 bits has high bit set, high 16 bits must be adjusted.  These
-   macros do that (stolen from binutils). */
-#define PPC_LO(v) ((v) & 0xffff)
-#define PPC_HI(v) (((v) >> 16) & 0xffff)
-#define PPC_HA(v) PPC_HI ((v) + 0x8000)
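
The 0x8000 bias is easiest to see with numbers: addi sign-extends its 16-bit immediate, so when the low half of an offset has its top bit set, the high half loaded by addis must be one larger to compensate. A small self-contained C check of the identity (it mirrors the macros above; not kernel code):

#include <assert.h>
#include <stdint.h>

#define PPC_LO(v) ((v) & 0xffff)
#define PPC_HI(v) (((v) >> 16) & 0xffff)
#define PPC_HA(v) PPC_HI((v) + 0x8000)

/* Recombine the halves the way addis (shifted high) + addi (sign-extended
 * low) would at run time. */
static int32_t recombine(uint32_t v)
{
	return (int32_t)(PPC_HA(v) << 16) + (int16_t)PPC_LO(v);
}

int main(void)
{
	/* low half below 0x8000: PPC_HA == PPC_HI, nothing to adjust */
	assert(recombine(0x12341234) == 0x12341234);
	/* low half 0x8000 or above: PPC_HA == PPC_HI + 1 cancels the
	 * sign extension performed by addi */
	assert(recombine(0x12349abc) == 0x12349abc);
	return 0;
}

create_stub() below patches PPC_HA(reladdr) into the addis and PPC_LO(reladdr) into the addi of the stub template, relying on exactly this pairing.
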
-
-/* Patch stub to reference function and correct r2 value. */
-static inline int create_stub(Elf64_Shdr *sechdrs,
-                             struct ppc64_stub_entry *entry,
-                             struct ppc64_opd_entry *opd,
-                             struct module *me)
-{
-       Elf64_Half *loc1, *loc2;
-       long reladdr;
-
-       *entry = ppc64_stub;
-
-       loc1 = (Elf64_Half *)&entry->jump[2];
-       loc2 = (Elf64_Half *)&entry->jump[6];
-
-       /* Stub uses address relative to r2. */
-       reladdr = (unsigned long)entry - my_r2(sechdrs, me);
-       if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
-               printk("%s: Address %p of stub out of range of %p.\n",
-                      me->name, (void *)reladdr, (void *)my_r2(sechdrs, me));
-               return 0;
-       }
-       DEBUGP("Stub %p get data from reladdr %li\n", entry, reladdr);
-
-       *loc1 = PPC_HA(reladdr);
-       *loc2 = PPC_LO(reladdr);
-       entry->opd.funcaddr = opd->funcaddr;
-       entry->opd.r2 = opd->r2;
-       return 1;
-}
-
-/* Create stub to jump to function described in this OPD: we need the
-   stub to set up the TOC ptr (r2) for the function. */
-static unsigned long stub_for_addr(Elf64_Shdr *sechdrs,
-                                  unsigned long opdaddr,
-                                  struct module *me)
-{
-       struct ppc64_stub_entry *stubs;
-       struct ppc64_opd_entry *opd = (void *)opdaddr;
-       unsigned int i, num_stubs;
-
-       num_stubs = sechdrs[me->arch.stubs_section].sh_size / sizeof(*stubs);
-
-       /* Find this stub, or if that fails, the next avail. entry */
-       stubs = (void *)sechdrs[me->arch.stubs_section].sh_addr;
-       for (i = 0; stubs[i].opd.funcaddr; i++) {
-               BUG_ON(i >= num_stubs);
-
-               if (stubs[i].opd.funcaddr == opd->funcaddr)
-                       return (unsigned long)&stubs[i];
-       }
-
-       if (!create_stub(sechdrs, &stubs[i], opd, me))
-               return 0;
-
-       return (unsigned long)&stubs[i];
-}
-
-/* We expect a noop next: if it is, replace it with an instruction to
-   restore r2. */
-static int restore_r2(u32 *instruction, struct module *me)
-{
-       if (*instruction != 0x60000000) {
-               printk("%s: Expect noop after relocate, got %08x\n",
-                      me->name, *instruction);
-               return 0;
-       }
-       *instruction = 0xe8410028;      /* ld r2,40(r1) */
-       return 1;
-}
-
-int apply_relocate_add(Elf64_Shdr *sechdrs,
-                      const char *strtab,
-                      unsigned int symindex,
-                      unsigned int relsec,
-                      struct module *me)
-{
-       unsigned int i;
-       Elf64_Rela *rela = (void *)sechdrs[relsec].sh_addr;
-       Elf64_Sym *sym;
-       unsigned long *location;
-       unsigned long value;
-
-       DEBUGP("Applying ADD relocate section %u to %u\n", relsec,
-              sechdrs[relsec].sh_info);
-       for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
-               /* This is where to make the change */
-               location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
-                       + rela[i].r_offset;
-               /* This is the symbol it is referring to */
-               sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
-                       + ELF64_R_SYM(rela[i].r_info);
-
-               DEBUGP("RELOC at %p: %li-type as %s (%lu) + %li\n",
-                      location, (long)ELF64_R_TYPE(rela[i].r_info),
-                      strtab + sym->st_name, (unsigned long)sym->st_value,
-                      (long)rela[i].r_addend);
-
-               /* `Everything is relative'. */
-               value = sym->st_value + rela[i].r_addend;
-
-               switch (ELF64_R_TYPE(rela[i].r_info)) {
-               case R_PPC64_ADDR32:
-                       /* Simply set it */
-                       *(u32 *)location = value;
-                       break;
-                       
-               case R_PPC64_ADDR64:
-                       /* Simply set it */
-                       *(unsigned long *)location = value;
-                       break;
-
-               case R_PPC64_TOC:
-                       *(unsigned long *)location = my_r2(sechdrs, me);
-                       break;
-
-               case R_PPC64_TOC16:
-                       /* Subtract TOC pointer */
-                       value -= my_r2(sechdrs, me);
-                       if (value + 0x8000 > 0xffff) {
-                               printk("%s: bad TOC16 relocation (%lu)\n",
-                                      me->name, value);
-                               return -ENOEXEC;
-                       }
-                       *((uint16_t *) location)
-                               = (*((uint16_t *) location) & ~0xffff)
-                               | (value & 0xffff);
-                       break;
-
-               case R_PPC64_TOC16_DS:
-                       /* Subtract TOC pointer */
-                       value -= my_r2(sechdrs, me);
-                       if ((value & 3) != 0 || value + 0x8000 > 0xffff) {
-                               printk("%s: bad TOC16_DS relocation (%lu)\n",
-                                      me->name, value);
-                               return -ENOEXEC;
-                       }
-                       *((uint16_t *) location)
-                               = (*((uint16_t *) location) & ~0xfffc)
-                               | (value & 0xfffc);
-                       break;
-
-               case R_PPC_REL24:
-                       /* FIXME: Handle weak symbols here --RR */
-                       if (sym->st_shndx == SHN_UNDEF) {
-                               /* External: go via stub */
-                               value = stub_for_addr(sechdrs, value, me);
-                               if (!value)
-                                       return -ENOENT;
-                               if (!restore_r2((u32 *)location + 1, me))
-                                       return -ENOEXEC;
-                       }
-
-                       /* Convert value to relative */
-                       value -= (unsigned long)location;
-                       if (value + 0x2000000 > 0x3ffffff || (value & 3) != 0){
-                               printk("%s: REL24 %li out of range!\n",
-                                      me->name, (long int)value);
-                               return -ENOEXEC;
-                       }
-
-                       /* Only replace bits 2 through 26 */
-                       *(uint32_t *)location 
-                               = (*(uint32_t *)location & ~0x03fffffc)
-                               | (value & 0x03fffffc);
-                       break;
-
-               default:
-                       printk("%s: Unknown ADD relocation: %lu\n",
-                              me->name,
-                              (unsigned long)ELF64_R_TYPE(rela[i].r_info));
-                       return -ENOEXEC;
-               }
-       }
-
-       return 0;
-}
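
In the R_PPC_REL24 case only the displacement field of the existing branch is rewritten: the primary opcode and the AA/LK bits are preserved by the 0x03fffffc mask, and the displacement must be word-aligned and within the +/-32 MB reach of an I-form branch. A standalone C sketch of the same masking; 0x48000001 is simply a bl with a zero displacement, and the numbers are illustrative:

#include <assert.h>
#include <stdint.h>

/* Patch the 24-bit displacement field of a PowerPC I-form branch while
 * keeping the opcode and the AA/LK bits, as apply_relocate_add() does. */
static uint32_t patch_rel24(uint32_t insn, int32_t delta)
{
	/* same range/alignment test as above, done in 32-bit arithmetic */
	assert((uint32_t)delta + 0x2000000 <= 0x3ffffff);
	assert((delta & 3) == 0);
	return (insn & ~0x03fffffc) | ((uint32_t)delta & 0x03fffffc);
}

int main(void)
{
	uint32_t bl = 0x48000001;	/* bl with LK=1 and displacement 0 */

	assert(patch_rel24(bl, 0x100)  == 0x48000101);	/* forward branch */
	assert(patch_rel24(bl, -0x100) == 0x4bffff01);	/* backward branch */
	return 0;
}
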
-
-LIST_HEAD(module_bug_list);
-
-int module_finalize(const Elf_Ehdr *hdr,
-               const Elf_Shdr *sechdrs, struct module *me)
-{
-       char *secstrings;
-       unsigned int i;
-
-       me->arch.bug_table = NULL;
-       me->arch.num_bugs = 0;
-
-       /* Find the __bug_table section, if present */
-       secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
-       for (i = 1; i < hdr->e_shnum; i++) {
-               if (strcmp(secstrings+sechdrs[i].sh_name, "__bug_table"))
-                       continue;
-               me->arch.bug_table = (void *) sechdrs[i].sh_addr;
-               me->arch.num_bugs = sechdrs[i].sh_size / sizeof(struct bug_entry);
-               break;
-       }
-
-       /*
-        * Strictly speaking this should have a spinlock to protect against
-        * traversals, but since we only traverse on BUG()s, a spinlock
-        * could potentially lead to deadlock and thus be counter-productive.
-        */
-       list_add(&me->arch.bug_list, &module_bug_list);
-
-       return 0;
-}
-
-void module_arch_cleanup(struct module *mod)
-{
-       list_del(&mod->arch.bug_list);
-}
-
-struct bug_entry *module_find_bug(unsigned long bugaddr)
-{
-       struct mod_arch_specific *mod;
-       unsigned int i;
-       struct bug_entry *bug;
-
-       list_for_each_entry(mod, &module_bug_list, bug_list) {
-               bug = mod->bug_table;
-               for (i = 0; i < mod->num_bugs; ++i, ++bug)
-                       if (bugaddr == bug->bug_addr)
-                               return bug;
-       }
-       return NULL;
-}
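
The R_PPC_REL24 case above patches only the 24-bit branch displacement, leaving the opcode and the AA/LK bits untouched. A minimal standalone sketch of that range check and mask arithmetic (hypothetical helper name, plain C rather than the kernel code, shown here only for illustration):

    #include <stdint.h>

    /* Patch a PowerPC 24-bit relative branch (b/bl) so it jumps by 'delta'
     * bytes.  Returns 0 on success, -1 if delta cannot be encoded.
     * Hypothetical helper mirroring the R_PPC_REL24 handling above.
     */
    static int patch_rel24(uint32_t *insn, long delta)
    {
    	/* Displacement must be word aligned and fit a signed 26-bit field. */
    	if ((delta & 3) != 0 || delta < -0x2000000 || delta > 0x1ffffff)
    		return -1;

    	/* Keep the opcode and AA/LK bits, replace the LI field (0x03fffffc). */
    	*insn = (*insn & ~0x03fffffc) | ((uint32_t)delta & 0x03fffffc);
    	return 0;
    }
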
diff --git a/arch/ppc64/kernel/pci.c b/arch/ppc64/kernel/pci.c
deleted file mode 100644 (file)
index 3cef1b8..0000000
+++ /dev/null
@@ -1,1319 +0,0 @@
-/*
- * Port for PPC64 David Engebretsen, IBM Corp.
- * Contains common pci routines for ppc64 platform, pSeries and iSeries brands.
- * 
- * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
- *   Rework, based on alpha PCI code.
- *
- *      This program is free software; you can redistribute it and/or
- *      modify it under the terms of the GNU General Public License
- *      as published by the Free Software Foundation; either version
- *      2 of the License, or (at your option) any later version.
- */
-
-#undef DEBUG
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/bootmem.h>
-#include <linux/mm.h>
-#include <linux/list.h>
-#include <linux/syscalls.h>
-
-#include <asm/processor.h>
-#include <asm/io.h>
-#include <asm/prom.h>
-#include <asm/pci-bridge.h>
-#include <asm/byteorder.h>
-#include <asm/irq.h>
-#include <asm/machdep.h>
-#include <asm/udbg.h>
-#include <asm/ppc-pci.h>
-
-#ifdef DEBUG
-#define DBG(fmt...) udbg_printf(fmt)
-#else
-#define DBG(fmt...)
-#endif
-
-unsigned long pci_probe_only = 1;
-unsigned long pci_assign_all_buses = 0;
-
-/*
- * Mask of legal IO pages under MAX_ISA_PORT.  This is to ensure we don't touch
- * devices we don't have access to.
- */
-unsigned long io_page_mask;
-
-EXPORT_SYMBOL(io_page_mask);
-
-#ifdef CONFIG_PPC_MULTIPLATFORM
-static void fixup_resource(struct resource *res, struct pci_dev *dev);
-static void do_bus_setup(struct pci_bus *bus);
-#endif
-
-unsigned int pcibios_assign_all_busses(void)
-{
-       return pci_assign_all_buses;
-}
-
-/* pci_io_base -- the base address from which IO BARs are offset.
- * This is the lowest I/O base address (so bar values are always positive),
- * and it *must* be the start of ISA space if an ISA bus exists because
- * ISA drivers use hard coded offsets.  If no ISA bus exists a dummy
- * page is mapped and isa_io_limit prevents access to it.
- */
-unsigned long isa_io_base;     /* NULL if no ISA bus */
-EXPORT_SYMBOL(isa_io_base);
-unsigned long pci_io_base;
-EXPORT_SYMBOL(pci_io_base);
-
-void iSeries_pcibios_init(void);
-
-LIST_HEAD(hose_list);
-
-struct dma_mapping_ops pci_dma_ops;
-EXPORT_SYMBOL(pci_dma_ops);
-
-int global_phb_number;         /* Global phb counter */
-
-/* Cached ISA bridge dev. */
-struct pci_dev *ppc64_isabridge_dev = NULL;
-
-static void fixup_broken_pcnet32(struct pci_dev* dev)
-{
-       if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) {
-               dev->vendor = PCI_VENDOR_ID_AMD;
-               pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD);
-       }
-}
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32);
-
-void  pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
-                             struct resource *res)
-{
-       unsigned long offset = 0;
-       struct pci_controller *hose = pci_bus_to_host(dev->bus);
-
-       if (!hose)
-               return;
-
-       if (res->flags & IORESOURCE_IO)
-               offset = (unsigned long)hose->io_base_virt - pci_io_base;
-
-       if (res->flags & IORESOURCE_MEM)
-               offset = hose->pci_mem_offset;
-
-       region->start = res->start - offset;
-       region->end = res->end - offset;
-}
-
-void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
-                             struct pci_bus_region *region)
-{
-       unsigned long offset = 0;
-       struct pci_controller *hose = pci_bus_to_host(dev->bus);
-
-       if (!hose)
-               return;
-
-       if (res->flags & IORESOURCE_IO)
-               offset = (unsigned long)hose->io_base_virt - pci_io_base;
-
-       if (res->flags & IORESOURCE_MEM)
-               offset = hose->pci_mem_offset;
-
-       res->start = region->start + offset;
-       res->end = region->end + offset;
-}
-
-#ifdef CONFIG_HOTPLUG
-EXPORT_SYMBOL(pcibios_resource_to_bus);
-EXPORT_SYMBOL(pcibios_bus_to_resource);
-#endif
-
-/*
- * We need to avoid collisions with `mirrored' VGA ports
- * and other strange ISA hardware, so we always want the
- * addresses to be allocated in the 0x000-0x0ff region
- * modulo 0x400.
- *
- * Why? Because some silly external IO cards only decode
- * the low 10 bits of the IO address. The 0x00-0xff region
- * is reserved for motherboard devices that decode all 16
- * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
- * but we want to try to avoid allocating at 0x2900-0x2bff
- * which might have been mirrored at 0x0100-0x03ff.
- */
-void pcibios_align_resource(void *data, struct resource *res,
-                           unsigned long size, unsigned long align)
-{
-       struct pci_dev *dev = data;
-       struct pci_controller *hose = pci_bus_to_host(dev->bus);
-       unsigned long start = res->start;
-       unsigned long alignto;
-
-       if (res->flags & IORESOURCE_IO) {
-               unsigned long offset = (unsigned long)hose->io_base_virt -
-                                       pci_io_base;
-               /* Make sure we start at our min on all hoses */
-               if (start - offset < PCIBIOS_MIN_IO)
-                       start = PCIBIOS_MIN_IO + offset;
-
-               /*
-                * Put everything into 0x00-0xff region modulo 0x400
-                */
-               if (start & 0x300)
-                       start = (start + 0x3ff) & ~0x3ff;
-
-       } else if (res->flags & IORESOURCE_MEM) {
-               /* Make sure we start at our min on all hoses */
-               if (start - hose->pci_mem_offset < PCIBIOS_MIN_MEM)
-                       start = PCIBIOS_MIN_MEM + hose->pci_mem_offset;
-
-               /* Align to multiple of size of minimum base.  */
-               alignto = max(0x1000UL, align);
-               start = ALIGN(start, alignto);
-       }
-
-       res->start = start;
-}
-
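
The comment above explains why I/O allocations are pushed out of the 0x100-0x3ff window of each 1KB block, so cards decoding only 10 address bits are not hit by mirrored accesses. A small worked example of the rounding step in pcibios_align_resource() (hypothetical helper, sample values chosen to match the comment):

    #include <assert.h>

    /* Round an I/O start address up to the next 1KB boundary whenever it
     * falls inside the 0x100-0x3ff mirror window, as the alignment code
     * above does.  Illustration only.
     */
    static unsigned long align_isa_mirror(unsigned long start)
    {
    	if (start & 0x300)
    		start = (start + 0x3ff) & ~0x3ffUL;
    	return start;
    }

    int main(void)
    {
    	assert(align_isa_mirror(0x2800) == 0x2800);	/* 0x00-0xff region: kept */
    	assert(align_isa_mirror(0x2900) == 0x2c00);	/* mirror window: pushed up */
    	return 0;
    }
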
-static DEFINE_SPINLOCK(hose_spinlock);
-
-/*
- * pci_controller(phb) initialized common variables.
- * pci_setup_pci_controller(phb) initializes common pci_controller variables.
-void __devinit pci_setup_pci_controller(struct pci_controller *hose)
-{
-       memset(hose, 0, sizeof(struct pci_controller));
-
-       spin_lock(&hose_spinlock);
-       hose->global_number = global_phb_number++;
-       list_add_tail(&hose->list_node, &hose_list);
-       spin_unlock(&hose_spinlock);
-}
-
-static void __init pcibios_claim_one_bus(struct pci_bus *b)
-{
-       struct pci_dev *dev;
-       struct pci_bus *child_bus;
-
-       list_for_each_entry(dev, &b->devices, bus_list) {
-               int i;
-
-               for (i = 0; i < PCI_NUM_RESOURCES; i++) {
-                       struct resource *r = &dev->resource[i];
-
-                       if (r->parent || !r->start || !r->flags)
-                               continue;
-                       pci_claim_resource(dev, i);
-               }
-       }
-
-       list_for_each_entry(child_bus, &b->children, node)
-               pcibios_claim_one_bus(child_bus);
-}
-
-#ifndef CONFIG_PPC_ISERIES
-static void __init pcibios_claim_of_setup(void)
-{
-       struct pci_bus *b;
-
-       list_for_each_entry(b, &pci_root_buses, node)
-               pcibios_claim_one_bus(b);
-}
-#endif
-
-#ifdef CONFIG_PPC_MULTIPLATFORM
-static u32 get_int_prop(struct device_node *np, const char *name, u32 def)
-{
-       u32 *prop;
-       int len;
-
-       prop = (u32 *) get_property(np, name, &len);
-       if (prop && len >= 4)
-               return *prop;
-       return def;
-}
-
-static unsigned int pci_parse_of_flags(u32 addr0)
-{
-       unsigned int flags = 0;
-
-       if (addr0 & 0x02000000) {
-               flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY;
-               flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64;
-               flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M;
-               if (addr0 & 0x40000000)
-                       flags |= IORESOURCE_PREFETCH
-                                | PCI_BASE_ADDRESS_MEM_PREFETCH;
-       } else if (addr0 & 0x01000000)
-               flags = IORESOURCE_IO | PCI_BASE_ADDRESS_SPACE_IO;
-       return flags;
-}
-
-#define GET_64BIT(prop, i)     ((((u64) (prop)[(i)]) << 32) | (prop)[(i)+1])
-
-static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev)
-{
-       u64 base, size;
-       unsigned int flags;
-       struct resource *res;
-       u32 *addrs, i;
-       int proplen;
-
-       addrs = (u32 *) get_property(node, "assigned-addresses", &proplen);
-       if (!addrs)
-               return;
-       for (; proplen >= 20; proplen -= 20, addrs += 5) {
-               flags = pci_parse_of_flags(addrs[0]);
-               if (!flags)
-                       continue;
-               base = GET_64BIT(addrs, 1);
-               size = GET_64BIT(addrs, 3);
-               if (!size)
-                       continue;
-               i = addrs[0] & 0xff;
-               if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
-                       res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
-               } else if (i == dev->rom_base_reg) {
-                       res = &dev->resource[PCI_ROM_RESOURCE];
-                       flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE;
-               } else {
-                       printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i);
-                       continue;
-               }
-               res->start = base;
-               res->end = base + size - 1;
-               res->flags = flags;
-               res->name = pci_name(dev);
-               fixup_resource(res, dev);
-       }
-}
-
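
The GET_64BIT() macro above combines two 32-bit device-tree cells into one 64-bit value; each "assigned-addresses" entry consumed by pci_parse_of_addrs() is five such cells (phys.hi, phys.mid, phys.lo, size.hi, size.lo, i.e. 20 bytes). A short sketch with made-up property values, not taken from any real device tree:

    #include <stdint.h>
    #include <stdio.h>

    /* Combine two 32-bit OF property cells into a 64-bit value, like
     * GET_64BIT() above, and decode one sample "assigned-addresses" entry.
     */
    static uint64_t get_64bit(const uint32_t *prop, int i)
    {
    	return ((uint64_t)prop[i] << 32) | prop[i + 1];
    }

    int main(void)
    {
    	/* Made-up entry: 32-bit memory space, config reg 0x10 (BAR0). */
    	uint32_t entry[5] = { 0x82000010, 0x00000000, 0xc0000000,
    			      0x00000000, 0x00100000 };

    	printf("base 0x%llx size 0x%llx reg 0x%02x\n",
    	       (unsigned long long)get_64bit(entry, 1),
    	       (unsigned long long)get_64bit(entry, 3),
    	       entry[0] & 0xff);	/* low byte of phys.hi = config register */
    	return 0;
    }
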
-struct pci_dev *of_create_pci_dev(struct device_node *node,
-                                struct pci_bus *bus, int devfn)
-{
-       struct pci_dev *dev;
-       const char *type;
-
-       dev = kmalloc(sizeof(struct pci_dev), GFP_KERNEL);
-       if (!dev)
-               return NULL;
-       type = get_property(node, "device_type", NULL);
-       if (type == NULL)
-               type = "";
-
-       memset(dev, 0, sizeof(struct pci_dev));
-       dev->bus = bus;
-       dev->sysdata = node;
-       dev->dev.parent = bus->bridge;
-       dev->dev.bus = &pci_bus_type;
-       dev->devfn = devfn;
-       dev->multifunction = 0;         /* maybe a lie? */
-
-       dev->vendor = get_int_prop(node, "vendor-id", 0xffff);
-       dev->device = get_int_prop(node, "device-id", 0xffff);
-       dev->subsystem_vendor = get_int_prop(node, "subsystem-vendor-id", 0);
-       dev->subsystem_device = get_int_prop(node, "subsystem-id", 0);
-
-       dev->cfg_size = 256; /*pci_cfg_space_size(dev);*/
-
-       sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(bus),
-               dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
-       dev->class = get_int_prop(node, "class-code", 0);
-
-       dev->current_state = 4;         /* unknown power state */
-
-       if (!strcmp(type, "pci")) {
-               /* a PCI-PCI bridge */
-               dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
-               dev->rom_base_reg = PCI_ROM_ADDRESS1;
-       } else if (!strcmp(type, "cardbus")) {
-               dev->hdr_type = PCI_HEADER_TYPE_CARDBUS;
-       } else {
-               dev->hdr_type = PCI_HEADER_TYPE_NORMAL;
-               dev->rom_base_reg = PCI_ROM_ADDRESS;
-               dev->irq = NO_IRQ;
-               if (node->n_intrs > 0) {
-                       dev->irq = node->intrs[0].line;
-                       pci_write_config_byte(dev, PCI_INTERRUPT_LINE,
-                                             dev->irq);
-               }
-       }
-
-       pci_parse_of_addrs(node, dev);
-
-       pci_device_add(dev, bus);
-
-       /* XXX pci_scan_msi_device(dev); */
-
-       return dev;
-}
-EXPORT_SYMBOL(of_create_pci_dev);
-
-void __devinit of_scan_bus(struct device_node *node,
-                                 struct pci_bus *bus)
-{
-       struct device_node *child = NULL;
-       u32 *reg;
-       int reglen, devfn;
-       struct pci_dev *dev;
-
-       while ((child = of_get_next_child(node, child)) != NULL) {
-               reg = (u32 *) get_property(child, "reg", &reglen);
-               if (reg == NULL || reglen < 20)
-                       continue;
-               devfn = (reg[0] >> 8) & 0xff;
-               /* create a new pci_dev for this device */
-               dev = of_create_pci_dev(child, bus, devfn);
-               if (!dev)
-                       continue;
-               if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
-                   dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
-                       of_scan_pci_bridge(child, dev);
-       }
-
-       do_bus_setup(bus);
-}
-EXPORT_SYMBOL(of_scan_bus);
-
-void __devinit of_scan_pci_bridge(struct device_node *node,
-                               struct pci_dev *dev)
-{
-       struct pci_bus *bus;
-       u32 *busrange, *ranges;
-       int len, i, mode;
-       struct resource *res;
-       unsigned int flags;
-       u64 size;
-
-       /* parse bus-range property */
-       busrange = (u32 *) get_property(node, "bus-range", &len);
-       if (busrange == NULL || len != 8) {
-               printk(KERN_ERR "Can't get bus-range for PCI-PCI bridge %s\n",
-                      node->full_name);
-               return;
-       }
-       ranges = (u32 *) get_property(node, "ranges", &len);
-       if (ranges == NULL) {
-               printk(KERN_ERR "Can't get ranges for PCI-PCI bridge %s\n",
-                      node->full_name);
-               return;
-       }
-
-       bus = pci_add_new_bus(dev->bus, dev, busrange[0]);
-       if (!bus) {
-               printk(KERN_ERR "Failed to create pci bus for %s\n",
-                      node->full_name);
-               return;
-       }
-
-       bus->primary = dev->bus->number;
-       bus->subordinate = busrange[1];
-       bus->bridge_ctl = 0;
-       bus->sysdata = node;
-
-       /* parse ranges property */
-       /* PCI #address-cells == 3 and #size-cells == 2 always */
-       res = &dev->resource[PCI_BRIDGE_RESOURCES];
-       for (i = 0; i < PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES; ++i) {
-               res->flags = 0;
-               bus->resource[i] = res;
-               ++res;
-       }
-       i = 1;
-       for (; len >= 32; len -= 32, ranges += 8) {
-               flags = pci_parse_of_flags(ranges[0]);
-               size = GET_64BIT(ranges, 6);
-               if (flags == 0 || size == 0)
-                       continue;
-               if (flags & IORESOURCE_IO) {
-                       res = bus->resource[0];
-                       if (res->flags) {
-                               printk(KERN_ERR "PCI: ignoring extra I/O range"
-                                      " for bridge %s\n", node->full_name);
-                               continue;
-                       }
-               } else {
-                       if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) {
-                               printk(KERN_ERR "PCI: too many memory ranges"
-                                      " for bridge %s\n", node->full_name);
-                               continue;
-                       }
-                       res = bus->resource[i];
-                       ++i;
-               }
-               res->start = GET_64BIT(ranges, 1);
-               res->end = res->start + size - 1;
-               res->flags = flags;
-               fixup_resource(res, dev);
-       }
-       sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
-               bus->number);
-
-       mode = PCI_PROBE_NORMAL;
-       if (ppc_md.pci_probe_mode)
-               mode = ppc_md.pci_probe_mode(bus);
-       if (mode == PCI_PROBE_DEVTREE)
-               of_scan_bus(node, bus);
-       else if (mode == PCI_PROBE_NORMAL)
-               pci_scan_child_bus(bus);
-}
-EXPORT_SYMBOL(of_scan_pci_bridge);
-#endif /* CONFIG_PPC_MULTIPLATFORM */
-
-void __devinit scan_phb(struct pci_controller *hose)
-{
-       struct pci_bus *bus;
-       struct device_node *node = hose->arch_data;
-       int i, mode;
-       struct resource *res;
-
-       bus = pci_create_bus(NULL, hose->first_busno, hose->ops, node);
-       if (bus == NULL) {
-               printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
-                      hose->global_number);
-               return;
-       }
-       bus->secondary = hose->first_busno;
-       hose->bus = bus;
-
-       bus->resource[0] = res = &hose->io_resource;
-       if (res->flags && request_resource(&ioport_resource, res))
-               printk(KERN_ERR "Failed to request PCI IO region "
-                      "on PCI domain %04x\n", hose->global_number);
-
-       for (i = 0; i < 3; ++i) {
-               res = &hose->mem_resources[i];
-               bus->resource[i+1] = res;
-               if (res->flags && request_resource(&iomem_resource, res))
-                       printk(KERN_ERR "Failed to request PCI memory region "
-                              "on PCI domain %04x\n", hose->global_number);
-       }
-
-       mode = PCI_PROBE_NORMAL;
-#ifdef CONFIG_PPC_MULTIPLATFORM
-       if (ppc_md.pci_probe_mode)
-               mode = ppc_md.pci_probe_mode(bus);
-       if (mode == PCI_PROBE_DEVTREE) {
-               bus->subordinate = hose->last_busno;
-               of_scan_bus(node, bus);
-       }
-#endif /* CONFIG_PPC_MULTIPLATFORM */
-       if (mode == PCI_PROBE_NORMAL)
-               hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
-       pci_bus_add_devices(bus);
-}
-
-static int __init pcibios_init(void)
-{
-       struct pci_controller *hose, *tmp;
-
-       /* For now, override phys_mem_access_prot. If we need it,
-        * later, we may move that initialization to each ppc_md
-        */
-       ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot;
-
-#ifdef CONFIG_PPC_ISERIES
-       iSeries_pcibios_init(); 
-#endif
-
-       printk("PCI: Probing PCI hardware\n");
-
-       /* Scan all of the recorded PCI controllers.  */
-       list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
-               scan_phb(hose);
-
-#ifndef CONFIG_PPC_ISERIES
-       if (pci_probe_only)
-               pcibios_claim_of_setup();
-       else
-               /* FIXME: `else' will be removed when
-                  pci_assign_unassigned_resources() is able to work
-                  correctly with [partially] allocated PCI tree. */
-               pci_assign_unassigned_resources();
-#endif /* !CONFIG_PPC_ISERIES */
-
-       /* Call machine dependent final fixup */
-       if (ppc_md.pcibios_fixup)
-               ppc_md.pcibios_fixup();
-
-       /* Cache the location of the ISA bridge (if we have one) */
-       ppc64_isabridge_dev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
-       if (ppc64_isabridge_dev != NULL)
-               printk("ISA bridge at %s\n", pci_name(ppc64_isabridge_dev));
-
-#ifdef CONFIG_PPC_MULTIPLATFORM
-       /* map in PCI I/O space */
-       phbs_remap_io();
-#endif
-
-       printk("PCI: Probing PCI hardware done\n");
-
-       return 0;
-}
-
-subsys_initcall(pcibios_init);
-
-char __init *pcibios_setup(char *str)
-{
-       return str;
-}
-
-int pcibios_enable_device(struct pci_dev *dev, int mask)
-{
-       u16 cmd, oldcmd;
-       int i;
-
-       pci_read_config_word(dev, PCI_COMMAND, &cmd);
-       oldcmd = cmd;
-
-       for (i = 0; i < PCI_NUM_RESOURCES; i++) {
-               struct resource *res = &dev->resource[i];
-
-               /* Only set up the requested stuff */
-               if (!(mask & (1<<i)))
-                       continue;
-
-               if (res->flags & IORESOURCE_IO)
-                       cmd |= PCI_COMMAND_IO;
-               if (res->flags & IORESOURCE_MEM)
-                       cmd |= PCI_COMMAND_MEMORY;
-       }
-
-       if (cmd != oldcmd) {
-               printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n",
-                      pci_name(dev), cmd);
-                /* Enable the appropriate bits in the PCI command register.  */
-               pci_write_config_word(dev, PCI_COMMAND, cmd);
-       }
-       return 0;
-}
-
-/*
- * Return the domain number for this bus.
- */
-int pci_domain_nr(struct pci_bus *bus)
-{
-#ifdef CONFIG_PPC_ISERIES
-       return 0;
-#else
-       struct pci_controller *hose = pci_bus_to_host(bus);
-
-       return hose->global_number;
-#endif
-}
-
-EXPORT_SYMBOL(pci_domain_nr);
-
-/* Decide whether to display the domain number in /proc */
-int pci_proc_domain(struct pci_bus *bus)
-{
-#ifdef CONFIG_PPC_ISERIES
-       return 0;
-#else
-       struct pci_controller *hose = pci_bus_to_host(bus);
-       return hose->buid;
-#endif
-}
-
-/*
- * Platform support for /proc/bus/pci/X/Y mmap()s,
- * modelled on the sparc64 implementation by Dave Miller.
- *  -- paulus.
- */
-
-/*
- * Adjust vm_pgoff of VMA such that it is the physical page offset
- * corresponding to the 32-bit pci bus offset for DEV requested by the user.
- *
- * Basically, the user finds the base address of the device they wish
- * to mmap.  They read the 32-bit value from the config space base register,
- * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
- * offset parameter of mmap on /proc/bus/pci/XXX for that device.
- *
- * Returns negative error code on failure, zero on success.
- */
-static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
-                                              unsigned long *offset,
-                                              enum pci_mmap_state mmap_state)
-{
-       struct pci_controller *hose = pci_bus_to_host(dev->bus);
-       unsigned long io_offset = 0;
-       int i, res_bit;
-
-       if (hose == 0)
-               return NULL;            /* should never happen */
-
-       /* If memory, add on the PCI bridge address offset */
-       if (mmap_state == pci_mmap_mem) {
-               *offset += hose->pci_mem_offset;
-               res_bit = IORESOURCE_MEM;
-       } else {
-               io_offset = (unsigned long)hose->io_base_virt - pci_io_base;
-               *offset += io_offset;
-               res_bit = IORESOURCE_IO;
-       }
-
-       /*
-        * Check that the offset requested corresponds to one of the
-        * resources of the device.
-        */
-       for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
-               struct resource *rp = &dev->resource[i];
-               int flags = rp->flags;
-
-               /* treat ROM as memory (should be already) */
-               if (i == PCI_ROM_RESOURCE)
-                       flags |= IORESOURCE_MEM;
-
-               /* Active and same type? */
-               if ((flags & res_bit) == 0)
-                       continue;
-
-               /* In the range of this resource? */
-               if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
-                       continue;
-
-               /* found it! construct the final physical address */
-               if (mmap_state == pci_mmap_io)
-                       *offset += hose->io_base_phys - io_offset;
-               return rp;
-       }
-
-       return NULL;
-}
-
-/*
- * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
- * device mapping.
- */
-static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
-                                     pgprot_t protection,
-                                     enum pci_mmap_state mmap_state,
-                                     int write_combine)
-{
-       unsigned long prot = pgprot_val(protection);
-
-       /* Write combine is always 0 on non-memory space mappings. On
-        * memory space, if the user didn't pass 1, we check for a
-        * "prefetchable" resource. This is a bit hackish, but we use
-        * this to work around the inability of /sysfs to provide a write
-        * combine bit
-        */
-       if (mmap_state != pci_mmap_mem)
-               write_combine = 0;
-       else if (write_combine == 0) {
-               if (rp->flags & IORESOURCE_PREFETCH)
-                       write_combine = 1;
-       }
-
-       /* XXX would be nice to have a way to ask for write-through */
-       prot |= _PAGE_NO_CACHE;
-       if (write_combine)
-               prot &= ~_PAGE_GUARDED;
-       else
-               prot |= _PAGE_GUARDED;
-
-       printk("PCI map for %s:%lx, prot: %lx\n", pci_name(dev), rp->start,
-              prot);
-
-       return __pgprot(prot);
-}
-
-/*
- * This one is used by /dev/mem and fbdev who have no clue about the
- * PCI device, it tries to find the PCI device first and calls the
- * above routine
- */
-pgprot_t pci_phys_mem_access_prot(struct file *file,
-                                 unsigned long pfn,
-                                 unsigned long size,
-                                 pgprot_t protection)
-{
-       struct pci_dev *pdev = NULL;
-       struct resource *found = NULL;
-       unsigned long prot = pgprot_val(protection);
-       unsigned long offset = pfn << PAGE_SHIFT;
-       int i;
-
-       if (page_is_ram(pfn))
-               return __pgprot(prot);
-
-       prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
-
-       for_each_pci_dev(pdev) {
-               for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
-                       struct resource *rp = &pdev->resource[i];
-                       int flags = rp->flags;
-
-                       /* Active and same type? */
-                       if ((flags & IORESOURCE_MEM) == 0)
-                               continue;
-                       /* In the range of this resource? */
-                       if (offset < (rp->start & PAGE_MASK) ||
-                           offset > rp->end)
-                               continue;
-                       found = rp;
-                       break;
-               }
-               if (found)
-                       break;
-       }
-       if (found) {
-               if (found->flags & IORESOURCE_PREFETCH)
-                       prot &= ~_PAGE_GUARDED;
-               pci_dev_put(pdev);
-       }
-
-       DBG("non-PCI map for %lx, prot: %lx\n", offset, prot);
-
-       return __pgprot(prot);
-}
-
-
-/*
- * Perform the actual remap of the pages for a PCI device mapping, as
- * appropriate for this architecture.  The region in the process to map
- * is described by vm_start and vm_end members of VMA, the base physical
- * address is found in vm_pgoff.
- * The pci device structure is provided so that architectures may make mapping
- * decisions on a per-device or per-bus basis.
- *
- * Returns a negative error code on failure, zero on success.
- */
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-                       enum pci_mmap_state mmap_state,
-                       int write_combine)
-{
-       unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
-       struct resource *rp;
-       int ret;
-
-       rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
-       if (rp == NULL)
-               return -EINVAL;
-
-       vma->vm_pgoff = offset >> PAGE_SHIFT;
-       vma->vm_flags |= VM_SHM | VM_LOCKED | VM_IO;
-       vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
-                                                 vma->vm_page_prot,
-                                                 mmap_state, write_combine);
-
-       ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-                              vma->vm_end - vma->vm_start, vma->vm_page_prot);
-
-       return ret;
-}
-
-#ifdef CONFIG_PPC_MULTIPLATFORM
-static ssize_t pci_show_devspec(struct device *dev, struct device_attribute *attr, char *buf)
-{
-       struct pci_dev *pdev;
-       struct device_node *np;
-
-       pdev = to_pci_dev (dev);
-       np = pci_device_to_OF_node(pdev);
-       if (np == NULL || np->full_name == NULL)
-               return 0;
-       return sprintf(buf, "%s", np->full_name);
-}
-static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
-#endif /* CONFIG_PPC_MULTIPLATFORM */
-
-void pcibios_add_platform_entries(struct pci_dev *pdev)
-{
-#ifdef CONFIG_PPC_MULTIPLATFORM
-       device_create_file(&pdev->dev, &dev_attr_devspec);
-#endif /* CONFIG_PPC_MULTIPLATFORM */
-}
-
-#ifdef CONFIG_PPC_MULTIPLATFORM
-
-#define ISA_SPACE_MASK 0x1
-#define ISA_SPACE_IO 0x1
-
-static void __devinit pci_process_ISA_OF_ranges(struct device_node *isa_node,
-                                     unsigned long phb_io_base_phys,
-                                     void __iomem * phb_io_base_virt)
-{
-       struct isa_range *range;
-       unsigned long pci_addr;
-       unsigned int isa_addr;
-       unsigned int size;
-       int rlen = 0;
-
-       range = (struct isa_range *) get_property(isa_node, "ranges", &rlen);
-       if (range == NULL || (rlen < sizeof(struct isa_range))) {
-               printk(KERN_ERR "no ISA ranges or unexpected isa range size, "
-                      "mapping 64k\n");
-               __ioremap_explicit(phb_io_base_phys,
-                                  (unsigned long)phb_io_base_virt,
-                                  0x10000, _PAGE_NO_CACHE | _PAGE_GUARDED);
-               return; 
-       }
-       
-       /* From "ISA Binding to 1275"
-        * The ranges property is laid out as an array of elements,
-        * each of which comprises:
-        *   cells 0 - 1:       an ISA address
-        *   cells 2 - 4:       a PCI address 
-        *                      (size depending on dev->n_addr_cells)
-        *   cell 5:            the size of the range
-        */
-       if ((range->isa_addr.a_hi & ISA_SPACE_MASK) == ISA_SPACE_IO) {
-               isa_addr = range->isa_addr.a_lo;
-               pci_addr = (unsigned long) range->pci_addr.a_mid << 32 | 
-                       range->pci_addr.a_lo;
-
-               /* Assume these are both zero */
-               if ((pci_addr != 0) || (isa_addr != 0)) {
-                       printk(KERN_ERR "unexpected isa to pci mapping: %s\n",
-                                       __FUNCTION__);
-                       return;
-               }
-               
-               size = PAGE_ALIGN(range->size);
-
-               __ioremap_explicit(phb_io_base_phys, 
-                                  (unsigned long) phb_io_base_virt, 
-                                  size, _PAGE_NO_CACHE | _PAGE_GUARDED);
-       }
-}
-
-void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
-                                           struct device_node *dev, int prim)
-{
-       unsigned int *ranges, pci_space;
-       unsigned long size;
-       int rlen = 0;
-       int memno = 0;
-       struct resource *res;
-       int np, na = prom_n_addr_cells(dev);
-       unsigned long pci_addr, cpu_phys_addr;
-
-       np = na + 5;
-
-       /* From "PCI Binding to 1275"
-        * The ranges property is laid out as an array of elements,
-        * each of which comprises:
-        *   cells 0 - 2:       a PCI address
-        *   cells 3 or 3+4:    a CPU physical address
-        *                      (size depending on dev->n_addr_cells)
-        *   cells 4+5 or 5+6:  the size of the range
-        */
-       rlen = 0;
-       hose->io_base_phys = 0;
-       ranges = (unsigned int *) get_property(dev, "ranges", &rlen);
-       while ((rlen -= np * sizeof(unsigned int)) >= 0) {
-               res = NULL;
-               pci_space = ranges[0];
-               pci_addr = ((unsigned long)ranges[1] << 32) | ranges[2];
-
-               cpu_phys_addr = ranges[3];
-               if (na >= 2)
-                       cpu_phys_addr = (cpu_phys_addr << 32) | ranges[4];
-
-               size = ((unsigned long)ranges[na+3] << 32) | ranges[na+4];
-               ranges += np;
-               if (size == 0)
-                       continue;
-
-               /* Now consume following elements while they are contiguous */
-               while (rlen >= np * sizeof(unsigned int)) {
-                       unsigned long addr, phys;
-
-                       if (ranges[0] != pci_space)
-                               break;
-                       addr = ((unsigned long)ranges[1] << 32) | ranges[2];
-                       phys = ranges[3];
-                       if (na >= 2)
-                               phys = (phys << 32) | ranges[4];
-                       if (addr != pci_addr + size ||
-                           phys != cpu_phys_addr + size)
-                               break;
-
-                       size += ((unsigned long)ranges[na+3] << 32)
-                               | ranges[na+4];
-                       ranges += np;
-                       rlen -= np * sizeof(unsigned int);
-               }
-
-               switch ((pci_space >> 24) & 0x3) {
-               case 1:         /* I/O space */
-                       hose->io_base_phys = cpu_phys_addr;
-                       hose->pci_io_size = size;
-
-                       res = &hose->io_resource;
-                       res->flags = IORESOURCE_IO;
-                       res->start = pci_addr;
-                       DBG("phb%d: IO 0x%lx -> 0x%lx\n", hose->global_number,
-                                   res->start, res->start + size - 1);
-                       break;
-               case 2:         /* memory space */
-                       memno = 0;
-                       while (memno < 3 && hose->mem_resources[memno].flags)
-                               ++memno;
-
-                       if (memno == 0)
-                               hose->pci_mem_offset = cpu_phys_addr - pci_addr;
-                       if (memno < 3) {
-                               res = &hose->mem_resources[memno];
-                               res->flags = IORESOURCE_MEM;
-                               res->start = cpu_phys_addr;
-                               DBG("phb%d: MEM 0x%lx -> 0x%lx\n", hose->global_number,
-                                           res->start, res->start + size - 1);
-                       }
-                       break;
-               }
-               if (res != NULL) {
-                       res->name = dev->full_name;
-                       res->end = res->start + size - 1;
-                       res->parent = NULL;
-                       res->sibling = NULL;
-                       res->child = NULL;
-               }
-       }
-}
-
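
Following the "PCI Binding to 1275" layout described above, each host-bridge "ranges" entry for #address-cells == 2 is seven cells: three PCI address cells, two CPU physical address cells and two size cells. A sketch decoding one made-up entry the same way as the parsing loop above (values are invented for illustration):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	/* One sample entry, np = 3 + 2 + 2 = 7 cells. */
    	uint32_t r[7] = { 0x02000000, 0x0, 0x80000000,	/* PCI: 32-bit mem space */
    			  0x0, 0xc0000000,		/* CPU physical address  */
    			  0x0, 0x20000000 };		/* size: 512MB           */

    	unsigned int space = (r[0] >> 24) & 0x3;	/* 1 = I/O, 2 = 32-bit mem */
    	uint64_t pci_addr = ((uint64_t)r[1] << 32) | r[2];
    	uint64_t cpu_addr = ((uint64_t)r[3] << 32) | r[4];
    	uint64_t size     = ((uint64_t)r[5] << 32) | r[6];

    	/* For memory ranges, pci_mem_offset = cpu_addr - pci_addr. */
    	printf("space %u pci 0x%llx cpu 0x%llx size 0x%llx offset 0x%llx\n",
    	       space,
    	       (unsigned long long)pci_addr, (unsigned long long)cpu_addr,
    	       (unsigned long long)size,
    	       (unsigned long long)(cpu_addr - pci_addr));
    	return 0;
    }
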
-void __init pci_setup_phb_io(struct pci_controller *hose, int primary)
-{
-       unsigned long size = hose->pci_io_size;
-       unsigned long io_virt_offset;
-       struct resource *res;
-       struct device_node *isa_dn;
-
-       hose->io_base_virt = reserve_phb_iospace(size);
-       DBG("phb%d io_base_phys 0x%lx io_base_virt 0x%lx\n",
-               hose->global_number, hose->io_base_phys,
-               (unsigned long) hose->io_base_virt);
-
-       if (primary) {
-               pci_io_base = (unsigned long)hose->io_base_virt;
-               isa_dn = of_find_node_by_type(NULL, "isa");
-               if (isa_dn) {
-                       isa_io_base = pci_io_base;
-                       pci_process_ISA_OF_ranges(isa_dn, hose->io_base_phys,
-                                               hose->io_base_virt);
-                       of_node_put(isa_dn);
-                       /* Allow all IO */
-                       io_page_mask = -1;
-               }
-       }
-
-       io_virt_offset = (unsigned long)hose->io_base_virt - pci_io_base;
-       res = &hose->io_resource;
-       res->start += io_virt_offset;
-       res->end += io_virt_offset;
-}
-
-void __devinit pci_setup_phb_io_dynamic(struct pci_controller *hose,
-                                       int primary)
-{
-       unsigned long size = hose->pci_io_size;
-       unsigned long io_virt_offset;
-       struct resource *res;
-
-       hose->io_base_virt = __ioremap(hose->io_base_phys, size,
-                                       _PAGE_NO_CACHE | _PAGE_GUARDED);
-       DBG("phb%d io_base_phys 0x%lx io_base_virt 0x%lx\n",
-               hose->global_number, hose->io_base_phys,
-               (unsigned long) hose->io_base_virt);
-
-       if (primary)
-               pci_io_base = (unsigned long)hose->io_base_virt;
-
-       io_virt_offset = (unsigned long)hose->io_base_virt - pci_io_base;
-       res = &hose->io_resource;
-       res->start += io_virt_offset;
-       res->end += io_virt_offset;
-}
-
-
-static int get_bus_io_range(struct pci_bus *bus, unsigned long *start_phys,
-                               unsigned long *start_virt, unsigned long *size)
-{
-       struct pci_controller *hose = pci_bus_to_host(bus);
-       struct pci_bus_region region;
-       struct resource *res;
-
-       if (bus->self) {
-               res = bus->resource[0];
-               pcibios_resource_to_bus(bus->self, &region, res);
-               *start_phys = hose->io_base_phys + region.start;
-               *start_virt = (unsigned long) hose->io_base_virt + 
-                               region.start;
-               if (region.end > region.start) 
-                       *size = region.end - region.start + 1;
-               else {
-                       printk("%s(): unexpected region 0x%lx->0x%lx\n", 
-                                       __FUNCTION__, region.start, region.end);
-                       return 1;
-               }
-               
-       } else {
-               /* Root Bus */
-               res = &hose->io_resource;
-               *start_phys = hose->io_base_phys;
-               *start_virt = (unsigned long) hose->io_base_virt;
-               if (res->end > res->start)
-                       *size = res->end - res->start + 1;
-               else {
-                       printk("%s(): unexpected region 0x%lx->0x%lx\n", 
-                                       __FUNCTION__, res->start, res->end);
-                       return 1;
-               }
-       }
-
-       return 0;
-}
-
-int unmap_bus_range(struct pci_bus *bus)
-{
-       unsigned long start_phys;
-       unsigned long start_virt;
-       unsigned long size;
-
-       if (!bus) {
-               printk(KERN_ERR "%s() expected bus\n", __FUNCTION__);
-               return 1;
-       }
-       
-       if (get_bus_io_range(bus, &start_phys, &start_virt, &size))
-               return 1;
-       if (iounmap_explicit((void __iomem *) start_virt, size))
-               return 1;
-
-       return 0;
-}
-EXPORT_SYMBOL(unmap_bus_range);
-
-int remap_bus_range(struct pci_bus *bus)
-{
-       unsigned long start_phys;
-       unsigned long start_virt;
-       unsigned long size;
-
-       if (!bus) {
-               printk(KERN_ERR "%s() expected bus\n", __FUNCTION__);
-               return 1;
-       }
-       
-       
-       if (get_bus_io_range(bus, &start_phys, &start_virt, &size))
-               return 1;
-       printk("mapping IO %lx -> %lx, size: %lx\n", start_phys, start_virt, size);
-       if (__ioremap_explicit(start_phys, start_virt, size,
-                              _PAGE_NO_CACHE | _PAGE_GUARDED))
-               return 1;
-
-       return 0;
-}
-EXPORT_SYMBOL(remap_bus_range);
-
-void phbs_remap_io(void)
-{
-       struct pci_controller *hose, *tmp;
-
-       list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
-               remap_bus_range(hose->bus);
-}
-
-/*
- * ppc64 can have multifunction devices that do not respond to function 0.
- * In this case we must scan all functions.
- * XXX this can go now, we use the OF device tree in all the
- * cases that caused problems. -- paulus
- */
-int pcibios_scan_all_fns(struct pci_bus *bus, int devfn)
-{
-       return 0;
-}
-
-static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev)
-{
-       struct pci_controller *hose = pci_bus_to_host(dev->bus);
-       unsigned long start, end, mask, offset;
-
-       if (res->flags & IORESOURCE_IO) {
-               offset = (unsigned long)hose->io_base_virt - pci_io_base;
-
-               start = res->start += offset;
-               end = res->end += offset;
-
-               /* Need to allow IO access to pages that are in the
-                  ISA range */
-               if (start < MAX_ISA_PORT) {
-                       if (end > MAX_ISA_PORT)
-                               end = MAX_ISA_PORT;
-
-                       start >>= PAGE_SHIFT;
-                       end >>= PAGE_SHIFT;
-
-                       /* get the range of pages for the map */
-                       mask = ((1 << (end+1)) - 1) ^ ((1 << start) - 1);
-                       io_page_mask |= mask;
-               }
-       } else if (res->flags & IORESOURCE_MEM) {
-               res->start += hose->pci_mem_offset;
-               res->end += hose->pci_mem_offset;
-       }
-}
-
-void __devinit pcibios_fixup_device_resources(struct pci_dev *dev,
-                                             struct pci_bus *bus)
-{
-       /* Update device resources.  */
-       int i;
-
-       for (i = 0; i < PCI_NUM_RESOURCES; i++)
-               if (dev->resource[i].flags)
-                       fixup_resource(&dev->resource[i], dev);
-}
-EXPORT_SYMBOL(pcibios_fixup_device_resources);
-
-static void __devinit do_bus_setup(struct pci_bus *bus)
-{
-       struct pci_dev *dev;
-
-       ppc_md.iommu_bus_setup(bus);
-
-       list_for_each_entry(dev, &bus->devices, bus_list)
-               ppc_md.iommu_dev_setup(dev);
-
-       if (ppc_md.irq_bus_setup)
-               ppc_md.irq_bus_setup(bus);
-}
-
-void __devinit pcibios_fixup_bus(struct pci_bus *bus)
-{
-       struct pci_dev *dev = bus->self;
-
-       if (dev && pci_probe_only &&
-           (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
-               /* This is a subordinate bridge */
-
-               pci_read_bridge_bases(bus);
-               pcibios_fixup_device_resources(dev, bus);
-       }
-
-       do_bus_setup(bus);
-
-       if (!pci_probe_only)
-               return;
-
-       list_for_each_entry(dev, &bus->devices, bus_list)
-               if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
-                       pcibios_fixup_device_resources(dev, bus);
-}
-EXPORT_SYMBOL(pcibios_fixup_bus);
-
-/*
- * Reads the interrupt pin to determine if the interrupt is used by the card.
- * If the interrupt is used, then gets the interrupt line from
- * Open Firmware and sets it in the pci_dev and the PCI config space.
- */
-int pci_read_irq_line(struct pci_dev *pci_dev)
-{
-       u8 intpin;
-       struct device_node *node;
-
-       pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &intpin);
-       if (intpin == 0)
-               return 0;
-
-       node = pci_device_to_OF_node(pci_dev);
-       if (node == NULL)
-               return -1;
-
-       if (node->n_intrs == 0)
-               return -1;
-
-       pci_dev->irq = node->intrs[0].line;
-
-       pci_write_config_byte(pci_dev, PCI_INTERRUPT_LINE, pci_dev->irq);
-
-       return 0;
-}
-EXPORT_SYMBOL(pci_read_irq_line);
-
-void pci_resource_to_user(const struct pci_dev *dev, int bar,
-                         const struct resource *rsrc,
-                         u64 *start, u64 *end)
-{
-       struct pci_controller *hose = pci_bus_to_host(dev->bus);
-       unsigned long offset = 0;
-
-       if (hose == NULL)
-               return;
-
-       if (rsrc->flags & IORESOURCE_IO)
-               offset = pci_io_base - (unsigned long)hose->io_base_virt +
-                       hose->io_base_phys;
-
-       *start = rsrc->start + offset;
-       *end = rsrc->end + offset;
-}
-
-#endif /* CONFIG_PPC_MULTIPLATFORM */
-
-
-#define IOBASE_BRIDGE_NUMBER   0
-#define IOBASE_MEMORY          1
-#define IOBASE_IO              2
-#define IOBASE_ISA_IO          3
-#define IOBASE_ISA_MEM         4
-
-long sys_pciconfig_iobase(long which, unsigned long in_bus,
-                         unsigned long in_devfn)
-{
-       struct pci_controller* hose;
-       struct list_head *ln;
-       struct pci_bus *bus = NULL;
-       struct device_node *hose_node;
-
-       /* Argh ! Please forgive me for that hack, but that's the
-        * simplest way to get existing XFree to not lockup on some
-        * G5 machines... So when something asks for bus 0 io base
-        * (bus 0 is HT root), we return the AGP one instead.
-        */
-       if (machine_is_compatible("MacRISC4"))
-               if (in_bus == 0)
-                       in_bus = 0xf0;
-
-       /* That syscall isn't quite compatible with PCI domains, but it's
-        * used on pre-domain setups. We return the first match.
-        */
-
-       for (ln = pci_root_buses.next; ln != &pci_root_buses; ln = ln->next) {
-               bus = pci_bus_b(ln);
-               if (in_bus >= bus->number && in_bus < (bus->number + bus->subordinate))
-                       break;
-               bus = NULL;
-       }
-       if (bus == NULL || bus->sysdata == NULL)
-               return -ENODEV;
-
-       hose_node = (struct device_node *)bus->sysdata;
-       hose = PCI_DN(hose_node)->phb;
-
-       switch (which) {
-       case IOBASE_BRIDGE_NUMBER:
-               return (long)hose->first_busno;
-       case IOBASE_MEMORY:
-               return (long)hose->pci_mem_offset;
-       case IOBASE_IO:
-               return (long)hose->io_base_phys;
-       case IOBASE_ISA_IO:
-               return (long)isa_io_base;
-       case IOBASE_ISA_MEM:
-               return -EINVAL;
-       }
-
-       return -EOPNOTSUPP;
-}
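
A pattern that recurs throughout the file above is translating between CPU-side resource addresses and PCI bus addresses by applying a per-hose offset: io_base_virt - pci_io_base for I/O resources and pci_mem_offset for memory. A reduced sketch of that translation, with toy structures standing in for the kernel types:

    #include <stdint.h>
    #include <stdio.h>

    /* Reduced stand-ins for the kernel structures used above. */
    struct hose {
    	uint64_t io_offset;	/* io_base_virt - pci_io_base */
    	uint64_t mem_offset;	/* pci_mem_offset             */
    };

    enum res_type { RES_IO, RES_MEM };

    /* CPU resource address -> PCI bus address (pcibios_resource_to_bus()). */
    static uint64_t resource_to_bus(const struct hose *h, enum res_type t,
    				    uint64_t cpu_addr)
    {
    	return cpu_addr - (t == RES_IO ? h->io_offset : h->mem_offset);
    }

    /* PCI bus address -> CPU resource address (pcibios_bus_to_resource()). */
    static uint64_t bus_to_resource(const struct hose *h, enum res_type t,
    				    uint64_t bus_addr)
    {
    	return bus_addr + (t == RES_IO ? h->io_offset : h->mem_offset);
    }

    int main(void)
    {
    	/* Made-up offsets, purely for illustration. */
    	struct hose h = { .io_offset = 0xd0000000ULL, .mem_offset = 0x40000000ULL };

    	printf("0x%llx\n", (unsigned long long)resource_to_bus(&h, RES_IO, 0xd0001000ULL));
    	printf("0x%llx\n", (unsigned long long)bus_to_resource(&h, RES_MEM, 0x80000000ULL));
    	return 0;
    }
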
diff --git a/arch/ppc64/kernel/pci_direct_iommu.c b/arch/ppc64/kernel/pci_direct_iommu.c
deleted file mode 100644 (file)
index e1a32f8..0000000
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Support for DMA from PCI devices to main memory on
- * machines without an iommu or with directly addressable
- * RAM (typically a pmac with 2Gb of RAM or less)
- *
- * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/bootmem.h>
-#include <linux/mm.h>
-#include <linux/dma-mapping.h>
-
-#include <asm/sections.h>
-#include <asm/io.h>
-#include <asm/prom.h>
-#include <asm/pci-bridge.h>
-#include <asm/machdep.h>
-#include <asm/pmac_feature.h>
-#include <asm/abs_addr.h>
-#include <asm/ppc-pci.h>
-
-static void *pci_direct_alloc_coherent(struct device *hwdev, size_t size,
-                                  dma_addr_t *dma_handle, gfp_t flag)
-{
-       void *ret;
-
-       ret = (void *)__get_free_pages(flag, get_order(size));
-       if (ret != NULL) {
-               memset(ret, 0, size);
-               *dma_handle = virt_to_abs(ret);
-       }
-       return ret;
-}
-
-static void pci_direct_free_coherent(struct device *hwdev, size_t size,
-                                void *vaddr, dma_addr_t dma_handle)
-{
-       free_pages((unsigned long)vaddr, get_order(size));
-}
-
-static dma_addr_t pci_direct_map_single(struct device *hwdev, void *ptr,
-               size_t size, enum dma_data_direction direction)
-{
-       return virt_to_abs(ptr);
-}
-
-static void pci_direct_unmap_single(struct device *hwdev, dma_addr_t dma_addr,
-               size_t size, enum dma_data_direction direction)
-{
-}
-
-static int pci_direct_map_sg(struct device *hwdev, struct scatterlist *sg,
-               int nents, enum dma_data_direction direction)
-{
-       int i;
-
-       for (i = 0; i < nents; i++, sg++) {
-               sg->dma_address = page_to_phys(sg->page) + sg->offset;
-               sg->dma_length = sg->length;
-       }
-
-       return nents;
-}
-
-static void pci_direct_unmap_sg(struct device *hwdev, struct scatterlist *sg,
-               int nents, enum dma_data_direction direction)
-{
-}
-
-static int pci_direct_dma_supported(struct device *dev, u64 mask)
-{
-       return mask < 0x100000000ull;
-}
-
-void __init pci_direct_iommu_init(void)
-{
-       pci_dma_ops.alloc_coherent = pci_direct_alloc_coherent;
-       pci_dma_ops.free_coherent = pci_direct_free_coherent;
-       pci_dma_ops.map_single = pci_direct_map_single;
-       pci_dma_ops.unmap_single = pci_direct_unmap_single;
-       pci_dma_ops.map_sg = pci_direct_map_sg;
-       pci_dma_ops.unmap_sg = pci_direct_unmap_sg;
-       pci_dma_ops.dma_supported = pci_direct_dma_supported;
-}
diff --git a/arch/ppc64/kernel/pci_dn.c b/arch/ppc64/kernel/pci_dn.c
deleted file mode 100644 (file)
index 12c4c9e..0000000
+++ /dev/null
@@ -1,230 +0,0 @@
-/*
- * pci_dn.c
- *
- * Copyright (C) 2001 Todd Inglett, IBM Corporation
- *
- * PCI manipulation via device_nodes.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *    
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- * 
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
- */
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/bootmem.h>
-
-#include <asm/io.h>
-#include <asm/prom.h>
-#include <asm/pci-bridge.h>
-#include <asm/pSeries_reconfig.h>
-#include <asm/ppc-pci.h>
-
-/*
- * Traverse_func that inits the PCI fields of the device node.
- * NOTE: this *must* be done before any config space reads/writes to the device.
- */
-static void * __devinit update_dn_pci_info(struct device_node *dn, void *data)
-{
-       struct pci_controller *phb = data;
-       int *type = (int *)get_property(dn, "ibm,pci-config-space-type", NULL);
-       u32 *regs;
-       struct pci_dn *pdn;
-
-       if (mem_init_done)
-               pdn = kmalloc(sizeof(*pdn), GFP_KERNEL);
-       else
-               pdn = alloc_bootmem(sizeof(*pdn));
-       if (pdn == NULL)
-               return NULL;
-       memset(pdn, 0, sizeof(*pdn));
-       dn->data = pdn;
-       pdn->node = dn;
-       pdn->phb = phb;
-       regs = (u32 *)get_property(dn, "reg", NULL);
-       if (regs) {
-               /* First register entry is addr (00BBSS00)  */
-               pdn->busno = (regs[0] >> 16) & 0xff;
-               pdn->devfn = (regs[0] >> 8) & 0xff;
-       }
-
-       pdn->pci_ext_config_space = (type && *type == 1);
-       return NULL;
-}
-
-/*
- * Traverse a device tree, stopping at each PCI device in the tree.
- * This is done depth first.  As each node is processed, a "pre"
- * function is called and the children are processed recursively.
- *
- * The "pre" func returns a value.  If non-zero is returned from
- * the "pre" func, the traversal stops and this value is returned.
- * This return value is useful when using traverse as a method of
- * finding a device.
- *
- * NOTE: we do not run the func for devices that do not appear to
- * be PCI except for the start node which we assume (this is good
- * because the start node is often a phb which may be missing PCI
- * properties).
- * We use the class-code as an indicator. If we run into
- * one of these nodes we also assume its siblings are non-pci for
- * performance.
- */
-void *traverse_pci_devices(struct device_node *start, traverse_func pre,
-               void *data)
-{
-       struct device_node *dn, *nextdn;
-       void *ret;
-
-       /* We started with a phb, iterate over all children */
-       for (dn = start->child; dn; dn = nextdn) {
-               u32 *classp, class;
-
-               nextdn = NULL;
-               classp = (u32 *)get_property(dn, "class-code", NULL);
-               class = classp ? *classp : 0;
-
-               if (pre && ((ret = pre(dn, data)) != NULL))
-                       return ret;
-
-               /* If we are a PCI bridge, go down */
-               if (dn->child && ((class >> 8) == PCI_CLASS_BRIDGE_PCI ||
-                                 (class >> 8) == PCI_CLASS_BRIDGE_CARDBUS))
-                       /* Depth first...do children */
-                       nextdn = dn->child;
-               else if (dn->sibling)
-                       /* ok, try next sibling instead. */
-                       nextdn = dn->sibling;
-               if (!nextdn) {
-                       /* Walk up to next valid sibling. */
-                       do {
-                               dn = dn->parent;
-                               if (dn == start)
-                                       return NULL;
-                       } while (dn->sibling == NULL);
-                       nextdn = dn->sibling;
-               }
-       }
-       return NULL;
-}
-
-/** 
- * pci_devs_phb_init_dynamic - setup pci devices under this PHB
- * phb: pci-to-host bridge (top-level bridge connecting to cpu)
- *
- * This routine is called both during boot, (before the memory
- * subsystem is set up, before kmalloc is valid) and during the 
- * dynamic lpar operation of adding a PHB to a running system.
- */
-void __devinit pci_devs_phb_init_dynamic(struct pci_controller *phb)
-{
-       struct device_node * dn = (struct device_node *) phb->arch_data;
-       struct pci_dn *pdn;
-
-       /* PHB nodes themselves must not match */
-       update_dn_pci_info(dn, phb);
-       pdn = dn->data;
-       if (pdn) {
-               pdn->devfn = pdn->busno = -1;
-               pdn->phb = phb;
-       }
-
-       /* Update dn->phb ptrs for new phb and children devices */
-       traverse_pci_devices(dn, update_dn_pci_info, phb);
-}
-
-/*
- * Traversal func that looks for a <busno,devfcn> value.
- * If found, the pci_dn is returned (thus terminating the traversal).
- */
-static void *is_devfn_node(struct device_node *dn, void *data)
-{
-       int busno = ((unsigned long)data >> 8) & 0xff;
-       int devfn = ((unsigned long)data) & 0xff;
-       struct pci_dn *pci = dn->data;
-
-       if (pci && (devfn == pci->devfn) && (busno == pci->busno))
-               return dn;
-       return NULL;
-}
-
-/*
- * This is the "slow" path for looking up a device_node from a
- * pci_dev.  It will hunt for the device under its parent's
- * phb and then update sysdata for a future fastpath.
- *
- * It may also do fixups on the actual device since this happens
- * on the first read/write.
- *
- * Note that it also must deal with devices that don't exist.
- * In this case it may probe for real hardware ("just in case")
- * and add a device_node to the device tree if necessary.
- *
- */
-struct device_node *fetch_dev_dn(struct pci_dev *dev)
-{
-       struct device_node *orig_dn = dev->sysdata;
-       struct device_node *dn;
-       unsigned long searchval = (dev->bus->number << 8) | dev->devfn;
-
-       dn = traverse_pci_devices(orig_dn, is_devfn_node, (void *)searchval);
-       if (dn)
-               dev->sysdata = dn;
-       return dn;
-}
-EXPORT_SYMBOL(fetch_dev_dn);
-
-static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *node)
-{
-       struct device_node *np = node;
-       struct pci_dn *pci = NULL;
-       int err = NOTIFY_OK;
-
-       switch (action) {
-       case PSERIES_RECONFIG_ADD:
-               pci = np->parent->data;
-               if (pci)
-                       update_dn_pci_info(np, pci->phb);
-               break;
-       default:
-               err = NOTIFY_DONE;
-               break;
-       }
-       return err;
-}
-
-static struct notifier_block pci_dn_reconfig_nb = {
-       .notifier_call = pci_dn_reconfig_notifier,
-};
-
-/** 
- * pci_devs_phb_init - Initialize phbs and pci devs under them.
- * 
- * This routine walks over all phb's (pci-host bridges) on the
- * system, and sets up assorted pci-related structures 
- * (including pci info in the device node structs) for each
- * pci device found underneath.  This routine runs once,
- * early in the boot sequence.
- */
-void __init pci_devs_phb_init(void)
-{
-       struct pci_controller *phb, *tmp;
-
-       /* This must be done first so the device nodes have valid pci info! */
-       list_for_each_entry_safe(phb, tmp, &hose_list, list_node)
-               pci_devs_phb_init_dynamic(phb);
-
-       pSeries_reconfig_notifier_register(&pci_dn_reconfig_nb);
-}
diff --git a/arch/ppc64/kernel/pci_iommu.c b/arch/ppc64/kernel/pci_iommu.c
deleted file mode 100644 (file)
index bdf15db..0000000
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * arch/ppc64/kernel/pci_iommu.c
- * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
- *
- * Rewrite, cleanup, new allocation schemes:
- * Copyright (C) 2004 Olof Johansson, IBM Corporation
- *
- * Dynamic DMA mapping support, platform-independent parts.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
- */
-
-
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/spinlock.h>
-#include <linux/string.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <asm/io.h>
-#include <asm/prom.h>
-#include <asm/iommu.h>
-#include <asm/pci-bridge.h>
-#include <asm/machdep.h>
-#include <asm/ppc-pci.h>
-
-/*
- * We can use ->sysdata directly and avoid the extra work in
- * pci_device_to_OF_node since ->sysdata will have been initialised
- * in the iommu init code for all devices.
- */
-#define PCI_GET_DN(dev) ((struct device_node *)((dev)->sysdata))
-
-static inline struct iommu_table *devnode_table(struct device *dev)
-{
-       struct pci_dev *pdev;
-
-       if (!dev) {
-               pdev = ppc64_isabridge_dev;
-               if (!pdev)
-                       return NULL;
-       } else
-               pdev = to_pci_dev(dev);
-
-       return PCI_DN(PCI_GET_DN(pdev))->iommu_table;
-}
-
-
-/* Allocates a contiguous real buffer and creates mappings over it.
- * Returns the virtual address of the buffer and sets dma_handle
- * to the dma address (mapping) of the first page.
- */
-static void *pci_iommu_alloc_coherent(struct device *hwdev, size_t size,
-                          dma_addr_t *dma_handle, gfp_t flag)
-{
-       return iommu_alloc_coherent(devnode_table(hwdev), size, dma_handle,
-                       flag);
-}
-
-static void pci_iommu_free_coherent(struct device *hwdev, size_t size,
-                        void *vaddr, dma_addr_t dma_handle)
-{
-       iommu_free_coherent(devnode_table(hwdev), size, vaddr, dma_handle);
-}
-
-/* Creates TCEs for a user provided buffer.  The user buffer must be 
- * contiguous real kernel storage (not vmalloc).  The address of the buffer
- * passed here is the kernel (virtual) address of the buffer.  The buffer
- * need not be page aligned, the dma_addr_t returned will point to the same
- * byte within the page as vaddr.
- */
-static dma_addr_t pci_iommu_map_single(struct device *hwdev, void *vaddr,
-               size_t size, enum dma_data_direction direction)
-{
-       return iommu_map_single(devnode_table(hwdev), vaddr, size, direction);
-}
-
-
-static void pci_iommu_unmap_single(struct device *hwdev, dma_addr_t dma_handle,
-               size_t size, enum dma_data_direction direction)
-{
-       iommu_unmap_single(devnode_table(hwdev), dma_handle, size, direction);
-}
-
-
-static int pci_iommu_map_sg(struct device *pdev, struct scatterlist *sglist,
-               int nelems, enum dma_data_direction direction)
-{
-       return iommu_map_sg(pdev, devnode_table(pdev), sglist,
-                       nelems, direction);
-}
-
-static void pci_iommu_unmap_sg(struct device *pdev, struct scatterlist *sglist,
-               int nelems, enum dma_data_direction direction)
-{
-       iommu_unmap_sg(devnode_table(pdev), sglist, nelems, direction);
-}
-
-/* We support DMA to/from any memory page via the iommu */
-static int pci_iommu_dma_supported(struct device *dev, u64 mask)
-{
-       return 1;
-}
-
-void pci_iommu_init(void)
-{
-       pci_dma_ops.alloc_coherent = pci_iommu_alloc_coherent;
-       pci_dma_ops.free_coherent = pci_iommu_free_coherent;
-       pci_dma_ops.map_single = pci_iommu_map_single;
-       pci_dma_ops.unmap_single = pci_iommu_unmap_single;
-       pci_dma_ops.map_sg = pci_iommu_map_sg;
-       pci_dma_ops.unmap_sg = pci_iommu_unmap_sg;
-       pci_dma_ops.dma_supported = pci_iommu_dma_supported;
-}
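pci_iommu_init() fills a table of function pointers so that generic DMA calls dispatch to the iommu-backed implementations above. The following is only an illustrative userspace sketch of that ops-table pattern; dma_ops, noop_map and noop_unmap are made-up names, not the kernel's pci_dma_ops interface:

#include <stddef.h>
#include <stdio.h>

/* Illustrative ops-table pattern only. */
struct dma_ops {
        unsigned long (*map_single)(void *vaddr, size_t size);
        void (*unmap_single)(unsigned long handle, size_t size);
};

static unsigned long noop_map(void *vaddr, size_t size)
{
        printf("mapping %zu bytes at %p\n", size, vaddr);
        return (unsigned long)vaddr;    /* identity "mapping" */
}

static void noop_unmap(unsigned long handle, size_t size)
{
        printf("unmapping %zu bytes (handle %#lx)\n", size, handle);
}

static struct dma_ops dma_ops;

static void dma_ops_init(void)
{
        /* analogous to pci_iommu_init() populating its ops table */
        dma_ops.map_single = noop_map;
        dma_ops.unmap_single = noop_unmap;
}

int main(void)
{
        char buf[64];

        dma_ops_init();
        dma_ops.unmap_single(dma_ops.map_single(buf, sizeof(buf)), sizeof(buf));
        return 0;
}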
diff --git a/arch/ppc64/kernel/ppc_ksyms.c b/arch/ppc64/kernel/ppc_ksyms.c
deleted file mode 100644 (file)
index 84006e2..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-/* 
- * c 2001 PPC 64 Team, IBM Corp
- *
- *      This program is free software; you can redistribute it and/or
- *      modify it under the terms of the GNU General Public License
- *      as published by the Free Software Foundation; either version
- *      2 of the License, or (at your option) any later version.
- */
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/console.h>
-#include <net/checksum.h>
-
-#include <asm/processor.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/system.h>
-#include <asm/hw_irq.h>
-#include <asm/abs_addr.h>
-#include <asm/cacheflush.h>
-
-EXPORT_SYMBOL(strcpy);
-EXPORT_SYMBOL(strncpy);
-EXPORT_SYMBOL(strcat);
-EXPORT_SYMBOL(strncat);
-EXPORT_SYMBOL(strchr);
-EXPORT_SYMBOL(strrchr);
-EXPORT_SYMBOL(strpbrk);
-EXPORT_SYMBOL(strstr);
-EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strnlen);
-EXPORT_SYMBOL(strcmp);
-EXPORT_SYMBOL(strncmp);
-
-EXPORT_SYMBOL(csum_partial);
-EXPORT_SYMBOL(csum_partial_copy_generic);
-EXPORT_SYMBOL(ip_fast_csum);
-EXPORT_SYMBOL(csum_tcpudp_magic);
-
-EXPORT_SYMBOL(__copy_tofrom_user);
-EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(__strncpy_from_user);
-EXPORT_SYMBOL(__strnlen_user);
-
-EXPORT_SYMBOL(reloc_offset);
-
-EXPORT_SYMBOL(_insb);
-EXPORT_SYMBOL(_outsb);
-EXPORT_SYMBOL(_insw);
-EXPORT_SYMBOL(_outsw);
-EXPORT_SYMBOL(_insl);
-EXPORT_SYMBOL(_outsl);
-EXPORT_SYMBOL(_insw_ns);
-EXPORT_SYMBOL(_outsw_ns);
-EXPORT_SYMBOL(_insl_ns);
-EXPORT_SYMBOL(_outsl_ns);
-
-EXPORT_SYMBOL(kernel_thread);
-
-EXPORT_SYMBOL(giveup_fpu);
-#ifdef CONFIG_ALTIVEC
-EXPORT_SYMBOL(giveup_altivec);
-#endif
-EXPORT_SYMBOL(__flush_icache_range);
-EXPORT_SYMBOL(flush_dcache_range);
-
-EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(memscan);
-EXPORT_SYMBOL(memcmp);
-EXPORT_SYMBOL(memchr);
-
-EXPORT_SYMBOL(timer_interrupt);
-EXPORT_SYMBOL(console_drivers);
diff --git a/arch/ppc64/kernel/prom.c b/arch/ppc64/kernel/prom.c
deleted file mode 100644 (file)
index 47cc26e..0000000
+++ /dev/null
@@ -1,1956 +0,0 @@
-/*
- * 
- *
- * Procedures for interfacing to Open Firmware.
- *
- * Paul Mackerras      August 1996.
- * Copyright (C) 1996 Paul Mackerras.
- * 
- *  Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
- *    {engebret|bergner}@us.ibm.com 
- *
- *      This program is free software; you can redistribute it and/or
- *      modify it under the terms of the GNU General Public License
- *      as published by the Free Software Foundation; either version
- *      2 of the License, or (at your option) any later version.
- */
-
-#undef DEBUG
-
-#include <stdarg.h>
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/threads.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/stringify.h>
-#include <linux/delay.h>
-#include <linux/initrd.h>
-#include <linux/bitops.h>
-#include <linux/module.h>
-
-#include <asm/prom.h>
-#include <asm/rtas.h>
-#include <asm/lmb.h>
-#include <asm/abs_addr.h>
-#include <asm/page.h>
-#include <asm/processor.h>
-#include <asm/irq.h>
-#include <asm/io.h>
-#include <asm/smp.h>
-#include <asm/system.h>
-#include <asm/mmu.h>
-#include <asm/pgtable.h>
-#include <asm/pci.h>
-#include <asm/iommu.h>
-#include <asm/btext.h>
-#include <asm/sections.h>
-#include <asm/machdep.h>
-#include <asm/pSeries_reconfig.h>
-
-#ifdef DEBUG
-#define DBG(fmt...) udbg_printf(fmt)
-#else
-#define DBG(fmt...)
-#endif
-
-struct pci_reg_property {
-       struct pci_address addr;
-       u32 size_hi;
-       u32 size_lo;
-};
-
-struct isa_reg_property {
-       u32 space;
-       u32 address;
-       u32 size;
-};
-
-
-typedef int interpret_func(struct device_node *, unsigned long *,
-                          int, int, int);
-
-extern struct rtas_t rtas;
-extern struct lmb lmb;
-extern unsigned long klimit;
-extern unsigned long memory_limit;
-
-static int __initdata dt_root_addr_cells;
-static int __initdata dt_root_size_cells;
-static int __initdata iommu_is_off;
-int __initdata iommu_force_on;
-unsigned long tce_alloc_start, tce_alloc_end;
-
-typedef u32 cell_t;
-
-#if 0
-static struct boot_param_header *initial_boot_params __initdata;
-#else
-struct boot_param_header *initial_boot_params;
-#endif
-
-static struct device_node *allnodes = NULL;
-
-/* use when traversing tree through the allnext, child, sibling,
- * or parent members of struct device_node.
- */
-static DEFINE_RWLOCK(devtree_lock);
-
-/* export that to outside world */
-struct device_node *of_chosen;
-
-/*
- * Wrapper for allocating memory for various data that needs to be
- * attached to device nodes as they are processed at boot or when
- * added to the device tree later (e.g. DLPAR).  At boot there is
- * already a region reserved so we just increment *mem_start by size;
- * otherwise we call kmalloc.
- */
-static void * prom_alloc(unsigned long size, unsigned long *mem_start)
-{
-       unsigned long tmp;
-
-       if (!mem_start)
-               return kmalloc(size, GFP_KERNEL);
-
-       tmp = *mem_start;
-       *mem_start += size;
-       return (void *)tmp;
-}
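prom_alloc() either bumps a cursor through a pre-reserved region (early boot, before kmalloc works) or falls back to kmalloc. A minimal userspace sketch of the bump-pointer half, with hypothetical names:

#include <stdio.h>

/* Illustrative bump allocator only; mirrors the *mem_start += size
 * path in prom_alloc() above. */
static void *bump_alloc(unsigned long size, unsigned long *mem_start)
{
        unsigned long tmp = *mem_start;

        *mem_start += size;
        return (void *)tmp;
}

int main(void)
{
        static char region[256];
        unsigned long cursor = (unsigned long)region;

        void *a = bump_alloc(32, &cursor);
        void *b = bump_alloc(64, &cursor);

        printf("a=%p b=%p (b - a = %ld)\n", a, b,
               (long)((char *)b - (char *)a));
        return 0;
}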
-
-/*
- * Find the device_node with a given phandle.
- */
-static struct device_node * find_phandle(phandle ph)
-{
-       struct device_node *np;
-
-       for (np = allnodes; np != 0; np = np->allnext)
-               if (np->linux_phandle == ph)
-                       return np;
-       return NULL;
-}
-
-/*
- * Find the interrupt parent of a node.
- */
-static struct device_node * __devinit intr_parent(struct device_node *p)
-{
-       phandle *parp;
-
-       parp = (phandle *) get_property(p, "interrupt-parent", NULL);
-       if (parp == NULL)
-               return p->parent;
-       return find_phandle(*parp);
-}
-
-/*
- * Find out the size of each entry of the interrupts property
- * for a node.
- */
-int __devinit prom_n_intr_cells(struct device_node *np)
-{
-       struct device_node *p;
-       unsigned int *icp;
-
-       for (p = np; (p = intr_parent(p)) != NULL; ) {
-               icp = (unsigned int *)
-                       get_property(p, "#interrupt-cells", NULL);
-               if (icp != NULL)
-                       return *icp;
-               if (get_property(p, "interrupt-controller", NULL) != NULL
-                   || get_property(p, "interrupt-map", NULL) != NULL) {
-                       printk("oops, node %s doesn't have #interrupt-cells\n",
-                              p->full_name);
-                       return 1;
-               }
-       }
-#ifdef DEBUG_IRQ
-       printk("prom_n_intr_cells failed for %s\n", np->full_name);
-#endif
-       return 1;
-}
-
-/*
- * Map an interrupt from a device up to the platform interrupt
- * descriptor.
- */
-static int __devinit map_interrupt(unsigned int **irq, struct device_node **ictrler,
-                                  struct device_node *np, unsigned int *ints,
-                                  int nintrc)
-{
-       struct device_node *p, *ipar;
-       unsigned int *imap, *imask, *ip;
-       int i, imaplen, match;
-       int newintrc = 0, newaddrc = 0;
-       unsigned int *reg;
-       int naddrc;
-
-       reg = (unsigned int *) get_property(np, "reg", NULL);
-       naddrc = prom_n_addr_cells(np);
-       p = intr_parent(np);
-       while (p != NULL) {
-               if (get_property(p, "interrupt-controller", NULL) != NULL)
-                       /* this node is an interrupt controller, stop here */
-                       break;
-               imap = (unsigned int *)
-                       get_property(p, "interrupt-map", &imaplen);
-               if (imap == NULL) {
-                       p = intr_parent(p);
-                       continue;
-               }
-               imask = (unsigned int *)
-                       get_property(p, "interrupt-map-mask", NULL);
-               if (imask == NULL) {
-                       printk("oops, %s has interrupt-map but no mask\n",
-                              p->full_name);
-                       return 0;
-               }
-               imaplen /= sizeof(unsigned int);
-               match = 0;
-               ipar = NULL;
-               while (imaplen > 0 && !match) {
-                       /* check the child-interrupt field */
-                       match = 1;
-                       for (i = 0; i < naddrc && match; ++i)
-                               match = ((reg[i] ^ imap[i]) & imask[i]) == 0;
-                       for (; i < naddrc + nintrc && match; ++i)
-                               match = ((ints[i-naddrc] ^ imap[i]) & imask[i]) == 0;
-                       imap += naddrc + nintrc;
-                       imaplen -= naddrc + nintrc;
-                       /* grab the interrupt parent */
-                       ipar = find_phandle((phandle) *imap++);
-                       --imaplen;
-                       if (ipar == NULL) {
-                               printk("oops, no int parent %x in map of %s\n",
-                                      imap[-1], p->full_name);
-                               return 0;
-                       }
-                       /* find the parent's # addr and intr cells */
-                       ip = (unsigned int *)
-                               get_property(ipar, "#interrupt-cells", NULL);
-                       if (ip == NULL) {
-                               printk("oops, no #interrupt-cells on %s\n",
-                                      ipar->full_name);
-                               return 0;
-                       }
-                       newintrc = *ip;
-                       ip = (unsigned int *)
-                               get_property(ipar, "#address-cells", NULL);
-                       newaddrc = (ip == NULL)? 0: *ip;
-                       imap += newaddrc + newintrc;
-                       imaplen -= newaddrc + newintrc;
-               }
-               if (imaplen < 0) {
-                       printk("oops, error decoding int-map on %s, len=%d\n",
-                              p->full_name, imaplen);
-                       return 0;
-               }
-               if (!match) {
-#ifdef DEBUG_IRQ
-                       printk("oops, no match in %s int-map for %s\n",
-                              p->full_name, np->full_name);
-#endif
-                       return 0;
-               }
-               p = ipar;
-               naddrc = newaddrc;
-               nintrc = newintrc;
-               ints = imap - nintrc;
-               reg = ints - naddrc;
-       }
-       if (p == NULL) {
-#ifdef DEBUG_IRQ
-               printk("hmmm, int tree for %s doesn't have ctrler\n",
-                      np->full_name);
-#endif
-               return 0;
-       }
-       *irq = ints;
-       *ictrler = p;
-       return nintrc;
-}
-
-static int __devinit finish_node_interrupts(struct device_node *np,
-                                           unsigned long *mem_start,
-                                           int measure_only)
-{
-       unsigned int *ints;
-       int intlen, intrcells, intrcount;
-       int i, j, n;
-       unsigned int *irq, virq;
-       struct device_node *ic;
-
-       ints = (unsigned int *) get_property(np, "interrupts", &intlen);
-       if (ints == NULL)
-               return 0;
-       intrcells = prom_n_intr_cells(np);
-       intlen /= intrcells * sizeof(unsigned int);
-
-       np->intrs = prom_alloc(intlen * sizeof(*(np->intrs)), mem_start);
-       if (!np->intrs)
-               return -ENOMEM;
-
-       if (measure_only)
-               return 0;
-
-       intrcount = 0;
-       for (i = 0; i < intlen; ++i, ints += intrcells) {
-               n = map_interrupt(&irq, &ic, np, ints, intrcells);
-               if (n <= 0)
-                       continue;
-
-               /* don't map IRQ numbers under a cascaded 8259 controller */
-               if (ic && device_is_compatible(ic, "chrp,iic")) {
-                       np->intrs[intrcount].line = irq[0];
-               } else {
-                       virq = virt_irq_create_mapping(irq[0]);
-                       if (virq == NO_IRQ) {
-                               printk(KERN_CRIT "Could not allocate interrupt"
-                                      " number for %s\n", np->full_name);
-                               continue;
-                       }
-                       np->intrs[intrcount].line = irq_offset_up(virq);
-               }
-
-               /* We offset irq numbers for the u3 MPIC by 128 in PowerMac */
-               if (_machine == PLATFORM_POWERMAC && ic && ic->parent) {
-                       char *name = get_property(ic->parent, "name", NULL);
-                       if (name && !strcmp(name, "u3"))
-                               np->intrs[intrcount].line += 128;
-                       else if (!(name && !strcmp(name, "mac-io")))
-                               /* ignore other cascaded controllers, such as
-                                  the k2-sata-root */
-                               break;
-               }
-               np->intrs[intrcount].sense = 1;
-               if (n > 1)
-                       np->intrs[intrcount].sense = irq[1];
-               if (n > 2) {
-                       printk("hmmm, got %d intr cells for %s:", n,
-                              np->full_name);
-                       for (j = 0; j < n; ++j)
-                               printk(" %d", irq[j]);
-                       printk("\n");
-               }
-               ++intrcount;
-       }
-       np->n_intrs = intrcount;
-
-       return 0;
-}
-
-static int __devinit interpret_pci_props(struct device_node *np,
-                                        unsigned long *mem_start,
-                                        int naddrc, int nsizec,
-                                        int measure_only)
-{
-       struct address_range *adr;
-       struct pci_reg_property *pci_addrs;
-       int i, l, n_addrs;
-
-       pci_addrs = (struct pci_reg_property *)
-               get_property(np, "assigned-addresses", &l);
-       if (!pci_addrs)
-               return 0;
-
-       n_addrs = l / sizeof(*pci_addrs);
-
-       adr = prom_alloc(n_addrs * sizeof(*adr), mem_start);
-       if (!adr)
-               return -ENOMEM;
-
-       if (measure_only)
-               return 0;
-
-       np->addrs = adr;
-       np->n_addrs = n_addrs;
-
-       for (i = 0; i < n_addrs; i++) {
-               adr[i].space = pci_addrs[i].addr.a_hi;
-               adr[i].address = pci_addrs[i].addr.a_lo |
-                       ((u64)pci_addrs[i].addr.a_mid << 32);
-               adr[i].size = pci_addrs[i].size_lo;
-       }
-
-       return 0;
-}
-
-static int __init interpret_dbdma_props(struct device_node *np,
-                                       unsigned long *mem_start,
-                                       int naddrc, int nsizec,
-                                       int measure_only)
-{
-       struct reg_property32 *rp;
-       struct address_range *adr;
-       unsigned long base_address;
-       int i, l;
-       struct device_node *db;
-
-       base_address = 0;
-       if (!measure_only) {
-               for (db = np->parent; db != NULL; db = db->parent) {
-                       if (!strcmp(db->type, "dbdma") && db->n_addrs != 0) {
-                               base_address = db->addrs[0].address;
-                               break;
-                       }
-               }
-       }
-
-       rp = (struct reg_property32 *) get_property(np, "reg", &l);
-       if (rp != 0 && l >= sizeof(struct reg_property32)) {
-               i = 0;
-               adr = (struct address_range *) (*mem_start);
-               while ((l -= sizeof(struct reg_property32)) >= 0) {
-                       if (!measure_only) {
-                               adr[i].space = 2;
-                               adr[i].address = rp[i].address + base_address;
-                               adr[i].size = rp[i].size;
-                       }
-                       ++i;
-               }
-               np->addrs = adr;
-               np->n_addrs = i;
-               (*mem_start) += i * sizeof(struct address_range);
-       }
-
-       return 0;
-}
-
-static int __init interpret_macio_props(struct device_node *np,
-                                       unsigned long *mem_start,
-                                       int naddrc, int nsizec,
-                                       int measure_only)
-{
-       struct reg_property32 *rp;
-       struct address_range *adr;
-       unsigned long base_address;
-       int i, l;
-       struct device_node *db;
-
-       base_address = 0;
-       if (!measure_only) {
-               for (db = np->parent; db != NULL; db = db->parent) {
-                       if (!strcmp(db->type, "mac-io") && db->n_addrs != 0) {
-                               base_address = db->addrs[0].address;
-                               break;
-                       }
-               }
-       }
-
-       rp = (struct reg_property32 *) get_property(np, "reg", &l);
-       if (rp != 0 && l >= sizeof(struct reg_property32)) {
-               i = 0;
-               adr = (struct address_range *) (*mem_start);
-               while ((l -= sizeof(struct reg_property32)) >= 0) {
-                       if (!measure_only) {
-                               adr[i].space = 2;
-                               adr[i].address = rp[i].address + base_address;
-                               adr[i].size = rp[i].size;
-                       }
-                       ++i;
-               }
-               np->addrs = adr;
-               np->n_addrs = i;
-               (*mem_start) += i * sizeof(struct address_range);
-       }
-
-       return 0;
-}
-
-static int __init interpret_isa_props(struct device_node *np,
-                                     unsigned long *mem_start,
-                                     int naddrc, int nsizec,
-                                     int measure_only)
-{
-       struct isa_reg_property *rp;
-       struct address_range *adr;
-       int i, l;
-
-       rp = (struct isa_reg_property *) get_property(np, "reg", &l);
-       if (rp != 0 && l >= sizeof(struct isa_reg_property)) {
-               i = 0;
-               adr = (struct address_range *) (*mem_start);
-               while ((l -= sizeof(struct isa_reg_property)) >= 0) {
-                       if (!measure_only) {
-                               adr[i].space = rp[i].space;
-                               adr[i].address = rp[i].address;
-                               adr[i].size = rp[i].size;
-                       }
-                       ++i;
-               }
-               np->addrs = adr;
-               np->n_addrs = i;
-               (*mem_start) += i * sizeof(struct address_range);
-       }
-
-       return 0;
-}
-
-static int __init interpret_root_props(struct device_node *np,
-                                      unsigned long *mem_start,
-                                      int naddrc, int nsizec,
-                                      int measure_only)
-{
-       struct address_range *adr;
-       int i, l;
-       unsigned int *rp;
-       int rpsize = (naddrc + nsizec) * sizeof(unsigned int);
-
-       rp = (unsigned int *) get_property(np, "reg", &l);
-       if (rp != 0 && l >= rpsize) {
-               i = 0;
-               adr = (struct address_range *) (*mem_start);
-               while ((l -= rpsize) >= 0) {
-                       if (!measure_only) {
-                               adr[i].space = 0;
-                               adr[i].address = rp[naddrc - 1];
-                               adr[i].size = rp[naddrc + nsizec - 1];
-                       }
-                       ++i;
-                       rp += naddrc + nsizec;
-               }
-               np->addrs = adr;
-               np->n_addrs = i;
-               (*mem_start) += i * sizeof(struct address_range);
-       }
-
-       return 0;
-}
-
-static int __devinit finish_node(struct device_node *np,
-                                unsigned long *mem_start,
-                                interpret_func *ifunc,
-                                int naddrc, int nsizec,
-                                int measure_only)
-{
-       struct device_node *child;
-       int *ip, rc = 0;
-
-       /* get the device addresses and interrupts */
-       if (ifunc != NULL)
-               rc = ifunc(np, mem_start, naddrc, nsizec, measure_only);
-       if (rc)
-               goto out;
-
-       rc = finish_node_interrupts(np, mem_start, measure_only);
-       if (rc)
-               goto out;
-
-       /* Look for #address-cells and #size-cells properties. */
-       ip = (int *) get_property(np, "#address-cells", NULL);
-       if (ip != NULL)
-               naddrc = *ip;
-       ip = (int *) get_property(np, "#size-cells", NULL);
-       if (ip != NULL)
-               nsizec = *ip;
-
-       if (!strcmp(np->name, "device-tree") || np->parent == NULL)
-               ifunc = interpret_root_props;
-       else if (np->type == 0)
-               ifunc = NULL;
-       else if (!strcmp(np->type, "pci") || !strcmp(np->type, "vci"))
-               ifunc = interpret_pci_props;
-       else if (!strcmp(np->type, "dbdma"))
-               ifunc = interpret_dbdma_props;
-       else if (!strcmp(np->type, "mac-io") || ifunc == interpret_macio_props)
-               ifunc = interpret_macio_props;
-       else if (!strcmp(np->type, "isa"))
-               ifunc = interpret_isa_props;
-       else if (!strcmp(np->name, "uni-n") || !strcmp(np->name, "u3"))
-               ifunc = interpret_root_props;
-       else if (!((ifunc == interpret_dbdma_props
-                   || ifunc == interpret_macio_props)
-                  && (!strcmp(np->type, "escc")
-                      || !strcmp(np->type, "media-bay"))))
-               ifunc = NULL;
-
-       for (child = np->child; child != NULL; child = child->sibling) {
-               rc = finish_node(child, mem_start, ifunc,
-                                naddrc, nsizec, measure_only);
-               if (rc)
-                       goto out;
-       }
-out:
-       return rc;
-}
-
-/**
- * finish_device_tree is called once things are running normally
- * (i.e. with text and data mapped to the address they were linked at).
- * It traverses the device tree and fills in some of the additional
- * fields in each node, like {n_}addrs and {n_}intrs; the virt interrupt
- * mapping is also initialized at this point.
- */
-void __init finish_device_tree(void)
-{
-       unsigned long start, end, size = 0;
-
-       DBG(" -> finish_device_tree\n");
-
-       if (ppc64_interrupt_controller == IC_INVALID) {
-               DBG("failed to configure interrupt controller type\n");
-               panic("failed to configure interrupt controller type\n");
-       }
-       
-       /* Initialize virtual IRQ map */
-       virt_irq_init();
-
-       /*
-        * Finish device-tree (pre-parsing some properties etc...)
-        * We do this in two passes: one with "measure_only" set, which
-        * only measures the amount of memory needed, after which we can
-        * allocate that memory and call finish_node() again. However,
-        * we must be careful because most routines will fail when
-        * prom_alloc() returns 0, so we must make sure our first pass
-        * doesn't start at 0. We pre-initialize size to 16 for that
-        * reason and then subtract those additional 16 bytes.
-        */
-       size = 16;
-       finish_node(allnodes, &size, NULL, 0, 0, 1);
-       size -= 16;
-       end = start = (unsigned long)abs_to_virt(lmb_alloc(size, 128));
-       finish_node(allnodes, &end, NULL, 0, 0, 0);
-       BUG_ON(end != start + size);
-
-       DBG(" <- finish_device_tree\n");
-}
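finish_device_tree() runs finish_node() twice: a first pass with measure_only set just adds up the space needed, then exactly that much is allocated and the second pass fills it in. A small userspace sketch of that measure-then-fill pattern, assuming made-up data and a hypothetical fill_names() helper:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative two-pass pattern: pass 1 only advances the cursor to
 * measure, pass 2 writes into a buffer of exactly that size. */
static const char *names[] = { "cpu", "memory", "pci" };

static void fill_names(unsigned long *cursor, int measure_only)
{
        for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
                size_t len = strlen(names[i]) + 1;

                if (!measure_only)
                        memcpy((void *)*cursor, names[i], len);
                *cursor += len;
        }
}

int main(void)
{
        unsigned long size = 0, start, end;

        fill_names(&size, 1);                   /* pass 1: measure */
        start = end = (unsigned long)malloc(size);
        if (!start)
                return 1;
        fill_names(&end, 0);                    /* pass 2: fill */
        printf("used %lu of %lu bytes\n", end - start, size);
        free((void *)start);
        return 0;
}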
-
-#ifdef DEBUG
-#define printk udbg_printf
-#endif
-
-static inline char *find_flat_dt_string(u32 offset)
-{
-       return ((char *)initial_boot_params) +
-               initial_boot_params->off_dt_strings + offset;
-}
-
-/**
- * This function is used to scan the flattened device-tree; it is
- * used to extract memory information at boot, before we can
- * unflatten the tree.
- */
-int __init of_scan_flat_dt(int (*it)(unsigned long node,
-                                    const char *uname, int depth,
-                                    void *data),
-                          void *data)
-{
-       unsigned long p = ((unsigned long)initial_boot_params) +
-               initial_boot_params->off_dt_struct;
-       int rc = 0;
-       int depth = -1;
-
-       do {
-               u32 tag = *((u32 *)p);
-               char *pathp;
-               
-               p += 4;
-               if (tag == OF_DT_END_NODE) {
-                       depth --;
-                       continue;
-               }
-               if (tag == OF_DT_NOP)
-                       continue;
-               if (tag == OF_DT_END)
-                       break;
-               if (tag == OF_DT_PROP) {
-                       u32 sz = *((u32 *)p);
-                       p += 8;
-                       if (initial_boot_params->version < 0x10)
-                               p = _ALIGN(p, sz >= 8 ? 8 : 4);
-                       p += sz;
-                       p = _ALIGN(p, 4);
-                       continue;
-               }
-               if (tag != OF_DT_BEGIN_NODE) {
-                       printk(KERN_WARNING "Invalid tag %x scanning flattened"
-                              " device tree !\n", tag);
-                       return -EINVAL;
-               }
-               depth++;
-               pathp = (char *)p;
-               p = _ALIGN(p + strlen(pathp) + 1, 4);
-               if ((*pathp) == '/') {
-                       char *lp, *np;
-                       for (lp = NULL, np = pathp; *np; np++)
-                               if ((*np) == '/')
-                                       lp = np+1;
-                       if (lp != NULL)
-                               pathp = lp;
-               }
-               rc = it(p, pathp, depth, data);
-               if (rc != 0)
-                       break;          
-       } while(1);
-
-       return rc;
-}
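of_scan_flat_dt() hands each node of the flattened tree to a callback and stops as soon as the callback returns non-zero, which is how the early_init_dt_scan_* routines below end their scans early. A minimal userspace sketch of that iterator-with-early-exit pattern, using hypothetical names and data:

#include <stdio.h>
#include <string.h>

/* Illustrative callback-scan pattern only: walk items, stop as soon as
 * the callback returns non-zero. */
static const char *nodes[] = { "cpus", "memory", "chosen", "pci" };

static int scan(int (*it)(const char *uname, void *data), void *data)
{
        int rc = 0;

        for (size_t i = 0; i < sizeof(nodes) / sizeof(nodes[0]); i++) {
                rc = it(nodes[i], data);
                if (rc != 0)
                        break;
        }
        return rc;
}

static int find_chosen(const char *uname, void *data)
{
        if (strcmp(uname, "chosen") != 0)
                return 0;
        printf("found %s\n", uname);
        return 1;       /* non-zero terminates the scan */
}

int main(void)
{
        return scan(find_chosen, NULL) ? 0 : 1;
}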
-
-/**
- * This function can be used within an of_scan_flat_dt() callback to get
- * access to properties.
- */
-void* __init of_get_flat_dt_prop(unsigned long node, const char *name,
-                                unsigned long *size)
-{
-       unsigned long p = node;
-
-       do {
-               u32 tag = *((u32 *)p);
-               u32 sz, noff;
-               const char *nstr;
-
-               p += 4;
-               if (tag == OF_DT_NOP)
-                       continue;
-               if (tag != OF_DT_PROP)
-                       return NULL;
-
-               sz = *((u32 *)p);
-               noff = *((u32 *)(p + 4));
-               p += 8;
-               if (initial_boot_params->version < 0x10)
-                       p = _ALIGN(p, sz >= 8 ? 8 : 4);
-
-               nstr = find_flat_dt_string(noff);
-               if (nstr == NULL) {
-                       printk(KERN_WARNING "Can't find property index"
-                              " name !\n");
-                       return NULL;
-               }
-               if (strcmp(name, nstr) == 0) {
-                       if (size)
-                               *size = sz;
-                       return (void *)p;
-               }
-               p += sz;
-               p = _ALIGN(p, 4);
-       } while(1);
-}
-
-static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
-                                      unsigned long align)
-{
-       void *res;
-
-       *mem = _ALIGN(*mem, align);
-       res = (void *)*mem;
-       *mem += size;
-
-       return res;
-}
-
-static unsigned long __init unflatten_dt_node(unsigned long mem,
-                                             unsigned long *p,
-                                             struct device_node *dad,
-                                             struct device_node ***allnextpp,
-                                             unsigned long fpsize)
-{
-       struct device_node *np;
-       struct property *pp, **prev_pp = NULL;
-       char *pathp;
-       u32 tag;
-       unsigned int l, allocl;
-       int has_name = 0;
-       int new_format = 0;
-
-       tag = *((u32 *)(*p));
-       if (tag != OF_DT_BEGIN_NODE) {
-               printk("Weird tag at start of node: %x\n", tag);
-               return mem;
-       }
-       *p += 4;
-       pathp = (char *)*p;
-       l = allocl = strlen(pathp) + 1;
-       *p = _ALIGN(*p + l, 4);
-
-       /* version 0x10 has a more compact unit name here instead of the full
-        * path. We accumulate the full path size using "fpsize"; we'll rebuild
-        * it later. We detect this because the first character of the name is
-        * not '/'.
-        */
-       if ((*pathp) != '/') {
-               new_format = 1;
-               if (fpsize == 0) {
-                       /* root node: special case. fpsize accounts for the path
-                        * plus terminating zero. The root node only has '/', so
-                        * fpsize should be 2, but we want to avoid first-level
-                        * nodes having two '/', so we use fpsize 1 here.
-                        */
-                       fpsize = 1;
-                       allocl = 2;
-               } else {
-                       /* account for '/' and path size minus terminal 0
-                        * already in 'l'
-                        */
-                       fpsize += l;
-                       allocl = fpsize;
-               }
-       }
-
-
-       np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl,
-                               __alignof__(struct device_node));
-       if (allnextpp) {
-               memset(np, 0, sizeof(*np));
-               np->full_name = ((char*)np) + sizeof(struct device_node);
-               if (new_format) {
-                       char *p = np->full_name;
-                       /* rebuild full path for new format */
-                       if (dad && dad->parent) {
-                               strcpy(p, dad->full_name);
-#ifdef DEBUG
-                               if ((strlen(p) + l + 1) != allocl) {
-                                       DBG("%s: p: %d, l: %d, a: %d\n",
-                                           pathp, strlen(p), l, allocl);
-                               }
-#endif
-                               p += strlen(p);
-                       }
-                       *(p++) = '/';
-                       memcpy(p, pathp, l);
-               } else
-                       memcpy(np->full_name, pathp, l);
-               prev_pp = &np->properties;
-               **allnextpp = np;
-               *allnextpp = &np->allnext;
-               if (dad != NULL) {
-                       np->parent = dad;
-                       /* we temporarily use the next field as `last_child'*/
-                       if (dad->next == 0)
-                               dad->child = np;
-                       else
-                               dad->next->sibling = np;
-                       dad->next = np;
-               }
-               kref_init(&np->kref);
-       }
-       while(1) {
-               u32 sz, noff;
-               char *pname;
-
-               tag = *((u32 *)(*p));
-               if (tag == OF_DT_NOP) {
-                       *p += 4;
-                       continue;
-               }
-               if (tag != OF_DT_PROP)
-                       break;
-               *p += 4;
-               sz = *((u32 *)(*p));
-               noff = *((u32 *)((*p) + 4));
-               *p += 8;
-               if (initial_boot_params->version < 0x10)
-                       *p = _ALIGN(*p, sz >= 8 ? 8 : 4);
-
-               pname = find_flat_dt_string(noff);
-               if (pname == NULL) {
-                       printk("Can't find property name in list !\n");
-                       break;
-               }
-               if (strcmp(pname, "name") == 0)
-                       has_name = 1;
-               l = strlen(pname) + 1;
-               pp = unflatten_dt_alloc(&mem, sizeof(struct property),
-                                       __alignof__(struct property));
-               if (allnextpp) {
-                       if (strcmp(pname, "linux,phandle") == 0) {
-                               np->node = *((u32 *)*p);
-                               if (np->linux_phandle == 0)
-                                       np->linux_phandle = np->node;
-                       }
-                       if (strcmp(pname, "ibm,phandle") == 0)
-                               np->linux_phandle = *((u32 *)*p);
-                       pp->name = pname;
-                       pp->length = sz;
-                       pp->value = (void *)*p;
-                       *prev_pp = pp;
-                       prev_pp = &pp->next;
-               }
-               *p = _ALIGN((*p) + sz, 4);
-       }
-       /* with version 0x10 we may not have the name property; recreate
-        * it here from the unit name if absent
-        */
-       if (!has_name) {
-               char *p = pathp, *ps = pathp, *pa = NULL;
-               int sz;
-
-               while (*p) {
-                       if ((*p) == '@')
-                               pa = p;
-                       if ((*p) == '/')
-                               ps = p + 1;
-                       p++;
-               }
-               if (pa < ps)
-                       pa = p;
-               sz = (pa - ps) + 1;
-               pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz,
-                                       __alignof__(struct property));
-               if (allnextpp) {
-                       pp->name = "name";
-                       pp->length = sz;
-                       pp->value = (unsigned char *)(pp + 1);
-                       *prev_pp = pp;
-                       prev_pp = &pp->next;
-                       memcpy(pp->value, ps, sz - 1);
-                       ((char *)pp->value)[sz - 1] = 0;
-                       DBG("fixed up name for %s -> %s\n", pathp, pp->value);
-               }
-       }
-       if (allnextpp) {
-               *prev_pp = NULL;
-               np->name = get_property(np, "name", NULL);
-               np->type = get_property(np, "device_type", NULL);
-
-               if (!np->name)
-                       np->name = "<NULL>";
-               if (!np->type)
-                       np->type = "<NULL>";
-       }
-       while (tag == OF_DT_BEGIN_NODE) {
-               mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
-               tag = *((u32 *)(*p));
-       }
-       if (tag != OF_DT_END_NODE) {
-               printk("Weird tag at end of node: %x\n", tag);
-               return mem;
-       }
-       *p += 4;
-       return mem;
-}
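When a version 0x10 blob lacks a "name" property, the fixup above derives one from the unit name: the text after the last '/' up to any '@'. A standalone sketch of that derivation, with a made-up unit path:

#include <stdio.h>
#include <string.h>

/* Illustrative only: derive a node "name" from a unit path the way the
 * !has_name fixup above does. */
static void unit_name(const char *pathp, char *out, size_t outlen)
{
        const char *p = pathp, *ps = pathp, *pa = NULL;
        size_t sz;

        while (*p) {
                if (*p == '@')
                        pa = p;
                if (*p == '/')
                        ps = p + 1;
                p++;
        }
        if (pa < ps)
                pa = p;                 /* no unit address after the name */
        sz = (size_t)(pa - ps);
        if (sz >= outlen)
                sz = outlen - 1;
        memcpy(out, ps, sz);
        out[sz] = 0;
}

int main(void)
{
        char name[32];

        unit_name("/pci@800000020000002/ethernet@1", name, sizeof(name));
        printf("%s\n", name);           /* prints "ethernet" */
        return 0;
}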
-
-
-/**
- * unflattens the device-tree passed by the firmware, creating the
- * tree of struct device_node. It also fills the "name" and "type"
- * pointers of the nodes so the normal device-tree walking functions
- * can be used (this used to be done by finish_device_tree)
- */
-void __init unflatten_device_tree(void)
-{
-       unsigned long start, mem, size;
-       struct device_node **allnextp = &allnodes;
-       char *p = NULL;
-       int l = 0;
-
-       DBG(" -> unflatten_device_tree()\n");
-
-       /* First pass, scan for size */
-       start = ((unsigned long)initial_boot_params) +
-               initial_boot_params->off_dt_struct;
-       size = unflatten_dt_node(0, &start, NULL, NULL, 0);
-       size = (size | 3) + 1;
-
-       DBG("  size is %lx, allocating...\n", size);
-
-       /* Allocate memory for the expanded device tree */
-       mem = lmb_alloc(size + 4, __alignof__(struct device_node));
-       if (!mem) {
-               DBG("Couldn't allocate memory with lmb_alloc()!\n");
-               panic("Couldn't allocate memory with lmb_alloc()!\n");
-       }
-       mem = (unsigned long)abs_to_virt(mem);
-
-       ((u32 *)mem)[size / 4] = 0xdeadbeef;
-
-       DBG("  unflattening at %lx...\n", mem);
-
-       /* Second pass, do actual unflattening */
-       start = ((unsigned long)initial_boot_params) +
-               initial_boot_params->off_dt_struct;
-       unflatten_dt_node(mem, &start, NULL, &allnextp, 0);
-       if (*((u32 *)start) != OF_DT_END)
-               printk(KERN_WARNING "Weird tag at end of tree: %08x\n", *((u32 *)start));
-       if (((u32 *)mem)[size / 4] != 0xdeadbeef)
-               printk(KERN_WARNING "End of tree marker overwritten: %08x\n",
-                      ((u32 *)mem)[size / 4] );
-       *allnextp = NULL;
-
-       /* Get pointer to OF "/chosen" node for use everywhere */
-       of_chosen = of_find_node_by_path("/chosen");
-
-       /* Retrieve command line */
-       if (of_chosen != NULL) {
-               p = (char *)get_property(of_chosen, "bootargs", &l);
-               if (p != NULL && l > 0)
-                       strlcpy(cmd_line, p, min(l, COMMAND_LINE_SIZE));
-       }
-#ifdef CONFIG_CMDLINE
-       if (l == 0 || (l == 1 && (*p) == 0))
-               strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
-#endif /* CONFIG_CMDLINE */
-
-       DBG("Command line is: %s\n", cmd_line);
-
-       DBG(" <- unflatten_device_tree()\n");
-}
-
-
-static int __init early_init_dt_scan_cpus(unsigned long node,
-                                         const char *uname, int depth, void *data)
-{
-       char *type = of_get_flat_dt_prop(node, "device_type", NULL);
-       u32 *prop;
-       unsigned long size;
-
-       /* We are scanning "cpu" nodes only */
-       if (type == NULL || strcmp(type, "cpu") != 0)
-               return 0;
-
-       if (initial_boot_params && initial_boot_params->version >= 2) {
-               /* version 2 of the kexec param format adds the phys cpuid
-                * of booted proc.
-                */
-               boot_cpuid_phys = initial_boot_params->boot_cpuid_phys;
-               boot_cpuid = 0;
-       } else {
-               /* Check if it's the boot-cpu; set its hw index in the paca now */
-               if (of_get_flat_dt_prop(node, "linux,boot-cpu", NULL)
-                   != NULL) {
-                       u32 *prop = of_get_flat_dt_prop(node, "reg", NULL);
-                       set_hard_smp_processor_id(0, prop == NULL ? 0 : *prop);
-                       boot_cpuid_phys = get_hard_smp_processor_id(0);
-               }
-       }
-
-#ifdef CONFIG_ALTIVEC
-       /* Check if we have a VMX and, if so, update the CPU features */
-       prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", NULL);
-       if (prop && (*prop) > 0) {
-               cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
-               cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
-       }
-
-       /* Same goes for Apple's "altivec" property */
-       prop = (u32 *)of_get_flat_dt_prop(node, "altivec", NULL);
-       if (prop) {
-               cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
-               cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
-       }
-#endif /* CONFIG_ALTIVEC */
-
-       /*
-        * Check for an SMT capable CPU and set the CPU feature. We do
-        * this by looking at the size of the ibm,ppc-interrupt-server#s
-        * property
-        */
-       prop = (u32 *)of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s",
-                                      &size);
-       cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
-       if (prop && ((size / sizeof(u32)) > 1))
-               cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
-
-       return 0;
-}
-
-static int __init early_init_dt_scan_chosen(unsigned long node,
-                                           const char *uname, int depth, void *data)
-{
-       u32 *prop;
-       u64 *prop64;
-
-       DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
-
-       if (depth != 1 || strcmp(uname, "chosen") != 0)
-               return 0;
-
-       /* get platform type */
-       prop = (u32 *)of_get_flat_dt_prop(node, "linux,platform", NULL);
-       if (prop == NULL)
-               return 0;
-       _machine = *prop;
-
-       /* check if iommu is forced on or off */
-       if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
-               iommu_is_off = 1;
-       if (of_get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
-               iommu_force_on = 1;
-
-       prop64 = (u64*)of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
-       if (prop64)
-               memory_limit = *prop64;
-
-       prop64 = (u64*)of_get_flat_dt_prop(node, "linux,tce-alloc-start",NULL);
-       if (prop64)
-               tce_alloc_start = *prop64;
-
-       prop64 = (u64*)of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
-       if (prop64)
-               tce_alloc_end = *prop64;
-
-#ifdef CONFIG_PPC_RTAS
-       /* To help early debugging via the front panel, we retrieve a minimal
-        * set of RTAS info now if available
-        */
-       {
-               u64 *basep, *entryp;
-
-               basep = (u64*)of_get_flat_dt_prop(node,
-                                                 "linux,rtas-base", NULL);
-               entryp = (u64*)of_get_flat_dt_prop(node,
-                                                  "linux,rtas-entry", NULL);
-               prop = (u32*)of_get_flat_dt_prop(node,
-                                                "linux,rtas-size", NULL);
-               if (basep && entryp && prop) {
-                       rtas.base = *basep;
-                       rtas.entry = *entryp;
-                       rtas.size = *prop;
-               }
-       }
-#endif /* CONFIG_PPC_RTAS */
-
-       /* break now */
-       return 1;
-}
-
-static int __init early_init_dt_scan_root(unsigned long node,
-                                         const char *uname, int depth, void *data)
-{
-       u32 *prop;
-
-       if (depth != 0)
-               return 0;
-
-       prop = (u32 *)of_get_flat_dt_prop(node, "#size-cells", NULL);
-       dt_root_size_cells = (prop == NULL) ? 1 : *prop;
-       DBG("dt_root_size_cells = %x\n", dt_root_size_cells);
-
-       prop = (u32 *)of_get_flat_dt_prop(node, "#address-cells", NULL);
-       dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
-       DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells);
-       
-       /* break now */
-       return 1;
-}
-
-static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp)
-{
-       cell_t *p = *cellp;
-       unsigned long r = 0;
-
-       /* Ignore more than 2 cells */
-       while (s > 2) {
-               p++;
-               s--;
-       }
-       while (s) {
-               r <<= 32;
-               r |= *(p++);
-               s--;
-       }
-
-       *cellp = p;
-       return r;
-}
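dt_mem_next_cell() folds the 32-bit cells of a "reg" entry into a single value, keeping at most the last two cells (so it assumes a 64-bit unsigned long, as on ppc64). A small worked example with hypothetical cell data:

#include <stdio.h>

typedef unsigned int cell_t;

/* Same cell-combining logic as dt_mem_next_cell() above, in userspace
 * for illustration (assumes 64-bit unsigned long). */
static unsigned long next_cell(int s, cell_t **cellp)
{
        cell_t *p = *cellp;
        unsigned long r = 0;

        while (s > 2) {         /* ignore all but the last two cells */
                p++;
                s--;
        }
        while (s) {
                r <<= 32;
                r |= *(p++);
                s--;
        }
        *cellp = p;
        return r;
}

int main(void)
{
        /* hypothetical "reg" with #address-cells = 2, #size-cells = 1:
         * base 0x1_00000000, size 0x10000000 */
        cell_t reg[] = { 0x1, 0x0, 0x10000000 };
        cell_t *p = reg;
        unsigned long base = next_cell(2, &p);
        unsigned long size = next_cell(1, &p);

        printf("base %#lx size %#lx\n", base, size);
        return 0;
}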
-
-
-static int __init early_init_dt_scan_memory(unsigned long node,
-                                           const char *uname, int depth, void *data)
-{
-       char *type = of_get_flat_dt_prop(node, "device_type", NULL);
-       cell_t *reg, *endp;
-       unsigned long l;
-
-       /* We are scanning "memory" nodes only */
-       if (type == NULL || strcmp(type, "memory") != 0)
-               return 0;
-
-       reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l);
-       if (reg == NULL)
-               return 0;
-
-       endp = reg + (l / sizeof(cell_t));
-
-       DBG("memory scan node %s ..., reg size %ld, data: %x %x %x %x, ...\n",
-           uname, l, reg[0], reg[1], reg[2], reg[3]);
-
-       while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
-               unsigned long base, size;
-
-               base = dt_mem_next_cell(dt_root_addr_cells, &reg);
-               size = dt_mem_next_cell(dt_root_size_cells, &reg);
-
-               if (size == 0)
-                       continue;
-               DBG(" - %lx ,  %lx\n", base, size);
-               if (iommu_is_off) {
-                       if (base >= 0x80000000ul)
-                               continue;
-                       if ((base + size) > 0x80000000ul)
-                               size = 0x80000000ul - base;
-               }
-               lmb_add(base, size);
-       }
-       return 0;
-}
-
-static void __init early_reserve_mem(void)
-{
-       u64 base, size;
-       u64 *reserve_map = (u64 *)(((unsigned long)initial_boot_params) +
-                                  initial_boot_params->off_mem_rsvmap);
-       while (1) {
-               base = *(reserve_map++);
-               size = *(reserve_map++);
-               if (size == 0)
-                       break;
-               DBG("reserving: %lx -> %lx\n", base, size);
-               lmb_reserve(base, size);
-       }
-
-#if 0
-       DBG("memory reserved, lmbs :\n");
-       lmb_dump_all();
-#endif
-}
-
-void __init early_init_devtree(void *params)
-{
-       DBG(" -> early_init_devtree()\n");
-
-       /* Setup flat device-tree pointer */
-       initial_boot_params = params;
-
-       /* Retrieve various pieces of information from the /chosen node of the
-        * device-tree, including the platform type, initrd location and
-        * size, TCE reserve, and more ...
-        */
-       of_scan_flat_dt(early_init_dt_scan_chosen, NULL);
-
-       /* Scan memory nodes and rebuild LMBs */
-       lmb_init();
-       of_scan_flat_dt(early_init_dt_scan_root, NULL);
-       of_scan_flat_dt(early_init_dt_scan_memory, NULL);
-       lmb_enforce_memory_limit(memory_limit);
-       lmb_analyze();
-       lmb_reserve(0, __pa(klimit));
-
-       /* Reserve LMB regions used by kernel, initrd, dt, etc... */
-       early_reserve_mem();
-
-       DBG("Scanning CPUs ...\n");
-
-       /* Retrieve the hash table size from the flattened tree, plus other
-        * CPU-related information (altivec support, boot CPU ID, ...)
-        */
-       of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
-
-       DBG(" <- early_init_devtree()\n");
-}
-
-#undef printk
-
-int
-prom_n_addr_cells(struct device_node* np)
-{
-       int* ip;
-       do {
-               if (np->parent)
-                       np = np->parent;
-               ip = (int *) get_property(np, "#address-cells", NULL);
-               if (ip != NULL)
-                       return *ip;
-       } while (np->parent);
-       /* No #address-cells property for the root node, default to 1 */
-       return 1;
-}
-EXPORT_SYMBOL_GPL(prom_n_addr_cells);
-
-int
-prom_n_size_cells(struct device_node* np)
-{
-       int* ip;
-       do {
-               if (np->parent)
-                       np = np->parent;
-               ip = (int *) get_property(np, "#size-cells", NULL);
-               if (ip != NULL)
-                       return *ip;
-       } while (np->parent);
-       /* No #size-cells property for the root node, default to 1 */
-       return 1;
-}
-EXPORT_SYMBOL_GPL(prom_n_size_cells);
-
-/**
- * Work out the sense (active-low level / active-high edge)
- * of each interrupt from the device tree.
- */
-void __init prom_get_irq_senses(unsigned char *senses, int off, int max)
-{
-       struct device_node *np;
-       int i, j;
-
-       /* default to level-triggered */
-       memset(senses, 1, max - off);
-
-       for (np = allnodes; np != 0; np = np->allnext) {
-               for (j = 0; j < np->n_intrs; j++) {
-                       i = np->intrs[j].line;
-                       if (i >= off && i < max)
-                               senses[i-off] = np->intrs[j].sense ?
-                                       IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE :
-                                       IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE;
-               }
-       }
-}
-
-/**
- * Construct and return a list of the device_nodes with a given name.
- */
-struct device_node *
-find_devices(const char *name)
-{
-       struct device_node *head, **prevp, *np;
-
-       prevp = &head;
-       for (np = allnodes; np != 0; np = np->allnext) {
-               if (np->name != 0 && strcasecmp(np->name, name) == 0) {
-                       *prevp = np;
-                       prevp = &np->next;
-               }
-       }
-       *prevp = NULL;
-       return head;
-}
-EXPORT_SYMBOL(find_devices);
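find_devices() and the similar helpers below chain their matches through the ->next field using a pointer-to-pointer cursor, which avoids special-casing the first match. A minimal userspace sketch of that pattern; struct node and collect() are hypothetical:

#include <stdio.h>
#include <string.h>

/* Illustrative only: the pointer-to-pointer list-building pattern used
 * by find_devices()/find_type_devices() above. */
struct node {
        const char *name;
        struct node *next;
};

static struct node *collect(struct node *all, int n, const char *name)
{
        struct node *head, **prevp = &head;

        for (int i = 0; i < n; i++) {
                if (strcmp(all[i].name, name) == 0) {
                        *prevp = &all[i];
                        prevp = &all[i].next;
                }
        }
        *prevp = NULL;          /* terminate the chain */
        return head;
}

int main(void)
{
        struct node nodes[] = {
                { "serial", NULL }, { "ethernet", NULL }, { "serial", NULL },
        };

        for (struct node *np = collect(nodes, 3, "serial"); np; np = np->next)
                printf("%s\n", np->name);
        return 0;
}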
-
-/**
- * Construct and return a list of the device_nodes with a given type.
- */
-struct device_node *
-find_type_devices(const char *type)
-{
-       struct device_node *head, **prevp, *np;
-
-       prevp = &head;
-       for (np = allnodes; np != 0; np = np->allnext) {
-               if (np->type != 0 && strcasecmp(np->type, type) == 0) {
-                       *prevp = np;
-                       prevp = &np->next;
-               }
-       }
-       *prevp = NULL;
-       return head;
-}
-EXPORT_SYMBOL(find_type_devices);
-
-/**
- * Returns all nodes linked together
- */
-struct device_node *
-find_all_nodes(void)
-{
-       struct device_node *head, **prevp, *np;
-
-       prevp = &head;
-       for (np = allnodes; np != 0; np = np->allnext) {
-               *prevp = np;
-               prevp = &np->next;
-       }
-       *prevp = NULL;
-       return head;
-}
-EXPORT_SYMBOL(find_all_nodes);
-
-/** Checks if the given "compat" string matches one of the strings in
- * the device's "compatible" property
- */
-int
-device_is_compatible(struct device_node *device, const char *compat)
-{
-       const char* cp;
-       int cplen, l;
-
-       cp = (char *) get_property(device, "compatible", &cplen);
-       if (cp == NULL)
-               return 0;
-       while (cplen > 0) {
-               if (strncasecmp(cp, compat, strlen(compat)) == 0)
-                       return 1;
-               l = strlen(cp) + 1;
-               cp += l;
-               cplen -= l;
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL(device_is_compatible);
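A "compatible" property is a list of NUL-separated strings, and device_is_compatible() walks it entry by entry. The sketch below mirrors that walk in userspace with a made-up property blob (the "chrp,iic" value is borrowed from the interrupt code above):

#include <stdio.h>
#include <string.h>
#include <strings.h>

/* Illustrative only: walk a NUL-separated "compatible" blob the way
 * device_is_compatible() does. The property data here is made up. */
static int blob_is_compatible(const char *cp, int cplen, const char *compat)
{
        while (cplen > 0) {
                int l;

                if (strncasecmp(cp, compat, strlen(compat)) == 0)
                        return 1;
                l = strlen(cp) + 1;
                cp += l;
                cplen -= l;
        }
        return 0;
}

int main(void)
{
        static const char prop[] = "chrp,iic\0pnpPNP,000";

        printf("%d\n", blob_is_compatible(prop, sizeof(prop), "chrp,iic"));
        return 0;
}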
-
-
-/**
- * Indicates whether the root node has a given value in its
- * compatible property.
- */
-int
-machine_is_compatible(const char *compat)
-{
-       struct device_node *root;
-       int rc = 0;
-
-       root = of_find_node_by_path("/");
-       if (root) {
-               rc = device_is_compatible(root, compat);
-               of_node_put(root);
-       }
-       return rc;
-}
-EXPORT_SYMBOL(machine_is_compatible);
-
-/**
- * Construct and return a list of the device_nodes with a given type
- * and compatible property.
- */
-struct device_node *
-find_compatible_devices(const char *type, const char *compat)
-{
-       struct device_node *head, **prevp, *np;
-
-       prevp = &head;
-       for (np = allnodes; np != 0; np = np->allnext) {
-               if (type != NULL
-                   && !(np->type != 0 && strcasecmp(np->type, type) == 0))
-                       continue;
-               if (device_is_compatible(np, compat)) {
-                       *prevp = np;
-                       prevp = &np->next;
-               }
-       }
-       *prevp = NULL;
-       return head;
-}
-EXPORT_SYMBOL(find_compatible_devices);
-
-/**
- * Find the device_node with a given full_name.
- */
-struct device_node *
-find_path_device(const char *path)
-{
-       struct device_node *np;
-
-       for (np = allnodes; np != 0; np = np->allnext)
-               if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0)
-                       return np;
-       return NULL;
-}
-EXPORT_SYMBOL(find_path_device);
-
-/*******
- *
- * New implementation of the OF "find" APIs: they return a refcounted
- * object, so call of_node_put() when done.  The device tree and list
- * are protected by a rw_lock.
- *
- * Note that property management will need some locking as well;
- * this isn't dealt with yet.
- *
- *******/
-
-/**
- *     of_find_node_by_name - Find a node by its "name" property
- *     @from:  The node to start searching from or NULL, the node
- *             you pass will not be searched, only the next one
- *             will; typically, you pass what the previous call
- *             returned. of_node_put() will be called on it
- *     @name:  The name string to match against
- *
- *     Returns a node pointer with refcount incremented, use
- *     of_node_put() on it when done.
- */
-struct device_node *of_find_node_by_name(struct device_node *from,
-       const char *name)
-{
-       struct device_node *np;
-
-       read_lock(&devtree_lock);
-       np = from ? from->allnext : allnodes;
-       for (; np != 0; np = np->allnext)
-               if (np->name != 0 && strcasecmp(np->name, name) == 0
-                   && of_node_get(np))
-                       break;
-       if (from)
-               of_node_put(from);
-       read_unlock(&devtree_lock);
-       return np;
-}
-EXPORT_SYMBOL(of_find_node_by_name);
-
-/**
- *     of_find_node_by_type - Find a node by its "device_type" property
- *     @from:  The node to start searching from or NULL, the node
- *             you pass will not be searched, only the next one
- *             will; typically, you pass what the previous call
- *             returned. of_node_put() will be called on it
- *     @name:  The type string to match against
- *
- *     Returns a node pointer with refcount incremented, use
- *     of_node_put() on it when done.
- */
-struct device_node *of_find_node_by_type(struct device_node *from,
-       const char *type)
-{
-       struct device_node *np;
-
-       read_lock(&devtree_lock);
-       np = from ? from->allnext : allnodes;
-       for (; np != 0; np = np->allnext)
-               if (np->type != 0 && strcasecmp(np->type, type) == 0
-                   && of_node_get(np))
-                       break;
-       if (from)
-               of_node_put(from);
-       read_unlock(&devtree_lock);
-       return np;
-}
-EXPORT_SYMBOL(of_find_node_by_type);
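A short sketch of the iteration pattern these refcounted finders support, assuming a caller scanning all "cpu" nodes: each call drops the reference on the node passed in and returns the next match with its refcount raised.

	struct device_node *np = NULL;

	while ((np = of_find_node_by_type(np, "cpu")) != NULL) {
		/* use np here; its reference is dropped by the next call */
	}
	/* if the loop exits early via break, call of_node_put(np) */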
-
-/**
- *     of_find_compatible_node - Find a node based on type and one of the
- *                                tokens in its "compatible" property
- *     @from:          The node to start searching from or NULL, the node
- *                     you pass will not be searched, only the next one
- *                     will; typically, you pass what the previous call
- *                     returned. of_node_put() will be called on it
- *     @type:          The type string to match "device_type" or NULL to ignore
- *     @compatible:    The string to match to one of the tokens in the device
- *                     "compatible" list.
- *
- *     Returns a node pointer with refcount incremented, use
- *     of_node_put() on it when done.
- */
-struct device_node *of_find_compatible_node(struct device_node *from,
-       const char *type, const char *compatible)
-{
-       struct device_node *np;
-
-       read_lock(&devtree_lock);
-       np = from ? from->allnext : allnodes;
-       for (; np != 0; np = np->allnext) {
-               if (type != NULL
-                   && !(np->type != 0 && strcasecmp(np->type, type) == 0))
-                       continue;
-               if (device_is_compatible(np, compatible) && of_node_get(np))
-                       break;
-       }
-       if (from)
-               of_node_put(from);
-       read_unlock(&devtree_lock);
-       return np;
-}
-EXPORT_SYMBOL(of_find_compatible_node);
-
-/**
- *     of_find_node_by_path - Find a node matching a full OF path
- *     @path:  The full path to match
- *
- *     Returns a node pointer with refcount incremented, use
- *     of_node_put() on it when done.
- */
-struct device_node *of_find_node_by_path(const char *path)
-{
-       struct device_node *np = allnodes;
-
-       read_lock(&devtree_lock);
-       for (; np != 0; np = np->allnext) {
-               if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0
-                   && of_node_get(np))
-                       break;
-       }
-       read_unlock(&devtree_lock);
-       return np;
-}
-EXPORT_SYMBOL(of_find_node_by_path);
-
-/**
- *     of_find_node_by_phandle - Find a node given a phandle
- *     @handle:        phandle of the node to find
- *
- *     Returns a node pointer with refcount incremented, use
- *     of_node_put() on it when done.
- */
-struct device_node *of_find_node_by_phandle(phandle handle)
-{
-       struct device_node *np;
-
-       read_lock(&devtree_lock);
-       for (np = allnodes; np != 0; np = np->allnext)
-               if (np->linux_phandle == handle)
-                       break;
-       if (np)
-               of_node_get(np);
-       read_unlock(&devtree_lock);
-       return np;
-}
-EXPORT_SYMBOL(of_find_node_by_phandle);
-
-/**
- *     of_find_all_nodes - Get next node in global list
- *     @prev:  Previous node or NULL to start iteration
- *             of_node_put() will be called on it
- *
- *     Returns a node pointer with refcount incremented, use
- *     of_node_put() on it when done.
- */
-struct device_node *of_find_all_nodes(struct device_node *prev)
-{
-       struct device_node *np;
-
-       read_lock(&devtree_lock);
-       np = prev ? prev->allnext : allnodes;
-       for (; np != 0; np = np->allnext)
-               if (of_node_get(np))
-                       break;
-       if (prev)
-               of_node_put(prev);
-       read_unlock(&devtree_lock);
-       return np;
-}
-EXPORT_SYMBOL(of_find_all_nodes);
-
-/**
- *     of_get_parent - Get a node's parent if any
- *     @node:  Node to get parent
- *
- *     Returns a node pointer with refcount incremented, use
- *     of_node_put() on it when done.
- */
-struct device_node *of_get_parent(const struct device_node *node)
-{
-       struct device_node *np;
-
-       if (!node)
-               return NULL;
-
-       read_lock(&devtree_lock);
-       np = of_node_get(node->parent);
-       read_unlock(&devtree_lock);
-       return np;
-}
-EXPORT_SYMBOL(of_get_parent);
-
-/**
- *     of_get_next_child - Iterate over a node's children
- *     @node:  parent node
- *     @prev:  previous child of the parent node, or NULL to get first
- *
- *     Returns a node pointer with refcount incremented, use
- *     of_node_put() on it when done.
- */
-struct device_node *of_get_next_child(const struct device_node *node,
-       struct device_node *prev)
-{
-       struct device_node *next;
-
-       read_lock(&devtree_lock);
-       next = prev ? prev->sibling : node->child;
-       for (; next != 0; next = next->sibling)
-               if (of_node_get(next))
-                       break;
-       if (prev)
-               of_node_put(prev);
-       read_unlock(&devtree_lock);
-       return next;
-}
-EXPORT_SYMBOL(of_get_next_child);
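Walking a node's children follows the same pass-the-previous-node convention; a sketch, assuming parent already points at a valid node:

	struct device_node *child = NULL;

	while ((child = of_get_next_child(parent, child)) != NULL) {
		/* each iteration holds a reference on child; it is released
		 * by the next call (or by of_node_put() on early exit) */
	}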
-
-/**
- *     of_node_get - Increment refcount of a node
- *     @node:  Node to inc refcount, NULL is supported to
- *             simplify writing of callers
- *
- *     Returns node.
- */
-struct device_node *of_node_get(struct device_node *node)
-{
-       if (node)
-               kref_get(&node->kref);
-       return node;
-}
-EXPORT_SYMBOL(of_node_get);
-
-static inline struct device_node * kref_to_device_node(struct kref *kref)
-{
-       return container_of(kref, struct device_node, kref);
-}
-
-/**
- *     of_node_release - release a dynamically allocated node
- *     @kref:  kref element of the node to be released
- *
- *     In of_node_put() this function is passed to kref_put()
- *     as the destructor.
- */
-static void of_node_release(struct kref *kref)
-{
-       struct device_node *node = kref_to_device_node(kref);
-       struct property *prop = node->properties;
-
-       if (!OF_IS_DYNAMIC(node))
-               return;
-       while (prop) {
-               struct property *next = prop->next;
-               kfree(prop->name);
-               kfree(prop->value);
-               kfree(prop);
-               prop = next;
-       }
-       kfree(node->intrs);
-       kfree(node->addrs);
-       kfree(node->full_name);
-       kfree(node->data);
-       kfree(node);
-}
-
-/**
- *     of_node_put - Decrement refcount of a node
- *     @node:  Node to dec refcount, NULL is supported to
- *             simplify writing of callers
- *
- */
-void of_node_put(struct device_node *node)
-{
-       if (node)
-               kref_put(&node->kref, of_node_release);
-}
-EXPORT_SYMBOL(of_node_put);
-
-/*
- * Fix up the uninitialized fields in a new device node:
- * name, type, n_addrs, addrs, n_intrs, intrs, and pci-specific fields
- *
- * A lot of boot-time code is duplicated here, because functions such
- * as finish_node_interrupts, interpret_pci_props, etc. cannot use the
- * slab allocator.
- *
- * This should probably be split up into smaller chunks.
- */
-
-static int of_finish_dynamic_node(struct device_node *node,
-                                 unsigned long *unused1, int unused2,
-                                 int unused3, int unused4)
-{
-       struct device_node *parent = of_get_parent(node);
-       int err = 0;
-       phandle *ibm_phandle;
-
-       node->name = get_property(node, "name", NULL);
-       node->type = get_property(node, "device_type", NULL);
-
-       if (!parent) {
-               err = -ENODEV;
-               goto out;
-       }
-
-       /* We don't support that function on PowerMac, at least
-        * not yet
-        */
-       if (_machine == PLATFORM_POWERMAC)
-               return -ENODEV;
-
-       /* fix up new node's linux_phandle field */
-       if ((ibm_phandle = (unsigned int *)get_property(node, "ibm,phandle", NULL)))
-               node->linux_phandle = *ibm_phandle;
-
-out:
-       of_node_put(parent);
-       return err;
-}
-
-/*
- * Plug a device node into the tree and global list.
- */
-void of_attach_node(struct device_node *np)
-{
-       write_lock(&devtree_lock);
-       np->sibling = np->parent->child;
-       np->allnext = allnodes;
-       np->parent->child = np;
-       allnodes = np;
-       write_unlock(&devtree_lock);
-}
-
-/*
- * "Unplug" a node from the device tree.  The caller must hold
- * a reference to the node.  The memory associated with the node
- * is not freed until its refcount goes to zero.
- */
-void of_detach_node(const struct device_node *np)
-{
-       struct device_node *parent;
-
-       write_lock(&devtree_lock);
-
-       parent = np->parent;
-
-       if (allnodes == np)
-               allnodes = np->allnext;
-       else {
-               struct device_node *prev;
-               for (prev = allnodes;
-                    prev->allnext != np;
-                    prev = prev->allnext)
-                       ;
-               prev->allnext = np->allnext;
-       }
-
-       if (parent->child == np)
-               parent->child = np->sibling;
-       else {
-               struct device_node *prevsib;
-               for (prevsib = np->parent->child;
-                    prevsib->sibling != np;
-                    prevsib = prevsib->sibling)
-                       ;
-               prevsib->sibling = np->sibling;
-       }
-
-       write_unlock(&devtree_lock);
-}
-
-static int prom_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *node)
-{
-       int err;
-
-       switch (action) {
-       case PSERIES_RECONFIG_ADD:
-               err = finish_node(node, NULL, of_finish_dynamic_node, 0, 0, 0);
-               if (err < 0) {
-                       printk(KERN_ERR "finish_node returned %d\n", err);
-                       err = NOTIFY_BAD;
-               }
-               break;
-       default:
-               err = NOTIFY_DONE;
-               break;
-       }
-       return err;
-}
-
-static struct notifier_block prom_reconfig_nb = {
-       .notifier_call = prom_reconfig_notifier,
-       .priority = 10, /* This one needs to run first */
-};
-
-static int __init prom_reconfig_setup(void)
-{
-       return pSeries_reconfig_notifier_register(&prom_reconfig_nb);
-}
-__initcall(prom_reconfig_setup);
-
-/*
- * Find a property with a given name for a given node
- * and return the value.
- */
-unsigned char *
-get_property(struct device_node *np, const char *name, int *lenp)
-{
-       struct property *pp;
-
-       for (pp = np->properties; pp != 0; pp = pp->next)
-               if (strcmp(pp->name, name) == 0) {
-                       if (lenp != 0)
-                               *lenp = pp->length;
-                       return pp->value;
-               }
-       return NULL;
-}
-EXPORT_SYMBOL(get_property);
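A short sketch of typical get_property() use, here reading the first cell of a node's "reg" property (variable names are illustrative only, and np is assumed to be a valid node):

	int len;
	unsigned int *reg;

	reg = (unsigned int *)get_property(np, "reg", &len);
	if (reg != NULL && len >= sizeof(*reg))
		printk(KERN_DEBUG "first reg cell: 0x%x\n", *reg);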
-
-/*
- * Add a property to a node.
- */
-int
-prom_add_property(struct device_node* np, struct property* prop)
-{
-       struct property **next;
-
-       prop->next = NULL;      
-       write_lock(&devtree_lock);
-       next = &np->properties;
-       while (*next) {
-               if (strcmp(prop->name, (*next)->name) == 0) {
-                       /* duplicate ! don't insert it */
-                       write_unlock(&devtree_lock);
-                       return -1;
-               }
-               next = &(*next)->next;
-       }
-       *next = prop;
-       write_unlock(&devtree_lock);
-
-       /* try to add to proc as well if it was initialized */
-       if (np->pde)
-               proc_device_tree_add_prop(np->pde, prop);
-
-       return 0;
-}
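A hedged sketch of adding a property with prom_add_property(); the property name and value are made up, np is assumed valid, and the struct property must stay allocated for the life of the node since the tree keeps a pointer to it.

	static struct property example_prop = {
		.name   = "linux,example",                /* hypothetical name */
		.length = sizeof("example-value"),
		.value  = (void *)"example-value",
	};

	if (prom_add_property(np, &example_prop) != 0)
		printk(KERN_WARNING "property already exists\n");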
-
-#if 0
-void
-print_properties(struct device_node *np)
-{
-       struct property *pp;
-       char *cp;
-       int i, n;
-
-       for (pp = np->properties; pp != 0; pp = pp->next) {
-               printk(KERN_INFO "%s", pp->name);
-               for (i = strlen(pp->name); i < 16; ++i)
-                       printk(" ");
-               cp = (char *) pp->value;
-               for (i = pp->length; i > 0; --i, ++cp)
-                       if ((i > 1 && (*cp < 0x20 || *cp > 0x7e))
-                           || (i == 1 && *cp != 0))
-                               break;
-               if (i == 0 && pp->length > 1) {
-                       /* looks like a string */
-                       printk(" %s\n", (char *) pp->value);
-               } else {
-                       /* dump it in hex */
-                       n = pp->length;
-                       if (n > 64)
-                               n = 64;
-                       if (pp->length % 4 == 0) {
-                               unsigned int *p = (unsigned int *) pp->value;
-
-                               n /= 4;
-                               for (i = 0; i < n; ++i) {
-                                       if (i != 0 && (i % 4) == 0)
-                                               printk("\n                ");
-                                       printk(" %08x", *p++);
-                               }
-                       } else {
-                               unsigned char *bp = pp->value;
-
-                               for (i = 0; i < n; ++i) {
-                                       if (i != 0 && (i % 16) == 0)
-                                               printk("\n                ");
-                                       printk(" %02x", *bp++);
-                               }
-                       }
-                       printk("\n");
-                       if (pp->length > 64)
-                               printk("                 ... (length = %d)\n",
-                                      pp->length);
-               }
-       }
-}
-#endif
-
-
-
-
-
-
-
-
-
-
diff --git a/arch/ppc64/kernel/prom_init.c b/arch/ppc64/kernel/prom_init.c
deleted file mode 100644 (file)
index 6375f40..0000000
+++ /dev/null
@@ -1,2051 +0,0 @@
-/*
- * 
- *
- * Procedures for interfacing to Open Firmware.
- *
- * Paul Mackerras      August 1996.
- * Copyright (C) 1996 Paul Mackerras.
- * 
- *  Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
- *    {engebret|bergner}@us.ibm.com 
- *
- *      This program is free software; you can redistribute it and/or
- *      modify it under the terms of the GNU General Public License
- *      as published by the Free Software Foundation; either version
- *      2 of the License, or (at your option) any later version.
- */
-
-#undef DEBUG_PROM
-
-#include <stdarg.h>
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/threads.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/proc_fs.h>
-#include <linux/stringify.h>
-#include <linux/delay.h>
-#include <linux/initrd.h>
-#include <linux/bitops.h>
-#include <asm/prom.h>
-#include <asm/rtas.h>
-#include <asm/abs_addr.h>
-#include <asm/page.h>
-#include <asm/processor.h>
-#include <asm/irq.h>
-#include <asm/io.h>
-#include <asm/smp.h>
-#include <asm/system.h>
-#include <asm/mmu.h>
-#include <asm/pgtable.h>
-#include <asm/pci.h>
-#include <asm/iommu.h>
-#include <asm/btext.h>
-#include <asm/sections.h>
-#include <asm/machdep.h>
-
-#ifdef CONFIG_LOGO_LINUX_CLUT224
-#include <linux/linux_logo.h>
-extern const struct linux_logo logo_linux_clut224;
-#endif
-
-/*
- * Properties whose value is longer than this get excluded from our
- * copy of the device tree. This value does need to be big enough to
- * ensure that we don't lose things like the interrupt-map property
- * on a PCI-PCI bridge.
- */
-#define MAX_PROPERTY_LENGTH    (1UL * 1024 * 1024)
-
-/*
- * Eventually bump that one up
- */
-#define DEVTREE_CHUNK_SIZE     0x100000
-
-/*
- * This is the size of the local memory reserve map that gets copied
- * into the boot params passed to the kernel. That size is totally
- * flexible as the kernel just reads the list until it encounters an
- * entry with size 0, so it can be changed without breaking binary
- * compatibility
- */
-#define MEM_RESERVE_MAP_SIZE   8
-
-/*
- * prom_init() is called very early on, before the kernel text
- * and data have been mapped to KERNELBASE.  At this point the code
- * is running at whatever address it has been loaded at, so
- * references to extern and static variables must be relocated
- * explicitly.  The procedure reloc_offset() returns the address
- * we're currently running at minus the address we were linked at.
- * (Note that strings count as static variables.)
- *
- * Because OF may have mapped I/O devices into the area starting at
- * KERNELBASE, particularly on CHRP machines, we can't safely call
- * OF once the kernel has been mapped to KERNELBASE.  Therefore all
- * OF calls should be done within prom_init(), and prom_init()
- * and all routines called within it must be careful to relocate
- * references as necessary.
- *
- * Note that the bss is cleared *after* prom_init runs, so we have
- * to make sure that any static or extern variables it accesses
- * are put in the data segment.
- */
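A minimal sketch of that relocation idiom, using a hypothetical flag: every static access in this environment goes through RELOC()/PTRRELOC(), both of which rely on a local offset obtained from reloc_offset().

	static int __initdata example_flag;	/* hypothetical; lives in a data section, not .bss */

	static void __init example_use(void)
	{
		unsigned long offset = reloc_offset();	/* run address minus link address */

		RELOC(example_flag) = 1;		/* never touch example_flag directly */
		prom_printf("flag lives at %x\n",
			    (unsigned long)PTRRELOC(&example_flag));
	}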
-
-
-#define PROM_BUG() do {                                                \
-        prom_printf("kernel BUG at %s line 0x%x!\n",           \
-                   RELOC(__FILE__), __LINE__);                 \
-        __asm__ __volatile__(".long " BUG_ILLEGAL_INSTR);      \
-} while (0)
-
-#ifdef DEBUG_PROM
-#define prom_debug(x...)       prom_printf(x)
-#else
-#define prom_debug(x...)
-#endif
-
-
-typedef u32 prom_arg_t;
-
-struct prom_args {
-        u32 service;
-        u32 nargs;
-        u32 nret;
-        prom_arg_t args[10];
-        prom_arg_t *rets;     /* Pointer to return values, following args[nargs]. */
-};
-
-struct prom_t {
-       unsigned long entry;
-       ihandle root;
-       ihandle chosen;
-       int cpu;
-       ihandle stdout;
-       ihandle disp_node;
-       struct prom_args args;
-       unsigned long version;
-       unsigned long root_size_cells;
-       unsigned long root_addr_cells;
-};
-
-struct pci_reg_property {
-       struct pci_address addr;
-       u32 size_hi;
-       u32 size_lo;
-};
-
-struct mem_map_entry {
-       u64     base;
-       u64     size;
-};
-
-typedef u32 cell_t;
-
-extern void __start(unsigned long r3, unsigned long r4, unsigned long r5);
-
-extern void enter_prom(struct prom_args *args, unsigned long entry);
-extern void copy_and_flush(unsigned long dest, unsigned long src,
-                          unsigned long size, unsigned long offset);
-
-extern unsigned long klimit;
-
-/* prom structure */
-static struct prom_t __initdata prom;
-
-#define PROM_SCRATCH_SIZE 256
-
-static char __initdata of_stdout_device[256];
-static char __initdata prom_scratch[PROM_SCRATCH_SIZE];
-
-static unsigned long __initdata dt_header_start;
-static unsigned long __initdata dt_struct_start, dt_struct_end;
-static unsigned long __initdata dt_string_start, dt_string_end;
-
-static unsigned long __initdata prom_initrd_start, prom_initrd_end;
-
-static int __initdata iommu_force_on;
-static int __initdata ppc64_iommu_off;
-static int __initdata of_platform;
-
-static char __initdata prom_cmd_line[COMMAND_LINE_SIZE];
-
-static unsigned long __initdata prom_memory_limit;
-static unsigned long __initdata prom_tce_alloc_start;
-static unsigned long __initdata prom_tce_alloc_end;
-
-static unsigned long __initdata alloc_top;
-static unsigned long __initdata alloc_top_high;
-static unsigned long __initdata alloc_bottom;
-static unsigned long __initdata rmo_top;
-static unsigned long __initdata ram_top;
-
-static struct mem_map_entry __initdata mem_reserve_map[MEM_RESERVE_MAP_SIZE];
-static int __initdata mem_reserve_cnt;
-
-static cell_t __initdata regbuf[1024];
-
-
-#define MAX_CPU_THREADS 2
-
-/* TO GO */
-#ifdef CONFIG_HMT
-struct {
-       unsigned int pir;
-       unsigned int threadid;
-} hmt_thread_data[NR_CPUS];
-#endif /* CONFIG_HMT */
-
-/*
- * These are used in calls to call_prom.  The 4th and following
- * arguments to call_prom should be 32-bit values.  64 bit values
- * are truncated to 32 bits (and fortunately don't get interpreted
- * as two arguments).
- */
-#define ADDR(x)                (u32) ((unsigned long)(x) - offset)
-
-/*
- * Error results ... some OF calls will return "-1" on error, some
- * will return 0, some will return either. To simplify, here are
- * macros to use with any ihandle or phandle return value to check if
- * it is valid
- */
-
-#define PROM_ERROR             (-1u)
-#define PHANDLE_VALID(p)       ((p) != 0 && (p) != PROM_ERROR)
-#define IHANDLE_VALID(i)       ((i) != 0 && (i) != PROM_ERROR)
-
-
-/* This is the one and *ONLY* place where we actually call open
- * firmware from, since we need to make sure we're running in 32b
- * mode when we do.  We switch back to 64b mode upon return.
- */
-
-static int __init call_prom(const char *service, int nargs, int nret, ...)
-{
-       int i;
-       unsigned long offset = reloc_offset();
-       struct prom_t *_prom = PTRRELOC(&prom);
-       va_list list;
-
-       _prom->args.service = ADDR(service);
-       _prom->args.nargs = nargs;
-       _prom->args.nret = nret;
-       _prom->args.rets = (prom_arg_t *)&(_prom->args.args[nargs]);
-
-       va_start(list, nret);
-       for (i=0; i < nargs; i++)
-               _prom->args.args[i] = va_arg(list, prom_arg_t);
-       va_end(list);
-
-       for (i=0; i < nret ;i++)
-               _prom->args.rets[i] = 0;
-
-       enter_prom(&_prom->args, _prom->entry);
-
-       return (nret > 0) ? _prom->args.rets[0] : 0;
-}
-
-
-static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
-                               unsigned long align)
-{
-       return (unsigned int)call_prom("claim", 3, 1,
-                                      (prom_arg_t)virt, (prom_arg_t)size,
-                                      (prom_arg_t)align);
-}
-
-static void __init prom_print(const char *msg)
-{
-       const char *p, *q;
-       unsigned long offset = reloc_offset();
-       struct prom_t *_prom = PTRRELOC(&prom);
-
-       if (_prom->stdout == 0)
-               return;
-
-       for (p = msg; *p != 0; p = q) {
-               for (q = p; *q != 0 && *q != '\n'; ++q)
-                       ;
-               if (q > p)
-                       call_prom("write", 3, 1, _prom->stdout, p, q - p);
-               if (*q == 0)
-                       break;
-               ++q;
-               call_prom("write", 3, 1, _prom->stdout, ADDR("\r\n"), 2);
-       }
-}
-
-
-static void __init prom_print_hex(unsigned long val)
-{
-       unsigned long offset = reloc_offset();
-       int i, nibbles = sizeof(val)*2;
-       char buf[sizeof(val)*2+1];
-       struct prom_t *_prom = PTRRELOC(&prom);
-
-       for (i = nibbles-1;  i >= 0;  i--) {
-               buf[i] = (val & 0xf) + '0';
-               if (buf[i] > '9')
-                       buf[i] += ('a'-'0'-10);
-               val >>= 4;
-       }
-       buf[nibbles] = '\0';
-       call_prom("write", 3, 1, _prom->stdout, buf, nibbles);
-}
-
-
-static void __init prom_printf(const char *format, ...)
-{
-       unsigned long offset = reloc_offset();
-       const char *p, *q, *s;
-       va_list args;
-       unsigned long v;
-       struct prom_t *_prom = PTRRELOC(&prom);
-
-       va_start(args, format);
-       for (p = PTRRELOC(format); *p != 0; p = q) {
-               for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
-                       ;
-               if (q > p)
-                       call_prom("write", 3, 1, _prom->stdout, p, q - p);
-               if (*q == 0)
-                       break;
-               if (*q == '\n') {
-                       ++q;
-                       call_prom("write", 3, 1, _prom->stdout,
-                                 ADDR("\r\n"), 2);
-                       continue;
-               }
-               ++q;
-               if (*q == 0)
-                       break;
-               switch (*q) {
-               case 's':
-                       ++q;
-                       s = va_arg(args, const char *);
-                       prom_print(s);
-                       break;
-               case 'x':
-                       ++q;
-                       v = va_arg(args, unsigned long);
-                       prom_print_hex(v);
-                       break;
-               }
-       }
-}
-
-
-static void __init __attribute__((noreturn)) prom_panic(const char *reason)
-{
-       unsigned long offset = reloc_offset();
-
-       prom_print(PTRRELOC(reason));
-       /* ToDo: should put up an SRC here */
-       call_prom("exit", 0, 0);
-
-       for (;;)                        /* should never get here */
-               ;
-}
-
-
-static int __init prom_next_node(phandle *nodep)
-{
-       phandle node;
-
-       if ((node = *nodep) != 0
-           && (*nodep = call_prom("child", 1, 1, node)) != 0)
-               return 1;
-       if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
-               return 1;
-       for (;;) {
-               if ((node = call_prom("parent", 1, 1, node)) == 0)
-                       return 0;
-               if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
-                       return 1;
-       }
-}
-
-static int __init prom_getprop(phandle node, const char *pname,
-                              void *value, size_t valuelen)
-{
-       unsigned long offset = reloc_offset();
-
-       return call_prom("getprop", 4, 1, node, ADDR(pname),
-                        (u32)(unsigned long) value, (u32) valuelen);
-}
-
-static int __init prom_getproplen(phandle node, const char *pname)
-{
-       unsigned long offset = reloc_offset();
-
-       return call_prom("getproplen", 2, 1, node, ADDR(pname));
-}
-
-static int __init prom_setprop(phandle node, const char *pname,
-                              void *value, size_t valuelen)
-{
-       unsigned long offset = reloc_offset();
-
-       return call_prom("setprop", 4, 1, node, ADDR(pname),
-                        (u32)(unsigned long) value, (u32) valuelen);
-}
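Tying the wrappers above together, a hedged sketch of looking up a node and reading a property in this environment (mirroring what early_cmdline_parse() below does with /chosen and "bootargs"; the function name is illustrative):

	static void __init example_read_bootargs(void)
	{
		unsigned long offset = reloc_offset();	/* needed by ADDR() */
		phandle chosen;
		char buf[64];

		chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
		if (!PHANDLE_VALID(chosen))
			return;
		buf[0] = 0;
		if (prom_getprop(chosen, "bootargs", buf, sizeof(buf) - 1) > 0)
			prom_printf("bootargs: %s\n", buf);
	}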
-
-/* We can't use the standard versions because of RELOC headaches. */
-#define isxdigit(c)    (('0' <= (c) && (c) <= '9') \
-                        || ('a' <= (c) && (c) <= 'f') \
-                        || ('A' <= (c) && (c) <= 'F'))
-
-#define isdigit(c)     ('0' <= (c) && (c) <= '9')
-#define islower(c)     ('a' <= (c) && (c) <= 'z')
-#define toupper(c)     (islower(c) ? ((c) - 'a' + 'A') : (c))
-
-unsigned long prom_strtoul(const char *cp, const char **endp)
-{
-       unsigned long result = 0, base = 10, value;
-
-       if (*cp == '0') {
-               base = 8;
-               cp++;
-               if (toupper(*cp) == 'X') {
-                       cp++;
-                       base = 16;
-               }
-       }
-
-       while (isxdigit(*cp) &&
-              (value = isdigit(*cp) ? *cp - '0' : toupper(*cp) - 'A' + 10) < base) {
-               result = result * base + value;
-               cp++;
-       }
-
-       if (endp)
-               *endp = cp;
-
-       return result;
-}
-
-unsigned long prom_memparse(const char *ptr, const char **retptr)
-{
-       unsigned long ret = prom_strtoul(ptr, retptr);
-       int shift = 0;
-
-       /*
-        * We can't use a switch here because GCC *may* generate a
-        * jump table which won't work, because we're not running at
-        * the address we're linked at.
-        */
-       if ('G' == **retptr || 'g' == **retptr)
-               shift = 30;
-
-       if ('M' == **retptr || 'm' == **retptr)
-               shift = 20;
-
-       if ('K' == **retptr || 'k' == **retptr)
-               shift = 10;
-
-       if (shift) {
-               ret <<= shift;
-               (*retptr)++;
-       }
-
-       return ret;
-}
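For example, under the rules above (a purely illustrative call, ignoring the RELOC() handling that real prom_init code would need for a string literal):

	const char *rest;
	unsigned long limit = prom_memparse("512M", &rest);	/* 512 << 20 == 0x20000000 */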
-
-/*
- * Early parsing of the command line passed to the kernel, used for
- * "mem=x" and the options that affect the iommu
- */
-static void __init early_cmdline_parse(void)
-{
-       unsigned long offset = reloc_offset();
-       struct prom_t *_prom = PTRRELOC(&prom);
-       char *opt, *p;
-       int l = 0;
-
-       RELOC(prom_cmd_line[0]) = 0;
-       p = RELOC(prom_cmd_line);
-       if ((long)_prom->chosen > 0)
-               l = prom_getprop(_prom->chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
-#ifdef CONFIG_CMDLINE
-       if (l == 0) /* dbl check */
-               strlcpy(RELOC(prom_cmd_line),
-                       RELOC(CONFIG_CMDLINE), sizeof(prom_cmd_line));
-#endif /* CONFIG_CMDLINE */
-       prom_printf("command line: %s\n", RELOC(prom_cmd_line));
-
-       opt = strstr(RELOC(prom_cmd_line), RELOC("iommu="));
-       if (opt) {
-               prom_printf("iommu opt is: %s\n", opt);
-               opt += 6;
-               while (*opt && *opt == ' ')
-                       opt++;
-               if (!strncmp(opt, RELOC("off"), 3))
-                       RELOC(ppc64_iommu_off) = 1;
-               else if (!strncmp(opt, RELOC("force"), 5))
-                       RELOC(iommu_force_on) = 1;
-       }
-
-       opt = strstr(RELOC(prom_cmd_line), RELOC("mem="));
-       if (opt) {
-               opt += 4;
-               RELOC(prom_memory_limit) = prom_memparse(opt, (const char **)&opt);
-               /* Align to 16 MB == size of large page */
-               RELOC(prom_memory_limit) = ALIGN(RELOC(prom_memory_limit), 0x1000000);
-       }
-}
-
-/*
- * To tell the firmware what our capabilities are, we have to pass
- * it a fake 32-bit ELF header containing a couple of PT_NOTE sections
- * that contain structures that contain the actual values.
- */
-static struct fake_elf {
-       Elf32_Ehdr      elfhdr;
-       Elf32_Phdr      phdr[2];
-       struct chrpnote {
-               u32     namesz;
-               u32     descsz;
-               u32     type;
-               char    name[8];        /* "PowerPC" */
-               struct chrpdesc {
-                       u32     real_mode;
-                       u32     real_base;
-                       u32     real_size;
-                       u32     virt_base;
-                       u32     virt_size;
-                       u32     load_base;
-               } chrpdesc;
-       } chrpnote;
-       struct rpanote {
-               u32     namesz;
-               u32     descsz;
-               u32     type;
-               char    name[24];       /* "IBM,RPA-Client-Config" */
-               struct rpadesc {
-                       u32     lpar_affinity;
-                       u32     min_rmo_size;
-                       u32     min_rmo_percent;
-                       u32     max_pft_size;
-                       u32     splpar;
-                       u32     min_load;
-                       u32     new_mem_def;
-                       u32     ignore_me;
-               } rpadesc;
-       } rpanote;
-} fake_elf = {
-       .elfhdr = {
-               .e_ident = { 0x7f, 'E', 'L', 'F',
-                            ELFCLASS32, ELFDATA2MSB, EV_CURRENT },
-               .e_type = ET_EXEC,      /* yeah right */
-               .e_machine = EM_PPC,
-               .e_version = EV_CURRENT,
-               .e_phoff = offsetof(struct fake_elf, phdr),
-               .e_phentsize = sizeof(Elf32_Phdr),
-               .e_phnum = 2
-       },
-       .phdr = {
-               [0] = {
-                       .p_type = PT_NOTE,
-                       .p_offset = offsetof(struct fake_elf, chrpnote),
-                       .p_filesz = sizeof(struct chrpnote)
-               }, [1] = {
-                       .p_type = PT_NOTE,
-                       .p_offset = offsetof(struct fake_elf, rpanote),
-                       .p_filesz = sizeof(struct rpanote)
-               }
-       },
-       .chrpnote = {
-               .namesz = sizeof("PowerPC"),
-               .descsz = sizeof(struct chrpdesc),
-               .type = 0x1275,
-               .name = "PowerPC",
-               .chrpdesc = {
-                       .real_mode = ~0U,       /* ~0 means "don't care" */
-                       .real_base = ~0U,
-                       .real_size = ~0U,
-                       .virt_base = ~0U,
-                       .virt_size = ~0U,
-                       .load_base = ~0U
-               },
-       },
-       .rpanote = {
-               .namesz = sizeof("IBM,RPA-Client-Config"),
-               .descsz = sizeof(struct rpadesc),
-               .type = 0x12759999,
-               .name = "IBM,RPA-Client-Config",
-               .rpadesc = {
-                       .lpar_affinity = 0,
-                       .min_rmo_size = 64,     /* in megabytes */
-                       .min_rmo_percent = 0,
-                       .max_pft_size = 48,     /* 2^48 bytes max PFT size */
-                       .splpar = 1,
-                       .min_load = ~0U,
-                       .new_mem_def = 0
-               }
-       }
-};
-
-static void __init prom_send_capabilities(void)
-{
-       unsigned long offset = reloc_offset();
-       ihandle elfloader;
-
-       elfloader = call_prom("open", 1, 1, ADDR("/packages/elf-loader"));
-       if (elfloader == 0) {
-               prom_printf("couldn't open /packages/elf-loader\n");
-               return;
-       }
-       call_prom("call-method", 3, 1, ADDR("process-elf-header"),
-                       elfloader, ADDR(&fake_elf));
-       call_prom("close", 1, 0, elfloader);
-}
-
-/*
- * Memory allocation strategy... our layout is normally:
- *
- *  at 14Mb or more we have vmlinux, then a gap and the initrd. In some rare
- *  cases, the initrd might end up being before the kernel though. We assume
- *  this won't overwrite the final kernel at 0; we have no provision to handle
- *  that in this version, but it should hopefully never happen.
- *
- *  alloc_top is set to the top of RMO, eventually shrunk down if the TCEs overlap
- *  alloc_bottom is set to the top of kernel/initrd
- *
- *  from there, allocations are done as follows: rtas is allocated topmost, and
- *  the device-tree is allocated from the bottom. We try to grow the device-tree
- *  allocation as we progress. If we can't, then we fail; we don't currently have
- *  a facility to restart elsewhere, but that shouldn't be necessary either.
- *
- *  Note that calls to reserve_mem have to be done explicitly; memory allocated
- *  with either alloc_up or alloc_down isn't automatically reserved.
- */
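A hedged sketch of that strategy in terms of the helpers defined just below (sizes and names are illustrative; the real callers are prom_instantiate_rtas() and the device-tree flattening code):

	static void __init example_alloc_layout(void)
	{
		unsigned long rtas_base, dt_chunk;
		unsigned long rtas_size = 0x8000;	/* made-up size */

		rtas_base = alloc_down(rtas_size, PAGE_SIZE, 0);	/* claimed topmost */
		if (rtas_base != 0)
			reserve_mem(rtas_base, rtas_size);	/* reservation is explicit */

		dt_chunk = alloc_up(DEVTREE_CHUNK_SIZE, PAGE_SIZE);	/* grows from the bottom */
		/* ... the flattened device-tree would then be built into dt_chunk ... */
	}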
-
-
-/*
- * Allocates memory in the RMO upward from the kernel/initrd
- *
- * When align is 0, this is a special case, it means to allocate in place
- * at the current location of alloc_bottom or fail (that is basically
- * extending the previous allocation). Used for the device-tree flattening
- */
-static unsigned long __init alloc_up(unsigned long size, unsigned long align)
-{
-       unsigned long offset = reloc_offset();
-       unsigned long base = _ALIGN_UP(RELOC(alloc_bottom), align);
-       unsigned long addr = 0;
-
-       prom_debug("alloc_up(%x, %x)\n", size, align);
-       if (RELOC(ram_top) == 0)
-               prom_panic("alloc_up() called with mem not initialized\n");
-
-       if (align)
-               base = _ALIGN_UP(RELOC(alloc_bottom), align);
-       else
-               base = RELOC(alloc_bottom);
-
-       for(; (base + size) <= RELOC(alloc_top); 
-           base = _ALIGN_UP(base + 0x100000, align)) {
-               prom_debug("    trying: 0x%x\n\r", base);
-               addr = (unsigned long)prom_claim(base, size, 0);
-               if (addr != PROM_ERROR)
-                       break;
-               addr = 0;
-               if (align == 0)
-                       break;
-       }
-       if (addr == 0)
-               return 0;
-       RELOC(alloc_bottom) = addr;
-
-       prom_debug(" -> %x\n", addr);
-       prom_debug("  alloc_bottom : %x\n", RELOC(alloc_bottom));
-       prom_debug("  alloc_top    : %x\n", RELOC(alloc_top));
-       prom_debug("  alloc_top_hi : %x\n", RELOC(alloc_top_high));
-       prom_debug("  rmo_top      : %x\n", RELOC(rmo_top));
-       prom_debug("  ram_top      : %x\n", RELOC(ram_top));
-
-       return addr;
-}
-
-/*
- * Allocates memory downward, either from the top of RMO, or if highmem
- * is set, from the top of RAM.  Note that this one doesn't handle
- * failures.  It does claim memory if highmem is not set.
- */
-static unsigned long __init alloc_down(unsigned long size, unsigned long align,
-                                      int highmem)
-{
-       unsigned long offset = reloc_offset();
-       unsigned long base, addr = 0;
-
-       prom_debug("alloc_down(%x, %x, %s)\n", size, align,
-                  highmem ? RELOC("(high)") : RELOC("(low)"));
-       if (RELOC(ram_top) == 0)
-               prom_panic("alloc_down() called with mem not initialized\n");
-
-       if (highmem) {
-               /* Carve out storage for the TCE table. */
-               addr = _ALIGN_DOWN(RELOC(alloc_top_high) - size, align);
-               if (addr <= RELOC(alloc_bottom))
-                       return 0;
-               else {
-                       /* Will we bump into the RMO ? If yes, check out that we
-                        * didn't overlap existing allocations there, if we did,
-                        * we are dead, we must be the first in town !
-                        */
-                       if (addr < RELOC(rmo_top)) {
-                               /* Good, we are first */
-                               if (RELOC(alloc_top) == RELOC(rmo_top))
-                                       RELOC(alloc_top) = RELOC(rmo_top) = addr;
-                               else
-                                       return 0;
-                       }
-                       RELOC(alloc_top_high) = addr;
-               }
-               goto bail;
-       }
-
-       base = _ALIGN_DOWN(RELOC(alloc_top) - size, align);
-       for(; base > RELOC(alloc_bottom); base = _ALIGN_DOWN(base - 0x100000, align))  {
-               prom_debug("    trying: 0x%x\n\r", base);
-               addr = (unsigned long)prom_claim(base, size, 0);
-               if (addr != PROM_ERROR)
-                       break;
-               addr = 0;
-       }
-       if (addr == 0)
-               return 0;
-       RELOC(alloc_top) = addr;
-
- bail:
-       prom_debug(" -> %x\n", addr);
-       prom_debug("  alloc_bottom : %x\n", RELOC(alloc_bottom));
-       prom_debug("  alloc_top    : %x\n", RELOC(alloc_top));
-       prom_debug("  alloc_top_hi : %x\n", RELOC(alloc_top_high));
-       prom_debug("  rmo_top      : %x\n", RELOC(rmo_top));
-       prom_debug("  ram_top      : %x\n", RELOC(ram_top));
-
-       return addr;
-}
-
-/*
- * Parse a "reg" cell
- */
-static unsigned long __init prom_next_cell(int s, cell_t **cellp)
-{
-       cell_t *p = *cellp;
-       unsigned long r = 0;
-
-       /* Ignore more than 2 cells */
-       while (s > 2) {
-               p++;
-               s--;
-       }
-       while (s) {
-               r <<= 32;
-               r |= *(p++);
-               s--;
-       }
-
-       *cellp = p;
-       return r;
-}
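A worked example of the cell parsing, assuming #address-cells = 2 and #size-cells = 2 (the values are made up):

	cell_t reg_cells[4] = { 0, 0, 0, 0x20000000 };	/* base 0, size 512MB */
	cell_t *p = reg_cells;
	unsigned long base = prom_next_cell(2, &p);	/* base == 0x0 */
	unsigned long size = prom_next_cell(2, &p);	/* size == 0x20000000 */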
-
-/*
- * Very dumb function for adding to the memory reserve list, but
- * we don't need anything smarter at this point
- *
- * XXX Eventually check for collisions.  They should NEVER happen;
- * if problems seem to show up, it would be a good start to track
- * them down.
- */
-static void reserve_mem(unsigned long base, unsigned long size)
-{
-       unsigned long offset = reloc_offset();
-       unsigned long top = base + size;
-       unsigned long cnt = RELOC(mem_reserve_cnt);
-
-       if (size == 0)
-               return;
-
-       /* We need to always keep one empty entry so that we
-        * have our terminator with "size" set to 0 since we are
-        * dumb and just copy this entire array to the boot params
-        */
-       base = _ALIGN_DOWN(base, PAGE_SIZE);
-       top = _ALIGN_UP(top, PAGE_SIZE);
-       size = top - base;
-
-       if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
-               prom_panic("Memory reserve map exhausted !\n");
-       RELOC(mem_reserve_map)[cnt].base = base;
-       RELOC(mem_reserve_map)[cnt].size = size;
-       RELOC(mem_reserve_cnt) = cnt + 1;
-}
-
-/*
- * Initialize the memory allocation mechanism, parse "memory" nodes and
- * obtain from them the top of memory and of the RMO to set up our local allocator
- */
-static void __init prom_init_mem(void)
-{
-       phandle node;
-       char *path, type[64];
-       unsigned int plen;
-       cell_t *p, *endp;
-       unsigned long offset = reloc_offset();
-       struct prom_t *_prom = PTRRELOC(&prom);
-
-       /*
-        * We iterate the memory nodes to find
-        * 1) top of RMO (first node)
-        * 2) top of memory
-        */
-       prom_debug("root_addr_cells: %x\n", (long)_prom->root_addr_cells);
-       prom_debug("root_size_cells: %x\n", (long)_prom->root_size_cells);
-
-       prom_debug("scanning memory:\n");
-       path = RELOC(prom_scratch);
-
-       for (node = 0; prom_next_node(&node); ) {
-               type[0] = 0;
-               prom_getprop(node, "device_type", type, sizeof(type));
-
-               if (strcmp(type, RELOC("memory")))
-                       continue;
-       
-               plen = prom_getprop(node, "reg", RELOC(regbuf), sizeof(regbuf));
-               if (plen > sizeof(regbuf)) {
-                       prom_printf("memory node too large for buffer !\n");
-                       plen = sizeof(regbuf);
-               }
-               p = RELOC(regbuf);
-               endp = p + (plen / sizeof(cell_t));
-
-#ifdef DEBUG_PROM
-               memset(path, 0, PROM_SCRATCH_SIZE);
-               call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
-               prom_debug("  node %s :\n", path);
-#endif /* DEBUG_PROM */
-
-               while ((endp - p) >= (_prom->root_addr_cells + _prom->root_size_cells)) {
-                       unsigned long base, size;
-
-                       base = prom_next_cell(_prom->root_addr_cells, &p);
-                       size = prom_next_cell(_prom->root_size_cells, &p);
-
-                       if (size == 0)
-                               continue;
-                       prom_debug("    %x %x\n", base, size);
-                       if (base == 0)
-                               RELOC(rmo_top) = size;
-                       if ((base + size) > RELOC(ram_top))
-                               RELOC(ram_top) = base + size;
-               }
-       }
-
-       RELOC(alloc_bottom) = PAGE_ALIGN(RELOC(klimit) - offset + 0x4000);
-
-       /* Check if we have an initrd after the kernel, if we do move our bottom
-        * point to after it
-        */
-       if (RELOC(prom_initrd_start)) {
-               if (RELOC(prom_initrd_end) > RELOC(alloc_bottom))
-                       RELOC(alloc_bottom) = PAGE_ALIGN(RELOC(prom_initrd_end));
-       }
-
-       /*
-        * If prom_memory_limit is set we reduce the upper limits *except* for
-        * alloc_top_high. This must be the real top of RAM so we can put
-        * TCE's up there.
-        */
-
-       RELOC(alloc_top_high) = RELOC(ram_top);
-
-       if (RELOC(prom_memory_limit)) {
-               if (RELOC(prom_memory_limit) <= RELOC(alloc_bottom)) {
-                       prom_printf("Ignoring mem=%x <= alloc_bottom.\n",
-                               RELOC(prom_memory_limit));
-                       RELOC(prom_memory_limit) = 0;
-               } else if (RELOC(prom_memory_limit) >= RELOC(ram_top)) {
-                       prom_printf("Ignoring mem=%x >= ram_top.\n",
-                               RELOC(prom_memory_limit));
-                       RELOC(prom_memory_limit) = 0;
-               } else {
-                       RELOC(ram_top) = RELOC(prom_memory_limit);
-                       RELOC(rmo_top) = min(RELOC(rmo_top), RELOC(prom_memory_limit));
-               }
-       }
-
-       /*
-        * Setup our top alloc point, that is top of RMO or top of
-        * segment 0 when running non-LPAR.
-        */
-       if ( RELOC(of_platform) == PLATFORM_PSERIES_LPAR )
-               RELOC(alloc_top) = RELOC(rmo_top);
-       else
-               /* Some RS64 machines have buggy firmware where claims up at 1GB
-                * fail. Cap at 768MB as a workaround. Still plenty of room.
-                */
-               RELOC(alloc_top) = RELOC(rmo_top) = min(0x30000000ul, RELOC(ram_top));
-
-       prom_printf("memory layout at init:\n");
-       prom_printf("  memory_limit : %x (16 MB aligned)\n", RELOC(prom_memory_limit));
-       prom_printf("  alloc_bottom : %x\n", RELOC(alloc_bottom));
-       prom_printf("  alloc_top    : %x\n", RELOC(alloc_top));
-       prom_printf("  alloc_top_hi : %x\n", RELOC(alloc_top_high));
-       prom_printf("  rmo_top      : %x\n", RELOC(rmo_top));
-       prom_printf("  ram_top      : %x\n", RELOC(ram_top));
-}
-
-
-/*
- * Allocate room for and instantiate RTAS
- */
-static void __init prom_instantiate_rtas(void)
-{
-       unsigned long offset = reloc_offset();
-       struct prom_t *_prom = PTRRELOC(&prom);
-       phandle rtas_node;
-       ihandle rtas_inst;
-       u32 base, entry = 0;
-       u32 size = 0;
-
-       prom_debug("prom_instantiate_rtas: start...\n");
-
-       rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
-       prom_debug("rtas_node: %x\n", rtas_node);
-       if (!PHANDLE_VALID(rtas_node))
-               return;
-
-       prom_getprop(rtas_node, "rtas-size", &size, sizeof(size));
-       if (size == 0)
-               return;
-
-       base = alloc_down(size, PAGE_SIZE, 0);
-       if (base == 0) {
-               prom_printf("RTAS allocation failed !\n");
-               return;
-       }
-
-       rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
-       if (!IHANDLE_VALID(rtas_inst)) {
-               prom_printf("opening rtas package failed");
-               return;
-       }
-
-       prom_printf("instantiating rtas at 0x%x ...", base);
-
-       if (call_prom("call-method", 3, 2,
-                     ADDR("instantiate-rtas"),
-                     rtas_inst, base) != PROM_ERROR) {
-               entry = (long)_prom->args.rets[1];
-       }
-       if (entry == 0) {
-               prom_printf(" failed\n");
-               return;
-       }
-       prom_printf(" done\n");
-
-       reserve_mem(base, size);
-
-       prom_setprop(rtas_node, "linux,rtas-base", &base, sizeof(base));
-       prom_setprop(rtas_node, "linux,rtas-entry", &entry, sizeof(entry));
-
-       prom_debug("rtas base     = 0x%x\n", base);
-       prom_debug("rtas entry    = 0x%x\n", entry);
-       prom_debug("rtas size     = 0x%x\n", (long)size);
-
-       prom_debug("prom_instantiate_rtas: end...\n");
-}
-
-
-/*
- * Allocate room for and initialize TCE tables
- */
-static void __init prom_initialize_tce_table(void)
-{
-       phandle node;
-       ihandle phb_node;
-       unsigned long offset = reloc_offset();
-       char compatible[64], type[64], model[64];
-       char *path = RELOC(prom_scratch);
-       u64 base, align;
-       u32 minalign, minsize;
-       u64 tce_entry, *tce_entryp;
-       u64 local_alloc_top, local_alloc_bottom;
-       u64 i;
-
-       if (RELOC(ppc64_iommu_off))
-               return;
-
-       prom_debug("starting prom_initialize_tce_table\n");
-
-       /* Cache current top of allocs so we reserve a single block */
-       local_alloc_top = RELOC(alloc_top_high);
-       local_alloc_bottom = local_alloc_top;
-
-       /* Search all nodes looking for PHBs. */
-       for (node = 0; prom_next_node(&node); ) {
-               compatible[0] = 0;
-               type[0] = 0;
-               model[0] = 0;
-               prom_getprop(node, "compatible",
-                            compatible, sizeof(compatible));
-               prom_getprop(node, "device_type", type, sizeof(type));
-               prom_getprop(node, "model", model, sizeof(model));
-
-               if ((type[0] == 0) || (strstr(type, RELOC("pci")) == NULL))
-                       continue;
-
-               /* Keep the old logic intact to avoid regression. */
-               if (compatible[0] != 0) {
-                       if ((strstr(compatible, RELOC("python")) == NULL) &&
-                           (strstr(compatible, RELOC("Speedwagon")) == NULL) &&
-                           (strstr(compatible, RELOC("Winnipeg")) == NULL))
-                               continue;
-               } else if (model[0] != 0) {
-                       if ((strstr(model, RELOC("ython")) == NULL) &&
-                           (strstr(model, RELOC("peedwagon")) == NULL) &&
-                           (strstr(model, RELOC("innipeg")) == NULL))
-                               continue;
-               }
-
-               if (prom_getprop(node, "tce-table-minalign", &minalign,
-                                sizeof(minalign)) == PROM_ERROR)
-                       minalign = 0;
-               if (prom_getprop(node, "tce-table-minsize", &minsize,
-                                sizeof(minsize)) == PROM_ERROR)
-                       minsize = 4UL << 20;
-
-               /*
-                * Even though we read what OF wants, we just set the table
-                * size to 4 MB.  This is enough to map 2GB of PCI DMA space.
-                * By doing this, we avoid the pitfalls of trying to DMA to
-                * MMIO space and the DMA alias hole.
-                *
-                * On POWER4, firmware sets the TCE region by assuming
-                * each TCE table is 8MB. Using this memory for anything
-                * else will impact performance, so we always allocate 8MB.
-                * Anton
-                */
-               if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p))
-                       minsize = 8UL << 20;
-               else
-                       minsize = 4UL << 20;
-
-               /* Align to the greater of the align or size */
-               align = max(minalign, minsize);
-               base = alloc_down(minsize, align, 1);
-               if (base == 0)
-                       prom_panic("ERROR, cannot find space for TCE table.\n");
-               if (base < local_alloc_bottom)
-                       local_alloc_bottom = base;
-
-               /* Save away the TCE table attributes for later use. */
-               prom_setprop(node, "linux,tce-base", &base, sizeof(base));
-               prom_setprop(node, "linux,tce-size", &minsize, sizeof(minsize));
-
-               /* It seems OF doesn't null-terminate the path :-( */
-               memset(path, 0, sizeof(path));
-               /* Call OF to setup the TCE hardware */
-               if (call_prom("package-to-path", 3, 1, node,
-                             path, PROM_SCRATCH_SIZE-1) == PROM_ERROR) {
-                       prom_printf("package-to-path failed\n");
-               }
-
-               prom_debug("TCE table: %s\n", path);
-               prom_debug("\tnode = 0x%x\n", node);
-               prom_debug("\tbase = 0x%x\n", base);
-               prom_debug("\tsize = 0x%x\n", minsize);
-
-               /* Initialize the table to have a one-to-one mapping
-                * over the allocated size.
-                */
-               tce_entryp = (unsigned long *)base;
-               for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) {
-                       tce_entry = (i << PAGE_SHIFT);
-                       tce_entry |= 0x3;
-                       *tce_entryp = tce_entry;
-               }
-
-               prom_printf("opening PHB %s", path);
-               phb_node = call_prom("open", 1, 1, path);
-               if (phb_node == 0)
-                       prom_printf("... failed\n");
-               else
-                       prom_printf("... done\n");
-
-               call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
-                         phb_node, -1, minsize,
-                         (u32) base, (u32) (base >> 32));
-               call_prom("close", 1, 0, phb_node);
-       }
-
-       reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);
-
-       if (RELOC(prom_memory_limit)) {
-               /*
-                * We align the start to a 16MB boundary so we can map the TCE area
-                * using large pages if possible. The end should be the top of RAM
-                * so no need to align it.
-                */
-               RELOC(prom_tce_alloc_start) = _ALIGN_DOWN(local_alloc_bottom, 0x1000000);
-               RELOC(prom_tce_alloc_end) = local_alloc_top;
-       }
-
-       /* Flag the first invalid entry */
-       prom_debug("ending prom_initialize_tce_table\n");
-}
-
-/*
- * With CHRP SMP we need to use the OF to start the other
- * processors so we can't wait until smp_boot_cpus (the OF is
- * trashed by then) so we have to put the processors into
- * a holding pattern controlled by the kernel (not OF) before
- * we destroy the OF.
- *
- * This uses a chunk of low memory, puts some holding pattern
- * code there and sends the other processors off to there until
- * smp_boot_cpus tells them to do something.  The holding pattern
- * checks that address until its cpu # appears there; when it does, that
- * cpu jumps to __secondary_start().  smp_boot_cpus() takes care
- * of setting those values.
- *
- * We also use physical address 0x4 here to tell when a cpu
- * is in its holding pattern code.
- *
- * Fixup comment... DRENG / PPPBBB - Peter
- *
- * -- Cort
- */
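Roughly, the assembler holding pattern that comment refers to behaves like the C sketch below; this is a loose, schematic rendering (my_hw_cpu_id and the exact release mechanics are assumptions), not the actual __secondary_hold code.

	/* loose C rendering; the real loop is assembler in head.S */
	*acknowledge = my_hw_cpu_id;	/* seen by prom_hold_cpus() below */
	while (*spinloop == 0)
		;			/* held here until the kernel releases us */
	/* ...then continue into the kernel's secondary-CPU startup path */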
-static void __init prom_hold_cpus(void)
-{
-       unsigned long i;
-       unsigned int reg;
-       phandle node;
-       unsigned long offset = reloc_offset();
-       char type[64];
-       int cpuid = 0;
-       unsigned int interrupt_server[MAX_CPU_THREADS];
-       unsigned int cpu_threads, hw_cpu_num;
-       int propsize;
-       extern void __secondary_hold(void);
-       extern unsigned long __secondary_hold_spinloop;
-       extern unsigned long __secondary_hold_acknowledge;
-       unsigned long *spinloop
-               = (void *)virt_to_abs(&__secondary_hold_spinloop);
-       unsigned long *acknowledge
-               = (void *)virt_to_abs(&__secondary_hold_acknowledge);
-       unsigned long secondary_hold
-               = virt_to_abs(*PTRRELOC((unsigned long *)__secondary_hold));
-       struct prom_t *_prom = PTRRELOC(&prom);
-
-       prom_debug("prom_hold_cpus: start...\n");
-       prom_debug("    1) spinloop       = 0x%x\n", (unsigned long)spinloop);
-       prom_debug("    1) *spinloop      = 0x%x\n", *spinloop);
-       prom_debug("    1) acknowledge    = 0x%x\n",
-                  (unsigned long)acknowledge);
-       prom_debug("    1) *acknowledge   = 0x%x\n", *acknowledge);
-       prom_debug("    1) secondary_hold = 0x%x\n", secondary_hold);
-
-       /* Set the common spinloop variable, so all of the secondary cpus
-        * will block when they are awakened from their OF spinloop.
-        * This must occur for both SMP and non SMP kernels, since OF will
-        * be trashed when we move the kernel.
-        */
-       *spinloop = 0;
-
-#ifdef CONFIG_HMT
-       for (i=0; i < NR_CPUS; i++) {
-               RELOC(hmt_thread_data)[i].pir = 0xdeadbeef;
-       }
-#endif
-       /* look for cpus */
-       for (node = 0; prom_next_node(&node); ) {
-               type[0] = 0;
-               prom_getprop(node, "device_type", type, sizeof(type));
-               if (strcmp(type, RELOC("cpu")) != 0)
-                       continue;
-
-               /* Skip non-configured cpus. */
-               if (prom_getprop(node, "status", type, sizeof(type)) > 0)
-                       if (strcmp(type, RELOC("okay")) != 0)
-                               continue;
-
-               reg = -1;
-               prom_getprop(node, "reg", &reg, sizeof(reg));
-
-               prom_debug("\ncpuid        = 0x%x\n", cpuid);
-               prom_debug("cpu hw idx   = 0x%x\n", reg);
-
-               /* Init the acknowledge var which will be reset by
-                * the secondary cpu when it awakens from its OF
-                * spinloop.
-                */
-               *acknowledge = (unsigned long)-1;
-
-               propsize = prom_getprop(node, "ibm,ppc-interrupt-server#s",
-                                       &interrupt_server,
-                                       sizeof(interrupt_server));
-               if (propsize < 0) {
-                       /* no property.  old hardware has no SMT */
-                       cpu_threads = 1;
-                       interrupt_server[0] = reg; /* fake it with phys id */
-               } else {
-                       /* We have a threaded processor */
-                       cpu_threads = propsize / sizeof(u32);
-                       if (cpu_threads > MAX_CPU_THREADS) {
-                               prom_printf("SMT: too many threads!\n"
-                                           "SMT: found %x, max is %x\n",
-                                           cpu_threads, MAX_CPU_THREADS);
-                               cpu_threads = 1; /* ToDo: panic? */
-                       }
-               }
-
-               hw_cpu_num = interrupt_server[0];
-               if (hw_cpu_num != _prom->cpu) {
-                       /* Primary Thread of non-boot cpu */
-                       prom_printf("%x : starting cpu hw idx %x... ", cpuid, reg);
-                       call_prom("start-cpu", 3, 0, node,
-                                 secondary_hold, reg);
-
-                       for ( i = 0 ; (i < 100000000) && 
-                             (*acknowledge == ((unsigned long)-1)); i++ )
-                               mb();
-
-                       if (*acknowledge == reg) {
-                               prom_printf("done\n");
-                               /* We have to get every CPU out of OF,
-                                * even if we never start it. */
-                               if (cpuid >= NR_CPUS)
-                                       goto next;
-                       } else {
-                               prom_printf("failed: %x\n", *acknowledge);
-                       }
-               }
-#ifdef CONFIG_SMP
-               else
-                       prom_printf("%x : boot cpu     %x\n", cpuid, reg);
-#endif
-next:
-#ifdef CONFIG_SMP
-               /* Init paca for secondary threads.   They start later. */
-               for (i=1; i < cpu_threads; i++) {
-                       cpuid++;
-                       if (cpuid >= NR_CPUS)
-                               continue;
-               }
-#endif /* CONFIG_SMP */
-               cpuid++;
-       }
-#ifdef CONFIG_HMT
-       /* Only enable HMT on processors that provide support. */
-       if (__is_processor(PV_PULSAR) || 
-           __is_processor(PV_ICESTAR) ||
-           __is_processor(PV_SSTAR)) {
-               prom_printf("    starting secondary threads\n");
-
-               for (i = 0; i < NR_CPUS; i += 2) {
-                       if (!cpu_online(i))
-                               continue;
-
-                       if (i == 0) {
-                               unsigned long pir = mfspr(SPRN_PIR);
-                               if (__is_processor(PV_PULSAR)) {
-                                       RELOC(hmt_thread_data)[i].pir = 
-                                               pir & 0x1f;
-                               } else {
-                                       RELOC(hmt_thread_data)[i].pir = 
-                                               pir & 0x3ff;
-                               }
-                       }
-               }
-       } else {
-               prom_printf("Processor is not HMT capable\n");
-       }
-#endif
-
-       if (cpuid > NR_CPUS)
-               prom_printf("WARNING: maximum CPUs (" __stringify(NR_CPUS)
-                           ") exceeded: ignoring extras\n");
-
-       prom_debug("prom_hold_cpus: end...\n");
-}
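For reference, the handshake prom_hold_cpus() drives can be summarised with a small C sketch of the secondary side. This is illustrative only: the names ending in _example are hypothetical, and the real entry point (__secondary_hold) is assembly in head.S. Per the comments above, the secondary announces itself through the acknowledge word and then spins until the kernel releases it by writing a non-zero value into the spinloop word.

    /* Hypothetical C rendering of the __secondary_hold holding pattern. */
    volatile unsigned long secondary_hold_spinloop_example;     /* 0 = stay put */
    volatile unsigned long secondary_hold_acknowledge_example;  /* written by the secondary */

    void secondary_hold_example(unsigned long hw_id)
    {
            /* Tell the boot cpu we have left Open Firmware. */
            secondary_hold_acknowledge_example = hw_id;

            /* Sit in the holding pattern until the kernel releases us... */
            while (secondary_hold_spinloop_example == 0)
                    ;       /* spin */

            /* ...then fall through to the real secondary startup path. */
    }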
-
-
-static void __init prom_init_client_services(unsigned long pp)
-{
-       unsigned long offset = reloc_offset();
-       struct prom_t *_prom = PTRRELOC(&prom);
-
-       /* Get a handle to the prom entry point before anything else */
-       _prom->entry = pp;
-
-       /* Init default value for phys size */
-       _prom->root_size_cells = 1;
-       _prom->root_addr_cells = 2;
-
-       /* get a handle for the stdout device */
-       _prom->chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
-       if (!PHANDLE_VALID(_prom->chosen))
-               prom_panic("cannot find chosen"); /* msg won't be printed :( */
-
-       /* get device tree root */
-       _prom->root = call_prom("finddevice", 1, 1, ADDR("/"));
-       if (!PHANDLE_VALID(_prom->root))
-               prom_panic("cannot find device tree root"); /* msg won't be printed :( */
-}
-
-static void __init prom_init_stdout(void)
-{
-       unsigned long offset = reloc_offset();
-       struct prom_t *_prom = PTRRELOC(&prom);
-       char *path = RELOC(of_stdout_device);
-       char type[16];
-       u32 val;
-
-       if (prom_getprop(_prom->chosen, "stdout", &val, sizeof(val)) <= 0)
-               prom_panic("cannot find stdout");
-
-       _prom->stdout = val;
-
-       /* Get the full OF pathname of the stdout device */
-       memset(path, 0, 256);
-       call_prom("instance-to-path", 3, 1, _prom->stdout, path, 255);
-       val = call_prom("instance-to-package", 1, 1, _prom->stdout);
-       prom_setprop(_prom->chosen, "linux,stdout-package", &val, sizeof(val));
-       prom_printf("OF stdout device is: %s\n", RELOC(of_stdout_device));
-       prom_setprop(_prom->chosen, "linux,stdout-path",
-                    RELOC(of_stdout_device), strlen(RELOC(of_stdout_device))+1);
-
-       /* If it's a display, note it */
-       memset(type, 0, sizeof(type));
-       prom_getprop(val, "device_type", type, sizeof(type));
-       if (strcmp(type, RELOC("display")) == 0) {
-               _prom->disp_node = val;
-               prom_setprop(val, "linux,boot-display", NULL, 0);
-       }
-}
-
-static void __init prom_close_stdin(void)
-{
-       unsigned long offset = reloc_offset();
-       struct prom_t *_prom = PTRRELOC(&prom);
-       ihandle val;
-
-       if (prom_getprop(_prom->chosen, "stdin", &val, sizeof(val)) > 0)
-               call_prom("close", 1, 0, val);
-}
-
-static int __init prom_find_machine_type(void)
-{
-       unsigned long offset = reloc_offset();
-       struct prom_t *_prom = PTRRELOC(&prom);
-       char compat[256];
-       int len, i = 0;
-       phandle rtas;
-
-       len = prom_getprop(_prom->root, "compatible",
-                          compat, sizeof(compat)-1);
-       if (len > 0) {
-               compat[len] = 0;
-               while (i < len) {
-                       char *p = &compat[i];
-                       int sl = strlen(p);
-                       if (sl == 0)
-                               break;
-                       if (strstr(p, RELOC("Power Macintosh")) ||
-                           strstr(p, RELOC("MacRISC4")))
-                               return PLATFORM_POWERMAC;
-                       if (strstr(p, RELOC("Momentum,Maple")))
-                               return PLATFORM_MAPLE;
-                       i += sl + 1;
-               }
-       }
-       /* Default to pSeries. We need to know if we are running LPAR */
-       rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
-       if (PHANDLE_VALID(rtas)) {
-               int x = prom_getproplen(rtas, "ibm,hypertas-functions");
-               if (x != PROM_ERROR) {
-                       prom_printf("Hypertas detected, assuming LPAR !\n");
-                       return PLATFORM_PSERIES_LPAR;
-               }
-       }
-       return PLATFORM_PSERIES;
-}
-
-static int __init prom_set_color(ihandle ih, int i, int r, int g, int b)
-{
-       unsigned long offset = reloc_offset();
-
-       return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r);
-}
-
-/*
- * If we have a display that we don't know how to drive,
- * we will want to try to execute OF's open method for it
- * later.  However, OF will probably fall over if we do that
- * after we've taken over the MMU.
- * So we check whether we will need to open the display,
- * and if so, open it now.
- */
-static void __init prom_check_displays(void)
-{
-       unsigned long offset = reloc_offset();
-       struct prom_t *_prom = PTRRELOC(&prom);
-       char type[16], *path;
-       phandle node;
-       ihandle ih;
-       int i;
-
-       static unsigned char default_colors[] = {
-               0x00, 0x00, 0x00,
-               0x00, 0x00, 0xaa,
-               0x00, 0xaa, 0x00,
-               0x00, 0xaa, 0xaa,
-               0xaa, 0x00, 0x00,
-               0xaa, 0x00, 0xaa,
-               0xaa, 0xaa, 0x00,
-               0xaa, 0xaa, 0xaa,
-               0x55, 0x55, 0x55,
-               0x55, 0x55, 0xff,
-               0x55, 0xff, 0x55,
-               0x55, 0xff, 0xff,
-               0xff, 0x55, 0x55,
-               0xff, 0x55, 0xff,
-               0xff, 0xff, 0x55,
-               0xff, 0xff, 0xff
-       };
-       const unsigned char *clut;
-
-       prom_printf("Looking for displays\n");
-       for (node = 0; prom_next_node(&node); ) {
-               memset(type, 0, sizeof(type));
-               prom_getprop(node, "device_type", type, sizeof(type));
-               if (strcmp(type, RELOC("display")) != 0)
-                       continue;
-
-               /* It seems OF doesn't null-terminate the path :-( */
-               path = RELOC(prom_scratch);
-               memset(path, 0, PROM_SCRATCH_SIZE);
-
-               /*
-                * leave some room at the end of the path for appending extra
-                * arguments
-                */
-               if (call_prom("package-to-path", 3, 1, node, path,
-                             PROM_SCRATCH_SIZE-10) == PROM_ERROR)
-                       continue;
-               prom_printf("found display   : %s, opening ... ", path);
-               
-               ih = call_prom("open", 1, 1, path);
-               if (ih == 0) {
-                       prom_printf("failed\n");
-                       continue;
-               }
-
-               /* Success */
-               prom_printf("done\n");
-               prom_setprop(node, "linux,opened", NULL, 0);
-
-               /*
-                * If stdout wasn't a display node, pick the first one we
-                * can find for btext
-                */
-               if (_prom->disp_node == 0)
-                       _prom->disp_node = node;
-
-               /* Set up a usable color table when the appropriate
-                * method is available. Should update this to set-colors */
-               clut = RELOC(default_colors);
-               for (i = 0; i < 32; i++, clut += 3)
-                       if (prom_set_color(ih, i, clut[0], clut[1],
-                                          clut[2]) != 0)
-                               break;
-
-#ifdef CONFIG_LOGO_LINUX_CLUT224
-               clut = PTRRELOC(RELOC(logo_linux_clut224.clut));
-               for (i = 0; i < RELOC(logo_linux_clut224.clutsize); i++, clut += 3)
-                       if (prom_set_color(ih, i + 32, clut[0], clut[1],
-                                          clut[2]) != 0)
-                               break;
-#endif /* CONFIG_LOGO_LINUX_CLUT224 */
-       }
-}
-
-
-/* Return (relocated) pointer to this much memory: moves initrd if reqd. */
-static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
-                             unsigned long needed, unsigned long align)
-{
-       unsigned long offset = reloc_offset();
-       void *ret;
-
-       *mem_start = _ALIGN(*mem_start, align);
-       while ((*mem_start + needed) > *mem_end) {
-               unsigned long room, chunk;
-
-               prom_debug("Chunk exhausted, claiming more at %x...\n",
-                          RELOC(alloc_bottom));
-               room = RELOC(alloc_top) - RELOC(alloc_bottom);
-               if (room > DEVTREE_CHUNK_SIZE)
-                       room = DEVTREE_CHUNK_SIZE;
-               if (room < PAGE_SIZE)
-                       prom_panic("No memory for flatten_device_tree (no room)");
-               chunk = alloc_up(room, 0);
-               if (chunk == 0)
-                       prom_panic("No memory for flatten_device_tree (claim failed)");
-               *mem_end = RELOC(alloc_top);
-       }
-
-       ret = (void *)*mem_start;
-       *mem_start += needed;
-
-       return ret;
-}
-
-#define dt_push_token(token, mem_start, mem_end) \
-       do { *((u32 *)make_room(mem_start, mem_end, 4, 4)) = token; } while(0)
-
-static unsigned long __init dt_find_string(char *str)
-{
-       unsigned long offset = reloc_offset();
-       char *s, *os;
-
-       s = os = (char *)RELOC(dt_string_start);
-       s += 4;
-       while (s <  (char *)RELOC(dt_string_end)) {
-               if (strcmp(s, str) == 0)
-                       return s - os;
-               s += strlen(s) + 1;
-       }
-       return 0;
-}
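Each distinct property name is stored once in the strings block and later referenced by its byte offset from OF_DT_PROP entries. A hypothetical block holding "linux,phandle", "reg" and "device_type" (offsets measured from dt_string_start, with the 4-byte hole left by flatten_device_tree()) would look like this:

    /* Hypothetical strings block contents:
     *   0x04: "linux,phandle\0"
     *   0x12: "reg\0"
     *   0x16: "device_type\0"
     * dt_find_string("reg") would then return 0x12 for every "reg" property,
     * so each name is emitted only once no matter how many nodes carry it. */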
-
-/*
- * The Open Firmware 1275 specification states properties must be 31 bytes or
- * less; however, not all firmware obeys this. Make it 64 bytes to be safe.
- */
-#define MAX_PROPERTY_NAME 64
-
-static void __init scan_dt_build_strings(phandle node,
-                                        unsigned long *mem_start,
-                                        unsigned long *mem_end)
-{
-       unsigned long offset = reloc_offset();
-       char *prev_name, *namep, *sstart;
-       unsigned long soff;
-       phandle child;
-
-       sstart =  (char *)RELOC(dt_string_start);
-
-       /* get and store all property names */
-       prev_name = RELOC("");
-       for (;;) {
-               /* 64 is max len of name including nul. */
-               namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
-               if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) {
-                       /* No more properties: unwind alloc */
-                       *mem_start = (unsigned long)namep;
-                       break;
-               }
-
-               /* skip "name" */
-               if (strcmp(namep, RELOC("name")) == 0) {
-                       *mem_start = (unsigned long)namep;
-                       prev_name = RELOC("name");
-                       continue;
-               }
-               /* get/create string entry */
-               soff = dt_find_string(namep);
-               if (soff != 0) {
-                       *mem_start = (unsigned long)namep;
-                       namep = sstart + soff;
-               } else {
-                       /* Trim off some if we can */
-                       *mem_start = (unsigned long)namep + strlen(namep) + 1;
-                       RELOC(dt_string_end) = *mem_start;
-               }
-               prev_name = namep;
-       }
-
-       /* do all our children */
-       child = call_prom("child", 1, 1, node);
-       while (child != 0) {
-               scan_dt_build_strings(child, mem_start, mem_end);
-               child = call_prom("peer", 1, 1, child);
-       }
-}
-
-static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
-                                       unsigned long *mem_end)
-{
-       phandle child;
-       char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
-       unsigned long soff;
-       unsigned char *valp;
-       unsigned long offset = reloc_offset();
-       static char pname[MAX_PROPERTY_NAME];
-       int l;
-
-       dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);
-
-       /* get the node's full name */
-       namep = (char *)*mem_start;
-       l = call_prom("package-to-path", 3, 1, node,
-                     namep, *mem_end - *mem_start);
-       if (l >= 0) {
-               /* Didn't fit?  Get more room. */
-               if ((l+1) > (*mem_end - *mem_start)) {
-                       namep = make_room(mem_start, mem_end, l+1, 1);
-                       call_prom("package-to-path", 3, 1, node, namep, l);
-               }
-               namep[l] = '\0';
-
-               /* Fixup an Apple bug where they have bogus \0 chars in the
-                * middle of the path in some properties
-                */
-               for (p = namep, ep = namep + l; p < ep; p++)
-                       if (*p == '\0') {
-                               memmove(p, p+1, ep - p);
-                               ep--; l--; p--;
-                       }
-
-               /* now try to extract the unit name in that mess */
-               for (p = namep, lp = NULL; *p; p++)
-                       if (*p == '/')
-                               lp = p + 1;
-               if (lp != NULL)
-                       memmove(namep, lp, strlen(lp) + 1);
-               *mem_start = _ALIGN(((unsigned long) namep) +
-                                   strlen(namep) + 1, 4);
-       }
-
-       /* get it again for debugging */
-       path = RELOC(prom_scratch);
-       memset(path, 0, PROM_SCRATCH_SIZE);
-       call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
-
-       /* get and store all properties */
-       prev_name = RELOC("");
-       sstart = (char *)RELOC(dt_string_start);
-       for (;;) {
-               if (call_prom("nextprop", 3, 1, node, prev_name,
-                             RELOC(pname)) != 1)
-                       break;
-
-               /* skip "name" */
-               if (strcmp(RELOC(pname), RELOC("name")) == 0) {
-                       prev_name = RELOC("name");
-                       continue;
-               }
-
-               /* find string offset */
-               soff = dt_find_string(RELOC(pname));
-               if (soff == 0) {
-                       prom_printf("WARNING: Can't find string index for"
-                                   " <%s>, node %s\n", RELOC(pname), path);
-                       break;
-               }
-               prev_name = sstart + soff;
-
-               /* get length */
-               l = call_prom("getproplen", 2, 1, node, RELOC(pname));
-
-               /* sanity checks */
-               if (l == PROM_ERROR)
-                       continue;
-               if (l > MAX_PROPERTY_LENGTH) {
-                       prom_printf("WARNING: ignoring large property ");
-                       /* It seems OF doesn't null-terminate the path :-( */
-                       prom_printf("[%s] ", path);
-                       prom_printf("%s length 0x%x\n", RELOC(pname), l);
-                       continue;
-               }
-
-               /* push property head */
-               dt_push_token(OF_DT_PROP, mem_start, mem_end);
-               dt_push_token(l, mem_start, mem_end);
-               dt_push_token(soff, mem_start, mem_end);
-
-               /* push property content */
-               valp = make_room(mem_start, mem_end, l, 4);
-               call_prom("getprop", 4, 1, node, RELOC(pname), valp, l);
-               *mem_start = _ALIGN(*mem_start, 4);
-       }
-
-       /* Add a "linux,phandle" property. */
-       soff = dt_find_string(RELOC("linux,phandle"));
-       if (soff == 0)
-               prom_printf("WARNING: Can't find string index for"
-                           " <linux-phandle> node %s\n", path);
-       else {
-               dt_push_token(OF_DT_PROP, mem_start, mem_end);
-               dt_push_token(4, mem_start, mem_end);
-               dt_push_token(soff, mem_start, mem_end);
-               valp = make_room(mem_start, mem_end, 4, 4);
-               *(u32 *)valp = node;
-       }
-
-       /* do all our children */
-       child = call_prom("child", 1, 1, node);
-       while (child != 0) {
-               scan_dt_build_struct(child, mem_start, mem_end);
-               child = call_prom("peer", 1, 1, child);
-       }
-
-       dt_push_token(OF_DT_END_NODE, mem_start, mem_end);
-}
-
-static void __init flatten_device_tree(void)
-{
-       phandle root;
-       unsigned long offset = reloc_offset();
-       unsigned long mem_start, mem_end, room;
-       struct boot_param_header *hdr;
-       struct prom_t *_prom = PTRRELOC(&prom);
-       char *namep;
-       u64 *rsvmap;
-
-       /*
-        * Check how much room we have between alloc top & bottom (+/- a
-        * few pages), crop to 4Mb, as this is our "chunk" size
-        */
-       room = RELOC(alloc_top) - RELOC(alloc_bottom) - 0x4000;
-       if (room > DEVTREE_CHUNK_SIZE)
-               room = DEVTREE_CHUNK_SIZE;
-       prom_debug("starting device tree allocs at %x\n", RELOC(alloc_bottom));
-
-       /* Now try to claim that */
-       mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
-       if (mem_start == 0)
-               prom_panic("Can't allocate initial device-tree chunk\n");
-       mem_end = RELOC(alloc_top);
-
-       /* Get root of tree */
-       root = call_prom("peer", 1, 1, (phandle)0);
-       if (root == (phandle)0)
-               prom_panic ("couldn't get device tree root\n");
-
-       /* Build header and make room for mem rsv map */ 
-       mem_start = _ALIGN(mem_start, 4);
-       hdr = make_room(&mem_start, &mem_end,
-                       sizeof(struct boot_param_header), 4);
-       RELOC(dt_header_start) = (unsigned long)hdr;
-       rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8);
-
-       /* Start of strings */
-       mem_start = PAGE_ALIGN(mem_start);
-       RELOC(dt_string_start) = mem_start;
-       mem_start += 4; /* hole */
-
-       /* Add "linux,phandle" in there, we'll need it */
-       namep = make_room(&mem_start, &mem_end, 16, 1);
-       strcpy(namep, RELOC("linux,phandle"));
-       mem_start = (unsigned long)namep + strlen(namep) + 1;
-
-       /* Build string array */
-       prom_printf("Building dt strings...\n"); 
-       scan_dt_build_strings(root, &mem_start, &mem_end);
-       RELOC(dt_string_end) = mem_start;
-
-       /* Build structure */
-       mem_start = PAGE_ALIGN(mem_start);
-       RELOC(dt_struct_start) = mem_start;
-       prom_printf("Building dt structure...\n"); 
-       scan_dt_build_struct(root, &mem_start, &mem_end);
-       dt_push_token(OF_DT_END, &mem_start, &mem_end);
-       RELOC(dt_struct_end) = PAGE_ALIGN(mem_start);
-
-       /* Finish header */
-       hdr->boot_cpuid_phys = _prom->cpu;
-       hdr->magic = OF_DT_HEADER;
-       hdr->totalsize = RELOC(dt_struct_end) - RELOC(dt_header_start);
-       hdr->off_dt_struct = RELOC(dt_struct_start) - RELOC(dt_header_start);
-       hdr->off_dt_strings = RELOC(dt_string_start) - RELOC(dt_header_start);
-       hdr->dt_strings_size = RELOC(dt_string_end) - RELOC(dt_string_start);
-       hdr->off_mem_rsvmap = ((unsigned long)rsvmap) - RELOC(dt_header_start);
-       hdr->version = OF_DT_VERSION;
-       /* Version 16 is not backward compatible */
-       hdr->last_comp_version = 0x10;
-
-       /* Reserve the whole thing and copy the reserve map in, we
-        * also bump mem_reserve_cnt to cause further reservations to
-        * fail since it's too late.
-        */
-       reserve_mem(RELOC(dt_header_start), hdr->totalsize);
-       memcpy(rsvmap, RELOC(mem_reserve_map), sizeof(mem_reserve_map));
-
-#ifdef DEBUG_PROM
-       {
-               int i;
-               prom_printf("reserved memory map:\n");
-               for (i = 0; i < RELOC(mem_reserve_cnt); i++)
-                       prom_printf("  %x - %x\n", RELOC(mem_reserve_map)[i].base,
-                                   RELOC(mem_reserve_map)[i].size);
-       }
-#endif
-       RELOC(mem_reserve_cnt) = MEM_RESERVE_MAP_SIZE;
-
-       prom_printf("Device tree strings 0x%x -> 0x%x\n",
-                   RELOC(dt_string_start), RELOC(dt_string_end)); 
-       prom_printf("Device tree struct  0x%x -> 0x%x\n",
-                   RELOC(dt_struct_start), RELOC(dt_struct_end));
-
-}
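The structure block built above is a flat token stream. A minimal reader sketch, assuming the usual token values from the ppc64 prom headers (OF_DT_BEGIN_NODE = 1, OF_DT_END_NODE = 2, OF_DT_PROP = 3, OF_DT_END = 9), treating cells as host-endian and skipping error handling; walk_flat_tree_example is a hypothetical name:

    #include <stdint.h>
    #include <string.h>

    /* Illustrative walker for the flattened device-tree structure block. */
    static void walk_flat_tree_example(const uint32_t *p, const char *strings)
    {
            for (;;) {
                    switch (*p++) {
                    case 1: {                               /* OF_DT_BEGIN_NODE */
                            const char *name = (const char *)p;
                            p += (strlen(name) + 4) / 4;    /* node name, NUL-padded to 4 bytes */
                            break;
                    }
                    case 3: {                               /* OF_DT_PROP */
                            uint32_t len  = *p++;           /* property value length */
                            uint32_t noff = *p++;           /* name offset into the strings block */
                            const char *pname = strings + noff;
                            (void)pname;                    /* value follows, padded to 4 bytes */
                            p += (len + 3) / 4;
                            break;
                    }
                    case 2:                                 /* OF_DT_END_NODE */
                            break;
                    case 9:                                 /* OF_DT_END */
                            return;
                    }
            }
    }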
-
-
-static void __init fixup_device_tree(void)
-{
-       unsigned long offset = reloc_offset();
-       phandle u3, i2c, mpic;
-       u32 u3_rev;
-       u32 interrupts[2];
-       u32 parent;
-
-       /* Some G5s have a missing interrupt definition, fix it up here */
-       u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000"));
-       if (!PHANDLE_VALID(u3))
-               return;
-       i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000"));
-       if (!PHANDLE_VALID(i2c))
-               return;
-       mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000"));
-       if (!PHANDLE_VALID(mpic))
-               return;
-
-       /* check if proper rev of u3 */
-       if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
-           == PROM_ERROR)
-               return;
-       if (u3_rev < 0x35 || u3_rev > 0x39)
-               return;
-       /* does it need fixup ? */
-       if (prom_getproplen(i2c, "interrupts") > 0)
-               return;
-
-       prom_printf("fixing up bogus interrupts for u3 i2c...\n");
-
-       /* interrupt on this revision of u3 is number 0 and level */
-       interrupts[0] = 0;
-       interrupts[1] = 1;
-       prom_setprop(i2c, "interrupts", &interrupts, sizeof(interrupts));
-       parent = (u32)mpic;
-       prom_setprop(i2c, "interrupt-parent", &parent, sizeof(parent));
-}
-
-
-static void __init prom_find_boot_cpu(void)
-{
-       unsigned long offset = reloc_offset();
-       struct prom_t *_prom = PTRRELOC(&prom);
-       u32 getprop_rval;
-       ihandle prom_cpu;
-       phandle cpu_pkg;
-
-       if (prom_getprop(_prom->chosen, "cpu", &prom_cpu, sizeof(prom_cpu)) <= 0)
-               prom_panic("cannot find boot cpu");
-
-       cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
-
-       prom_getprop(cpu_pkg, "reg", &getprop_rval, sizeof(getprop_rval));
-       _prom->cpu = getprop_rval;
-
-       prom_debug("Booting CPU hw index = 0x%x\n", _prom->cpu);
-}
-
-static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
-{
-#ifdef CONFIG_BLK_DEV_INITRD
-       unsigned long offset = reloc_offset();
-       struct prom_t *_prom = PTRRELOC(&prom);
-
-       if ( r3 && r4 && r4 != 0xdeadbeef) {
-               u64 val;
-
-               RELOC(prom_initrd_start) = (r3 >= KERNELBASE) ? __pa(r3) : r3;
-               RELOC(prom_initrd_end) = RELOC(prom_initrd_start) + r4;
-
-               val = (u64)RELOC(prom_initrd_start);
-               prom_setprop(_prom->chosen, "linux,initrd-start", &val, sizeof(val));
-               val = (u64)RELOC(prom_initrd_end);
-               prom_setprop(_prom->chosen, "linux,initrd-end", &val, sizeof(val));
-
-               reserve_mem(RELOC(prom_initrd_start),
-                           RELOC(prom_initrd_end) - RELOC(prom_initrd_start));
-
-               prom_debug("initrd_start=0x%x\n", RELOC(prom_initrd_start));
-               prom_debug("initrd_end=0x%x\n", RELOC(prom_initrd_end));
-       }
-#endif /* CONFIG_BLK_DEV_INITRD */
-}
-
-/*
- * We enter here early on, when the Open Firmware prom is still
- * handling exceptions and managing the MMU hash table for us.
- */
-
-unsigned long __init prom_init(unsigned long r3, unsigned long r4, unsigned long pp,
-                              unsigned long r6, unsigned long r7)
-{      
-       unsigned long offset = reloc_offset();
-       struct prom_t *_prom = PTRRELOC(&prom);
-       unsigned long phys = KERNELBASE - offset;
-       u32 getprop_rval;
-       
-       /*
-        * First zero the BSS
-        */
-       memset(PTRRELOC(&__bss_start), 0, __bss_stop - __bss_start);
-
-       /*
-        * Init interface to Open Firmware, get some node references,
-        * like /chosen
-        */
-       prom_init_client_services(pp);
-
-       /*
-        * Init prom stdout device
-        */
-       prom_init_stdout();
-       prom_debug("klimit=0x%x\n", RELOC(klimit));
-       prom_debug("offset=0x%x\n", offset);
-
-       /*
-        * Check for an initrd
-        */
-       prom_check_initrd(r3, r4);
-
-       /*
-        * Get default machine type. At this point, we do not differentiate
-        * between pSeries SMP and pSeries LPAR
-        */
-       RELOC(of_platform) = prom_find_machine_type();
-       getprop_rval = RELOC(of_platform);
-       prom_setprop(_prom->chosen, "linux,platform",
-                    &getprop_rval, sizeof(getprop_rval));
-
-       /*
-        * On pSeries, inform the firmware about our capabilities
-        */
-       if (RELOC(of_platform) == PLATFORM_PSERIES ||
-           RELOC(of_platform) == PLATFORM_PSERIES_LPAR)
-               prom_send_capabilities();
-
-       /*
-        * On pSeries and Cell, copy the CPU hold code
-        */
-       if (RELOC(of_platform) & (PLATFORM_PSERIES | PLATFORM_CELL))
-               copy_and_flush(0, KERNELBASE - offset, 0x100, 0);
-
-       /*
-        * Get memory cells format
-        */
-       getprop_rval = 1;
-       prom_getprop(_prom->root, "#size-cells",
-                    &getprop_rval, sizeof(getprop_rval));
-       _prom->root_size_cells = getprop_rval;
-       getprop_rval = 2;
-       prom_getprop(_prom->root, "#address-cells",
-                    &getprop_rval, sizeof(getprop_rval));
-       _prom->root_addr_cells = getprop_rval;
-
-       /*
-        * Do early parsing of command line
-        */
-       early_cmdline_parse();
-
-       /*
-        * Initialize memory management within prom_init
-        */
-       prom_init_mem();
-
-       /*
-        * Determine which cpu is actually running right _now_
-        */
-       prom_find_boot_cpu();
-
-       /* 
-        * Initialize display devices
-        */
-       prom_check_displays();
-
-       /*
-        * Initialize IOMMU (TCE tables) on pSeries. Do that before anything else
-        * that uses the allocator, we need to make sure we get the top of memory
-        * available for us here...
-        */
-       if (RELOC(of_platform) == PLATFORM_PSERIES)
-               prom_initialize_tce_table();
-
-       /*
- * On non-powermacs, try to instantiate RTAS and put all CPUs
-        * in spin-loops. PowerMacs don't have a working RTAS and use
-        * a different way to spin CPUs
-        */
-       if (RELOC(of_platform) != PLATFORM_POWERMAC) {
-               prom_instantiate_rtas();
-               prom_hold_cpus();
-       }
-
-       /*
-        * Fill in some infos for use by the kernel later on
-        */
-       if (RELOC(ppc64_iommu_off))
-               prom_setprop(_prom->chosen, "linux,iommu-off", NULL, 0);
-
-       if (RELOC(iommu_force_on))
-               prom_setprop(_prom->chosen, "linux,iommu-force-on", NULL, 0);
-
-       if (RELOC(prom_memory_limit))
-               prom_setprop(_prom->chosen, "linux,memory-limit",
-                       PTRRELOC(&prom_memory_limit), sizeof(RELOC(prom_memory_limit)));
-
-       if (RELOC(prom_tce_alloc_start)) {
-               prom_setprop(_prom->chosen, "linux,tce-alloc-start",
-                       PTRRELOC(&prom_tce_alloc_start), sizeof(RELOC(prom_tce_alloc_start)));
-               prom_setprop(_prom->chosen, "linux,tce-alloc-end",
-                       PTRRELOC(&prom_tce_alloc_end), sizeof(RELOC(prom_tce_alloc_end)));
-       }
-
-       /*
-        * Fixup any known bugs in the device-tree
-        */
-       fixup_device_tree();
-
-       /*
-        * Now finally create the flattened device-tree
-        */
-       prom_printf("copying OF device tree ...\n");
-       flatten_device_tree();
-
-       /* in case stdin is USB and still active on IBM machines... */
-       prom_close_stdin();
-
-       /*
-        * Call OF "quiesce" method to shut down pending DMA's from
-        * devices etc...
-        */
-       prom_printf("Calling quiesce ...\n");
-       call_prom("quiesce", 0, 0);
-
-       /*
-        * And finally, call the kernel passing it the flattened device
-        * tree and NULL as r5, thus triggering the new entry point which
-        * is common to us and kexec
-        */
-       prom_printf("returning from prom_init\n");
-       prom_debug("->dt_header_start=0x%x\n", RELOC(dt_header_start));
-       prom_debug("->phys=0x%x\n", phys);
-
-       __start(RELOC(dt_header_start), phys, 0);
-
-       return 0;
-}
-
diff --git a/arch/ppc64/kernel/semaphore.c b/arch/ppc64/kernel/semaphore.c
deleted file mode 100644 (file)
index a1c1db5..0000000
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * 
- *
- * PowerPC-specific semaphore code.
- *
- * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * April 2001 - Reworked by Paul Mackerras <paulus@samba.org>
- * to eliminate the SMP races in the old version between the updates
- * of `count' and `waking'.  Now we use negative `count' values to
- * indicate that some process(es) are waiting for the semaphore.
- */
-
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/module.h>
-
-#include <asm/atomic.h>
-#include <asm/semaphore.h>
-#include <asm/errno.h>
-
-/*
- * Atomically update sem->count.
- * This does the equivalent of the following:
- *
- *     old_count = sem->count;
- *     tmp = MAX(old_count, 0) + incr;
- *     sem->count = tmp;
- *     return old_count;
- */
-static inline int __sem_update_count(struct semaphore *sem, int incr)
-{
-       int old_count, tmp;
-
-       __asm__ __volatile__("\n"
-"1:    lwarx   %0,0,%3\n"
-"      srawi   %1,%0,31\n"
-"      andc    %1,%0,%1\n"
-"      add     %1,%1,%4\n"
-"      stwcx.  %1,0,%3\n"
-"      bne     1b"
-       : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
-       : "r" (&sem->count), "r" (incr), "m" (sem->count)
-       : "cc");
-
-       return old_count;
-}
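The lwarx/stwcx. loop above is exactly the update described in the comment. For illustration only, the same semantics written with C11 atomics (sem_update_count_example is a hypothetical name and the kernel code does not use <stdatomic.h>):

    #include <stdatomic.h>

    /* Sketch: old = count; count = max(old, 0) + incr; return old. */
    static int sem_update_count_example(_Atomic int *count, int incr)
    {
            int old = atomic_load(count);
            int new;

            do {
                    new = (old > 0 ? old : 0) + incr;
                    /* On failure, 'old' is refreshed with the current value. */
            } while (!atomic_compare_exchange_weak(count, &old, new));

            return old;
    }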
-
-void __up(struct semaphore *sem)
-{
-       /*
-        * Note that we incremented count in up() before we came here,
-        * but that was ineffective since the result was <= 0, and
-        * any negative value of count is equivalent to 0.
-        * This ends up setting count to 1, unless count is now > 0
-        * (i.e. because some other cpu has called up() in the meantime),
-        * in which case we just increment count.
-        */
-       __sem_update_count(sem, 1);
-       wake_up(&sem->wait);
-}
-EXPORT_SYMBOL(__up);
-
-/*
- * Note that when we come in to __down or __down_interruptible,
- * we have already decremented count, but that decrement was
- * ineffective since the result was < 0, and any negative value
- * of count is equivalent to 0.
- * Thus it is only when we decrement count from some value > 0
- * that we have actually got the semaphore.
- */
-void __sched __down(struct semaphore *sem)
-{
-       struct task_struct *tsk = current;
-       DECLARE_WAITQUEUE(wait, tsk);
-
-       __set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-       add_wait_queue_exclusive(&sem->wait, &wait);
-
-       /*
-        * Try to get the semaphore.  If the count is > 0, then we've
-        * got the semaphore; we decrement count and exit the loop.
-        * If the count is 0 or negative, we set it to -1, indicating
-        * that we are asleep, and then sleep.
-        */
-       while (__sem_update_count(sem, -1) <= 0) {
-               schedule();
-               set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-       }
-       remove_wait_queue(&sem->wait, &wait);
-       __set_task_state(tsk, TASK_RUNNING);
-
-       /*
-        * If there are any more sleepers, wake one of them up so
-        * that it can either get the semaphore, or set count to -1
-        * indicating that there are still processes sleeping.
-        */
-       wake_up(&sem->wait);
-}
-EXPORT_SYMBOL(__down);
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
-       int retval = 0;
-       struct task_struct *tsk = current;
-       DECLARE_WAITQUEUE(wait, tsk);
-
-       __set_task_state(tsk, TASK_INTERRUPTIBLE);
-       add_wait_queue_exclusive(&sem->wait, &wait);
-
-       while (__sem_update_count(sem, -1) <= 0) {
-               if (signal_pending(current)) {
-                       /*
-                        * A signal is pending - give up trying.
-                        * Set sem->count to 0 if it is negative,
-                        * since we are no longer sleeping.
-                        */
-                       __sem_update_count(sem, 0);
-                       retval = -EINTR;
-                       break;
-               }
-               schedule();
-               set_task_state(tsk, TASK_INTERRUPTIBLE);
-       }
-       remove_wait_queue(&sem->wait, &wait);
-       __set_task_state(tsk, TASK_RUNNING);
-
-       wake_up(&sem->wait);
-       return retval;
-}
-EXPORT_SYMBOL(__down_interruptible);
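As the comments in __up() and __down() note, count has already been adjusted by the time these slow paths run. They are only reached from inline fast paths in the semaphore header, which adjust count atomically and call in when the result shows contention. A hedged, kernel-style sketch of that pairing (down_example/up_example are hypothetical names, not copied from include/asm-ppc64/semaphore.h):

    /* Illustrative fast paths; the real ones live in the semaphore header. */
    static inline void down_example(struct semaphore *sem)
    {
            /* Decrement first; a negative result means we must sleep. */
            if (atomic_dec_return(&sem->count) < 0)
                    __down(sem);
    }

    static inline void up_example(struct semaphore *sem)
    {
            /* Increment first; a result <= 0 means someone may be waiting. */
            if (atomic_inc_return(&sem->count) <= 0)
                    __up(sem);
    }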
diff --git a/arch/ppc64/kernel/vdso.c b/arch/ppc64/kernel/vdso.c
deleted file mode 100644 (file)
index 1bbacac..0000000
+++ /dev/null
@@ -1,625 +0,0 @@
-/*
- *  linux/arch/ppc64/kernel/vdso.c
- *
- *    Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
- *                      <benh@kernel.crashing.org>
- *
- *  This program is free software; you can redistribute it and/or
- *  modify it under the terms of the GNU General Public License
- *  as published by the Free Software Foundation; either version
- *  2 of the License, or (at your option) any later version.
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/stddef.h>
-#include <linux/unistd.h>
-#include <linux/slab.h>
-#include <linux/user.h>
-#include <linux/elf.h>
-#include <linux/security.h>
-#include <linux/bootmem.h>
-
-#include <asm/pgtable.h>
-#include <asm/system.h>
-#include <asm/processor.h>
-#include <asm/mmu.h>
-#include <asm/mmu_context.h>
-#include <asm/machdep.h>
-#include <asm/cputable.h>
-#include <asm/sections.h>
-#include <asm/systemcfg.h>
-#include <asm/vdso.h>
-
-#undef DEBUG
-
-#ifdef DEBUG
-#define DBG(fmt...) printk(fmt)
-#else
-#define DBG(fmt...)
-#endif
-
-
-/*
- * The vDSOs themselves are here
- */
-extern char vdso64_start, vdso64_end;
-extern char vdso32_start, vdso32_end;
-
-static void *vdso64_kbase = &vdso64_start;
-static void *vdso32_kbase = &vdso32_start;
-
-unsigned int vdso64_pages;
-unsigned int vdso32_pages;
-
-/* Signal trampolines user addresses */
-
-unsigned long vdso64_rt_sigtramp;
-unsigned long vdso32_sigtramp;
-unsigned long vdso32_rt_sigtramp;
-
-/* Format of the patch table */
-struct vdso_patch_def
-{
-       u32             pvr_mask, pvr_value;
-       const char      *gen_name;
-       const char      *fix_name;
-};
-
-/* Table of functions to patch based on the CPU type/revision
- *
- * TODO: Improve by adding whole lists for each entry
- */
-static struct vdso_patch_def vdso_patches[] = {
-       {
-               0xffff0000, 0x003a0000,         /* POWER5 */
-               "__kernel_sync_dicache", "__kernel_sync_dicache_p5"
-       },
-       {
-               0xffff0000, 0x003b0000,         /* POWER5 */
-               "__kernel_sync_dicache", "__kernel_sync_dicache_p5"
-       },
-};
-
-/*
- * Some info carried around for each of the vDSOs during parsing at
- * boot time.
- */
-struct lib32_elfinfo
-{
-       Elf32_Ehdr      *hdr;           /* ptr to ELF */
-       Elf32_Sym       *dynsym;        /* ptr to .dynsym section */
-       unsigned long   dynsymsize;     /* size of .dynsym section */
-       char            *dynstr;        /* ptr to .dynstr section */
-       unsigned long   text;           /* offset of .text section in .so */
-};
-
-struct lib64_elfinfo
-{
-       Elf64_Ehdr      *hdr;
-       Elf64_Sym       *dynsym;
-       unsigned long   dynsymsize;
-       char            *dynstr;
-       unsigned long   text;
-};
-
-
-#ifdef __DEBUG
-static void dump_one_vdso_page(struct page *pg, struct page *upg)
-{
-       printk("kpg: %p (c:%d,f:%08lx)", __va(page_to_pfn(pg) << PAGE_SHIFT),
-              page_count(pg),
-              pg->flags);
-       if (upg/* && pg != upg*/) {
-               printk(" upg: %p (c:%d,f:%08lx)", __va(page_to_pfn(upg) << PAGE_SHIFT),
-                      page_count(upg),
-                      upg->flags);
-       }
-       printk("\n");
-}
-
-static void dump_vdso_pages(struct vm_area_struct * vma)
-{
-       int i;
-
-       if (!vma || test_thread_flag(TIF_32BIT)) {
-               printk("vDSO32 @ %016lx:\n", (unsigned long)vdso32_kbase);
-               for (i=0; i<vdso32_pages; i++) {
-                       struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
-                       struct page *upg = (vma && vma->vm_mm) ?
-                               follow_page(vma->vm_mm, vma->vm_start + i*PAGE_SIZE, 0)
-                               : NULL;
-                       dump_one_vdso_page(pg, upg);
-               }
-       }
-       if (!vma || !test_thread_flag(TIF_32BIT)) {
-               printk("vDSO64 @ %016lx:\n", (unsigned long)vdso64_kbase);
-               for (i=0; i<vdso64_pages; i++) {
-                       struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
-                       struct page *upg = (vma && vma->vm_mm) ?
-                               follow_page(vma->vm_mm, vma->vm_start + i*PAGE_SIZE, 0)
-                               : NULL;
-                       dump_one_vdso_page(pg, upg);
-               }
-       }
-}
-#endif /* DEBUG */
-
-/*
- * Keep a dummy vma_close for now, it will prevent VMA merging.
- */
-static void vdso_vma_close(struct vm_area_struct * vma)
-{
-}
-
-/*
- * Our nopage() function, maps in the actual vDSO kernel pages, they will
- * be mapped read-only by do_no_page(), and eventually COW'ed, either
- * right away for an initial write access, or by do_wp_page().
- */
-static struct page * vdso_vma_nopage(struct vm_area_struct * vma,
-                                    unsigned long address, int *type)
-{
-       unsigned long offset = address - vma->vm_start;
-       struct page *pg;
-       void *vbase = test_thread_flag(TIF_32BIT) ? vdso32_kbase : vdso64_kbase;
-
-       DBG("vdso_vma_nopage(current: %s, address: %016lx, off: %lx)\n",
-           current->comm, address, offset);
-
-       if (address < vma->vm_start || address > vma->vm_end)
-               return NOPAGE_SIGBUS;
-
-       /*
-        * Last page is systemcfg.
-        */
-       if ((vma->vm_end - address) <= PAGE_SIZE)
-               pg = virt_to_page(_systemcfg);
-       else
-               pg = virt_to_page(vbase + offset);
-
-       get_page(pg);
-       DBG(" ->page count: %d\n", page_count(pg));
-
-       return pg;
-}
-
-static struct vm_operations_struct vdso_vmops = {
-       .close  = vdso_vma_close,
-       .nopage = vdso_vma_nopage,
-};
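Given the mapping created in arch_setup_additional_pages() below (vdso_pages + 1 pages, with the extra page at the end), the nopage handler resolves faults page by page. A hypothetical layout with vdso_pages == 2 and the vma starting at 0x100000 (addresses made up for illustration):

    /* 0x100000-0x100fff -> vDSO image page 0
     * 0x101000-0x101fff -> vDSO image page 1
     * 0x102000-0x102fff -> the systemcfg page (vm_end - address <= PAGE_SIZE) */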
-
-/*
- * This is called from binfmt_elf, we create the special vma for the
- * vDSO and insert it into the mm struct tree
- */
-int arch_setup_additional_pages(struct linux_binprm *bprm, int executable_stack)
-{
-       struct mm_struct *mm = current->mm;
-       struct vm_area_struct *vma;
-       unsigned long vdso_pages;
-       unsigned long vdso_base;
-
-       if (test_thread_flag(TIF_32BIT)) {
-               vdso_pages = vdso32_pages;
-               vdso_base = VDSO32_MBASE;
-       } else {
-               vdso_pages = vdso64_pages;
-               vdso_base = VDSO64_MBASE;
-       }
-
-       current->thread.vdso_base = 0;
-
-       /* vDSO has a problem and was disabled, just don't "enable" it for the
-        * process
-        */
-       if (vdso_pages == 0)
-               return 0;
-
-       vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
-       if (vma == NULL)
-               return -ENOMEM;
-
-       memset(vma, 0, sizeof(*vma));
-
-       /*
-        * pick a base address for the vDSO in process space. We try to put it
-        * at vdso_base which is the "natural" base for it, but we might fail
-        * and end up putting it elsewhere.
-        */
-       vdso_base = get_unmapped_area(NULL, vdso_base,
-                                     vdso_pages << PAGE_SHIFT, 0, 0);
-       if (vdso_base & ~PAGE_MASK) {
-               kmem_cache_free(vm_area_cachep, vma);
-               return (int)vdso_base;
-       }
-
-       current->thread.vdso_base = vdso_base;
-
-       vma->vm_mm = mm;
-       vma->vm_start = current->thread.vdso_base;
-
-       /*
-        * the VMA size is one page more than the vDSO since systemcfg
-        * is mapped in the last one
-        */
-       vma->vm_end = vma->vm_start + ((vdso_pages + 1) << PAGE_SHIFT);
-
-       /*
-        * our vma flags don't have VM_WRITE so by default, the process isn't allowed
-        * to write those pages.
-        * gdb can break that with the ptrace interface, and thus trigger COW on those
-        * pages but it's then your responsibility to never do that on the "data" page
-        * of the vDSO or you'll stop getting kernel updates and your nice userland
-        * gettimeofday will be totally dead. It's fine to use that for setting
-        * breakpoints in the vDSO code pages though
-        */
-       vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC | VM_RESERVED;
-       vma->vm_flags |= mm->def_flags;
-       vma->vm_page_prot = protection_map[vma->vm_flags & 0x7];
-       vma->vm_ops = &vdso_vmops;
-
-       down_write(&mm->mmap_sem);
-       if (insert_vm_struct(mm, vma)) {
-               up_write(&mm->mmap_sem);
-               kmem_cache_free(vm_area_cachep, vma);
-               return -ENOMEM;
-       }
-       mm->total_vm += (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
-       up_write(&mm->mmap_sem);
-
-       return 0;
-}
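On most ports the chosen base ends up visible to userland through the ELF auxiliary vector as AT_SYSINFO_EHDR. Assuming that convention applies here too (an assumption, not something this file shows), a userspace program could locate the mapping like this:

    #include <stdio.h>
    #include <sys/auxv.h>

    int main(void)
    {
            /* If the kernel exports the vDSO base via AT_SYSINFO_EHDR,
             * getauxval() returns the address of its ELF header. */
            unsigned long base = getauxval(AT_SYSINFO_EHDR);

            printf("vDSO mapped at %#lx\n", base);
            return 0;
    }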
-
-static void * __init find_section32(Elf32_Ehdr *ehdr, const char *secname,
-                                 unsigned long *size)
-{
-       Elf32_Shdr *sechdrs;
-       unsigned int i;
-       char *secnames;
-
-       /* Grab section headers and strings so we can tell who is who */
-       sechdrs = (void *)ehdr + ehdr->e_shoff;
-       secnames = (void *)ehdr + sechdrs[ehdr->e_shstrndx].sh_offset;
-
-       /* Find the section they want */
-       for (i = 1; i < ehdr->e_shnum; i++) {
-               if (strcmp(secnames+sechdrs[i].sh_name, secname) == 0) {
-                       if (size)
-                               *size = sechdrs[i].sh_size;
-                       return (void *)ehdr + sechdrs[i].sh_offset;
-               }
-       }
-       *size = 0;
-       return NULL;
-}
-
-static void * __init find_section64(Elf64_Ehdr *ehdr, const char *secname,
-                                 unsigned long *size)
-{
-       Elf64_Shdr *sechdrs;
-       unsigned int i;
-       char *secnames;
-
-       /* Grab section headers and strings so we can tell who is who */
-       sechdrs = (void *)ehdr + ehdr->e_shoff;
-       secnames = (void *)ehdr + sechdrs[ehdr->e_shstrndx].sh_offset;
-
-       /* Find the section they want */
-       for (i = 1; i < ehdr->e_shnum; i++) {
-               if (strcmp(secnames+sechdrs[i].sh_name, secname) == 0) {
-                       if (size)
-                               *size = sechdrs[i].sh_size;
-                       return (void *)ehdr + sechdrs[i].sh_offset;
-               }
-       }
-       if (size)
-               *size = 0;
-       return NULL;
-}
-
-static Elf32_Sym * __init find_symbol32(struct lib32_elfinfo *lib, const char *symname)
-{
-       unsigned int i;
-       char name[32], *c;
-
-       for (i = 0; i < (lib->dynsymsize / sizeof(Elf32_Sym)); i++) {
-               if (lib->dynsym[i].st_name == 0)
-                       continue;
-               strlcpy(name, lib->dynstr + lib->dynsym[i].st_name, 32);
-               c = strchr(name, '@');
-               if (c)
-                       *c = 0;
-               if (strcmp(symname, name) == 0)
-                       return &lib->dynsym[i];
-       }
-       return NULL;
-}
-
-static Elf64_Sym * __init find_symbol64(struct lib64_elfinfo *lib, const char *symname)
-{
-       unsigned int i;
-       char name[32], *c;
-
-       for (i = 0; i < (lib->dynsymsize / sizeof(Elf64_Sym)); i++) {
-               if (lib->dynsym[i].st_name == 0)
-                       continue;
-               strlcpy(name, lib->dynstr + lib->dynsym[i].st_name, 32);
-               c = strchr(name, '@');
-               if (c)
-                       *c = 0;
-               if (strcmp(symname, name) == 0)
-                       return &lib->dynsym[i];
-       }
-       return NULL;
-}
-
-/* Note that we assume the section is .text and the symbol is relative to
- * the library base
- */
-static unsigned long __init find_function32(struct lib32_elfinfo *lib, const char *symname)
-{
-       Elf32_Sym *sym = find_symbol32(lib, symname);
-
-       if (sym == NULL) {
-               printk(KERN_WARNING "vDSO32: function %s not found !\n", symname);
-               return 0;
-       }
-       return sym->st_value - VDSO32_LBASE;
-}
-
-/* Note that we assume the section is .text and the symbol is relative to
- * the library base
- */
-static unsigned long __init find_function64(struct lib64_elfinfo *lib, const char *symname)
-{
-       Elf64_Sym *sym = find_symbol64(lib, symname);
-
-       if (sym == NULL) {
-               printk(KERN_WARNING "vDSO64: function %s not found !\n", symname);
-               return 0;
-       }
-#ifdef VDS64_HAS_DESCRIPTORS
-       return *((u64 *)(vdso64_kbase + sym->st_value - VDSO64_LBASE)) - VDSO64_LBASE;
-#else
-       return sym->st_value - VDSO64_LBASE;
-#endif
-}
-
-
-static __init int vdso_do_find_sections(struct lib32_elfinfo *v32,
-                                       struct lib64_elfinfo *v64)
-{
-       void *sect;
-
-       /*
-        * Locate symbol tables & text section
-        */
-
-       v32->dynsym = find_section32(v32->hdr, ".dynsym", &v32->dynsymsize);
-       v32->dynstr = find_section32(v32->hdr, ".dynstr", NULL);
-       if (v32->dynsym == NULL || v32->dynstr == NULL) {
-               printk(KERN_ERR "vDSO32: a required symbol section was not found\n");
-               return -1;
-       }
-       sect = find_section32(v32->hdr, ".text", NULL);
-       if (sect == NULL) {
-               printk(KERN_ERR "vDSO32: the .text section was not found\n");
-               return -1;
-       }
-       v32->text = sect - vdso32_kbase;
-
-       v64->dynsym = find_section64(v64->hdr, ".dynsym", &v64->dynsymsize);
-       v64->dynstr = find_section64(v64->hdr, ".dynstr", NULL);
-       if (v64->dynsym == NULL || v64->dynstr == NULL) {
-               printk(KERN_ERR "vDSO64: a required symbol section was not found\n");
-               return -1;
-       }
-       sect = find_section64(v64->hdr, ".text", NULL);
-       if (sect == NULL) {
-               printk(KERN_ERR "vDSO64: the .text section was not found\n");
-               return -1;
-       }
-       v64->text = sect - vdso64_kbase;
-
-       return 0;
-}
-
-static __init void vdso_setup_trampolines(struct lib32_elfinfo *v32,
-                                         struct lib64_elfinfo *v64)
-{
-       /*
-        * Find signal trampolines
-        */
-
-       vdso64_rt_sigtramp      = find_function64(v64, "__kernel_sigtramp_rt64");
-       vdso32_sigtramp         = find_function32(v32, "__kernel_sigtramp32");
-       vdso32_rt_sigtramp      = find_function32(v32, "__kernel_sigtramp_rt32");
-}
-
-static __init int vdso_fixup_datapage(struct lib32_elfinfo *v32,
-                                      struct lib64_elfinfo *v64)
-{
-       Elf32_Sym *sym32;
-       Elf64_Sym *sym64;
-
-       sym32 = find_symbol32(v32, "__kernel_datapage_offset");
-       if (sym32 == NULL) {
-               printk(KERN_ERR "vDSO32: Can't find symbol __kernel_datapage_offset !\n");
-               return -1;
-       }
-       *((int *)(vdso32_kbase + (sym32->st_value - VDSO32_LBASE))) =
-               (vdso32_pages << PAGE_SHIFT) - (sym32->st_value - VDSO32_LBASE);
-
-       sym64 = find_symbol64(v64, "__kernel_datapage_offset");
-       if (sym64 == NULL) {
-               printk(KERN_ERR "vDSO64: Can't find symbol __kernel_datapage_offset !\n");
-               return -1;
-       }
-       *((int *)(vdso64_kbase + sym64->st_value - VDSO64_LBASE)) =
-               (vdso64_pages << PAGE_SHIFT) - (sym64->st_value - VDSO64_LBASE);
-
-       return 0;
-}
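The value written above is the distance from the __kernel_datapage_offset word to the first byte past the vDSO image, which in the user mapping is the systemcfg/data page (see arch_setup_additional_pages()). At run time the vDSO can therefore find its data page with position-independent arithmetic; a rough C equivalent of what the assembly does (vdso_datapage_example is a hypothetical name):

    /* Sketch: locate the vDSO data page from the patched offset word. */
    extern int __kernel_datapage_offset;    /* patched at boot by vdso_fixup_datapage() */

    static void *vdso_datapage_example(void)
    {
            char *here = (char *)&__kernel_datapage_offset;

            /* In the user mapping, here + stored offset is the first byte
             * past the vDSO image, i.e. the systemcfg/data page. */
            return here + __kernel_datapage_offset;
    }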
-
-static int vdso_do_func_patch32(struct lib32_elfinfo *v32,
-                               struct lib64_elfinfo *v64,
-                               const char *orig, const char *fix)
-{
-       Elf32_Sym *sym32_gen, *sym32_fix;
-
-       sym32_gen = find_symbol32(v32, orig);
-       if (sym32_gen == NULL) {
-               printk(KERN_ERR "vDSO32: Can't find symbol %s !\n", orig);
-               return -1;
-       }
-       sym32_fix = find_symbol32(v32, fix);
-       if (sym32_fix == NULL) {
-               printk(KERN_ERR "vDSO32: Can't find symbol %s !\n", fix);
-               return -1;
-       }
-       sym32_gen->st_value = sym32_fix->st_value;
-       sym32_gen->st_size = sym32_fix->st_size;
-       sym32_gen->st_info = sym32_fix->st_info;
-       sym32_gen->st_other = sym32_fix->st_other;
-       sym32_gen->st_shndx = sym32_fix->st_shndx;
-
-       return 0;
-}
-
-static int vdso_do_func_patch64(struct lib32_elfinfo *v32,
-                               struct lib64_elfinfo *v64,
-                               const char *orig, const char *fix)
-{
-       Elf64_Sym *sym64_gen, *sym64_fix;
-
-       sym64_gen = find_symbol64(v64, orig);
-       if (sym64_gen == NULL) {
-               printk(KERN_ERR "vDSO64: Can't find symbol %s !\n", orig);
-               return -1;
-       }
-       sym64_fix = find_symbol64(v64, fix);
-       if (sym64_fix == NULL) {
-               printk(KERN_ERR "vDSO64: Can't find symbol %s !\n", fix);
-               return -1;
-       }
-       sym64_gen->st_value = sym64_fix->st_value;
-       sym64_gen->st_size = sym64_fix->st_size;
-       sym64_gen->st_info = sym64_fix->st_info;
-       sym64_gen->st_other = sym64_fix->st_other;
-       sym64_gen->st_shndx = sym64_fix->st_shndx;
-
-       return 0;
-}
-
-static __init int vdso_fixup_alt_funcs(struct lib32_elfinfo *v32,
-                                      struct lib64_elfinfo *v64)
-{
-       u32 pvr;
-       int i;
-
-       pvr = mfspr(SPRN_PVR);
-       for (i = 0; i < ARRAY_SIZE(vdso_patches); i++) {
-               struct vdso_patch_def *patch = &vdso_patches[i];
-               int match = (pvr & patch->pvr_mask) == patch->pvr_value;
-
-               DBG("patch %d (mask: %x, pvr: %x) : %s\n",
-                   i, patch->pvr_mask, patch->pvr_value, match ? "match" : "skip");
-
-               if (!match)
-                       continue;
-
-               DBG("replacing %s with %s...\n", patch->gen_name, patch->fix_name);
-
-               /*
-                * Patch the 32-bit and 64-bit symbols. Note that we do not patch
-                * the "." symbol on 64-bit. It would be easy to do, but doesn't
-                * seem to be necessary; patching the OPD symbol is enough.
-                */
-               vdso_do_func_patch32(v32, v64, patch->gen_name, patch->fix_name);
-               vdso_do_func_patch64(v32, v64, patch->gen_name, patch->fix_name);
-       }
-
-       return 0;
-}
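As a concrete, made-up example of the PVR match above: a POWER5 reporting PVR 0x003a0201 satisfies the first table entry, so __kernel_sync_dicache is redirected to __kernel_sync_dicache_p5; any PVR that matches neither entry leaves the generic routine in place.

    /* Hypothetical PVR value for illustration. */
    u32 pvr_example = 0x003a0201;
    int match = (pvr_example & 0xffff0000) == 0x003a0000;   /* true: patch applies */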
-
-
-static __init int vdso_setup(void)
-{
-       struct lib32_elfinfo    v32;
-       struct lib64_elfinfo    v64;
-
-       v32.hdr = vdso32_kbase;
-       v64.hdr = vdso64_kbase;
-
-       if (vdso_do_find_sections(&v32, &v64))
-               return -1;
-
-       if (vdso_fixup_datapage(&v32, &v64))
-               return -1;
-
-       if (vdso_fixup_alt_funcs(&v32, &v64))
-               return -1;
-
-       vdso_setup_trampolines(&v32, &v64);
-
-       return 0;
-}
-
-void __init vdso_init(void)
-{
-       int i;
-
-       vdso64_pages = (&vdso64_end - &vdso64_start) >> PAGE_SHIFT;
-       vdso32_pages = (&vdso32_end - &vdso32_start) >> PAGE_SHIFT;
-
-       DBG("vdso64_kbase: %p, 0x%x pages, vdso32_kbase: %p, 0x%x pages\n",
-              vdso64_kbase, vdso64_pages, vdso32_kbase, vdso32_pages);
-
-       /*
-        * Initialize the vDSO images in memory, that is, do the necessary
-        * fixups of vDSO symbols, locate trampolines, etc...
-        */
-       if (vdso_setup()) {
-               printk(KERN_ERR "vDSO setup failure, not enabled !\n");
-               /* XXX should free pages here ? */
-               vdso64_pages = vdso32_pages = 0;
-               return;
-       }
-
-       /* Make sure pages are in the correct state */
-       for (i = 0; i < vdso64_pages; i++) {
-               struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
-               ClearPageReserved(pg);
-               get_page(pg);
-       }
-       for (i = 0; i < vdso32_pages; i++) {
-               struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
-               ClearPageReserved(pg);
-               get_page(pg);
-       }
-
-       get_page(virt_to_page(_systemcfg));
-}
-
-int in_gate_area_no_task(unsigned long addr)
-{
-       return 0;
-}
-
-int in_gate_area(struct task_struct *task, unsigned long addr)
-{
-       return 0;
-}
-
-struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
-{
-       return NULL;
-}
-
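The alternate-function patching removed above keys off the processor version register: an entry in vdso_patches applies when (pvr & pvr_mask) == pvr_value, after which the generic symbol's st_value/st_size/st_info/st_other/st_shndx are overwritten with those of the CPU-specific variant. A minimal stand-alone sketch of just that matching step, using a made-up table entry and a hard-coded PVR rather than the kernel's actual table:

/* Sketch of the PVR mask/value test used by vdso_fixup_alt_funcs().
 * The table entry and the PVR value below are hypothetical, for illustration only. */
#include <stdio.h>

struct patch_def {
        unsigned int pvr_mask;
        unsigned int pvr_value;
        const char  *gen_name;
        const char  *fix_name;
};

static const struct patch_def patches[] = {
        /* hypothetical: match any PVR whose upper 16 bits are 0x0039 */
        { 0xffff0000, 0x00390000, "__kernel_sync_dicache", "__kernel_sync_dicache_fixed" },
};

int main(void)
{
        unsigned int pvr = 0x00390202;  /* pretend this came from mfspr(SPRN_PVR) */
        unsigned int i;

        for (i = 0; i < sizeof(patches) / sizeof(patches[0]); i++) {
                int match = (pvr & patches[i].pvr_mask) == patches[i].pvr_value;
                printf("patch %u (%s -> %s): %s\n", i, patches[i].gen_name,
                       patches[i].fix_name, match ? "replace" : "skip");
        }
        return 0;
}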
diff --git a/arch/ppc64/kernel/vmlinux.lds.S b/arch/ppc64/kernel/vmlinux.lds.S
deleted file mode 100644 (file)
index 022f220..0000000
+++ /dev/null
@@ -1,151 +0,0 @@
-#include <asm/page.h>
-#include <asm-generic/vmlinux.lds.h>
-
-OUTPUT_ARCH(powerpc:common64)
-jiffies = jiffies_64;
-SECTIONS
-{
-  /* Sections to be discarded. */
-  /DISCARD/ : {
-       *(.exitcall.exit)
-       }
-
-
-  /* Read-only sections, merged into text segment: */
-  .text : {
-       *(.text .text.*)
-       SCHED_TEXT
-       LOCK_TEXT
-       KPROBES_TEXT
-       *(.fixup)
-       . = ALIGN(PAGE_SIZE);
-       _etext = .;
-       }
-
-  __ex_table : {
-       __start___ex_table = .;
-       *(__ex_table)
-       __stop___ex_table = .;
-       }
-
-  __bug_table : {
-       __start___bug_table = .;
-       *(__bug_table)
-       __stop___bug_table = .;
-       }
-
-  __ftr_fixup : {
-       __start___ftr_fixup = .;
-       *(__ftr_fixup)
-       __stop___ftr_fixup = .;
-       }
-
-  RODATA
-
-
-  /* will be freed after init */
-  . = ALIGN(PAGE_SIZE);
-  __init_begin = .;
-
-  .init.text : {
-       _sinittext = .;
-       *(.init.text)
-       _einittext = .;
-       }
-
-  .init.data : {
-       *(.init.data)
-       }
-
-  . = ALIGN(16);
-  .init.setup : {
-       __setup_start = .;
-       *(.init.setup)
-       __setup_end = .;
-       }
-
-  .initcall.init : {
-       __initcall_start = .;
-       *(.initcall1.init)
-       *(.initcall2.init)
-       *(.initcall3.init)
-       *(.initcall4.init)
-       *(.initcall5.init)
-       *(.initcall6.init)
-       *(.initcall7.init)
-       __initcall_end = .;
-       }
-
-  .con_initcall.init : {
-       __con_initcall_start = .;
-       *(.con_initcall.init)
-       __con_initcall_end = .;
-       }
-
-  SECURITY_INIT
-
-  . = ALIGN(PAGE_SIZE);
-  .init.ramfs : {
-       __initramfs_start = .;
-       *(.init.ramfs)
-       __initramfs_end = .;
-       }
-
-  .data.percpu : {
-       __per_cpu_start = .;
-       *(.data.percpu)
-       __per_cpu_end = .;
-       }
-
-  . = ALIGN(PAGE_SIZE);
-  . = ALIGN(16384);
-  __init_end = .;
-  /* freed after init ends here */
-
-
-  /* Read/write sections */
-  . = ALIGN(PAGE_SIZE);
-  . = ALIGN(16384);
-  _sdata = .;
-  /* The initial task and kernel stack */
-  .data.init_task : {
-       *(.data.init_task)
-       }
-
-  . = ALIGN(PAGE_SIZE);
-  .data.page_aligned : {
-       *(.data.page_aligned)
-       }
-
-  .data.cacheline_aligned : {
-       *(.data.cacheline_aligned)
-       }
-
-  .data : {
-       *(.data .data.rel* .toc1)
-       *(.branch_lt)
-       }
-
-  .opd : {
-       *(.opd)
-       }
-
-  .got : {
-       __toc_start = .;
-       *(.got)
-       *(.toc)
-       . = ALIGN(PAGE_SIZE);
-       _edata = .;
-       }
-
-
-  . = ALIGN(PAGE_SIZE);
-  .bss : {
-       __bss_start = .;
-       *(.bss)
-       __bss_stop = .;
-       }
-
-  . = ALIGN(PAGE_SIZE);
-  _end = . ;
-}
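The __start___*/__stop___* pairs that the linker script above places around __ex_table, __bug_table and __ftr_fixup are ordinary linker symbols, so C code can treat them as the bounds of an array laid out in that section. A generic, hedged sketch of that idiom follows; the two-word entry layout is a placeholder chosen for the example, not a claim about the exact ppc64 exception-table format:

/* Sketch: walking a section bracketed by linker-provided start/stop symbols.
 * Only the bounds-symbol idiom matters here; the entry layout is illustrative. */
struct ex_entry {
        unsigned long insn;     /* address of the faulting instruction */
        unsigned long fixup;    /* address to branch to instead */
};

extern struct ex_entry __start___ex_table[];
extern struct ex_entry __stop___ex_table[];

static const struct ex_entry *find_fixup(unsigned long faulting_addr)
{
        const struct ex_entry *e;

        for (e = __start___ex_table; e < __stop___ex_table; e++)
                if (e->insn == faulting_addr)
                        return e;
        return 0;
}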
diff --git a/arch/ppc64/xmon/privinst.h b/arch/ppc64/xmon/privinst.h
deleted file mode 100644 (file)
index 02eb40d..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (C) 1996 Paul Mackerras.
- *
- *      This program is free software; you can redistribute it and/or
- *      modify it under the terms of the GNU General Public License
- *      as published by the Free Software Foundation; either version
- *      2 of the License, or (at your option) any later version.
- */
-
-#define GETREG(reg)            \
-    static inline unsigned long get_ ## reg (void)     \
-       { unsigned long ret; asm volatile ("mf" #reg " %0" : "=r" (ret) :); return ret; }
-
-#define SETREG(reg)            \
-    static inline void set_ ## reg (unsigned long val) \
-       { asm volatile ("mt" #reg " %0" : : "r" (val)); }
-
-GETREG(msr)
-SETREG(msrd)
-GETREG(cr)
-
-#define GSETSPR(n, name)       \
-    static inline long get_ ## name (void) \
-       { long ret; asm volatile ("mfspr %0," #n : "=r" (ret) : ); return ret; } \
-    static inline void set_ ## name (long val) \
-       { asm volatile ("mtspr " #n ",%0" : : "r" (val)); }
-
-GSETSPR(0, mq)
-GSETSPR(1, xer)
-GSETSPR(4, rtcu)
-GSETSPR(5, rtcl)
-GSETSPR(8, lr)
-GSETSPR(9, ctr)
-GSETSPR(18, dsisr)
-GSETSPR(19, dar)
-GSETSPR(22, dec)
-GSETSPR(25, sdr1)
-GSETSPR(26, srr0)
-GSETSPR(27, srr1)
-GSETSPR(272, sprg0)
-GSETSPR(273, sprg1)
-GSETSPR(274, sprg2)
-GSETSPR(275, sprg3)
-GSETSPR(282, ear)
-GSETSPR(287, pvr)
-GSETSPR(1008, hid0)
-GSETSPR(1009, hid1)
-GSETSPR(1010, iabr)
-GSETSPR(1023, pir)
-
-static inline void store_inst(void *p)
-{
-       asm volatile ("dcbst 0,%0; sync; icbi 0,%0; isync" : : "r" (p));
-}
-
-static inline void cflush(void *p)
-{
-       asm volatile ("dcbf 0,%0; icbi 0,%0" : : "r" (p));
-}
-
-static inline void cinval(void *p)
-{
-       asm volatile ("dcbi 0,%0; icbi 0,%0" : : "r" (p));
-}
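Each GSETSPR(n, name) line in the header above token-pastes a get/set accessor pair around mfspr/mtspr. Expanding GSETSPR(287, pvr) by hand gives roughly the following, shown only to make the preprocessor output visible:

/* Hand expansion of GSETSPR(287, pvr); no new functionality, just the macro spelled out. */
static inline long get_pvr(void)
{
        long ret;
        asm volatile ("mfspr %0,287" : "=r" (ret) : );
        return ret;
}

static inline void set_pvr(long val)
{
        asm volatile ("mtspr 287,%0" : : "r" (val));
}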
diff --git a/include/asm-ppc64/page.h b/include/asm-ppc64/page.h
deleted file mode 100644 (file)
index 3efc328..0000000
+++ /dev/null
@@ -1,328 +0,0 @@
-#ifndef _PPC64_PAGE_H
-#define _PPC64_PAGE_H
-
-/*
- * Copyright (C) 2001 PPC64 Team, IBM Corp
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/config.h>
-#include <asm/asm-compat.h>
-
-/*
- * We support either a 4k or a 64k software page size. When using 64k
- * pages, whether the hardware really supports 64k pages is irrelevant
- * to these definitions: we always define HW_PAGE_SHIFT as 12, because
- * the use of 64k pages is Linux-specific; every notion of a page
- * number shared with the firmware, TCEs, the iommu, etc. still
- * assumes a page size of 4096.
- */
-#ifdef CONFIG_PPC_64K_PAGES
-#define PAGE_SHIFT             16
-#else
-#define PAGE_SHIFT             12
-#endif
-
-#define PAGE_SIZE              (ASM_CONST(1) << PAGE_SHIFT)
-#define PAGE_MASK              (~(PAGE_SIZE-1))
-
-/* HW_PAGE_SHIFT is always 4k pages */
-#define HW_PAGE_SHIFT          12
-#define HW_PAGE_SIZE           (ASM_CONST(1) << HW_PAGE_SHIFT)
-#define HW_PAGE_MASK           (~(HW_PAGE_SIZE-1))
-
-/* PAGE_FACTOR is the difference in bits between PAGE_SHIFT and
- * HW_PAGE_SHIFT, i.e. the shift between Linux pages and 4k hardware
- * pages.
- */
-#define PAGE_FACTOR            (PAGE_SHIFT - HW_PAGE_SHIFT)
-
-/* Segment size */
-#define SID_SHIFT              28
-#define SID_MASK               0xfffffffffUL
-#define ESID_MASK              0xfffffffff0000000UL
-#define GET_ESID(x)            (((x) >> SID_SHIFT) & SID_MASK)
-
-/* Large page size */
-
-#ifndef __ASSEMBLY__
-extern unsigned int HPAGE_SHIFT;
-#define HPAGE_SIZE             ((1UL) << HPAGE_SHIFT)
-#define HPAGE_MASK             (~(HPAGE_SIZE - 1))
-#define HUGETLB_PAGE_ORDER     (HPAGE_SHIFT - PAGE_SHIFT)
-#endif /* __ASSEMBLY__ */
-
-#ifdef CONFIG_HUGETLB_PAGE
-
-
-#define HTLB_AREA_SHIFT                40
-#define HTLB_AREA_SIZE         (1UL << HTLB_AREA_SHIFT)
-#define GET_HTLB_AREA(x)       ((x) >> HTLB_AREA_SHIFT)
-
-#define LOW_ESID_MASK(addr, len)    (((1U << (GET_ESID(addr+len-1)+1)) \
-                                     - (1U << GET_ESID(addr))) & 0xffff)
-#define HTLB_AREA_MASK(addr, len)   (((1U << (GET_HTLB_AREA(addr+len-1)+1)) \
-                                     - (1U << GET_HTLB_AREA(addr))) & 0xffff)
-
-#define ARCH_HAS_HUGEPAGE_ONLY_RANGE
-#define ARCH_HAS_PREPARE_HUGEPAGE_RANGE
-#define ARCH_HAS_SETCLEAR_HUGE_PTE
-
-#define touches_hugepage_low_range(mm, addr, len) \
-       (LOW_ESID_MASK((addr), (len)) & (mm)->context.low_htlb_areas)
-#define touches_hugepage_high_range(mm, addr, len) \
-       (HTLB_AREA_MASK((addr), (len)) & (mm)->context.high_htlb_areas)
-
-#define __within_hugepage_low_range(addr, len, segmask) \
-       ((LOW_ESID_MASK((addr), (len)) | (segmask)) == (segmask))
-#define within_hugepage_low_range(addr, len) \
-       __within_hugepage_low_range((addr), (len), \
-                                   current->mm->context.low_htlb_areas)
-#define __within_hugepage_high_range(addr, len, zonemask) \
-       ((HTLB_AREA_MASK((addr), (len)) | (zonemask)) == (zonemask))
-#define within_hugepage_high_range(addr, len) \
-       __within_hugepage_high_range((addr), (len), \
-                                   current->mm->context.high_htlb_areas)
-
-#define is_hugepage_only_range(mm, addr, len) \
-       (touches_hugepage_high_range((mm), (addr), (len)) || \
-         touches_hugepage_low_range((mm), (addr), (len)))
-#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
-
-#define in_hugepage_area(context, addr) \
-       (cpu_has_feature(CPU_FTR_16M_PAGE) && \
-        ( ((1 << GET_HTLB_AREA(addr)) & (context).high_htlb_areas) || \
-          ( ((addr) < 0x100000000L) && \
-            ((1 << GET_ESID(addr)) & (context).low_htlb_areas) ) ) )
-
-#else /* !CONFIG_HUGETLB_PAGE */
-
-#define in_hugepage_area(mm, addr)     0
-
-#endif /* !CONFIG_HUGETLB_PAGE */
-
-/* align addr on a size boundary - adjust address up/down if needed */
-#define _ALIGN_UP(addr,size)   (((addr)+((size)-1))&(~((size)-1)))
-#define _ALIGN_DOWN(addr,size) ((addr)&(~((size)-1)))
-
-/* align addr on a size boundary - adjust address up if needed */
-#define _ALIGN(addr,size)     _ALIGN_UP(addr,size)
-
-/* to align the pointer to the (next) page boundary */
-#define PAGE_ALIGN(addr)       _ALIGN(addr, PAGE_SIZE)
-
-#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
-#include <asm/cache.h>
-
-#undef STRICT_MM_TYPECHECKS
-
-#define REGION_SIZE   4UL
-#define REGION_SHIFT  60UL
-#define REGION_MASK   (((1UL<<REGION_SIZE)-1UL)<<REGION_SHIFT)
-
-static __inline__ void clear_page(void *addr)
-{
-       unsigned long lines, line_size;
-
-       line_size = ppc64_caches.dline_size;
-       lines = ppc64_caches.dlines_per_page;
-
-       __asm__ __volatile__(
-       "mtctr          %1      # clear_page\n\
-1:      dcbz   0,%0\n\
-       add     %0,%0,%3\n\
-       bdnz+   1b"
-        : "=r" (addr)
-        : "r" (lines), "0" (addr), "r" (line_size)
-       : "ctr", "memory");
-}
-
-extern void copy_4K_page(void *to, void *from);
-
-#ifdef CONFIG_PPC_64K_PAGES
-static inline void copy_page(void *to, void *from)
-{
-       unsigned int i;
-       for (i=0; i < (1 << (PAGE_SHIFT - 12)); i++) {
-               copy_4K_page(to, from);
-               to += 4096;
-               from += 4096;
-       }
-}
-#else /* CONFIG_PPC_64K_PAGES */
-static inline void copy_page(void *to, void *from)
-{
-       copy_4K_page(to, from);
-}
-#endif /* CONFIG_PPC_64K_PAGES */
-
-struct page;
-extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
-extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *p);
-
-#ifdef STRICT_MM_TYPECHECKS
-/*
- * These are used to make use of C type-checking.
- * On ppc64 every page table level stores 64-bit entries (unsigned long).
- */
-
-/* PTE level */
-typedef struct { unsigned long pte; } pte_t;
-#define pte_val(x)     ((x).pte)
-#define __pte(x)       ((pte_t) { (x) })
-
-/* 64k pages additionally define a bigger "real PTE" type that gathers
- * the "second half" part of the PTE for pseudo 64k pages
- */
-#ifdef CONFIG_PPC_64K_PAGES
-typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
-#else
-typedef struct { pte_t pte; } real_pte_t;
-#endif
-
-/* PMD level */
-typedef struct { unsigned long pmd; } pmd_t;
-#define pmd_val(x)     ((x).pmd)
-#define __pmd(x)       ((pmd_t) { (x) })
-
-/* The PUD level exists only with 4k pages */
-#ifndef CONFIG_PPC_64K_PAGES
-typedef struct { unsigned long pud; } pud_t;
-#define pud_val(x)     ((x).pud)
-#define __pud(x)       ((pud_t) { (x) })
-#endif
-
-/* PGD level */
-typedef struct { unsigned long pgd; } pgd_t;
-#define pgd_val(x)     ((x).pgd)
-#define __pgd(x)       ((pgd_t) { (x) })
-
-/* Page protection bits */
-typedef struct { unsigned long pgprot; } pgprot_t;
-#define pgprot_val(x)  ((x).pgprot)
-#define __pgprot(x)    ((pgprot_t) { (x) })
-
-#else
-
-/*
- * .. while these make it easier on the compiler
- */
-
-typedef unsigned long pte_t;
-#define pte_val(x)     (x)
-#define __pte(x)       (x)
-
-#ifdef CONFIG_PPC_64K_PAGES
-typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
-#else
-typedef unsigned long real_pte_t;
-#endif
-
-
-typedef unsigned long pmd_t;
-#define pmd_val(x)     (x)
-#define __pmd(x)       (x)
-
-#ifndef CONFIG_PPC_64K_PAGES
-typedef unsigned long pud_t;
-#define pud_val(x)     (x)
-#define __pud(x)       (x)
-#endif
-
-typedef unsigned long pgd_t;
-#define pgd_val(x)     (x)
-#define pgprot_val(x)  (x)
-
-typedef unsigned long pgprot_t;
-#define __pgd(x)       (x)
-#define __pgprot(x)    (x)
-
-#endif
-
-#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
-
-extern int page_is_ram(unsigned long pfn);
-
-extern u64 ppc64_pft_size;             /* Log 2 of page table size */
-
-/* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
-#define __HAVE_ARCH_GATE_AREA          1
-
-#endif /* __ASSEMBLY__ */
-
-#ifdef MODULE
-#define __page_aligned __attribute__((__aligned__(PAGE_SIZE)))
-#else
-#define __page_aligned \
-       __attribute__((__aligned__(PAGE_SIZE), \
-               __section__(".data.page_aligned")))
-#endif
-
-
-/* This must match the -Ttext linker address            */
-/* Note: tophys & tovirt make assumptions about how     */
-/*       KERNELBASE is defined for performance reasons. */
-/*       When KERNELBASE moves, those macros may have   */
-/*             to change!                               */
-#define PAGE_OFFSET     ASM_CONST(0xC000000000000000)
-#define KERNELBASE      PAGE_OFFSET
-#define VMALLOCBASE     ASM_CONST(0xD000000000000000)
-
-#define VMALLOC_REGION_ID  (VMALLOCBASE >> REGION_SHIFT)
-#define KERNEL_REGION_ID   (KERNELBASE >> REGION_SHIFT)
-#define USER_REGION_ID     (0UL)
-#define REGION_ID(ea)     (((unsigned long)(ea)) >> REGION_SHIFT)
-
-#define __va(x) ((void *)((unsigned long)(x) + KERNELBASE))
-
-#ifdef CONFIG_FLATMEM
-#define pfn_to_page(pfn)       (mem_map + (pfn))
-#define page_to_pfn(page)      ((unsigned long)((page) - mem_map))
-#define pfn_valid(pfn)         ((pfn) < max_mapnr)
-#endif
-
-#define virt_to_page(kaddr)    pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
-
-#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
-
-/*
- * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
- * and needs to be executable.  This means the whole heap ends
- * up being executable.
- */
-#define VM_DATA_DEFAULT_FLAGS32        (VM_READ | VM_WRITE | VM_EXEC | \
-                                VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
-#define VM_DATA_DEFAULT_FLAGS64        (VM_READ | VM_WRITE | \
-                                VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
-#define VM_DATA_DEFAULT_FLAGS \
-       (test_thread_flag(TIF_32BIT) ? \
-        VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64)
-
-/*
- * This is the default if a program doesn't have a PT_GNU_STACK
- * program header entry. The PPC64 ELF ABI has a non-executable stack
- * by default, so in the absence of a PT_GNU_STACK program header
- * we turn execute permission off.
- */
-#define VM_STACK_DEFAULT_FLAGS32       (VM_READ | VM_WRITE | VM_EXEC | \
-                                        VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
-#define VM_STACK_DEFAULT_FLAGS64       (VM_READ | VM_WRITE | \
-                                        VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
-#define VM_STACK_DEFAULT_FLAGS \
-       (test_thread_flag(TIF_32BIT) ? \
-        VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
-
-#endif /* __KERNEL__ */
-
-#include <asm-generic/page.h>
-
-#endif /* _PPC64_PAGE_H */
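As a worked check of the arithmetic in the header above (with the 4k PAGE_SHIFT of 12 and SID_SHIFT of 28): _ALIGN_UP rounds an address up to the next size boundary, PAGE_ALIGN does the same for a page, and GET_ESID extracts the 256MB segment index. A small stand-alone sketch using a hypothetical address:

/* Illustration of _ALIGN_UP / PAGE_ALIGN / GET_ESID with example values. */
#include <stdio.h>

#define PAGE_SHIFT              12
#define PAGE_SIZE               (1UL << PAGE_SHIFT)
#define _ALIGN_UP(addr, size)   (((addr) + ((size) - 1)) & ~((size) - 1))
#define PAGE_ALIGN(addr)        _ALIGN_UP(addr, PAGE_SIZE)

#define SID_SHIFT               28
#define SID_MASK                0xfffffffffUL
#define GET_ESID(x)             (((x) >> SID_SHIFT) & SID_MASK)

int main(void)
{
        unsigned long addr = 0x10001234UL;      /* hypothetical user address */

        printf("PAGE_ALIGN(%#lx) = %#lx\n", addr, PAGE_ALIGN(addr)); /* 0x10002000 */
        printf("GET_ESID(%#lx)   = %lu\n", addr, GET_ESID(addr));    /* segment 1 */
        return 0;
}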