From 4cb3cee03d558fd457cb58f56c80a2a09a66110c Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Sat, 11 Nov 2006 17:25:10 +1100
Subject: [POWERPC] Allow hooking of PCI MMIO & PIO accessors on 64 bits

This patch reworks the way iSeries hooks on PCI IO operations (both MMIO
and PIO) and provides a generic way for other platforms to do so (we need
to do that for various other platforms).

While reworking the IO ops, I ended up doing some spring cleaning in io.h
and eeh.h which I might want to split into 2 or 3 patches (among others,
eeh.h had a lot of useless stuff in it).

A side effect is that EEH for PIO should work now (it used to pass IO ports
down to the eeh address check functions, which is bogus).

Also new are MMIO "repeat" ops, which other archs like ARM already had and
which we now have too: readsb, readsw, readsl, writesb, writesw, writesl.

In the long run, I might also make EEH use the hooks instead of wrapping at
the toplevel, which would make things even cleaner and relegate EEH
completely to platforms/iseries, but we have to measure the performance
impact there (though it's really only on MMIO reads).

Since I also need to hook on ioremap, I shuffled the functions a bit there.
I introduced ioremap_flags() to be used by drivers that want to pass
explicit flags to ioremap (and it can be hooked). The old __ioremap() is
still there as a low-level function and cannot be hooked, so drivers who
use it should migrate unless they know they want the low-level version.

The patch "arch provides generic iomap missing accessors" (should be number
4 in this series) is a prerequisite to provide full iomap API support with
this patch.

Signed-off-by: Benjamin Herrenschmidt
Signed-off-by: Paul Mackerras
---
 arch/powerpc/mm/pgtable_64.c | 46 +++++++++++++++++++++++++++++---------------
 1 file changed, 31 insertions(+), 15 deletions(-)

(limited to 'arch/powerpc/mm/pgtable_64.c')

diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index ac64f4aaa50..e9b21846ccb 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -129,22 +129,12 @@ static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
 	return (void __iomem *) (ea + (addr & ~PAGE_MASK));
 }
 
-
-void __iomem *
-ioremap(unsigned long addr, unsigned long size)
-{
-	return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
-}
-
 void __iomem * __ioremap(unsigned long addr, unsigned long size,
 			 unsigned long flags)
 {
 	unsigned long pa, ea;
 	void __iomem *ret;
 
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		return (void __iomem *)addr;
-
 	/*
 	 * Choose an address to map it to.
 	 * Once the imalloc system is running, we use it.
@@ -178,6 +168,25 @@ void __iomem * __ioremap(unsigned long addr, unsigned long size,
 	return ret;
 }
 
+
+void __iomem * ioremap(unsigned long addr, unsigned long size)
+{
+	unsigned long flags = _PAGE_NO_CACHE | _PAGE_GUARDED;
+
+	if (ppc_md.ioremap)
+		return ppc_md.ioremap(addr, size, flags);
+	return __ioremap(addr, size, flags);
+}
+
+void __iomem * ioremap_flags(unsigned long addr, unsigned long size,
+			     unsigned long flags)
+{
+	if (ppc_md.ioremap)
+		return ppc_md.ioremap(addr, size, flags);
+	return __ioremap(addr, size, flags);
+}
+
+
 #define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))
 
 int __ioremap_explicit(unsigned long pa, unsigned long ea,
@@ -235,13 +244,10 @@ int __ioremap_explicit(unsigned long pa, unsigned long ea,
  *
  * XXX what about calls before mem_init_done (ie python_countermeasures())
  */
-void iounmap(volatile void __iomem *token)
+void __iounmap(void __iomem *token)
 {
 	void *addr;
 
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		return;
-
 	if (!mem_init_done)
 		return;
 
@@ -250,6 +256,14 @@ void iounmap(volatile void __iomem *token)
 	im_free(addr);
 }
 
+void iounmap(void __iomem *token)
+{
+	if (ppc_md.iounmap)
+		ppc_md.iounmap(token);
+	else
+		__iounmap(token);
+}
+
 static int iounmap_subset_regions(unsigned long addr, unsigned long size)
 {
 	struct vm_struct *area;
@@ -268,7 +282,7 @@ static int iounmap_subset_regions(unsigned long addr, unsigned long size)
 	return 0;
 }
 
-int iounmap_explicit(volatile void __iomem *start, unsigned long size)
+int __iounmap_explicit(void __iomem *start, unsigned long size)
 {
 	struct vm_struct *area;
 	unsigned long addr;
@@ -303,8 +317,10 @@ int iounmap_explicit(volatile void __iomem *start, unsigned long size)
 }
 
 EXPORT_SYMBOL(ioremap);
+EXPORT_SYMBOL(ioremap_flags);
 EXPORT_SYMBOL(__ioremap);
 EXPORT_SYMBOL(iounmap);
+EXPORT_SYMBOL(__iounmap);
 
 void __iomem * reserve_phb_iospace(unsigned long size)
 {
-- cgit v1.2.3-18-g5258

From 68a64357d15ae4f596e92715719071952006e83c Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Mon, 13 Nov 2006 09:27:39 +1100
Subject: [POWERPC] Merge 32 and 64 bits asm-powerpc/io.h

powerpc: Merge 32 and 64 bits asm-powerpc/io.h

The rework on io.h done for the new hookable accessors made it easier, so I
just finished the work and merged the 32-bit and 64-bit io.h for
arch/powerpc. arch/ppc still uses the old version in asm-ppc; there is just
too much gunk in there that I really can't be bothered trying to clean up.
Signed-off-by: Benjamin Herrenschmidt
Signed-off-by: Paul Mackerras
---
 arch/powerpc/mm/pgtable_64.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

(limited to 'arch/powerpc/mm/pgtable_64.c')

diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index e9b21846ccb..16e4ee1c231 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -113,7 +113,7 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags)
 }
 
 
-static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
+static void __iomem * __ioremap_com(phys_addr_t addr, unsigned long pa,
 			    unsigned long ea, unsigned long size,
 			    unsigned long flags)
 {
@@ -129,7 +129,7 @@ static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
 	return (void __iomem *) (ea + (addr & ~PAGE_MASK));
 }
 
-void __iomem * __ioremap(unsigned long addr, unsigned long size,
+void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
 			 unsigned long flags)
 {
 	unsigned long pa, ea;
@@ -169,7 +169,7 @@ void __iomem * __ioremap(unsigned long addr, unsigned long size,
 }
 
 
-void __iomem * ioremap(unsigned long addr, unsigned long size)
+void __iomem * ioremap(phys_addr_t addr, unsigned long size)
 {
 	unsigned long flags = _PAGE_NO_CACHE | _PAGE_GUARDED;
 
@@ -178,7 +178,7 @@ void __iomem * ioremap(unsigned long addr, unsigned long size)
 	return __ioremap(addr, size, flags);
 }
 
-void __iomem * ioremap_flags(unsigned long addr, unsigned long size,
+void __iomem * ioremap_flags(phys_addr_t addr, unsigned long size,
 			     unsigned long flags)
 {
 	if (ppc_md.ioremap)
@@ -189,7 +189,7 @@ void __iomem * ioremap_flags(unsigned long addr, unsigned long size,
 
 #define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))
 
-int __ioremap_explicit(unsigned long pa, unsigned long ea,
+int __ioremap_explicit(phys_addr_t pa, unsigned long ea,
 			unsigned long size, unsigned long flags)
 {
 	struct vm_struct *area;
@@ -244,7 +244,7 @@ int __ioremap_explicit(unsigned long pa, unsigned long ea,
  *
  * XXX what about calls before mem_init_done (ie python_countermeasures())
  */
-void __iounmap(void __iomem *token)
+void __iounmap(volatile void __iomem *token)
 {
 	void *addr;
 
@@ -256,7 +256,7 @@ void __iounmap(void __iomem *token)
 	im_free(addr);
 }
 
-void iounmap(void __iomem *token)
+void iounmap(volatile void __iomem *token)
 {
 	if (ppc_md.iounmap)
 		ppc_md.iounmap(token);
@@ -282,7 +282,7 @@ static int iounmap_subset_regions(unsigned long addr, unsigned long size)
 	return 0;
 }
 
-int __iounmap_explicit(void __iomem *start, unsigned long size)
+int __iounmap_explicit(volatile void __iomem *start, unsigned long size)
 {
 	struct vm_struct *area;
 	unsigned long addr;
-- cgit v1.2.3-18-g5258
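
For illustration only, not part of the patches above: a minimal sketch of how a
platform other than iSeries might install the hooks added by the first patch.
The ppc_md.ioremap() and ppc_md.iounmap() signatures follow the calls visible in
the pgtable_64.c diffs (using the phys_addr_t form from the second patch); the
foo_* names and the setup function are hypothetical.

    #include <linux/init.h>
    #include <linux/types.h>
    #include <asm/io.h>
    #include <asm/machdep.h>

    /* Hypothetical platform hook: a platform could filter or translate the
     * mapping here, then fall back to the generic low-level __ioremap(). */
    static void __iomem *foo_ioremap(phys_addr_t addr, unsigned long size,
                                     unsigned long flags)
    {
            return __ioremap(addr, size, flags);
    }

    static void foo_iounmap(volatile void __iomem *token)
    {
            __iounmap(token);
    }

    static void __init foo_setup_io_hooks(void)
    {
            /* Once these are set, every ioremap()/ioremap_flags()/iounmap()
             * call goes through the platform before the generic code. */
            ppc_md.ioremap = foo_ioremap;
            ppc_md.iounmap = foo_iounmap;
    }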
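
On the driver side, the intent of the series is that most code keeps calling
plain ioremap(), which now defaults to _PAGE_NO_CACHE | _PAGE_GUARDED and goes
through the hook, while drivers needing explicit page flags move from the
unhookable low-level __ioremap() to the new ioremap_flags(). A rough usage
sketch follows; the foo_* names and the register window address and size are
made up for the example:

    #include <linux/errno.h>
    #include <asm/io.h>
    #include <asm/pgtable.h>

    #define FOO_REGS_BASE   0xf0000000ul    /* made-up physical address */
    #define FOO_REGS_SIZE   0x1000ul

    static void __iomem *foo_regs;

    static int foo_map_regs(void)
    {
            /* Hookable replacement for a direct __ioremap() call. */
            foo_regs = ioremap_flags(FOO_REGS_BASE, FOO_REGS_SIZE,
                                     _PAGE_NO_CACHE | _PAGE_GUARDED);
            if (!foo_regs)
                    return -ENOMEM;
            return 0;
    }

    static void foo_unmap_regs(void)
    {
            iounmap(foo_regs);      /* also routed through ppc_md.iounmap */
    }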