path: root/arch/ia64/include/asm/io.h
Diffstat (limited to 'arch/ia64/include/asm/io.h')
-rw-r--r--	arch/ia64/include/asm/io.h	45	+++++++++++++++------------------------------
1 file changed, 15 insertions(+), 30 deletions(-)
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
index 260a85ac9d6..0d2bcb37ec3 100644
--- a/arch/ia64/include/asm/io.h
+++ b/arch/ia64/include/asm/io.h
@@ -19,6 +19,8 @@
* Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
*/
+#include <asm/unaligned.h>
+
/* We don't use IO slowdowns on the ia64, but.. */
#define __SLOW_DOWN_IO do { } while (0)
#define SLOW_DOWN_IO do { } while (0)
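Aside (not part of this patch): the empty do { } while (0) bodies above are the standard kernel idiom for a no-op statement macro. The wrapper makes the macro expand to exactly one statement that requires a trailing semicolon, so unbraced if/else callers stay correct even if the macro later grows a body. A minimal sketch; SLOW_DOWN_IO_DEMO, slow_bus and fast_path() are hypothetical names:

	#define SLOW_DOWN_IO_DEMO do { udelay(1); wmb(); } while (0) /* hypothetical */

	if (slow_bus)
		SLOW_DOWN_IO_DEMO;	/* expands to exactly one statement */
	else
		fast_path();		/* else binds to the intended if */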
@@ -69,7 +71,6 @@ extern unsigned int num_io_spaces;
#include <asm/intrinsics.h>
#include <asm/machvec.h>
#include <asm/page.h>
-#include <asm/system.h>
#include <asm-generic/iomap.h>
/*
@@ -89,7 +90,7 @@ phys_to_virt (unsigned long address)
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern u64 kern_mem_attribute (unsigned long phys_addr, unsigned long size);
-extern int valid_phys_addr_range (unsigned long addr, size_t count); /* efi.c */
+extern int valid_phys_addr_range (phys_addr_t addr, size_t count); /* efi.c */
extern int valid_mmap_phys_addr_range (unsigned long pfn, size_t count);
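Aside (not part of this patch): valid_phys_addr_range() is the hook the /dev/mem read/write path uses to reject physical ranges the EFI memory map marks as inaccessible; widening addr from unsigned long to phys_addr_t matches the prototype the generic caller expects. Simplified shape of the caller-side check (illustrative, not verbatim drivers/char/mem.c):

	/* Hypothetical helper: refuse reads outside valid physical memory. */
	static ssize_t read_mem_checked(phys_addr_t p, size_t count)
	{
		if (!valid_phys_addr_range(p, count))
			return -EFAULT;
		/* ... copy out from the physical range ... */
		return count;
	}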
/*
@@ -241,7 +242,7 @@ __insw (unsigned long port, void *dst, unsigned long count)
unsigned short *dp = dst;
while (count--)
- *dp++ = platform_inw(port);
+ put_unaligned(platform_inw(port), dp++);
}
static inline void
@@ -250,7 +251,7 @@ __insl (unsigned long port, void *dst, unsigned long count)
unsigned int *dp = dst;
while (count--)
- *dp++ = platform_inl(port);
+ put_unaligned(platform_inl(port), dp++);
}
static inline void
@@ -268,7 +269,7 @@ __outsw (unsigned long port, const void *src, unsigned long count)
const unsigned short *sp = src;
while (count--)
- platform_outw(*sp++, port);
+ platform_outw(get_unaligned(sp++), port);
}
static inline void
@@ -277,7 +278,7 @@ __outsl (unsigned long port, const void *src, unsigned long count)
const unsigned int *sp = src;
while (count--)
- platform_outl(*sp++, port);
+ platform_outl(get_unaligned(sp++), port);
}
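Aside (not part of this patch): the four hunks above exist because callers of insw()/insl()/outsw()/outsl() may hand in byte-aligned buffers, and on ia64 a plain 16- or 32-bit load or store through such a pointer faults. get_unaligned()/put_unaligned() from <asm/unaligned.h> expand to alignment-safe accesses instead. A minimal userspace sketch of the same idea via memcpy; these helper names are illustrative, not the kernel implementation:

	#include <stdint.h>
	#include <string.h>

	/* Alignment-safe 16-bit store: memcpy lets the compiler emit byte
	 * accesses (or one store where the ISA tolerates misalignment). */
	static inline void put_unaligned_u16(uint16_t val, void *p)
	{
		memcpy(p, &val, sizeof(val));
	}

	/* Alignment-safe 16-bit load, the counterpart on the out-path. */
	static inline uint16_t get_unaligned_u16(const void *p)
	{
		uint16_t val;

		memcpy(&val, p, sizeof(val));
		return val;
	}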
/*
@@ -422,6 +423,14 @@ __writeq (unsigned long val, volatile void __iomem *addr)
extern void __iomem * ioremap(unsigned long offset, unsigned long size);
extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
extern void iounmap (volatile void __iomem *addr);
+extern void __iomem * early_ioremap (unsigned long phys_addr, unsigned long size);
+#define early_memremap(phys_addr, size) early_ioremap(phys_addr, size)
+extern void early_iounmap (volatile void __iomem *addr, unsigned long size);
+static inline void __iomem * ioremap_cache (unsigned long phys_addr, unsigned long size)
+{
+ return ioremap(phys_addr, size);
+}
+
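Aside (not part of this patch): on ia64, ioremap() already consults the EFI memory map and returns a cacheable mapping when the region's attributes allow it, so ioremap_cache() can simply forward to ioremap(). Illustrative call pattern; the helper and its physical address argument are hypothetical:

	/* Hypothetical: map a firmware word cacheably, read it, unmap. */
	static u32 example_read_word(unsigned long phys)
	{
		void __iomem *p = ioremap_cache(phys, sizeof(u32));
		u32 val;

		if (!p)
			return 0;
		val = readl(p);
		iounmap(p);
		return val;
	}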
/*
* String version of IO memory access ops:
@@ -432,28 +441,4 @@ extern void memset_io(volatile void __iomem *s, int c, long n);
# endif /* __KERNEL__ */
-/*
- * Enabling BIO_VMERGE_BOUNDARY forces us to turn off I/O MMU bypassing. It is said that
- * BIO-level virtual merging can give up to 4% performance boost (not verified for ia64).
- * On the other hand, we know that I/O MMU bypassing gives ~8% performance improvement on
- * SPECweb-like workloads on zx1-based machines. Thus, for now we favor I/O MMU bypassing
- * over BIO-level virtual merging.
- */
-extern unsigned long ia64_max_iommu_merge_mask;
-#if 1
-#define BIO_VMERGE_BOUNDARY 0
-#else
-/*
- * It makes no sense at all to have this BIO_VMERGE_BOUNDARY macro here. Should be
- * replaced by dma_merge_mask() or something of that sort. Note: the only way
- * BIO_VMERGE_BOUNDARY is used is to mask off bits. Effectively, our definition gets
- * expanded into:
- *
- * addr & ((ia64_max_iommu_merge_mask + 1) - 1) == (addr & ia64_max_iommu_vmerge_mask)
- *
- * which is precisely what we want.
- */
-#define BIO_VMERGE_BOUNDARY (ia64_max_iommu_merge_mask + 1)
-#endif
-
#endif /* _ASM_IA64_IO_H */