Diffstat (limited to 'arch/arm/include/asm/io.h')
 arch/arm/include/asm/io.h | 108 +++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 96 insertions(+), 12 deletions(-)
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 815c669fec0..3d23418cbdd 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -24,9 +24,11 @@
#ifdef __KERNEL__
#include <linux/types.h>
+#include <linux/blk_types.h>
#include <asm/byteorder.h>
#include <asm/memory.h>
#include <asm-generic/pci_iomap.h>
+#include <xen/xen.h>
/*
* ISA I/O bus memory addresses are 1:1 with the physical address.
@@ -36,6 +38,12 @@
#define isa_bus_to_virt phys_to_virt
/*
+ * Atomic MMIO-wide IO modify
+ */
+extern void atomic_io_modify(void __iomem *reg, u32 mask, u32 set);
+extern void atomic_io_modify_relaxed(void __iomem *reg, u32 mask, u32 set);
+
+/*
* Generic IO read/write. These perform native-endian accesses. Note
* that some architectures will want to re-define __raw_{read,write}w.
*/
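
[Note: since ARM has no instruction that can update an MMIO register atomically, the two helpers added above serialize a read-modify-write behind a lock. A minimal sketch of the relaxed variant, assuming a file-local raw spinlock (the mainline implementation lives in arch/arm/kernel/io.c; the lock name here is illustrative):

#include <linux/io.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(__io_lock);

/* Rewrite only the bits selected by 'mask' at 'reg' with the bits in 'set'. */
void atomic_io_modify_relaxed(void __iomem *reg, u32 mask, u32 set)
{
	unsigned long flags;
	u32 value;

	raw_spin_lock_irqsave(&__io_lock, flags);
	value = readl_relaxed(reg) & ~mask;
	value |= (set & mask);
	writel_relaxed(value, reg);
	raw_spin_unlock_irqrestore(&__io_lock, flags);
}

The non-relaxed atomic_io_modify() would do the same but use the ordered writel() for the final store.]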
@@ -47,13 +55,68 @@ extern void __raw_readsb(const void __iomem *addr, void *data, int bytelen);
extern void __raw_readsw(const void __iomem *addr, void *data, int wordlen);
extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
-#define __raw_writeb(v,a) ((void)(__chk_io_ptr(a), *(volatile unsigned char __force *)(a) = (v)))
-#define __raw_writew(v,a) ((void)(__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v)))
-#define __raw_writel(v,a) ((void)(__chk_io_ptr(a), *(volatile unsigned int __force *)(a) = (v)))
+#if __LINUX_ARM_ARCH__ < 6
+/*
+ * Half-word accesses are problematic with RiscPC due to limitations of
+ * the bus. Rather than special-case the machine, just let the compiler
+ * generate the access for CPUs prior to ARMv6.
+ */
+#define __raw_readw(a) (__chk_io_ptr(a), *(volatile unsigned short __force *)(a))
+#define __raw_writew(v,a) ((void)(__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v)))
+#else
+/*
+ * When running under a hypervisor, we want to avoid I/O accesses with
+ * writeback addressing modes as these incur a significant performance
+ * overhead (the address generation must be emulated in software).
+ */
+static inline void __raw_writew(u16 val, volatile void __iomem *addr)
+{
+ asm volatile("strh %1, %0"
+ : "+Q" (*(volatile u16 __force *)addr)
+ : "r" (val));
+}
+
+static inline u16 __raw_readw(const volatile void __iomem *addr)
+{
+ u16 val;
+ asm volatile("ldrh %1, %0"
+ : "+Q" (*(volatile u16 __force *)addr),
+ "=r" (val));
+ return val;
+}
+#endif
-#define __raw_readb(a) (__chk_io_ptr(a), *(volatile unsigned char __force *)(a))
-#define __raw_readw(a) (__chk_io_ptr(a), *(volatile unsigned short __force *)(a))
-#define __raw_readl(a) (__chk_io_ptr(a), *(volatile unsigned int __force *)(a))
+static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
+{
+ asm volatile("strb %1, %0"
+ : "+Qo" (*(volatile u8 __force *)addr)
+ : "r" (val));
+}
+
+static inline void __raw_writel(u32 val, volatile void __iomem *addr)
+{
+ asm volatile("str %1, %0"
+ : "+Qo" (*(volatile u32 __force *)addr)
+ : "r" (val));
+}
+
+static inline u8 __raw_readb(const volatile void __iomem *addr)
+{
+ u8 val;
+ asm volatile("ldrb %1, %0"
+ : "+Qo" (*(volatile u8 __force *)addr),
+ "=r" (val));
+ return val;
+}
+
+static inline u32 __raw_readl(const volatile void __iomem *addr)
+{
+ u32 val;
+ asm volatile("ldr %1, %0"
+ : "+Qo" (*(volatile u32 __force *)addr),
+ "=r" (val));
+ return val;
+}
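
[Note: to make the addressing-mode point concrete, the "Q" and "Qo" constraints exclude writeback forms such as "str r1, [r0], #4", so the compiler must keep pointer arithmetic in separate ALU instructions rather than folding it into the MMIO access a hypervisor would then have to decode. A hypothetical caller, with names that are illustrative and not part of this patch:

#include <linux/io.h>

/* Program a table of values into consecutive 32-bit registers. With the
 * constrained accessors above each store compiles to a plain
 * "str rX, [rY]" or "str rX, [rY, #imm]"; no writeback is generated. */
static void write_regs(void __iomem *base, const u32 *tbl, int n)
{
	int i;

	for (i = 0; i < n; i++)
		__raw_writel(tbl[i], base + i * 4);
}]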
/*
* Architecture ioremap implementation.
@@ -75,16 +138,16 @@ extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
*/
extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long,
size_t, unsigned int, void *);
-extern void __iomem *__arm_ioremap_caller(unsigned long, size_t, unsigned int,
+extern void __iomem *__arm_ioremap_caller(phys_addr_t, size_t, unsigned int,
void *);
extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
-extern void __iomem *__arm_ioremap(unsigned long, size_t, unsigned int);
-extern void __iomem *__arm_ioremap_exec(unsigned long, size_t, bool cached);
+extern void __iomem *__arm_ioremap(phys_addr_t, size_t, unsigned int);
+extern void __iomem *__arm_ioremap_exec(phys_addr_t, size_t, bool cached);
extern void __iounmap(volatile void __iomem *addr);
extern void __arm_iounmap(volatile void __iomem *addr);
-extern void __iomem * (*arch_ioremap_caller)(unsigned long, size_t,
+extern void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
unsigned int, void *);
extern void (*arch_iounmap)(volatile void __iomem *);
@@ -113,11 +176,25 @@ static inline void __iomem *__typesafe_io(unsigned long addr)
#define __iowmb() do { } while (0)
#endif
+/* PCI fixed i/o mapping */
+#define PCI_IO_VIRT_BASE 0xfee00000
+
+#if defined(CONFIG_PCI)
+void pci_ioremap_set_mem_type(int mem_type);
+#else
+static inline void pci_ioremap_set_mem_type(int mem_type) {}
+#endif
+
+extern int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr);
+
/*
* Now, pick up the machine-defined IO definitions
*/
#ifdef CONFIG_NEED_MACH_IO_H
#include <mach/io.h>
+#elif defined(CONFIG_PCI)
+#define IO_SPACE_LIMIT ((resource_size_t)0xfffff)
+#define __io(a) __typesafe_io(PCI_IO_VIRT_BASE + ((a) & IO_SPACE_LIMIT))
#else
#define __io(a) __typesafe_io((a) & IO_SPACE_LIMIT)
#endif
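
[Note: a sketch of how a host-bridge driver might use the fixed mapping above (the function name and physical address are assumptions): pci_ioremap_io() maps a 64K chunk of the bridge's I/O aperture at PCI_IO_VIRT_BASE + offset, after which port numbers handed to inb()/outb() resolve through __io() into that window.

#include <linux/init.h>
#include <linux/sizes.h>
#include <asm/io.h>

static int __init my_pci_host_map_io(void)
{
	/* 0xf0000000 is an assumed CPU physical address of the
	 * controller's I/O aperture; window 0 serves ports 0-0xffff. */
	return pci_ioremap_io(0 * SZ_64K, 0xf0000000);
}]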
@@ -264,7 +341,7 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
*/
#define ioremap(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE)
#define ioremap_nocache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE)
-#define ioremap_cached(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_CACHED)
+#define ioremap_cache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_CACHED)
#define ioremap_wc(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_WC)
#define iounmap __arm_iounmap
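
[Note: for completeness, a minimal hypothetical use of the mapping API above; the physical address, size and register offset are made up:

#include <linux/io.h>
#include <linux/sizes.h>

static u32 my_read_chip_id(void)
{
	/* Map 4K of device registers at an assumed physical address. */
	void __iomem *base = ioremap(0x10000000, SZ_4K);
	u32 id = 0;

	if (base) {
		id = readl(base);	/* assumed ID register at offset 0 */
		iounmap(base);
	}
	return id;
}]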
@@ -309,9 +386,16 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr);
#define BIOVEC_MERGEABLE(vec1, vec2) \
((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
+struct bio_vec;
+extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
+ const struct bio_vec *vec2);
+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
+ (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \
+ (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))
+
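
[Note: xen_biovec_phys_mergeable() itself lives outside this header (drivers/xen/biomerge.c in mainline). A sketch of the idea, assuming Xen's pfn_to_mfn() translation: two segments that look contiguous in the guest's pseudo-physical address space are only truly mergeable if their machine frames are contiguous as well.

#include <linux/bio.h>
#include <xen/page.h>

bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
			       const struct bio_vec *vec2)
{
	unsigned long mfn1 = pfn_to_mfn(page_to_pfn(vec1->bv_page));
	unsigned long mfn2 = pfn_to_mfn(page_to_pfn(vec2->bv_page));

	/* Same machine frame, or vec2 starts in the very next one. */
	return mfn1 == mfn2 || mfn1 + 1 == mfn2;
}]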
#ifdef CONFIG_MMU
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
-extern int valid_phys_addr_range(unsigned long addr, size_t size);
+extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
extern int devmem_is_allowed(unsigned long pfn);
#endif