Diffstat (limited to 'arch/sparc/kernel/ioport.c')
-rw-r--r--  arch/sparc/kernel/ioport.c  202
1 file changed, 90 insertions(+), 112 deletions(-)
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 41f7e4e0f72..7f08ec8a7c6 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -50,14 +50,18 @@
#include <asm/io-unit.h>
#include <asm/leon.h>
-#ifdef CONFIG_SPARC_LEON
-#define mmu_inval_dma_area(p, l) leon_flush_dcache_all()
-#else
-#define mmu_inval_dma_area(p, l) /* Anton pulled it out for 2.4.0-xx */
-#endif
+const struct sparc32_dma_ops *sparc32_dma_ops;
-static struct resource *_sparc_find_resource(struct resource *r,
- unsigned long);
+/* This function must make sure that caches and memory are coherent after DMA.
+ * On LEON systems without cache snooping it flushes the entire D-CACHE.
+ */
+static inline void dma_make_coherent(unsigned long pa, unsigned long len)
+{
+ if (sparc_cpu_model == sparc_leon) {
+ if (!sparc_leon3_snooping_enabled())
+ leon_flush_dcache_all();
+ }
+}
static void __iomem *_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz);
static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
@@ -134,7 +138,11 @@ void iounmap(volatile void __iomem *virtual)
unsigned long vaddr = (unsigned long) virtual & PAGE_MASK;
struct resource *res;
- if ((res = _sparc_find_resource(&sparc_iomap, vaddr)) == NULL) {
+ /*
+ * XXX Too slow. Can have 8192 DVMA pages on sun4m in the worst case.
+ * This probably warrants some sort of hashing.
+ */
+ if ((res = lookup_resource(&sparc_iomap, vaddr)) == NULL) {
printk("free_io/iounmap: cannot free %lx\n", vaddr);
return;
}
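
The generic lookup_resource() this switches to was added to kernel/resource.c for exactly this conversion. Roughly, it is the same linear walk the old _sparc_find_resource() did, but it matches on the exact start address (sufficient here, since iounmap() masks the vaddr down to the page boundary that allocate_resource() handed out) and takes the resource lock:

struct resource *lookup_resource(struct resource *root, resource_size_t start)
{
	struct resource *res;

	read_lock(&resource_lock);
	for (res = root->child; res; res = res->sibling) {
		if (res->start == start)
			break;
	}
	read_unlock(&resource_lock);

	return res;
}
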
@@ -178,7 +186,7 @@ static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
if (name == NULL) name = "???";
- if ((xres = xres_alloc()) != 0) {
+ if ((xres = xres_alloc()) != NULL) {
tack = xres->xname;
res = &xres->xres;
} else {
@@ -219,7 +227,7 @@ _sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz)
}
pa &= PAGE_MASK;
- sparc_mapiorange(bus, pa, res->start, res->end - res->start + 1);
+ srmmu_mapiorange(bus, pa, res->start, resource_size(res));
return (void __iomem *)(unsigned long)(res->start + offset);
}
@@ -231,9 +239,9 @@ static void _sparc_free_io(struct resource *res)
{
unsigned long plen;
- plen = res->end - res->start + 1;
+ plen = resource_size(res);
BUG_ON((plen & (PAGE_SIZE-1)) != 0);
- sparc_unmapiorange(res->start, plen);
+ srmmu_unmapiorange(res->start, plen);
release_resource(res);
}
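
resource_size() is the include/linux/ioport.h helper used throughout this patch in place of the open-coded end/start arithmetic:

static inline resource_size_t resource_size(const struct resource *res)
{
	return res->end - res->start + 1;
}
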
@@ -251,10 +259,11 @@ EXPORT_SYMBOL(sbus_set_sbus64);
* CPU may access them without any explicit flushing.
*/
static void *sbus_alloc_coherent(struct device *dev, size_t len,
- dma_addr_t *dma_addrp, gfp_t gfp)
+ dma_addr_t *dma_addrp, gfp_t gfp,
+ struct dma_attrs *attrs)
{
struct platform_device *op = to_platform_device(dev);
- unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
+ unsigned long len_total = PAGE_ALIGN(len);
unsigned long va;
struct resource *res;
int order;
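
PAGE_ALIGN() likewise replaces the hand-rolled rounding here and in the hunks below. Since PAGE_MASK is ~(PAGE_SIZE - 1), the macro (simplified from include/linux/mm.h) is exactly the expression it replaces:

/* round addr up to the next page boundary */
#define PAGE_ALIGN(addr)	(((addr) + PAGE_SIZE - 1) & PAGE_MASK)
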
@@ -280,14 +289,14 @@ static void *sbus_alloc_coherent(struct device *dev, size_t len,
printk("sbus_alloc_consistent: cannot occupy 0x%lx", len_total);
goto err_nova;
}
- mmu_inval_dma_area(va, len_total);
- // XXX The mmu_map_dma_area does this for us below, see comments.
- // sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
+
+ // XXX The sbus_map_dma_area does this for us below, see comments.
+ // srmmu_mapiorange(0, virt_to_phys(va), res->start, len_total);
/*
* XXX That's where sdev would be used. Currently we load
* all iommu tables with the same translations.
*/
- if (mmu_map_dma_area(dev, dma_addrp, va, res->start, len_total) != 0)
+ if (sbus_map_dma_area(dev, dma_addrp, va, res->start, len_total) != 0)
goto err_noiommu;
res->name = op->dev.of_node->name;
@@ -297,20 +306,20 @@ static void *sbus_alloc_coherent(struct device *dev, size_t len,
err_noiommu:
release_resource(res);
err_nova:
- free_pages(va, order);
-err_nomem:
kfree(res);
+err_nomem:
+ free_pages(va, order);
err_nopages:
return NULL;
}
static void sbus_free_coherent(struct device *dev, size_t n, void *p,
- dma_addr_t ba)
+ dma_addr_t ba, struct dma_attrs *attrs)
{
struct resource *res;
struct page *pgv;
- if ((res = _sparc_find_resource(&_sparc_dvma,
+ if ((res = lookup_resource(&_sparc_dvma,
(unsigned long)p)) == NULL) {
printk("sbus_free_consistent: cannot free %p\n", p);
return;
@@ -321,19 +330,18 @@ static void sbus_free_coherent(struct device *dev, size_t n, void *p,
return;
}
- n = (n + PAGE_SIZE-1) & PAGE_MASK;
- if ((res->end-res->start)+1 != n) {
+ n = PAGE_ALIGN(n);
+ if (resource_size(res) != n) {
printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n",
- (long)((res->end-res->start)+1), n);
+ (long)resource_size(res), n);
return;
}
release_resource(res);
kfree(res);
- /* mmu_inval_dma_area(va, n); */ /* it's consistent, isn't it */
pgv = virt_to_page(p);
- mmu_unmap_dma_area(dev, ba, n);
+ sbus_unmap_dma_area(dev, ba, n);
__free_pages(pgv, get_order(n));
}
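
Note the reordered error labels in sbus_alloc_coherent() above: the unwind steps now run in reverse allocation order, so a failure at any step frees exactly what was set up before it (previously a kzalloc() failure fell through to err_nomem without ever freeing the pages). A minimal user-space sketch of the pattern, with stand-in allocations and a hypothetical claim() in place of allocate_resource():

#include <stdlib.h>

static int claim(void *p)		/* stand-in for allocate_resource() */
{
	return p ? 0 : -1;
}

static void *alloc_pair(size_t len)
{
	void *pages, *meta;

	pages = malloc(len);		/* step 1: backing pages */
	if (!pages)
		goto err_nopages;
	meta = calloc(1, 64);		/* step 2: metadata */
	if (!meta)
		goto err_nomem;
	if (claim(meta) != 0)		/* step 3: claim a region */
		goto err_nova;
	return meta;			/* on success both live on, as in the driver */

err_nova:
	free(meta);			/* undo step 2 */
err_nomem:
	free(pages);			/* undo step 1 */
err_nopages:
	return NULL;
}
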
@@ -371,11 +379,6 @@ static int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n,
enum dma_data_direction dir, struct dma_attrs *attrs)
{
mmu_get_scsi_sgl(dev, sg, n);
-
- /*
- * XXX sparc64 can return a partial length here. sun4c should do this
- * but it currently panics if it can't fulfill the request - Anton
- */
return n;
}
@@ -397,9 +400,9 @@ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
BUG();
}
-struct dma_map_ops sbus_dma_ops = {
- .alloc_coherent = sbus_alloc_coherent,
- .free_coherent = sbus_free_coherent,
+static struct dma_map_ops sbus_dma_ops = {
+ .alloc = sbus_alloc_coherent,
+ .free = sbus_free_coherent,
.map_page = sbus_map_page,
.unmap_page = sbus_unmap_page,
.map_sg = sbus_map_sg,
@@ -408,9 +411,6 @@ struct dma_map_ops sbus_dma_ops = {
.sync_sg_for_device = sbus_sync_sg_for_device,
};
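
The .alloc_coherent/.free_coherent members were renamed when struct dma_attrs plumbing was added to struct dma_map_ops; the table in include/linux/dma-mapping.h now begins roughly:

struct dma_map_ops {
	void *(*alloc)(struct device *dev, size_t size,
		       dma_addr_t *dma_handle, gfp_t gfp,
		       struct dma_attrs *attrs);
	void (*free)(struct device *dev, size_t size, void *vaddr,
		     dma_addr_t dma_handle, struct dma_attrs *attrs);
	/* map_page/unmap_page, map_sg/unmap_sg, sync hooks, ... */
};
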
-struct dma_map_ops *dma_ops = &sbus_dma_ops;
-EXPORT_SYMBOL(dma_ops);
-
static int __init sparc_register_ioport(void)
{
register_proc_sparc_ioport();
@@ -422,16 +422,16 @@ arch_initcall(sparc_register_ioport);
#endif /* CONFIG_SBUS */
-#ifdef CONFIG_PCI
/* Allocate and map kernel buffer using consistent mode DMA for a device.
 * hwdev should be a valid struct pci_dev pointer for PCI devices.
*/
static void *pci32_alloc_coherent(struct device *dev, size_t len,
- dma_addr_t *pba, gfp_t gfp)
+ dma_addr_t *pba, gfp_t gfp,
+ struct dma_attrs *attrs)
{
- unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
- unsigned long va;
+ unsigned long len_total = PAGE_ALIGN(len);
+ void *va;
struct resource *res;
int order;
@@ -443,34 +443,33 @@ static void *pci32_alloc_coherent(struct device *dev, size_t len,
}
order = get_order(len_total);
- va = __get_free_pages(GFP_KERNEL, order);
- if (va == 0) {
+ va = (void *) __get_free_pages(GFP_KERNEL, order);
+ if (va == NULL) {
printk("pci_alloc_consistent: no %ld pages\n", len_total>>PAGE_SHIFT);
- return NULL;
+ goto err_nopages;
}
if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
- free_pages(va, order);
printk("pci_alloc_consistent: no core\n");
- return NULL;
+ goto err_nomem;
}
if (allocate_resource(&_sparc_dvma, res, len_total,
_sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
printk("pci_alloc_consistent: cannot occupy 0x%lx", len_total);
- free_pages(va, order);
- kfree(res);
- return NULL;
+ goto err_nova;
}
- mmu_inval_dma_area(va, len_total);
-#if 0
-/* P3 */ printk("pci_alloc_consistent: kva %lx uncva %lx phys %lx size %lx\n",
- (long)va, (long)res->start, (long)virt_to_phys(va), len_total);
-#endif
- sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
+ srmmu_mapiorange(0, virt_to_phys(va), res->start, len_total);
*pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
return (void *) res->start;
+
+err_nova:
+ kfree(res);
+err_nomem:
+ free_pages((unsigned long)va, order);
+err_nopages:
+ return NULL;
}
/* Free and unmap a consistent DMA buffer.
@@ -482,12 +481,11 @@ static void *pci32_alloc_coherent(struct device *dev, size_t len,
* past this call are illegal.
*/
static void pci32_free_coherent(struct device *dev, size_t n, void *p,
- dma_addr_t ba)
+ dma_addr_t ba, struct dma_attrs *attrs)
{
struct resource *res;
- unsigned long pgp;
- if ((res = _sparc_find_resource(&_sparc_dvma,
+ if ((res = lookup_resource(&_sparc_dvma,
(unsigned long)p)) == NULL) {
printk("pci_free_consistent: cannot free %p\n", p);
return;
@@ -498,21 +496,19 @@ static void pci32_free_coherent(struct device *dev, size_t n, void *p,
return;
}
- n = (n + PAGE_SIZE-1) & PAGE_MASK;
- if ((res->end-res->start)+1 != n) {
+ n = PAGE_ALIGN(n);
+ if (resource_size(res) != n) {
printk("pci_free_consistent: region 0x%lx asked 0x%lx\n",
- (long)((res->end-res->start)+1), (long)n);
+ (long)resource_size(res), (long)n);
return;
}
- pgp = (unsigned long) phys_to_virt(ba); /* bus_to_virt actually */
- mmu_inval_dma_area(pgp, n);
- sparc_unmapiorange((unsigned long)p, n);
+ dma_make_coherent(ba, n);
+ srmmu_unmapiorange((unsigned long)p, n);
release_resource(res);
kfree(res);
-
- free_pages(pgp, get_order(n));
+ free_pages((unsigned long)phys_to_virt(ba), get_order(n));
}
/*
@@ -527,6 +523,13 @@ static dma_addr_t pci32_map_page(struct device *dev, struct page *page,
return page_to_phys(page) + offset;
}
+static void pci32_unmap_page(struct device *dev, dma_addr_t ba, size_t size,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+ if (dir != PCI_DMA_TODEVICE)
+ dma_make_coherent(ba, PAGE_ALIGN(size));
+}
+
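The new pci32_unmap_page() hook matters because the generic dma_unmap_page() only performs arch work through it; the asm-generic wrapper of this era is roughly:

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, NULL);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}

Skipping PCI_DMA_TODEVICE is safe on these write-through parts (see the IIep comment below): the CPU only wrote the buffer, so nothing stale sits in the D-cache; for FROM_DEVICE/BIDIRECTIONAL a LEON without snooping must flush before the CPU reads.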
/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA. This is the scatter-gather version of the
 * above pci_map_single interface. Here the scatter-gather list
@@ -551,8 +554,7 @@ static int pci32_map_sg(struct device *device, struct scatterlist *sgl,
/* IIep is write-through, not flushing. */
for_each_sg(sgl, sg, nents, n) {
- BUG_ON(page_address(sg_page(sg)) == NULL);
- sg->dma_address = virt_to_phys(sg_virt(sg));
+ sg->dma_address = sg_phys(sg);
sg->dma_length = sg->length;
}
return nents;
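
sg_phys() (include/linux/scatterlist.h) removes the need for the BUG_ON: it goes via the page's physical address, so it also works for pages without a kernel virtual mapping:

static inline dma_addr_t sg_phys(struct scatterlist *sg)
{
	return page_to_phys(sg_page(sg)) + sg->offset;
}
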
@@ -571,10 +573,7 @@ static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl,
if (dir != PCI_DMA_TODEVICE) {
for_each_sg(sgl, sg, nents, n) {
- BUG_ON(page_address(sg_page(sg)) == NULL);
- mmu_inval_dma_area(
- (unsigned long) page_address(sg_page(sg)),
- (sg->length + PAGE_SIZE-1) & PAGE_MASK);
+ dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length));
}
}
}
@@ -593,8 +592,7 @@ static void pci32_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
size_t size, enum dma_data_direction dir)
{
if (dir != PCI_DMA_TODEVICE) {
- mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
- (size + PAGE_SIZE-1) & PAGE_MASK);
+ dma_make_coherent(ba, PAGE_ALIGN(size));
}
}
@@ -602,8 +600,7 @@ static void pci32_sync_single_for_device(struct device *dev, dma_addr_t ba,
size_t size, enum dma_data_direction dir)
{
if (dir != PCI_DMA_TODEVICE) {
- mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
- (size + PAGE_SIZE-1) & PAGE_MASK);
+ dma_make_coherent(ba, PAGE_ALIGN(size));
}
}
@@ -621,10 +618,7 @@ static void pci32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
if (dir != PCI_DMA_TODEVICE) {
for_each_sg(sgl, sg, nents, n) {
- BUG_ON(page_address(sg_page(sg)) == NULL);
- mmu_inval_dma_area(
- (unsigned long) page_address(sg_page(sg)),
- (sg->length + PAGE_SIZE-1) & PAGE_MASK);
+ dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length));
}
}
}
@@ -637,18 +631,16 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *
if (dir != PCI_DMA_TODEVICE) {
for_each_sg(sgl, sg, nents, n) {
- BUG_ON(page_address(sg_page(sg)) == NULL);
- mmu_inval_dma_area(
- (unsigned long) page_address(sg_page(sg)),
- (sg->length + PAGE_SIZE-1) & PAGE_MASK);
+ dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length));
}
}
}
struct dma_map_ops pci32_dma_ops = {
- .alloc_coherent = pci32_alloc_coherent,
- .free_coherent = pci32_free_coherent,
+ .alloc = pci32_alloc_coherent,
+ .free = pci32_free_coherent,
.map_page = pci32_map_page,
+ .unmap_page = pci32_unmap_page,
.map_sg = pci32_map_sg,
.unmap_sg = pci32_unmap_sg,
.sync_single_for_cpu = pci32_sync_single_for_cpu,
@@ -658,7 +650,13 @@ struct dma_map_ops pci32_dma_ops = {
};
EXPORT_SYMBOL(pci32_dma_ops);
-#endif /* CONFIG_PCI */
+/* LEON reuses pci32_dma_ops */
+struct dma_map_ops *leon_dma_ops = &pci32_dma_ops;
+EXPORT_SYMBOL(leon_dma_ops);
+
+struct dma_map_ops *dma_ops = &sbus_dma_ops;
+EXPORT_SYMBOL(dma_ops);
+
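With both ops tables exported, the per-device choice moves to get_dma_ops() in arch/sparc/include/asm/dma-mapping.h; sketched for this era:

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_SPARC_LEON
	if (sparc_cpu_model == sparc_leon)
		return leon_dma_ops;
#endif
#if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
	if (dev->bus == &pci_bus_type)
		return &pci32_dma_ops;
#endif
	return dma_ops;			/* default: SBus */
}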
/*
* Return whether the given PCI device DMA address mask can be
@@ -668,10 +666,9 @@ EXPORT_SYMBOL(pci32_dma_ops);
*/
int dma_supported(struct device *dev, u64 mask)
{
-#ifdef CONFIG_PCI
- if (dev->bus == &pci_bus_type)
+ if (dev_is_pci(dev))
return 1;
-#endif
+
return 0;
}
EXPORT_SYMBOL(dma_supported);
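
dev_is_pci() is the include/linux/pci.h spelling of the bus test that previously needed the #ifdef CONFIG_PCI guard:

#define dev_is_pci(d) ((d)->bus == &pci_bus_type)
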
@@ -684,7 +681,7 @@ static int sparc_io_proc_show(struct seq_file *m, void *v)
const char *nm;
for (r = root->child; r != NULL; r = r->sibling) {
- if ((nm = r->name) == 0) nm = "???";
+ if ((nm = r->name) == NULL) nm = "???";
seq_printf(m, "%016llx-%016llx: %s\n",
(unsigned long long)r->start,
(unsigned long long)r->end, nm);
@@ -695,7 +692,7 @@ static int sparc_io_proc_show(struct seq_file *m, void *v)
static int sparc_io_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, sparc_io_proc_show, PDE(inode)->data);
+ return single_open(file, sparc_io_proc_show, PDE_DATA(inode));
}
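
PDE_DATA() is the accessor that replaced poking at PDE(inode)->data directly once struct proc_dir_entry became private to fs/proc; conceptually it is just:

static inline void *PDE_DATA(const struct inode *inode)
{
	return PDE(inode)->data;	/* layout now hidden in fs/proc/internal.h */
}
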
static const struct file_operations sparc_io_proc_fops = {
@@ -707,25 +704,6 @@ static const struct file_operations sparc_io_proc_fops = {
};
#endif /* CONFIG_PROC_FS */
-/*
- * This is a version of find_resource and it belongs to kernel/resource.c.
- * Until we have agreement with Linus and Martin, it lingers here.
- *
- * XXX Too slow. Can have 8192 DVMA pages on sun4m in the worst case.
- * This probably warrants some sort of hashing.
- */
-static struct resource *_sparc_find_resource(struct resource *root,
- unsigned long hit)
-{
- struct resource *tmp;
-
- for (tmp = root->child; tmp != 0; tmp = tmp->sibling) {
- if (tmp->start <= hit && tmp->end >= hit)
- return tmp;
- }
- return NULL;
-}
-
static void register_proc_sparc_ioport(void)
{
#ifdef CONFIG_PROC_FS