Diffstat (limited to 'arch/ia64/sn/pci')
-rw-r--r--  arch/ia64/sn/pci/Makefile                   4
-rw-r--r--  arch/ia64/sn/pci/pci_dma.c                238
-rw-r--r--  arch/ia64/sn/pci/pcibr/Makefile             2
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_ate.c         51
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_dma.c        198
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_provider.c   123
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_reg.c        131
-rw-r--r--  arch/ia64/sn/pci/tioca_provider.c         162
-rw-r--r--  arch/ia64/sn/pci/tioce_provider.c        1062
9 files changed, 1606 insertions, 365 deletions
diff --git a/arch/ia64/sn/pci/Makefile b/arch/ia64/sn/pci/Makefile
index 2f915bce25f..df2a9014542 100644
--- a/arch/ia64/sn/pci/Makefile
+++ b/arch/ia64/sn/pci/Makefile
@@ -7,4 +7,6 @@
#
# Makefile for the sn pci general routines.
-obj-y := pci_dma.o tioca_provider.o pcibr/
+ccflags-y := -Iarch/ia64/sn/include
+
+obj-y := pci_dma.o tioca_provider.o tioce_provider.o pcibr/
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index 0e4b9ad9ef0..d0853e8e862 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -9,14 +9,16 @@
* a description of how these routines should be used.
*/
+#include <linux/gfp.h>
#include <linux/module.h>
+#include <linux/dma-mapping.h>
#include <asm/dma.h>
-#include <asm/sn/pcibr_provider.h>
+#include <asm/sn/intr.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/sn_sal.h>
-#define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset)
+#define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg)))
#define SG_ENT_PHYS_ADDRESS(SG) virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))
/**
@@ -30,15 +32,14 @@
* this function. Of course, SN only supports devices that have 32 or more
* address bits when using the PMU.
*/
-int sn_dma_supported(struct device *dev, u64 mask)
+static int sn_dma_supported(struct device *dev, u64 mask)
{
- BUG_ON(dev->bus != &pci_bus_type);
+ BUG_ON(!dev_is_pci(dev));
if (mask < 0x7fffffff)
return 0;
return 1;
}
-EXPORT_SYMBOL(sn_dma_supported);
/**
* sn_dma_set_mask - set the DMA mask
@@ -49,7 +50,7 @@ EXPORT_SYMBOL(sn_dma_supported);
*/
int sn_dma_set_mask(struct device *dev, u64 dma_mask)
{
- BUG_ON(dev->bus != &pci_bus_type);
+ BUG_ON(!dev_is_pci(dev));
if (!sn_dma_supported(dev, dma_mask))
return 0;
@@ -74,8 +75,9 @@ EXPORT_SYMBOL(sn_dma_set_mask);
* queue for a SCSI controller). See Documentation/DMA-API.txt for
* more information.
*/
-void *sn_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t * dma_handle, int flags)
+static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t * dma_handle, gfp_t flags,
+ struct dma_attrs *attrs)
{
void *cpuaddr;
unsigned long phys_addr;
@@ -83,21 +85,22 @@ void *sn_dma_alloc_coherent(struct device *dev, size_t size,
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
- BUG_ON(dev->bus != &pci_bus_type);
+ BUG_ON(!dev_is_pci(dev));
/*
* Allocate the memory.
*/
node = pcibus_to_node(pdev->bus);
if (likely(node >=0)) {
- struct page *p = alloc_pages_node(node, GFP_ATOMIC, get_order(size));
+ struct page *p = alloc_pages_exact_node(node,
+ flags, get_order(size));
if (likely(p))
cpuaddr = page_address(p);
else
return NULL;
} else
- cpuaddr = (void *)__get_free_pages(GFP_ATOMIC, get_order(size));
+ cpuaddr = (void *)__get_free_pages(flags, get_order(size));
if (unlikely(!cpuaddr))
return NULL;
@@ -113,16 +116,16 @@ void *sn_dma_alloc_coherent(struct device *dev, size_t size,
* resources.
*/
- *dma_handle = provider->dma_map_consistent(pdev, phys_addr, size);
+ *dma_handle = provider->dma_map_consistent(pdev, phys_addr, size,
+ SN_DMA_ADDR_PHYS);
if (!*dma_handle) {
- printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
+ printk(KERN_ERR "%s: out of ATEs\n", __func__);
free_pages((unsigned long)cpuaddr, get_order(size));
return NULL;
}
return cpuaddr;
}
-EXPORT_SYMBOL(sn_dma_alloc_coherent);
/**
* sn_pci_free_coherent - free memory associated with coherent DMAable region
@@ -134,25 +137,25 @@ EXPORT_SYMBOL(sn_dma_alloc_coherent);
* Frees the memory allocated by dma_alloc_coherent(), potentially unmapping
* any associated IOMMU mappings.
*/
-void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t dma_handle)
+static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle, struct dma_attrs *attrs)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
- BUG_ON(dev->bus != &pci_bus_type);
+ BUG_ON(!dev_is_pci(dev));
provider->dma_unmap(pdev, dma_handle, 0);
free_pages((unsigned long)cpu_addr, get_order(size));
}
-EXPORT_SYMBOL(sn_dma_free_coherent);
/**
- * sn_dma_map_single - map a single page for DMA
+ * sn_dma_map_single_attrs - map a single page for DMA
* @dev: device to map for
* @cpu_addr: kernel virtual address of the region to map
* @size: size of the region
* @direction: DMA direction
+ * @attrs: optional dma attributes
*
* Map the region pointed to by @cpu_addr for DMA and return the
* DMA address.
@@ -162,51 +165,68 @@ EXPORT_SYMBOL(sn_dma_free_coherent);
* no way of saving the dmamap handle from the alloc to later free
* (which is pretty much unacceptable).
*
+ * mappings with the DMA_ATTR_WRITE_BARRIER get mapped with
+ * dma_map_consistent() so that writes force a flush of pending DMA.
+ * (See "SGI Altix Architecture Considerations for Linux Device Drivers",
+ * Document Number: 007-4763-001)
+ *
* TODO: simplify our interface;
* figure out how to save dmamap handle so can use two step.
*/
-dma_addr_t sn_dma_map_single(struct device *dev, void *cpu_addr, size_t size,
- int direction)
+static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
+ void *cpu_addr = page_address(page) + offset;
dma_addr_t dma_addr;
unsigned long phys_addr;
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
+ int dmabarr;
+
+ dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);
- BUG_ON(dev->bus != &pci_bus_type);
+ BUG_ON(!dev_is_pci(dev));
phys_addr = __pa(cpu_addr);
- dma_addr = provider->dma_map(pdev, phys_addr, size);
+ if (dmabarr)
+ dma_addr = provider->dma_map_consistent(pdev, phys_addr,
+ size, SN_DMA_ADDR_PHYS);
+ else
+ dma_addr = provider->dma_map(pdev, phys_addr, size,
+ SN_DMA_ADDR_PHYS);
+
if (!dma_addr) {
- printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
+ printk(KERN_ERR "%s: out of ATEs\n", __func__);
return 0;
}
return dma_addr;
}
-EXPORT_SYMBOL(sn_dma_map_single);
/**
- * sn_dma_unmap_single - unamp a DMA mapped page
+ * sn_dma_unmap_single_attrs - unmap a DMA mapped page
* @dev: device to sync
* @dma_addr: DMA address to sync
* @size: size of region
* @direction: DMA direction
+ * @attrs: optional dma attributes
*
* This routine is supposed to sync the DMA region specified
* by @dma_handle into the coherence domain. On SN, we're always cache
* coherent, so we just need to free any ATEs associated with this mapping.
*/
-void sn_dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
- int direction)
+static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
- BUG_ON(dev->bus != &pci_bus_type);
+ BUG_ON(!dev_is_pci(dev));
- provider->dma_unmap(pdev, dma_addr, direction);
+ provider->dma_unmap(pdev, dma_addr, dir);
}
-EXPORT_SYMBOL(sn_dma_unmap_single);
/**
* sn_dma_unmap_sg - unmap a DMA scatterlist
@@ -214,25 +234,27 @@ EXPORT_SYMBOL(sn_dma_unmap_single);
* @sg: scatterlist to unmap
* @nhwentries: number of scatterlist entries
* @direction: DMA direction
+ * @attrs: optional dma attributes
*
* Unmap a set of streaming mode DMA translations.
*/
-void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nhwentries, int direction)
+static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
+ int nhwentries, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
int i;
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
+ struct scatterlist *sg;
- BUG_ON(dev->bus != &pci_bus_type);
+ BUG_ON(!dev_is_pci(dev));
- for (i = 0; i < nhwentries; i++, sg++) {
- provider->dma_unmap(pdev, sg->dma_address, direction);
+ for_each_sg(sgl, sg, nhwentries, i) {
+ provider->dma_unmap(pdev, sg->dma_address, dir);
sg->dma_address = (dma_addr_t) NULL;
sg->dma_length = 0;
}
}
-EXPORT_SYMBOL(sn_dma_unmap_sg);
/**
* sn_dma_map_sg - map a scatterlist for DMA
@@ -240,36 +262,55 @@ EXPORT_SYMBOL(sn_dma_unmap_sg);
* @sg: scatterlist to map
* @nhwentries: number of entries
* @direction: direction of the DMA transaction
+ * @attrs: optional dma attributes
+ *
+ * mappings with the DMA_ATTR_WRITE_BARRIER get mapped with
+ * dma_map_consistent() so that writes force a flush of pending DMA.
+ * (See "SGI Altix Architecture Considerations for Linux Device Drivers",
+ * Document Number: 007-4763-001)
*
* Maps each entry of @sg for DMA.
*/
-int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
- int direction)
+static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl,
+ int nhwentries, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
unsigned long phys_addr;
- struct scatterlist *saved_sg = sg;
+ struct scatterlist *saved_sg = sgl, *sg;
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
int i;
+ int dmabarr;
- BUG_ON(dev->bus != &pci_bus_type);
+ dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);
+
+ BUG_ON(!dev_is_pci(dev));
/*
* Setup a DMA address for each entry in the scatterlist.
*/
- for (i = 0; i < nhwentries; i++, sg++) {
+ for_each_sg(sgl, sg, nhwentries, i) {
+ dma_addr_t dma_addr;
phys_addr = SG_ENT_PHYS_ADDRESS(sg);
- sg->dma_address = provider->dma_map(pdev,
- phys_addr, sg->length);
+ if (dmabarr)
+ dma_addr = provider->dma_map_consistent(pdev,
+ phys_addr,
+ sg->length,
+ SN_DMA_ADDR_PHYS);
+ else
+ dma_addr = provider->dma_map(pdev, phys_addr,
+ sg->length,
+ SN_DMA_ADDR_PHYS);
+ sg->dma_address = dma_addr;
if (!sg->dma_address) {
- printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
+ printk(KERN_ERR "%s: out of ATEs\n", __func__);
/*
* Free any successfully allocated entries.
*/
if (i > 0)
- sn_dma_unmap_sg(dev, saved_sg, i, direction);
+ sn_dma_unmap_sg(dev, saved_sg, i, dir, attrs);
return 0;
}
@@ -278,41 +319,42 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
return nhwentries;
}
-EXPORT_SYMBOL(sn_dma_map_sg);
-void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
- size_t size, int direction)
+static void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir)
{
- BUG_ON(dev->bus != &pci_bus_type);
+ BUG_ON(!dev_is_pci(dev));
}
-EXPORT_SYMBOL(sn_dma_sync_single_for_cpu);
-void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
- size_t size, int direction)
+static void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+ size_t size,
+ enum dma_data_direction dir)
{
- BUG_ON(dev->bus != &pci_bus_type);
+ BUG_ON(!dev_is_pci(dev));
}
-EXPORT_SYMBOL(sn_dma_sync_single_for_device);
-void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
- int nelems, int direction)
+static void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir)
{
- BUG_ON(dev->bus != &pci_bus_type);
+ BUG_ON(!dev_is_pci(dev));
}
-EXPORT_SYMBOL(sn_dma_sync_sg_for_cpu);
-void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
- int nelems, int direction)
+static void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir)
{
- BUG_ON(dev->bus != &pci_bus_type);
+ BUG_ON(!dev_is_pci(dev));
}
-EXPORT_SYMBOL(sn_dma_sync_sg_for_device);
-int sn_dma_mapping_error(dma_addr_t dma_addr)
+static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return 0;
}
-EXPORT_SYMBOL(sn_dma_mapping_error);
+
+u64 sn_dma_get_required_mask(struct device *dev)
+{
+ return DMA_BIT_MASK(64);
+}
+EXPORT_SYMBOL_GPL(sn_dma_get_required_mask);
char *sn_pci_get_legacy_mem(struct pci_bus *bus)
{
@@ -326,6 +368,29 @@ int sn_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
{
unsigned long addr;
int ret;
+ struct ia64_sal_retval isrv;
+
+ /*
+ * First, try the SN_SAL_IOIF_PCI_SAFE SAL call which can work
+ * around hw issues at the pci bus level. SGI proms older than
+ * 4.10 don't implement this.
+ */
+
+ SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
+ pci_domain_nr(bus), bus->number,
+ 0, /* io */
+ 0, /* read */
+ port, size, __pa(val));
+
+ if (isrv.status == 0)
+ return size;
+
+ /*
+ * If the above failed, retry using the SAL_PROBE call which should
+ * be present in all proms (but which cannot work round PCI chipset
+ * bugs). This code is retained for compatibility with old
+ * pre-4.10 proms, and should be removed at some point in the future.
+ */
if (!SN_PCIBUS_BUSSOFT(bus))
return -ENODEV;
@@ -349,6 +414,29 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
int ret = size;
unsigned long paddr;
unsigned long *addr;
+ struct ia64_sal_retval isrv;
+
+ /*
+ * First, try the SN_SAL_IOIF_PCI_SAFE SAL call which can work
+ * around hw issues at the pci bus level. SGI proms older than
+ * 4.10 don't implement this.
+ */
+
+ SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
+ pci_domain_nr(bus), bus->number,
+ 0, /* io */
+ 1, /* write */
+ port, size, __pa(&val));
+
+ if (isrv.status == 0)
+ return size;
+
+ /*
+ * If the above failed, retry using the SAL_PROBE call which should
+ * be present in all proms (but which cannot work round PCI chipset
+ * bugs). This code is retained for compatibility with old
+ * pre-4.10 proms, and should be removed at some point in the future.
+ */
if (!SN_PCIBUS_BUSSOFT(bus)) {
ret = -ENODEV;
@@ -377,3 +465,23 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
out:
return ret;
}
+
+static struct dma_map_ops sn_dma_ops = {
+ .alloc = sn_dma_alloc_coherent,
+ .free = sn_dma_free_coherent,
+ .map_page = sn_dma_map_page,
+ .unmap_page = sn_dma_unmap_page,
+ .map_sg = sn_dma_map_sg,
+ .unmap_sg = sn_dma_unmap_sg,
+ .sync_single_for_cpu = sn_dma_sync_single_for_cpu,
+ .sync_sg_for_cpu = sn_dma_sync_sg_for_cpu,
+ .sync_single_for_device = sn_dma_sync_single_for_device,
+ .sync_sg_for_device = sn_dma_sync_sg_for_device,
+ .mapping_error = sn_dma_mapping_error,
+ .dma_supported = sn_dma_supported,
+};
+
+void sn_dma_init(void)
+{
+ dma_ops = &sn_dma_ops;
+}
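
As the comments in the pci_dma.c hunks above note, mappings requested with DMA_ATTR_WRITE_BARRIER are routed through the provider's dma_map_consistent() so that a completing write forces a flush of pending DMA. A minimal sketch of how a driver of this era might request that attribute through the generic struct dma_attrs interface follows; it is illustrative only and not part of this patch, and the buffer and helper names are placeholders:

	#include <linux/dma-mapping.h>
	#include <linux/dma-attrs.h>

	static dma_addr_t map_status_block(struct device *dev, void *buf, size_t len)
	{
		DEFINE_DMA_ATTRS(attrs);

		/* ask the arch to map this region with write-barrier semantics */
		dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);

		/* on sn2 this reaches sn_dma_map_page() with dmabarr set,
		 * so the provider's dma_map_consistent() path is used */
		return dma_map_single_attrs(dev, buf, len, DMA_FROM_DEVICE, &attrs);
	}
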
diff --git a/arch/ia64/sn/pci/pcibr/Makefile b/arch/ia64/sn/pci/pcibr/Makefile
index 1850c4a94c4..396bcae3630 100644
--- a/arch/ia64/sn/pci/pcibr/Makefile
+++ b/arch/ia64/sn/pci/pcibr/Makefile
@@ -7,5 +7,7 @@
#
# Makefile for the sn2 io routines.
+ccflags-y := -Iarch/ia64/sn/include
+
obj-y += pcibr_dma.o pcibr_reg.o \
pcibr_ate.o pcibr_provider.o
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_ate.c b/arch/ia64/sn/pci/pcibr/pcibr_ate.c
index d1647b863e6..5bc34eac9e0 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_ate.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_ate.c
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001-2004 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2001-2006 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
@@ -12,33 +12,30 @@
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
-int pcibr_invalidate_ate = 0; /* by default don't invalidate ATE on free */
+int pcibr_invalidate_ate; /* by default don't invalidate ATE on free */
/*
* mark_ate: Mark the ate as either free or inuse.
*/
static void mark_ate(struct ate_resource *ate_resource, int start, int number,
- uint64_t value)
+ u64 value)
{
-
- uint64_t *ate = ate_resource->ate;
+ u64 *ate = ate_resource->ate;
int index;
int length = 0;
for (index = start; length < number; index++, length++)
ate[index] = value;
-
}
/*
* find_free_ate: Find the first free ate index starting from the given
- * index for the desired consequtive count.
+ * index for the desired consecutive count.
*/
static int find_free_ate(struct ate_resource *ate_resource, int start,
int count)
{
-
- uint64_t *ate = ate_resource->ate;
+ u64 *ate = ate_resource->ate;
int index;
int start_free;
@@ -57,6 +54,8 @@ static int find_free_ate(struct ate_resource *ate_resource, int start,
break;
}
}
+ if (i >= ate_resource->num_ate)
+ return -1;
} else
index++; /* Try next ate */
}
@@ -70,12 +69,10 @@ static int find_free_ate(struct ate_resource *ate_resource, int start,
static inline void free_ate_resource(struct ate_resource *ate_resource,
int start)
{
-
mark_ate(ate_resource, start, ate_resource->ate[start], 0);
if ((ate_resource->lowest_free_index > start) ||
(ate_resource->lowest_free_index < 0))
ate_resource->lowest_free_index = start;
-
}
/*
@@ -84,7 +81,6 @@ static inline void free_ate_resource(struct ate_resource *ate_resource,
static inline int alloc_ate_resource(struct ate_resource *ate_resource,
int ate_needed)
{
-
int start_index;
/*
@@ -94,7 +90,7 @@ static inline int alloc_ate_resource(struct ate_resource *ate_resource,
return -1;
/*
- * Find the required number of free consequtive ates.
+ * Find the required number of free consecutive ates.
*/
start_index =
find_free_ate(ate_resource, ate_resource->lowest_free_index,
@@ -111,26 +107,19 @@ static inline int alloc_ate_resource(struct ate_resource *ate_resource,
/*
* Allocate "count" contiguous Bridge Address Translation Entries
* on the specified bridge to be used for PCI to XTALK mappings.
- * Indices in rm map range from 1..num_entries. Indicies returned
+ * Indices in rm map range from 1..num_entries. Indices returned
* to caller range from 0..num_entries-1.
*
* Return the start index on success, -1 on failure.
*/
int pcibr_ate_alloc(struct pcibus_info *pcibus_info, int count)
{
- int status = 0;
- uint64_t flag;
+ int status;
+ unsigned long flags;
- flag = pcibr_lock(pcibus_info);
+ spin_lock_irqsave(&pcibus_info->pbi_lock, flags);
status = alloc_ate_resource(&pcibus_info->pbi_int_ate_resource, count);
-
- if (status < 0) {
- /* Failed to allocate */
- pcibr_unlock(pcibus_info, flag);
- return -1;
- }
-
- pcibr_unlock(pcibus_info, flag);
+ spin_unlock_irqrestore(&pcibus_info->pbi_lock, flags);
return status;
}
@@ -139,7 +128,7 @@ int pcibr_ate_alloc(struct pcibus_info *pcibus_info, int count)
* Setup an Address Translation Entry as specified. Use either the Bridge
* internal maps or the external map RAM, as appropriate.
*/
-static inline uint64_t *pcibr_ate_addr(struct pcibus_info *pcibus_info,
+static inline u64 __iomem *pcibr_ate_addr(struct pcibus_info *pcibus_info,
int ate_index)
{
if (ate_index < pcibus_info->pbi_int_ate_size) {
@@ -153,7 +142,7 @@ static inline uint64_t *pcibr_ate_addr(struct pcibus_info *pcibus_info,
*/
void inline
ate_write(struct pcibus_info *pcibus_info, int ate_index, int count,
- volatile uint64_t ate)
+ volatile u64 ate)
{
while (count-- > 0) {
if (ate_index < pcibus_info->pbi_int_ate_size) {
@@ -171,9 +160,9 @@ ate_write(struct pcibus_info *pcibus_info, int ate_index, int count,
void pcibr_ate_free(struct pcibus_info *pcibus_info, int index)
{
- volatile uint64_t ate;
+ volatile u64 ate;
int count;
- uint64_t flags;
+ unsigned long flags;
if (pcibr_invalidate_ate) {
/* For debugging purposes, clear the valid bit in the ATE */
@@ -182,7 +171,7 @@ void pcibr_ate_free(struct pcibus_info *pcibus_info, int index)
ate_write(pcibus_info, index, count, (ate & ~PCI32_ATE_V));
}
- flags = pcibr_lock(pcibus_info);
+ spin_lock_irqsave(&pcibus_info->pbi_lock, flags);
free_ate_resource(&pcibus_info->pbi_int_ate_resource, index);
- pcibr_unlock(pcibus_info, flags);
+ spin_unlock_irqrestore(&pcibus_info->pbi_lock, flags);
}
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_dma.c b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
index b058dc2a0b9..1e863b277ac 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_dma.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
@@ -3,11 +3,12 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001-2004 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2001-2005 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <linux/pci.h>
+#include <linux/export.h>
#include <asm/sn/addrs.h>
#include <asm/sn/geo.h>
#include <asm/sn/pcibr_provider.h>
@@ -41,21 +42,21 @@ extern int sn_ioif_inited;
static dma_addr_t
pcibr_dmamap_ate32(struct pcidev_info *info,
- uint64_t paddr, size_t req_size, uint64_t flags)
+ u64 paddr, size_t req_size, u64 flags, int dma_flags)
{
struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
pdi_pcibus_info;
- uint8_t internal_device = (PCI_SLOT(pcidev_info->pdi_host_pcidev_info->
+ u8 internal_device = (PCI_SLOT(pcidev_info->pdi_host_pcidev_info->
pdi_linux_pcidev->devfn)) - 1;
int ate_count;
int ate_index;
- uint64_t ate_flags = flags | PCI32_ATE_V;
- uint64_t ate;
- uint64_t pci_addr;
- uint64_t xio_addr;
- uint64_t offset;
+ u64 ate_flags = flags | PCI32_ATE_V;
+ u64 ate;
+ u64 pci_addr;
+ u64 xio_addr;
+ u64 offset;
/* PIC in PCI-X mode does not supports 32bit PageMap mode */
if (IS_PIC_SOFT(pcibus_info) && IS_PCIX(pcibus_info)) {
@@ -81,9 +82,12 @@ pcibr_dmamap_ate32(struct pcidev_info *info,
if (IS_PCIX(pcibus_info))
ate_flags &= ~(PCI32_ATE_PREF);
- xio_addr =
- IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
- PHYS_TO_TIODMA(paddr);
+ if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
+ xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
+ PHYS_TO_TIODMA(paddr);
+ else
+ xio_addr = paddr;
+
offset = IOPGOFF(xio_addr);
ate = ate_flags | (xio_addr - offset);
@@ -91,6 +95,17 @@ pcibr_dmamap_ate32(struct pcidev_info *info,
if (IS_PIC_SOFT(pcibus_info)) {
ate |= (pcibus_info->pbi_hub_xid << PIC_ATE_TARGETID_SHFT);
}
+
+ /*
+ * If we're mapping for MSI, set the MSI bit in the ATE. If it's a
+ * TIOCP based pci bus, we also need to set the PIO bit in the ATE.
+ */
+ if (dma_flags & SN_DMA_MSI) {
+ ate |= PCI32_ATE_MSI;
+ if (IS_TIOCP_SOFT(pcibus_info))
+ ate |= PCI32_ATE_PIO;
+ }
+
ate_write(pcibus_info, ate_index, ate_count, ate);
/*
@@ -105,20 +120,26 @@ pcibr_dmamap_ate32(struct pcidev_info *info,
if (pcibus_info->pbi_devreg[internal_device] & PCIBR_DEV_SWAP_DIR)
ATE_SWAP_ON(pci_addr);
+
return pci_addr;
}
static dma_addr_t
-pcibr_dmatrans_direct64(struct pcidev_info * info, uint64_t paddr,
- uint64_t dma_attributes)
+pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr,
+ u64 dma_attributes, int dma_flags)
{
struct pcibus_info *pcibus_info = (struct pcibus_info *)
((info->pdi_host_pcidev_info)->pdi_pcibus_info);
- uint64_t pci_addr;
+ u64 pci_addr;
/* Translate to Crosstalk View of Physical Address */
- pci_addr = (IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
- PHYS_TO_TIODMA(paddr)) | dma_attributes;
+ if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
+ pci_addr = IS_PIC_SOFT(pcibus_info) ?
+ PHYS_TO_DMA(paddr) :
+ PHYS_TO_TIODMA(paddr);
+ else
+ pci_addr = paddr;
+ pci_addr |= dma_attributes;
/* Handle Bus mode */
if (IS_PCIX(pcibus_info))
@@ -127,39 +148,45 @@ pcibr_dmatrans_direct64(struct pcidev_info * info, uint64_t paddr,
/* Handle Bridge Chipset differences */
if (IS_PIC_SOFT(pcibus_info)) {
pci_addr |=
- ((uint64_t) pcibus_info->
+ ((u64) pcibus_info->
pbi_hub_xid << PIC_PCI64_ATTR_TARG_SHFT);
} else
- pci_addr |= TIOCP_PCI64_CMDTYPE_MEM;
+ pci_addr |= (dma_flags & SN_DMA_MSI) ?
+ TIOCP_PCI64_CMDTYPE_MSI :
+ TIOCP_PCI64_CMDTYPE_MEM;
/* If PCI mode, func zero uses VCHAN0, every other func uses VCHAN1 */
if (!IS_PCIX(pcibus_info) && PCI_FUNC(info->pdi_linux_pcidev->devfn))
pci_addr |= PCI64_ATTR_VIRTUAL;
return pci_addr;
-
}
static dma_addr_t
pcibr_dmatrans_direct32(struct pcidev_info * info,
- uint64_t paddr, size_t req_size, uint64_t flags)
+ u64 paddr, size_t req_size, u64 flags, int dma_flags)
{
-
struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
pdi_pcibus_info;
- uint64_t xio_addr;
+ u64 xio_addr;
- uint64_t xio_base;
- uint64_t offset;
- uint64_t endoff;
+ u64 xio_base;
+ u64 offset;
+ u64 endoff;
if (IS_PCIX(pcibus_info)) {
return 0;
}
- xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
- PHYS_TO_TIODMA(paddr);
+ if (dma_flags & SN_DMA_MSI)
+ return 0;
+
+ if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
+ xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
+ PHYS_TO_TIODMA(paddr);
+ else
+ xio_addr = paddr;
xio_base = pcibus_info->pbi_dir_xbase;
offset = xio_addr - xio_base;
@@ -171,11 +198,10 @@ pcibr_dmatrans_direct32(struct pcidev_info * info,
}
return PCI32_DIRECT_BASE | offset;
-
}
/*
- * Wrapper routine for free'ing DMA maps
+ * Wrapper routine for freeing DMA maps
* DMA mappings for Direct 64 and 32 do not have any DMA maps.
*/
void
@@ -202,23 +228,24 @@ pcibr_dma_unmap(struct pci_dev *hwdev, dma_addr_t dma_handle, int direction)
* after doing the read. For PIC this routine then forces a fake interrupt
* on another line, which is logically associated with the slot that the PIO
* is addressed to. It then spins while watching the memory location that
- * the interrupt is targetted to. When the interrupt response arrives, we
+ * the interrupt is targeted to. When the interrupt response arrives, we
* are sure that the DMA has landed in memory and it is safe for the driver
* to proceed. For TIOCP use the Device(x) Write Request Buffer Flush
* Bridge register since it ensures the data has entered the coherence domain,
* unlike the PIC Device(x) Write Request Buffer Flush register.
*/
-void sn_dma_flush(uint64_t addr)
+void sn_dma_flush(u64 addr)
{
nasid_t nasid;
int is_tio;
int wid_num;
int i, j;
- int bwin;
- uint64_t flags;
+ unsigned long flags;
+ u64 itte;
struct hubdev_info *hubinfo;
- volatile struct sn_flush_device_list *p;
+ struct sn_flush_device_kernel *p;
+ struct sn_flush_device_common *common;
struct sn_flush_nasid_entry *flush_nasid_list;
if (!sn_ioif_inited)
@@ -230,50 +257,53 @@ void sn_dma_flush(uint64_t addr)
hubinfo = (NODEPDA(nasid_to_cnodeid(nasid)))->pdinfo;
- if (!hubinfo) {
- BUG();
- }
- is_tio = (nasid & 1);
- if (is_tio) {
- wid_num = TIO_SWIN_WIDGETNUM(addr);
- bwin = TIO_BWIN_WINDOWNUM(addr);
- } else {
- wid_num = SWIN_WIDGETNUM(addr);
- bwin = BWIN_WINDOWNUM(addr);
- }
+ BUG_ON(!hubinfo);
flush_nasid_list = &hubinfo->hdi_flush_nasid_list;
if (flush_nasid_list->widget_p == NULL)
return;
- if (bwin > 0) {
- uint64_t itte = flush_nasid_list->iio_itte[bwin];
- if (is_tio) {
- wid_num = (itte >> TIO_ITTE_WIDGET_SHIFT) &
- TIO_ITTE_WIDGET_MASK;
- } else {
- wid_num = (itte >> IIO_ITTE_WIDGET_SHIFT) &
- IIO_ITTE_WIDGET_MASK;
- }
+ is_tio = (nasid & 1);
+ if (is_tio) {
+ int itte_index;
+
+ if (TIO_HWIN(addr))
+ itte_index = 0;
+ else if (TIO_BWIN_WINDOWNUM(addr))
+ itte_index = TIO_BWIN_WINDOWNUM(addr);
+ else
+ itte_index = -1;
+
+ if (itte_index >= 0) {
+ itte = flush_nasid_list->iio_itte[itte_index];
+ if (! TIO_ITTE_VALID(itte))
+ return;
+ wid_num = TIO_ITTE_WIDGET(itte);
+ } else
+ wid_num = TIO_SWIN_WIDGETNUM(addr);
+ } else {
+ if (BWIN_WINDOWNUM(addr)) {
+ itte = flush_nasid_list->iio_itte[BWIN_WINDOWNUM(addr)];
+ wid_num = IIO_ITTE_WIDGET(itte);
+ } else
+ wid_num = SWIN_WIDGETNUM(addr);
}
- if (flush_nasid_list->widget_p == NULL)
- return;
if (flush_nasid_list->widget_p[wid_num] == NULL)
return;
p = &flush_nasid_list->widget_p[wid_num][0];
/* find a matching BAR */
- for (i = 0; i < DEV_PER_WIDGET; i++) {
+ for (i = 0; i < DEV_PER_WIDGET; i++,p++) {
+ common = p->common;
for (j = 0; j < PCI_ROM_RESOURCE; j++) {
- if (p->sfdl_bar_list[j].start == 0)
+ if (common->sfdl_bar_list[j].start == 0)
break;
- if (addr >= p->sfdl_bar_list[j].start
- && addr <= p->sfdl_bar_list[j].end)
+ if (addr >= common->sfdl_bar_list[j].start
+ && addr <= common->sfdl_bar_list[j].end)
break;
}
- if (j < PCI_ROM_RESOURCE && p->sfdl_bar_list[j].start != 0)
+ if (j < PCI_ROM_RESOURCE && common->sfdl_bar_list[j].start != 0)
break;
- p++;
}
/* if no matching BAR, return without doing anything. */
@@ -283,33 +313,38 @@ void sn_dma_flush(uint64_t addr)
/*
* For TIOCP use the Device(x) Write Request Buffer Flush Bridge
* register since it ensures the data has entered the coherence
- * domain, unlike PIC
+ * domain, unlike PIC.
*/
if (is_tio) {
- uint32_t tio_id = REMOTE_HUB_L(nasid, TIO_NODE_ID);
- uint32_t revnum = XWIDGET_PART_REV_NUM(tio_id);
+ /*
+ * Note: devices behind TIOCE should never be matched in the
+ * above code, and so the following code is PIC/CP centric.
+ * If CE ever needs the sn_dma_flush mechanism, we will have
+ * to account for that here and in tioce_bus_fixup().
+ */
+ u32 tio_id = HUB_L(TIO_IOSPACE_ADDR(nasid, TIO_NODE_ID));
+ u32 revnum = XWIDGET_PART_REV_NUM(tio_id);
/* TIOCP BRINGUP WAR (PV907516): Don't write buffer flush reg */
if ((1 << XWIDGET_PART_REV_NUM_REV(revnum)) & PV907516) {
return;
} else {
- pcireg_wrb_flush_get(p->sfdl_pcibus_info,
- (p->sfdl_slot - 1));
+ pcireg_wrb_flush_get(common->sfdl_pcibus_info,
+ (common->sfdl_slot - 1));
}
} else {
- spin_lock_irqsave(&((struct sn_flush_device_list *)p)->
- sfdl_flush_lock, flags);
-
- *p->sfdl_flush_addr = 0;
+ spin_lock_irqsave(&p->sfdl_flush_lock, flags);
+ *common->sfdl_flush_addr = 0;
/* force an interrupt. */
- *(volatile uint32_t *)(p->sfdl_force_int_addr) = 1;
+ *(volatile u32 *)(common->sfdl_force_int_addr) = 1;
/* wait for the interrupt to come back. */
- while (*(p->sfdl_flush_addr) != 0x10f) ;
+ while (*(common->sfdl_flush_addr) != 0x10f)
+ cpu_relax();
/* okay, everything is synched up. */
- spin_unlock_irqrestore((spinlock_t *)&p->sfdl_flush_lock, flags);
+ spin_unlock_irqrestore(&p->sfdl_flush_lock, flags);
}
return;
}
@@ -319,7 +354,7 @@ void sn_dma_flush(uint64_t addr)
*/
dma_addr_t
-pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size)
+pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size, int dma_flags)
{
dma_addr_t dma_handle;
struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
@@ -336,11 +371,11 @@ pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size)
*/
dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr,
- PCI64_ATTR_PREF);
+ PCI64_ATTR_PREF, dma_flags);
} else {
/* Handle 32-63 bit cards via direct mapping */
dma_handle = pcibr_dmatrans_direct32(pcidev_info, phys_addr,
- size, 0);
+ size, 0, dma_flags);
if (!dma_handle) {
/*
* It is a 32 bit card and we cannot do direct mapping,
@@ -348,7 +383,8 @@ pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size)
*/
dma_handle = pcibr_dmamap_ate32(pcidev_info, phys_addr,
- size, PCI32_ATE_PREF);
+ size, PCI32_ATE_PREF,
+ dma_flags);
}
}
@@ -357,18 +393,18 @@ pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size)
dma_addr_t
pcibr_dma_map_consistent(struct pci_dev * hwdev, unsigned long phys_addr,
- size_t size)
+ size_t size, int dma_flags)
{
dma_addr_t dma_handle;
struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
if (hwdev->dev.coherent_dma_mask == ~0UL) {
dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr,
- PCI64_ATTR_BAR);
+ PCI64_ATTR_BAR, dma_flags);
} else {
dma_handle = (dma_addr_t) pcibr_dmamap_ate32(pcidev_info,
phys_addr, size,
- PCI32_ATE_BAR);
+ PCI32_ATE_BAR, dma_flags);
}
return dma_handle;
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
index b95e928636a..8dbbef4a4f4 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
@@ -3,33 +3,41 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001-2004 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2001-2004, 2006 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/interrupt.h>
#include <linux/types.h>
+#include <linux/slab.h>
#include <linux/pci.h>
+#include <linux/export.h>
#include <asm/sn/addrs.h>
#include <asm/sn/geo.h>
#include <asm/sn/pcibr_provider.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/sn_sal.h>
+#include <asm/sn/pic.h>
+#include <asm/sn/sn2/sn_hwperf.h>
#include "xtalk/xwidgetdev.h"
#include "xtalk/hubdev.h"
int
-sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp)
+sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp,
+ char **ssdt)
{
struct ia64_sal_retval ret_stuff;
- uint64_t busnum;
+ u64 busnum;
+ u64 segment;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
+ segment = soft->pbi_buscommon.bs_persist_segment;
busnum = soft->pbi_buscommon.bs_persist_busnum;
- SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_ENABLE, (u64) busnum,
- (u64) device, (u64) resp, 0, 0, 0, 0);
+ SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_ENABLE, segment,
+ busnum, (u64) device, (u64) resp, (u64)ia64_tpa(ssdt),
+ 0, 0);
return (int)ret_stuff.v0;
}
@@ -39,15 +47,17 @@ sal_pcibr_slot_disable(struct pcibus_info *soft, int device, int action,
void *resp)
{
struct ia64_sal_retval ret_stuff;
- uint64_t busnum;
+ u64 busnum;
+ u64 segment;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
+ segment = soft->pbi_buscommon.bs_persist_segment;
busnum = soft->pbi_buscommon.bs_persist_busnum;
SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_DISABLE,
- (u64) busnum, (u64) device, (u64) action,
- (u64) resp, 0, 0, 0);
+ segment, busnum, (u64) device, (u64) action,
+ (u64) resp, 0, 0);
return (int)ret_stuff.v0;
}
@@ -55,12 +65,12 @@ sal_pcibr_slot_disable(struct pcibus_info *soft, int device, int action,
static int sal_pcibr_error_interrupt(struct pcibus_info *soft)
{
struct ia64_sal_retval ret_stuff;
- uint64_t busnum;
+ u64 busnum;
int segment;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
- segment = 0;
+ segment = soft->pbi_buscommon.bs_persist_segment;
busnum = soft->pbi_buscommon.bs_persist_busnum;
SAL_CALL_NOLOCK(ret_stuff,
(u64) SN_SAL_IOIF_ERROR_INTERRUPT,
@@ -69,18 +79,34 @@ static int sal_pcibr_error_interrupt(struct pcibus_info *soft)
return (int)ret_stuff.v0;
}
+u16 sn_ioboard_to_pci_bus(struct pci_bus *pci_bus)
+{
+ long rc;
+ u16 uninitialized_var(ioboard); /* GCC be quiet */
+ nasid_t nasid = NASID_GET(SN_PCIBUS_BUSSOFT(pci_bus)->bs_base);
+
+ rc = ia64_sn_sysctl_ioboard_get(nasid, &ioboard);
+ if (rc) {
+ printk(KERN_WARNING "ia64_sn_sysctl_ioboard_get failed: %ld\n",
+ rc);
+ return 0;
+ }
+
+ return ioboard;
+}
+
/*
* PCI Bridge Error interrupt handler. Gets invoked whenever a PCI
* bridge sends an error interrupt.
*/
static irqreturn_t
-pcibr_error_intr_handler(int irq, void *arg, struct pt_regs *regs)
+pcibr_error_intr_handler(int irq, void *arg)
{
- struct pcibus_info *soft = (struct pcibus_info *)arg;
+ struct pcibus_info *soft = arg;
- if (sal_pcibr_error_interrupt(soft) < 0) {
+ if (sal_pcibr_error_interrupt(soft) < 0)
panic("pcibr_error_intr_handler(): Fatal Bridge Error");
- }
+
return IRQ_HANDLED;
}
@@ -90,7 +116,8 @@ pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
int nasid, cnode, j;
struct hubdev_info *hubdev_info;
struct pcibus_info *soft;
- struct sn_flush_device_list *sn_flush_device_list;
+ struct sn_flush_device_kernel *sn_flush_device_kernel;
+ struct sn_flush_device_common *common;
if (! IS_PCI_BRIDGE_ASIC(prom_bussoft->bs_asic_type)) {
return NULL;
@@ -100,26 +127,27 @@ pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
* Allocate kernel bus soft and copy from prom.
*/
- soft = kmalloc(sizeof(struct pcibus_info), GFP_KERNEL);
+ soft = kmemdup(prom_bussoft, sizeof(struct pcibus_info), GFP_KERNEL);
if (!soft) {
return NULL;
}
- memcpy(soft, prom_bussoft, sizeof(struct pcibus_info));
- soft->pbi_buscommon.bs_base =
- (((u64) soft->pbi_buscommon.
- bs_base << 4) >> 4) | __IA64_UNCACHED_OFFSET;
+ soft->pbi_buscommon.bs_base = (unsigned long)
+ ioremap(REGION_OFFSET(soft->pbi_buscommon.bs_base),
+ sizeof(struct pic));
spin_lock_init(&soft->pbi_lock);
/*
* register the bridge's error interrupt handler
*/
- if (request_irq(SGI_PCIBR_ERROR, (void *)pcibr_error_intr_handler,
- SA_SHIRQ, "PCIBR error", (void *)(soft))) {
+ if (request_irq(SGI_PCIASIC_ERROR, pcibr_error_intr_handler,
+ IRQF_SHARED, "PCIBR error", (void *)(soft))) {
printk(KERN_WARNING
"pcibr cannot allocate interrupt for error handler\n");
}
+ irq_set_handler(SGI_PCIASIC_ERROR, handle_level_irq);
+ sn_set_err_irq_affinity(SGI_PCIASIC_ERROR);
/*
* Update the Bridge with the "kernel" pagesize
@@ -135,17 +163,19 @@ pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
if (hubdev_info->hdi_flush_nasid_list.widget_p) {
- sn_flush_device_list = hubdev_info->hdi_flush_nasid_list.
+ sn_flush_device_kernel = hubdev_info->hdi_flush_nasid_list.
widget_p[(int)soft->pbi_buscommon.bs_xid];
- if (sn_flush_device_list) {
+ if (sn_flush_device_kernel) {
for (j = 0; j < DEV_PER_WIDGET;
- j++, sn_flush_device_list++) {
- if (sn_flush_device_list->sfdl_slot == -1)
+ j++, sn_flush_device_kernel++) {
+ common = sn_flush_device_kernel->common;
+ if (common->sfdl_slot == -1)
continue;
- if (sn_flush_device_list->
- sfdl_persistent_busnum ==
- soft->pbi_buscommon.bs_persist_busnum)
- sn_flush_device_list->sfdl_pcibus_info =
+ if ((common->sfdl_persistent_segment ==
+ soft->pbi_buscommon.bs_persist_segment) &&
+ (common->sfdl_persistent_busnum ==
+ soft->pbi_buscommon.bs_persist_busnum))
+ common->sfdl_pcibus_info =
soft;
}
}
@@ -154,18 +184,13 @@ pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
/* Setup the PMU ATE map */
soft->pbi_int_ate_resource.lowest_free_index = 0;
soft->pbi_int_ate_resource.ate =
- kmalloc(soft->pbi_int_ate_size * sizeof(uint64_t), GFP_KERNEL);
- memset(soft->pbi_int_ate_resource.ate, 0,
- (soft->pbi_int_ate_size * sizeof(uint64_t)));
-
- if (prom_bussoft->bs_asic_type == PCIIO_ASIC_TYPE_TIOCP)
- /*
- * TIO PCI Bridge with no closest node information.
- * FIXME: Find another way to determine the closest node
- */
- controller->node = -1;
- else
- controller->node = cnode;
+ kzalloc(soft->pbi_int_ate_size * sizeof(u64), GFP_KERNEL);
+
+ if (!soft->pbi_int_ate_resource.ate) {
+ kfree(soft);
+ return NULL;
+ }
+
return soft;
}
@@ -175,6 +200,9 @@ void pcibr_force_interrupt(struct sn_irq_info *sn_irq_info)
struct pcibus_info *pcibus_info;
int bit = sn_irq_info->irq_int_bit;
+ if (! sn_irq_info->irq_bridge)
+ return;
+
pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
if (pcidev_info) {
pcibus_info =
@@ -184,12 +212,12 @@ void pcibr_force_interrupt(struct sn_irq_info *sn_irq_info)
}
}
-void pcibr_change_devices_irq(struct sn_irq_info *sn_irq_info)
+void pcibr_target_interrupt(struct sn_irq_info *sn_irq_info)
{
struct pcidev_info *pcidev_info;
struct pcibus_info *pcibus_info;
int bit = sn_irq_info->irq_int_bit;
- uint64_t xtalk_addr = sn_irq_info->irq_xtalkaddr;
+ u64 xtalk_addr = sn_irq_info->irq_xtalkaddr;
pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
if (pcidev_info) {
@@ -198,13 +226,13 @@ void pcibr_change_devices_irq(struct sn_irq_info *sn_irq_info)
pdi_pcibus_info;
/* Disable the device's IRQ */
- pcireg_intr_enable_bit_clr(pcibus_info, bit);
+ pcireg_intr_enable_bit_clr(pcibus_info, (1 << bit));
/* Change the device's IRQ */
pcireg_intr_addr_addr_set(pcibus_info, bit, xtalk_addr);
/* Re-enable the device's IRQ */
- pcireg_intr_enable_bit_set(pcibus_info, bit);
+ pcireg_intr_enable_bit_set(pcibus_info, (1 << bit));
pcibr_force_interrupt(sn_irq_info);
}
@@ -219,6 +247,8 @@ struct sn_pcibus_provider pcibr_provider = {
.dma_map_consistent = pcibr_dma_map_consistent,
.dma_unmap = pcibr_dma_unmap,
.bus_fixup = pcibr_bus_fixup,
+ .force_interrupt = pcibr_force_interrupt,
+ .target_interrupt = pcibr_target_interrupt
};
int
@@ -232,3 +262,4 @@ pcibr_init_provider(void)
EXPORT_SYMBOL_GPL(sal_pcibr_slot_enable);
EXPORT_SYMBOL_GPL(sal_pcibr_slot_disable);
+EXPORT_SYMBOL_GPL(sn_ioboard_to_pci_bus);
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_reg.c b/arch/ia64/sn/pci/pcibr/pcibr_reg.c
index 21426d02fbe..8b8bbd51d43 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_reg.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_reg.c
@@ -8,6 +8,7 @@
#include <linux/interrupt.h>
#include <linux/types.h>
+#include <asm/sn/io.h>
#include <asm/sn/pcibr_provider.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
@@ -22,42 +23,42 @@ union br_ptr {
/*
* Control Register Access -- Read/Write 0000_0020
*/
-void pcireg_control_bit_clr(struct pcibus_info *pcibus_info, uint64_t bits)
+void pcireg_control_bit_clr(struct pcibus_info *pcibus_info, u64 bits)
{
- union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
+ union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
- ptr->tio.cp_control &= ~bits;
+ __sn_clrq_relaxed(&ptr->tio.cp_control, bits);
break;
case PCIBR_BRIDGETYPE_PIC:
- ptr->pic.p_wid_control &= ~bits;
+ __sn_clrq_relaxed(&ptr->pic.p_wid_control, bits);
break;
default:
panic
("pcireg_control_bit_clr: unknown bridgetype bridge 0x%p",
- (void *)ptr);
+ ptr);
}
}
}
-void pcireg_control_bit_set(struct pcibus_info *pcibus_info, uint64_t bits)
+void pcireg_control_bit_set(struct pcibus_info *pcibus_info, u64 bits)
{
- union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
+ union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
- ptr->tio.cp_control |= bits;
+ __sn_setq_relaxed(&ptr->tio.cp_control, bits);
break;
case PCIBR_BRIDGETYPE_PIC:
- ptr->pic.p_wid_control |= bits;
+ __sn_setq_relaxed(&ptr->pic.p_wid_control, bits);
break;
default:
panic
("pcireg_control_bit_set: unknown bridgetype bridge 0x%p",
- (void *)ptr);
+ ptr);
}
}
}
@@ -65,23 +66,23 @@ void pcireg_control_bit_set(struct pcibus_info *pcibus_info, uint64_t bits)
/*
* PCI/PCIX Target Flush Register Access -- Read Only 0000_0050
*/
-uint64_t pcireg_tflush_get(struct pcibus_info *pcibus_info)
+u64 pcireg_tflush_get(struct pcibus_info *pcibus_info)
{
- union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
- uint64_t ret = 0;
+ union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
+ u64 ret = 0;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
- ret = ptr->tio.cp_tflush;
+ ret = __sn_readq_relaxed(&ptr->tio.cp_tflush);
break;
case PCIBR_BRIDGETYPE_PIC:
- ret = ptr->pic.p_wid_tflush;
+ ret = __sn_readq_relaxed(&ptr->pic.p_wid_tflush);
break;
default:
panic
("pcireg_tflush_get: unknown bridgetype bridge 0x%p",
- (void *)ptr);
+ ptr);
}
}
@@ -95,23 +96,23 @@ uint64_t pcireg_tflush_get(struct pcibus_info *pcibus_info)
/*
* Interrupt Status Register Access -- Read Only 0000_0100
*/
-uint64_t pcireg_intr_status_get(struct pcibus_info * pcibus_info)
+u64 pcireg_intr_status_get(struct pcibus_info * pcibus_info)
{
- union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
- uint64_t ret = 0;
+ union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
+ u64 ret = 0;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
- ret = ptr->tio.cp_int_status;
+ ret = __sn_readq_relaxed(&ptr->tio.cp_int_status);
break;
case PCIBR_BRIDGETYPE_PIC:
- ret = ptr->pic.p_int_status;
+ ret = __sn_readq_relaxed(&ptr->pic.p_int_status);
break;
default:
panic
("pcireg_intr_status_get: unknown bridgetype bridge 0x%p",
- (void *)ptr);
+ ptr);
}
}
return ret;
@@ -120,42 +121,42 @@ uint64_t pcireg_intr_status_get(struct pcibus_info * pcibus_info)
/*
* Interrupt Enable Register Access -- Read/Write 0000_0108
*/
-void pcireg_intr_enable_bit_clr(struct pcibus_info *pcibus_info, uint64_t bits)
+void pcireg_intr_enable_bit_clr(struct pcibus_info *pcibus_info, u64 bits)
{
- union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
+ union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
- ptr->tio.cp_int_enable &= ~bits;
+ __sn_clrq_relaxed(&ptr->tio.cp_int_enable, bits);
break;
case PCIBR_BRIDGETYPE_PIC:
- ptr->pic.p_int_enable &= ~bits;
+ __sn_clrq_relaxed(&ptr->pic.p_int_enable, bits);
break;
default:
panic
("pcireg_intr_enable_bit_clr: unknown bridgetype bridge 0x%p",
- (void *)ptr);
+ ptr);
}
}
}
-void pcireg_intr_enable_bit_set(struct pcibus_info *pcibus_info, uint64_t bits)
+void pcireg_intr_enable_bit_set(struct pcibus_info *pcibus_info, u64 bits)
{
- union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
+ union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
- ptr->tio.cp_int_enable |= bits;
+ __sn_setq_relaxed(&ptr->tio.cp_int_enable, bits);
break;
case PCIBR_BRIDGETYPE_PIC:
- ptr->pic.p_int_enable |= bits;
+ __sn_setq_relaxed(&ptr->pic.p_int_enable, bits);
break;
default:
panic
("pcireg_intr_enable_bit_set: unknown bridgetype bridge 0x%p",
- (void *)ptr);
+ ptr);
}
}
}
@@ -164,26 +165,28 @@ void pcireg_intr_enable_bit_set(struct pcibus_info *pcibus_info, uint64_t bits)
* Intr Host Address Register (int_addr) -- Read/Write 0000_0130 - 0000_0168
*/
void pcireg_intr_addr_addr_set(struct pcibus_info *pcibus_info, int int_n,
- uint64_t addr)
+ u64 addr)
{
- union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
+ union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
- ptr->tio.cp_int_addr[int_n] &= ~TIOCP_HOST_INTR_ADDR;
- ptr->tio.cp_int_addr[int_n] |=
- (addr & TIOCP_HOST_INTR_ADDR);
+ __sn_clrq_relaxed(&ptr->tio.cp_int_addr[int_n],
+ TIOCP_HOST_INTR_ADDR);
+ __sn_setq_relaxed(&ptr->tio.cp_int_addr[int_n],
+ (addr & TIOCP_HOST_INTR_ADDR));
break;
case PCIBR_BRIDGETYPE_PIC:
- ptr->pic.p_int_addr[int_n] &= ~PIC_HOST_INTR_ADDR;
- ptr->pic.p_int_addr[int_n] |=
- (addr & PIC_HOST_INTR_ADDR);
+ __sn_clrq_relaxed(&ptr->pic.p_int_addr[int_n],
+ PIC_HOST_INTR_ADDR);
+ __sn_setq_relaxed(&ptr->pic.p_int_addr[int_n],
+ (addr & PIC_HOST_INTR_ADDR));
break;
default:
panic
("pcireg_intr_addr_addr_get: unknown bridgetype bridge 0x%p",
- (void *)ptr);
+ ptr);
}
}
}
@@ -193,20 +196,20 @@ void pcireg_intr_addr_addr_set(struct pcibus_info *pcibus_info, int int_n,
*/
void pcireg_force_intr_set(struct pcibus_info *pcibus_info, int int_n)
{
- union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
+ union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
- ptr->tio.cp_force_pin[int_n] = 1;
+ writeq(1, &ptr->tio.cp_force_pin[int_n]);
break;
case PCIBR_BRIDGETYPE_PIC:
- ptr->pic.p_force_pin[int_n] = 1;
+ writeq(1, &ptr->pic.p_force_pin[int_n]);
break;
default:
panic
("pcireg_force_intr_set: unknown bridgetype bridge 0x%p",
- (void *)ptr);
+ ptr);
}
}
}
@@ -214,21 +217,23 @@ void pcireg_force_intr_set(struct pcibus_info *pcibus_info, int int_n)
/*
* Device(x) Write Buffer Flush Reg Access -- Read Only 0000_0240 - 0000_0258
*/
-uint64_t pcireg_wrb_flush_get(struct pcibus_info *pcibus_info, int device)
+u64 pcireg_wrb_flush_get(struct pcibus_info *pcibus_info, int device)
{
- union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
- uint64_t ret = 0;
+ union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
+ u64 ret = 0;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
- ret = ptr->tio.cp_wr_req_buf[device];
+ ret =
+ __sn_readq_relaxed(&ptr->tio.cp_wr_req_buf[device]);
break;
case PCIBR_BRIDGETYPE_PIC:
- ret = ptr->pic.p_wr_req_buf[device];
+ ret =
+ __sn_readq_relaxed(&ptr->pic.p_wr_req_buf[device]);
break;
default:
- panic("pcireg_wrb_flush_get: unknown bridgetype bridge 0x%p", (void *)ptr);
+ panic("pcireg_wrb_flush_get: unknown bridgetype bridge 0x%p", ptr);
}
}
@@ -237,45 +242,43 @@ uint64_t pcireg_wrb_flush_get(struct pcibus_info *pcibus_info, int device)
}
void pcireg_int_ate_set(struct pcibus_info *pcibus_info, int ate_index,
- uint64_t val)
+ u64 val)
{
- union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
+ union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
- ptr->tio.cp_int_ate_ram[ate_index] = (uint64_t) val;
+ writeq(val, &ptr->tio.cp_int_ate_ram[ate_index]);
break;
case PCIBR_BRIDGETYPE_PIC:
- ptr->pic.p_int_ate_ram[ate_index] = (uint64_t) val;
+ writeq(val, &ptr->pic.p_int_ate_ram[ate_index]);
break;
default:
panic
("pcireg_int_ate_set: unknown bridgetype bridge 0x%p",
- (void *)ptr);
+ ptr);
}
}
}
-uint64_t *pcireg_int_ate_addr(struct pcibus_info *pcibus_info, int ate_index)
+u64 __iomem *pcireg_int_ate_addr(struct pcibus_info *pcibus_info, int ate_index)
{
- union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
- uint64_t *ret = (uint64_t *) 0;
+ union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
+ u64 __iomem *ret = NULL;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
- ret =
- (uint64_t *) & (ptr->tio.cp_int_ate_ram[ate_index]);
+ ret = &ptr->tio.cp_int_ate_ram[ate_index];
break;
case PCIBR_BRIDGETYPE_PIC:
- ret =
- (uint64_t *) & (ptr->pic.p_int_ate_ram[ate_index]);
+ ret = &ptr->pic.p_int_ate_ram[ate_index];
break;
default:
panic
("pcireg_int_ate_addr: unknown bridgetype bridge 0x%p",
- (void *)ptr);
+ ptr);
}
}
return ret;
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c
index 5d76a758146..a70b11fd57d 100644
--- a/arch/ia64/sn/pci/tioca_provider.c
+++ b/arch/ia64/sn/pci/tioca_provider.c
@@ -9,13 +9,17 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
+#include <linux/bitmap.h>
+#include <linux/slab.h>
+#include <linux/export.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/addrs.h>
+#include <asm/sn/io.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/tioca_provider.h>
-uint32_t tioca_gart_found;
+u32 tioca_gart_found;
EXPORT_SYMBOL(tioca_gart_found); /* used by agp-sgi */
LIST_HEAD(tioca_list);
@@ -33,14 +37,14 @@ static int tioca_gart_init(struct tioca_kernel *);
static int
tioca_gart_init(struct tioca_kernel *tioca_kern)
{
- uint64_t ap_reg;
- uint64_t offset;
+ u64 ap_reg;
+ u64 offset;
struct page *tmp;
struct tioca_common *tioca_common;
- volatile struct tioca *ca_base;
+ struct tioca __iomem *ca_base;
tioca_common = tioca_kern->ca_common;
- ca_base = (struct tioca *)tioca_common->ca_common.bs_base;
+ ca_base = (struct tioca __iomem *)tioca_common->ca_common.bs_base;
if (list_empty(tioca_kern->ca_devices))
return 0;
@@ -87,7 +91,7 @@ tioca_gart_init(struct tioca_kernel *tioca_kern)
break;
default:
printk(KERN_ERR "%s: Invalid CA_APERATURE_SIZE "
- "0x%lx\n", __FUNCTION__, (ulong) CA_APERATURE_SIZE);
+ "0x%lx\n", __func__, (ulong) CA_APERATURE_SIZE);
return -1;
}
@@ -122,8 +126,8 @@ tioca_gart_init(struct tioca_kernel *tioca_kern)
if (!tmp) {
printk(KERN_ERR "%s: Could not allocate "
- "%lu bytes (order %d) for GART\n",
- __FUNCTION__,
+ "%llu bytes (order %d) for GART\n",
+ __func__,
tioca_kern->ca_gart_size,
get_order(tioca_kern->ca_gart_size));
return -ENOMEM;
@@ -148,7 +152,7 @@ tioca_gart_init(struct tioca_kernel *tioca_kern)
tioca_kern->ca_pcigart_entries =
tioca_kern->ca_pciap_size / tioca_kern->ca_ap_pagesize;
tioca_kern->ca_pcigart_pagemap =
- kcalloc(1, tioca_kern->ca_pcigart_entries / 8, GFP_KERNEL);
+ kzalloc(tioca_kern->ca_pcigart_entries / 8, GFP_KERNEL);
if (!tioca_kern->ca_pcigart_pagemap) {
free_pages((unsigned long)tioca_kern->ca_gart,
get_order(tioca_kern->ca_gart_size));
@@ -174,27 +178,29 @@ tioca_gart_init(struct tioca_kernel *tioca_kern)
* DISABLE GART PREFETCHING due to hw bug tracked in SGI PV930029
*/
- ca_base->ca_control1 |= CA_AGPDMA_OP_ENB_COMBDELAY; /* PV895469 ? */
- ca_base->ca_control2 &= ~(CA_GART_MEM_PARAM);
- ca_base->ca_control2 |= (0x2ull << CA_GART_MEM_PARAM_SHFT);
+ __sn_setq_relaxed(&ca_base->ca_control1,
+ CA_AGPDMA_OP_ENB_COMBDELAY); /* PV895469 ? */
+ __sn_clrq_relaxed(&ca_base->ca_control2, CA_GART_MEM_PARAM);
+ __sn_setq_relaxed(&ca_base->ca_control2,
+ (0x2ull << CA_GART_MEM_PARAM_SHFT));
tioca_kern->ca_gart_iscoherent = 1;
- ca_base->ca_control2 &=
- ~(CA_GART_WR_PREFETCH_ENB | CA_GART_RD_PREFETCH_ENB);
+ __sn_clrq_relaxed(&ca_base->ca_control2,
+ (CA_GART_WR_PREFETCH_ENB | CA_GART_RD_PREFETCH_ENB));
/*
* Unmask GART fetch error interrupts. Clear residual errors first.
*/
- ca_base->ca_int_status_alias = CA_GART_FETCH_ERR;
- ca_base->ca_mult_error_alias = CA_GART_FETCH_ERR;
- ca_base->ca_int_mask &= ~CA_GART_FETCH_ERR;
+ writeq(CA_GART_FETCH_ERR, &ca_base->ca_int_status_alias);
+ writeq(CA_GART_FETCH_ERR, &ca_base->ca_mult_error_alias);
+ __sn_clrq_relaxed(&ca_base->ca_int_mask, CA_GART_FETCH_ERR);
/*
* Program the aperature and gart registers in TIOCA
*/
- ca_base->ca_gart_aperature = ap_reg;
- ca_base->ca_gart_ptr_table = tioca_kern->ca_gart_coretalk_addr | 1;
+ writeq(ap_reg, &ca_base->ca_gart_aperature);
+ writeq(tioca_kern->ca_gart_coretalk_addr|1, &ca_base->ca_gart_ptr_table);
return 0;
}
@@ -211,9 +217,8 @@ void
tioca_fastwrite_enable(struct tioca_kernel *tioca_kern)
{
int cap_ptr;
- uint64_t ca_control1;
- uint32_t reg;
- struct tioca *tioca_base;
+ u32 reg;
+ struct tioca __iomem *tioca_base;
struct pci_dev *pdev;
struct tioca_common *common;
@@ -221,7 +226,7 @@ tioca_fastwrite_enable(struct tioca_kernel *tioca_kern)
/*
* Scan all vga controllers on this bus making sure they all
- * suport FW. If not, return.
+ * support FW. If not, return.
*/
list_for_each_entry(pdev, tioca_kern->ca_devices, bus_list) {
@@ -255,10 +260,8 @@ tioca_fastwrite_enable(struct tioca_kernel *tioca_kern)
* Set ca's fw to match
*/
- tioca_base = (struct tioca *)common->ca_common.bs_base;
- ca_control1 = tioca_base->ca_control1;
- ca_control1 |= CA_AGP_FW_ENABLE;
- tioca_base->ca_control1 = ca_control1;
+ tioca_base = (struct tioca __iomem*)common->ca_common.bs_base;
+ __sn_setq_relaxed(&tioca_base->ca_control1, CA_AGP_FW_ENABLE);
}
EXPORT_SYMBOL(tioca_fastwrite_enable); /* used by agp-sgi */
@@ -276,7 +279,7 @@ EXPORT_SYMBOL(tioca_fastwrite_enable); /* used by agp-sgi */
* We will always use 0x1
* 55:55 - Swap bytes Currently unused
*/
-static uint64_t
+static u64
tioca_dma_d64(unsigned long paddr)
{
dma_addr_t bus_addr;
@@ -318,19 +321,19 @@ tioca_dma_d64(unsigned long paddr)
* and so a given CA can only directly target nodes in the range
* xxx - xxx+255.
*/
-static uint64_t
-tioca_dma_d48(struct pci_dev *pdev, uint64_t paddr)
+static u64
+tioca_dma_d48(struct pci_dev *pdev, u64 paddr)
{
struct tioca_common *tioca_common;
- struct tioca *ca_base;
- uint64_t ct_addr;
+ struct tioca __iomem *ca_base;
+ u64 ct_addr;
dma_addr_t bus_addr;
- uint32_t node_upper;
- uint64_t agp_dma_extn;
+ u32 node_upper;
+ u64 agp_dma_extn;
struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);
tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info;
- ca_base = (struct tioca *)tioca_common->ca_common.bs_base;
+ ca_base = (struct tioca __iomem *)tioca_common->ca_common.bs_base;
ct_addr = PHYS_TO_TIODMA(paddr);
if (!ct_addr)
@@ -341,15 +344,15 @@ tioca_dma_d48(struct pci_dev *pdev, uint64_t paddr)
if (node_upper > 64) {
printk(KERN_ERR "%s: coretalk addr 0x%p node id out "
- "of range\n", __FUNCTION__, (void *)ct_addr);
+ "of range\n", __func__, (void *)ct_addr);
return 0;
}
- agp_dma_extn = ca_base->ca_agp_dma_addr_extn;
+ agp_dma_extn = __sn_readq_relaxed(&ca_base->ca_agp_dma_addr_extn);
if (node_upper != (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)) {
printk(KERN_ERR "%s: coretalk upper node (%u) "
- "mismatch with ca_agp_dma_addr_extn (%lu)\n",
- __FUNCTION__,
+ "mismatch with ca_agp_dma_addr_extn (%llu)\n",
+ __func__,
node_upper, (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT));
return 0;
}
@@ -364,20 +367,20 @@ tioca_dma_d48(struct pci_dev *pdev, uint64_t paddr)
* @req_size: len (bytes) to map
*
* Map @paddr into CA address space using the GART mechanism. The mapped
- * dma_addr_t is guarenteed to be contiguous in CA bus space.
+ * dma_addr_t is guaranteed to be contiguous in CA bus space.
*/
static dma_addr_t
-tioca_dma_mapped(struct pci_dev *pdev, uint64_t paddr, size_t req_size)
+tioca_dma_mapped(struct pci_dev *pdev, unsigned long paddr, size_t req_size)
{
- int i, ps, ps_shift, entry, entries, mapsize, last_entry;
- uint64_t xio_addr, end_xio_addr;
+ int ps, ps_shift, entry, entries, mapsize;
+ u64 xio_addr, end_xio_addr;
struct tioca_common *tioca_common;
struct tioca_kernel *tioca_kern;
dma_addr_t bus_addr = 0;
struct tioca_dmamap *ca_dmamap;
void *map;
unsigned long flags;
- struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);;
+ struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);
tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info;
tioca_kern = (struct tioca_kernel *)tioca_common->ca_kernel_private;
@@ -392,7 +395,7 @@ tioca_dma_mapped(struct pci_dev *pdev, uint64_t paddr, size_t req_size)
* allocate a map struct
*/
- ca_dmamap = kcalloc(1, sizeof(struct tioca_dmamap), GFP_ATOMIC);
+ ca_dmamap = kzalloc(sizeof(struct tioca_dmamap), GFP_ATOMIC);
if (!ca_dmamap)
goto map_return;
@@ -410,21 +413,13 @@ tioca_dma_mapped(struct pci_dev *pdev, uint64_t paddr, size_t req_size)
map = tioca_kern->ca_pcigart_pagemap;
mapsize = tioca_kern->ca_pcigart_entries;
- entry = find_first_zero_bit(map, mapsize);
- while (entry < mapsize) {
- last_entry = find_next_bit(map, mapsize, entry);
-
- if (last_entry - entry >= entries)
- break;
-
- entry = find_next_zero_bit(map, mapsize, last_entry);
- }
-
- if (entry > mapsize)
+ entry = bitmap_find_next_zero_area(map, mapsize, 0, entries, 0);
+ if (entry >= mapsize) {
+ kfree(ca_dmamap);
goto map_return;
+ }
- for (i = 0; i < entries; i++)
- set_bit(entry + i, map);
+ bitmap_set(map, entry, entries);
bus_addr = tioca_kern->ca_pciap_base + (entry * ps);
@@ -514,13 +509,19 @@ tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
* The mapping mode used is based on the devices dma_mask. As a last resort
* use the GART mapped mode.
*/
-static uint64_t
-tioca_dma_map(struct pci_dev *pdev, uint64_t paddr, size_t byte_count)
+static u64
+tioca_dma_map(struct pci_dev *pdev, unsigned long paddr, size_t byte_count, int dma_flags)
{
- uint64_t mapaddr;
+ u64 mapaddr;
/*
- * If card is 64 or 48 bit addresable, use a direct mapping. 32
+ * Not supported for now ...
+ */
+ if (dma_flags & SN_DMA_MSI)
+ return 0;
+
+ /*
+ * If card is 64 or 48 bit addressable, use a direct mapping. 32
* bit direct is so restrictive w.r.t. where the memory resides that
* we don't use it even though CA has some support.
*/
@@ -544,22 +545,21 @@ tioca_dma_map(struct pci_dev *pdev, uint64_t paddr, size_t byte_count)
* tioca_error_intr_handler - SGI TIO CA error interrupt handler
* @irq: unused
* @arg: pointer to tioca_common struct for the given CA
- * @pt: unused
*
* Handle a CA error interrupt. Simply a wrapper around a SAL call which
* defers processing to the SGI prom.
*/
static irqreturn_t
-tioca_error_intr_handler(int irq, void *arg, struct pt_regs *pt)
+tioca_error_intr_handler(int irq, void *arg)
{
struct tioca_common *soft = arg;
struct ia64_sal_retval ret_stuff;
- uint64_t segment;
- uint64_t busnum;
+ u64 segment;
+ u64 busnum;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
- segment = 0;
+ segment = soft->ca_common.bs_persist_segment;
busnum = soft->ca_common.bs_persist_busnum;
SAL_CALL_NOLOCK(ret_stuff,
@@ -589,10 +589,10 @@ tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
/* sanity check prom rev */
- if (sn_sal_rev() < 0x0406) {
+ if (is_shub1() && sn_sal_rev() < 0x0406) {
printk
(KERN_ERR "%s: SGI prom rev 4.06 or greater required "
- "for tioca support\n", __FUNCTION__);
+ "for tioca support\n", __func__);
return NULL;
}
@@ -600,16 +600,18 @@ tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
* Allocate kernel bus soft and copy from prom.
*/
- tioca_common = kcalloc(1, sizeof(struct tioca_common), GFP_KERNEL);
+ tioca_common = kmemdup(prom_bussoft, sizeof(struct tioca_common),
+ GFP_KERNEL);
if (!tioca_common)
return NULL;
- memcpy(tioca_common, prom_bussoft, sizeof(struct tioca_common));
- tioca_common->ca_common.bs_base |= __IA64_UNCACHED_OFFSET;
+ tioca_common->ca_common.bs_base = (unsigned long)
+ ioremap(REGION_OFFSET(tioca_common->ca_common.bs_base),
+ sizeof(struct tioca_common));
/* init kernel-private area */
- tioca_kern = kcalloc(1, sizeof(struct tioca_kernel), GFP_KERNEL);
+ tioca_kern = kzalloc(sizeof(struct tioca_kernel), GFP_KERNEL);
if (!tioca_kern) {
kfree(tioca_common);
return NULL;
@@ -620,9 +622,10 @@ tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
INIT_LIST_HEAD(&tioca_kern->ca_dmamaps);
tioca_kern->ca_closest_node =
nasid_to_cnodeid(tioca_common->ca_closest_nasid);
- tioca_common->ca_kernel_private = (uint64_t) tioca_kern;
+ tioca_common->ca_kernel_private = (u64) tioca_kern;
- bus = pci_find_bus(0, tioca_common->ca_common.bs_persist_busnum);
+ bus = pci_find_bus(tioca_common->ca_common.bs_persist_segment,
+ tioca_common->ca_common.bs_persist_busnum);
BUG_ON(!bus);
tioca_kern->ca_devices = &bus->devices;
@@ -639,13 +642,16 @@ tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
if (request_irq(SGI_TIOCA_ERROR,
tioca_error_intr_handler,
- SA_SHIRQ, "TIOCA error", (void *)tioca_common))
+ IRQF_SHARED, "TIOCA error", (void *)tioca_common))
printk(KERN_WARNING
"%s: Unable to get irq %d. "
"Error interrupts won't be routed for TIOCA bus %d\n",
- __FUNCTION__, SGI_TIOCA_ERROR,
+ __func__, SGI_TIOCA_ERROR,
(int)tioca_common->ca_common.bs_persist_busnum);
+ irq_set_handler(SGI_TIOCA_ERROR, handle_level_irq);
+ sn_set_err_irq_affinity(SGI_TIOCA_ERROR);
+
/* Setup locality information */
controller->node = tioca_kern->ca_closest_node;
return tioca_common;
@@ -656,6 +662,8 @@ static struct sn_pcibus_provider tioca_pci_interfaces = {
.dma_map_consistent = tioca_dma_map,
.dma_unmap = tioca_dma_unmap,
.bus_fixup = tioca_bus_fixup,
+ .force_interrupt = NULL,
+ .target_interrupt = NULL
};
/**
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c
new file mode 100644
index 00000000000..46d3df4b03a
--- /dev/null
+++ b/arch/ia64/sn/pci/tioce_provider.c
@@ -0,0 +1,1062 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003-2006 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <asm/sn/sn_sal.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/io.h>
+#include <asm/sn/pcidev.h>
+#include <asm/sn/pcibus_provider_defs.h>
+#include <asm/sn/tioce_provider.h>
+
+/*
+ * 1/26/2006
+ *
+ * WAR for SGI PV 944642. For revA TIOCE, need to use the following recipe
+ * (taken from the above PV) before and after accessing tioce internal MMR's
+ * to avoid tioce lockups.
+ *
+ * The recipe as taken from the PV:
+ *
+ * if(mmr address < 0x45000) {
+ * if(mmr address == 0 or 0x80)
+ * mmr wrt or read address 0xc0
+ * else if(mmr address == 0x148 or 0x200)
+ * mmr wrt or read address 0x28
+ * else
+ * mmr wrt or read address 0x158
+ *
+ * do desired mmr access (rd or wrt)
+ *
+ * if(mmr address == 0x100)
+ * mmr wrt or read address 0x38
+ * mmr wrt or read address 0xb050
+ * } else
+ * do desired mmr access
+ *
+ * According to hw, we can use reads instead of writes to the above address
+ *
+ * Note this WAR can only be used for accessing internal MMR's in the
+ * TIOCE Coretalk Address Range 0x0 - 0x07ff_ffff. This includes the
+ * "Local CE Registers and Memories" and "PCI Compatible Config Space" address
+ * spaces from table 2-1 of the "CE Programmer's Reference Overview" document.
+ *
+ * All registers defined in struct tioce will meet that criteria.
+ */
+
+static inline void
+tioce_mmr_war_pre(struct tioce_kernel *kern, void __iomem *mmr_addr)
+{
+ u64 mmr_base;
+ u64 mmr_offset;
+
+ if (kern->ce_common->ce_rev != TIOCE_REV_A)
+ return;
+
+ mmr_base = kern->ce_common->ce_pcibus.bs_base;
+ mmr_offset = (unsigned long)mmr_addr - mmr_base;
+
+ if (mmr_offset < 0x45000) {
+ u64 mmr_war_offset;
+
+ if (mmr_offset == 0 || mmr_offset == 0x80)
+ mmr_war_offset = 0xc0;
+ else if (mmr_offset == 0x148 || mmr_offset == 0x200)
+ mmr_war_offset = 0x28;
+ else
+ mmr_war_offset = 0x158;
+
+ readq_relaxed((void __iomem *)(mmr_base + mmr_war_offset));
+ }
+}
+
+static inline void
+tioce_mmr_war_post(struct tioce_kernel *kern, void __iomem *mmr_addr)
+{
+ u64 mmr_base;
+ u64 mmr_offset;
+
+ if (kern->ce_common->ce_rev != TIOCE_REV_A)
+ return;
+
+ mmr_base = kern->ce_common->ce_pcibus.bs_base;
+ mmr_offset = (unsigned long)mmr_addr - mmr_base;
+
+ if (mmr_offset < 0x45000) {
+ if (mmr_offset == 0x100)
+ readq_relaxed((void __iomem *)(mmr_base + 0x38));
+ readq_relaxed((void __iomem *)(mmr_base + 0xb050));
+ }
+}
+
+/* load mmr contents into a variable */
+#define tioce_mmr_load(kern, mmrp, varp) do {\
+ tioce_mmr_war_pre(kern, mmrp); \
+ *(varp) = readq_relaxed(mmrp); \
+ tioce_mmr_war_post(kern, mmrp); \
+} while (0)
+
+/* store variable contents into mmr */
+#define tioce_mmr_store(kern, mmrp, varp) do {\
+ tioce_mmr_war_pre(kern, mmrp); \
+ writeq(*varp, mmrp); \
+ tioce_mmr_war_post(kern, mmrp); \
+} while (0)
+
+/* store immediate value into mmr */
+#define tioce_mmr_storei(kern, mmrp, val) do {\
+ tioce_mmr_war_pre(kern, mmrp); \
+ writeq(val, mmrp); \
+ tioce_mmr_war_post(kern, mmrp); \
+} while (0)
+
+/* set bits (immediate value) into mmr */
+#define tioce_mmr_seti(kern, mmrp, bits) do {\
+ u64 tmp; \
+ tioce_mmr_load(kern, mmrp, &tmp); \
+ tmp |= (bits); \
+ tioce_mmr_store(kern, mmrp, &tmp); \
+} while (0)
+
+/* clear bits (immediate value) into mmr */
+#define tioce_mmr_clri(kern, mmrp, bits) do { \
+ u64 tmp; \
+ tioce_mmr_load(kern, mmrp, &tmp); \
+ tmp &= ~(bits); \
+ tioce_mmr_store(kern, mmrp, &tmp); \
+} while (0)
+
+/**
+ * Bus address ranges for the 5 flavors of TIOCE DMA
+ */
+
+#define TIOCE_D64_MIN 0x8000000000000000UL
+#define TIOCE_D64_MAX 0xffffffffffffffffUL
+#define TIOCE_D64_ADDR(a) ((a) >= TIOCE_D64_MIN)
+
+#define TIOCE_D32_MIN 0x0000000080000000UL
+#define TIOCE_D32_MAX 0x00000000ffffffffUL
+#define TIOCE_D32_ADDR(a) ((a) >= TIOCE_D32_MIN && (a) <= TIOCE_D32_MAX)
+
+#define TIOCE_M32_MIN 0x0000000000000000UL
+#define TIOCE_M32_MAX 0x000000007fffffffUL
+#define TIOCE_M32_ADDR(a) ((a) >= TIOCE_M32_MIN && (a) <= TIOCE_M32_MAX)
+
+#define TIOCE_M40_MIN 0x0000004000000000UL
+#define TIOCE_M40_MAX 0x0000007fffffffffUL
+#define TIOCE_M40_ADDR(a) ((a) >= TIOCE_M40_MIN && (a) <= TIOCE_M40_MAX)
+
+#define TIOCE_M40S_MIN 0x0000008000000000UL
+#define TIOCE_M40S_MAX 0x000000ffffffffffUL
+#define TIOCE_M40S_ADDR(a) ((a) >= TIOCE_M40S_MIN && (a) <= TIOCE_M40S_MAX)
+
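The five ranges above partition CE bus space; addresses that fall between M32 and M40, or between M40S and D64, belong to no flavor. A minimal userspace sketch (not part of this patch; it reuses the same constants) that classifies a bus address:

/*
 * Illustrative only: classify a TIOCE bus address by DMA flavor.
 */
#include <stdio.h>
#include <stdint.h>

#define D64_MIN  0x8000000000000000ULL
#define D32_MIN  0x0000000080000000ULL
#define D32_MAX  0x00000000ffffffffULL
#define M32_MAX  0x000000007fffffffULL
#define M40_MIN  0x0000004000000000ULL
#define M40_MAX  0x0000007fffffffffULL
#define M40S_MIN 0x0000008000000000ULL
#define M40S_MAX 0x000000ffffffffffULL

static const char *flavor(uint64_t a)
{
	if (a >= D64_MIN)                    return "D64";
	if (a >= M40S_MIN && a <= M40S_MAX)  return "M40S";
	if (a >= M40_MIN  && a <= M40_MAX)   return "M40";
	if (a >= D32_MIN  && a <= D32_MAX)   return "D32";
	if (a <= M32_MAX)                    return "M32";
	return "outside all five flavors";
}

int main(void)
{
	printf("%s\n", flavor(0x00000000c0000000ULL));  /* D32 */
	printf("%s\n", flavor(0x0000004000100000ULL));  /* M40 */
	printf("%s\n", flavor(0x8000000012345678ULL));  /* D64 */
	return 0;
}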
+/*
+ * ATE manipulation macros.
+ */
+
+#define ATE_PAGESHIFT(ps) (__ffs(ps))
+#define ATE_PAGEMASK(ps) ((ps)-1)
+
+#define ATE_PAGE(x, ps) ((x) >> ATE_PAGESHIFT(ps))
+#define ATE_NPAGES(start, len, pagesize) \
+ (ATE_PAGE((start)+(len)-1, pagesize) - ATE_PAGE(start, pagesize) + 1)
+
+#define ATE_VALID(ate) ((ate) & (1UL << 63))
+#define ATE_MAKE(addr, ps, msi) \
+ (((addr) & ~ATE_PAGEMASK(ps)) | (1UL << 63) | ((msi)?(1UL << 62):0))
+
+/*
+ * Flavors of ate-based mapping supported by tioce_alloc_map()
+ */
+
+#define TIOCE_ATE_M32 1
+#define TIOCE_ATE_M40 2
+#define TIOCE_ATE_M40S 3
+
+#define KB(x) ((u64)(x) << 10)
+#define MB(x) ((u64)(x) << 20)
+#define GB(x) ((u64)(x) << 30)
+
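To make the ATE arithmetic concrete, a small standalone sketch (illustrative only: __builtin_ctzll stands in for the kernel's __ffs(), and the 256KB page size assumes the M32 pool as configured later in tioce_kern_init()):

/*
 * Illustrative only: worked example of the ATE page math above.
 */
#include <stdio.h>
#include <stdint.h>

#define ATE_PAGESHIFT(ps)	((unsigned)__builtin_ctzll(ps))	/* ~ __ffs(ps) */
#define ATE_PAGEMASK(ps)	((ps) - 1)
#define ATE_PAGE(x, ps)		((x) >> ATE_PAGESHIFT(ps))
#define ATE_NPAGES(start, len, ps) \
	(ATE_PAGE((start) + (len) - 1, ps) - ATE_PAGE(start, ps) + 1)
#define ATE_MAKE(addr, ps, msi) \
	(((addr) & ~ATE_PAGEMASK(ps)) | (1ULL << 63) | ((msi) ? (1ULL << 62) : 0))

int main(void)
{
	uint64_t ps      = 256 * 1024;		/* 256KB ATE page */
	uint64_t ct_addr = 0x123456789aULL;	/* sample coretalk address */
	uint64_t len     = 1024 * 1024;		/* 1MB request */

	/* a 1MB request starting mid-page spans 5 ATEs here */
	printf("nates = %llu\n",
	       (unsigned long long)ATE_NPAGES(ct_addr, len, ps));
	/* first ATE: page-aligned address with the valid bit (63) set */
	printf("ate   = 0x%016llx\n",
	       (unsigned long long)ATE_MAKE(ct_addr, ps, 0));
	return 0;
}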
+/**
+ * tioce_dma_d64 - create a DMA mapping using 64-bit direct mode
+ * @ct_addr: system coretalk address
+ *
+ * Map @ct_addr into 64-bit CE bus space. No device context is necessary
+ * and no CE mappings are consumed.
+ *
+ * Bits 53:0 come from the coretalk address. The remaining bits are set as
+ * follows:
+ *
+ * 63 - must be 1 to indicate d64 mode to CE hardware
+ * 62 - barrier bit ... controlled with tioce_dma_barrier()
+ * 61 - msi bit ... specified through dma_flags
+ * 60:54 - reserved, MBZ
+ */
+static u64
+tioce_dma_d64(unsigned long ct_addr, int dma_flags)
+{
+ u64 bus_addr;
+
+ bus_addr = ct_addr | (1UL << 63);
+ if (dma_flags & SN_DMA_MSI)
+ bus_addr |= (1UL << 61);
+
+ return bus_addr;
+}
+
+/**
+ * pcidev_to_tioce - return misc ce related pointers given a pci_dev
+ * @pdev: pci device context
+ * @base: ptr to store struct tioce __iomem * for the CE holding this device
+ * @kernel: ptr to store struct tioce_kernel * for the CE holding this device
+ * @port: ptr to store the CE port number that this device is on
+ *
+ * Return pointers to various CE-related structures for the CE upstream of
+ * @pdev.
+ */
+static inline void
+pcidev_to_tioce(struct pci_dev *pdev, struct tioce __iomem **base,
+ struct tioce_kernel **kernel, int *port)
+{
+ struct pcidev_info *pcidev_info;
+ struct tioce_common *ce_common;
+ struct tioce_kernel *ce_kernel;
+
+ pcidev_info = SN_PCIDEV_INFO(pdev);
+ ce_common = (struct tioce_common *)pcidev_info->pdi_pcibus_info;
+ ce_kernel = (struct tioce_kernel *)ce_common->ce_kernel_private;
+
+ if (base)
+ *base = (struct tioce __iomem *)ce_common->ce_pcibus.bs_base;
+ if (kernel)
+ *kernel = ce_kernel;
+
+ /*
+ * we use port as a zero-based value internally, even though the
+ * documentation is 1-based.
+ */
+ if (port)
+ *port =
+ (pdev->bus->number < ce_kernel->ce_port1_secondary) ? 0 : 1;
+}
+
+/**
+ * tioce_alloc_map - Given a coretalk address, map it to pcie bus address
+ * space using one of the various ATE-based address modes.
+ * @ce_kern: tioce context
+ * @type: map mode to use
+ * @port: 0-based port that the requesting device is downstream of
+ * @ct_addr: the coretalk address to map
+ * @len: number of bytes to map
+ * @dma_flags: SN DMA flags (e.g. SN_DMA_MSI)
+ *
+ * Given the addressing type, set up various parameters that define the
+ * ATE pool to use. Search for a contiguous block of entries to cover the
+ * length, and if enough resources exist, fill in the ATEs and construct a
+ * tioce_dmamap struct to track the mapping.
+ */
+static u64
+tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
+ u64 ct_addr, int len, int dma_flags)
+{
+ int i;
+ int j;
+ int first;
+ int last;
+ int entries;
+ int nates;
+ u64 pagesize;
+ int msi_capable, msi_wanted;
+ u64 *ate_shadow;
+ u64 __iomem *ate_reg;
+ u64 addr;
+ struct tioce __iomem *ce_mmr;
+ u64 bus_base;
+ struct tioce_dmamap *map;
+
+ ce_mmr = (struct tioce __iomem *)ce_kern->ce_common->ce_pcibus.bs_base;
+
+ switch (type) {
+ case TIOCE_ATE_M32:
+ /*
+ * The first 64 entries of the ate3240 pool are dedicated to
+ * super-page (TIOCE_ATE_M40S) mode.
+ */
+ first = 64;
+ entries = TIOCE_NUM_M3240_ATES - 64;
+ ate_shadow = ce_kern->ce_ate3240_shadow;
+ ate_reg = ce_mmr->ce_ure_ate3240;
+ pagesize = ce_kern->ce_ate3240_pagesize;
+ bus_base = TIOCE_M32_MIN;
+ msi_capable = 1;
+ break;
+ case TIOCE_ATE_M40:
+ first = 0;
+ entries = TIOCE_NUM_M40_ATES;
+ ate_shadow = ce_kern->ce_ate40_shadow;
+ ate_reg = ce_mmr->ce_ure_ate40;
+ pagesize = MB(64);
+ bus_base = TIOCE_M40_MIN;
+ msi_capable = 0;
+ break;
+ case TIOCE_ATE_M40S:
+ /*
+ * ate3240 entries 0-31 are dedicated to port1 super-page
+ * mappings. ate3240 entries 32-63 are dedicated to port2.
+ */
+ first = port * 32;
+ entries = 32;
+ ate_shadow = ce_kern->ce_ate3240_shadow;
+ ate_reg = ce_mmr->ce_ure_ate3240;
+ pagesize = GB(16);
+ bus_base = TIOCE_M40S_MIN;
+ msi_capable = 0;
+ break;
+ default:
+ return 0;
+ }
+
+ msi_wanted = dma_flags & SN_DMA_MSI;
+ if (msi_wanted && !msi_capable)
+ return 0;
+
+ nates = ATE_NPAGES(ct_addr, len, pagesize);
+ if (nates > entries)
+ return 0;
+
+ last = first + entries - nates;
+ for (i = first; i <= last; i++) {
+ if (ATE_VALID(ate_shadow[i]))
+ continue;
+
+ for (j = i; j < i + nates; j++)
+ if (ATE_VALID(ate_shadow[j]))
+ break;
+
+ if (j >= i + nates)
+ break;
+ }
+
+ if (i > last)
+ return 0;
+
+ map = kzalloc(sizeof(struct tioce_dmamap), GFP_ATOMIC);
+ if (!map)
+ return 0;
+
+ addr = ct_addr;
+ for (j = 0; j < nates; j++) {
+ u64 ate;
+
+ ate = ATE_MAKE(addr, pagesize, msi_wanted);
+ ate_shadow[i + j] = ate;
+ tioce_mmr_storei(ce_kern, &ate_reg[i + j], ate);
+ addr += pagesize;
+ }
+
+ map->refcnt = 1;
+ map->nbytes = nates * pagesize;
+ map->ct_start = ct_addr & ~ATE_PAGEMASK(pagesize);
+ map->pci_start = bus_base + (i * pagesize);
+ map->ate_hw = &ate_reg[i];
+ map->ate_shadow = &ate_shadow[i];
+ map->ate_count = nates;
+
+ list_add(&map->ce_dmamap_list, &ce_kern->ce_dmamap_list);
+
+ return (map->pci_start + (ct_addr - map->ct_start));
+}
+
+/**
+ * tioce_dma_d32 - create a DMA mapping using 32-bit direct mode
+ * @pdev: linux pci_dev representing the function
+ * @ct_addr: system coretalk address to map
+ *
+ * Map @ct_addr into 32-bit bus space of the CE associated with @pdev.
+ */
+static u64
+tioce_dma_d32(struct pci_dev *pdev, u64 ct_addr, int dma_flags)
+{
+ int dma_ok;
+ int port;
+ struct tioce __iomem *ce_mmr;
+ struct tioce_kernel *ce_kern;
+ u64 ct_upper;
+ u64 ct_lower;
+ dma_addr_t bus_addr;
+
+ if (dma_flags & SN_DMA_MSI)
+ return 0;
+
+ ct_upper = ct_addr & ~0x3fffffffUL;
+ ct_lower = ct_addr & 0x3fffffffUL;
+
+ pcidev_to_tioce(pdev, &ce_mmr, &ce_kern, &port);
+
+ if (ce_kern->ce_port[port].dirmap_refcnt == 0) {
+ u64 tmp;
+
+ ce_kern->ce_port[port].dirmap_shadow = ct_upper;
+ tioce_mmr_storei(ce_kern, &ce_mmr->ce_ure_dir_map[port],
+ ct_upper);
+ tmp = ce_mmr->ce_ure_dir_map[port];
+ dma_ok = 1;
+ } else
+ dma_ok = (ce_kern->ce_port[port].dirmap_shadow == ct_upper);
+
+ if (dma_ok) {
+ ce_kern->ce_port[port].dirmap_refcnt++;
+ bus_addr = TIOCE_D32_MIN + ct_lower;
+ } else
+ bus_addr = 0;
+
+ return bus_addr;
+}
+
+/**
+ * tioce_dma_barrier - swizzle a TIOCE bus address to include or exclude
+ * the barrier bit.
+ * @bus_addr: bus address to swizzle
+ * @on: nonzero to set the barrier bit, zero to clear it
+ *
+ * Given a TIOCE bus address, set the appropriate bit to indicate barrier
+ * attributes.
+ */
+static u64
+tioce_dma_barrier(u64 bus_addr, int on)
+{
+ u64 barrier_bit;
+
+ /* barrier not supported in M40/M40S mode */
+ if (TIOCE_M40_ADDR(bus_addr) || TIOCE_M40S_ADDR(bus_addr))
+ return bus_addr;
+
+ if (TIOCE_D64_ADDR(bus_addr))
+ barrier_bit = (1UL << 62);
+ else /* must be m32 or d32 */
+ barrier_bit = (1UL << 30);
+
+ return (on) ? (bus_addr | barrier_bit) : (bus_addr & ~barrier_bit);
+}
+
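A worked example of the barrier swizzling, as a standalone sketch mirroring tioce_dma_barrier() (illustrative only; range constants repeated from earlier in this file):

/*
 * Illustrative only: which bit the barrier attribute lands in per flavor.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t barrier_set(uint64_t bus_addr)
{
	/* M40/M40S (0x40_00000000 - 0xff_ffffffff): barrier not supported */
	if (bus_addr >= 0x0000004000000000ULL &&
	    bus_addr <= 0x000000ffffffffffULL)
		return bus_addr;
	/* D64: barrier is bit 62 */
	if (bus_addr >= 0x8000000000000000ULL)
		return bus_addr | (1ULL << 62);
	/* M32/D32: barrier is bit 30 */
	return bus_addr | (1ULL << 30);
}

int main(void)
{
	printf("0x%016llx\n",
	       (unsigned long long)barrier_set(0x0000000080001000ULL)); /* D32 */
	printf("0x%016llx\n",
	       (unsigned long long)barrier_set(0x8000001234540000ULL)); /* D64 */
	return 0;
}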
+/**
+ * tioce_dma_unmap - release CE mapping resources
+ * @pdev: linux pci_dev representing the function
+ * @bus_addr: bus address returned by an earlier tioce_dma_map
+ * @dir: mapping direction (unused)
+ *
+ * Locate mapping resources associated with @bus_addr and release them.
+ * For mappings created using the direct modes there are no resources
+ * to release.
+ */
+void
+tioce_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
+{
+ int i;
+ int port;
+ struct tioce_kernel *ce_kern;
+ struct tioce __iomem *ce_mmr;
+ unsigned long flags;
+
+ bus_addr = tioce_dma_barrier(bus_addr, 0);
+ pcidev_to_tioce(pdev, &ce_mmr, &ce_kern, &port);
+
+ /* nothing to do for D64 */
+
+ if (TIOCE_D64_ADDR(bus_addr))
+ return;
+
+ spin_lock_irqsave(&ce_kern->ce_lock, flags);
+
+ if (TIOCE_D32_ADDR(bus_addr)) {
+ if (--ce_kern->ce_port[port].dirmap_refcnt == 0) {
+ ce_kern->ce_port[port].dirmap_shadow = 0;
+ tioce_mmr_storei(ce_kern, &ce_mmr->ce_ure_dir_map[port],
+ 0);
+ }
+ } else {
+ struct tioce_dmamap *map;
+
+ list_for_each_entry(map, &ce_kern->ce_dmamap_list,
+ ce_dmamap_list) {
+ u64 last;
+
+ last = map->pci_start + map->nbytes - 1;
+ if (bus_addr >= map->pci_start && bus_addr <= last)
+ break;
+ }
+
+ if (&map->ce_dmamap_list == &ce_kern->ce_dmamap_list) {
+ printk(KERN_WARNING
+ "%s: %s - no map found for bus_addr 0x%llx\n",
+ __func__, pci_name(pdev), bus_addr);
+ } else if (--map->refcnt == 0) {
+ for (i = 0; i < map->ate_count; i++) {
+ map->ate_shadow[i] = 0;
+ tioce_mmr_storei(ce_kern, &map->ate_hw[i], 0);
+ }
+
+ list_del(&map->ce_dmamap_list);
+ kfree(map);
+ }
+ }
+
+ spin_unlock_irqrestore(&ce_kern->ce_lock, flags);
+}
+
+/**
+ * tioce_do_dma_map - map pages for PCI DMA
+ * @pdev: linux pci_dev representing the function
+ * @paddr: host physical address to map
+ * @byte_count: bytes to map
+ * @barrier: nonzero if the mapping must carry the barrier attribute
+ * @dma_flags: SN DMA flags (address type and SN_DMA_MSI)
+ *
+ * This is the main wrapper for mapping host physical pages to CE PCI space.
+ * The mapping mode used is based on the device's dma_mask.
+ */
+static u64
+tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
+ int barrier, int dma_flags)
+{
+ unsigned long flags;
+ u64 ct_addr;
+ u64 mapaddr = 0;
+ struct tioce_kernel *ce_kern;
+ struct tioce_dmamap *map;
+ int port;
+ u64 dma_mask;
+
+ dma_mask = (barrier) ? pdev->dev.coherent_dma_mask : pdev->dma_mask;
+
+ /* cards must be able to address at least 31 bits */
+ if (dma_mask < 0x7fffffffUL)
+ return 0;
+
+ if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
+ ct_addr = PHYS_TO_TIODMA(paddr);
+ else
+ ct_addr = paddr;
+
+ /*
+ * If the device can generate 64 bit addresses, create a D64 map.
+ */
+ if (dma_mask == ~0UL) {
+ mapaddr = tioce_dma_d64(ct_addr, dma_flags);
+ if (mapaddr)
+ goto dma_map_done;
+ }
+
+ pcidev_to_tioce(pdev, NULL, &ce_kern, &port);
+
+ spin_lock_irqsave(&ce_kern->ce_lock, flags);
+
+ /*
+ * D64 didn't work ... See if we have an existing map that covers
+ * this address range. Must account for the device's dma_mask here since
+ * an existing map might have been done in a mode using more pci
+ * address bits than this device can support.
+ */
+ list_for_each_entry(map, &ce_kern->ce_dmamap_list, ce_dmamap_list) {
+ u64 last;
+
+ last = map->ct_start + map->nbytes - 1;
+ if (ct_addr >= map->ct_start &&
+ ct_addr + byte_count - 1 <= last &&
+ map->pci_start <= dma_mask) {
+ map->refcnt++;
+ mapaddr = map->pci_start + (ct_addr - map->ct_start);
+ break;
+ }
+ }
+
+ /*
+ * If we don't have a map yet, and the card can generate 40
+ * bit addresses, try the M40/M40S modes. Note these modes do not
+ * support a barrier bit, so if we need a consistent map these
+ * won't work.
+ */
+ if (!mapaddr && !barrier && dma_mask >= 0xffffffffffUL) {
+ /*
+ * We have two options for 40-bit mappings: 16GB "super" ATEs
+ * and 64MB "regular" ATEs. We'll try both if needed for a
+ * given mapping but which one we try first depends on the
+ * size. For requests >64MB, prefer to use a super page with
+ * regular as the fallback. Otherwise, try in the reverse order.
+ */
+
+ if (byte_count > MB(64)) {
+ mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40S,
+ port, ct_addr, byte_count,
+ dma_flags);
+ if (!mapaddr)
+ mapaddr =
+ tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1,
+ ct_addr, byte_count,
+ dma_flags);
+ } else {
+ mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1,
+ ct_addr, byte_count,
+ dma_flags);
+ if (!mapaddr)
+ mapaddr =
+ tioce_alloc_map(ce_kern, TIOCE_ATE_M40S,
+ port, ct_addr, byte_count,
+ dma_flags);
+ }
+ }
+
+ /*
+ * 32-bit direct is the next mode to try
+ */
+ if (!mapaddr && dma_mask >= 0xffffffffUL)
+ mapaddr = tioce_dma_d32(pdev, ct_addr, dma_flags);
+
+ /*
+ * Last resort, try 32-bit ATE-based map.
+ */
+ if (!mapaddr)
+ mapaddr =
+ tioce_alloc_map(ce_kern, TIOCE_ATE_M32, -1, ct_addr,
+ byte_count, dma_flags);
+
+ spin_unlock_irqrestore(&ce_kern->ce_lock, flags);
+
+dma_map_done:
+ if (mapaddr && barrier)
+ mapaddr = tioce_dma_barrier(mapaddr, 1);
+
+ return mapaddr;
+}
+
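The fallback order implemented above can be summarized by a simplified, userspace-only helper (illustrative; it omits the existing-map reuse path and the per-mode MSI restrictions):

/*
 * Illustrative only: mapping-mode fallback order of tioce_do_dma_map().
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static const char *first_choice(uint64_t dma_mask, size_t byte_count,
				int barrier)
{
	if (dma_mask < 0x7fffffffULL)
		return "rejected: device must address at least 31 bits";
	if (dma_mask == ~0ULL)
		return "D64 direct";
	if (!barrier && dma_mask >= 0xffffffffffULL)
		return byte_count > (64ULL << 20) ?
			"M40S (16GB super-page ATEs)" : "M40 (64MB ATEs)";
	if (dma_mask >= 0xffffffffULL)
		return "D32 direct";
	return "M32 (ate3240 pool)";
}

int main(void)
{
	printf("%s\n", first_choice(0xffffffffULL, 4096, 0));      /* D32  */
	printf("%s\n", first_choice(~0ULL, 4096, 1));              /* D64  */
	printf("%s\n", first_choice(0xffffffffffULL, 128ULL << 20, 0)); /* M40S */
	return 0;
}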
+/**
+ * tioce_dma - standard pci dma map interface
+ * @pdev: pci device requesting the map
+ * @paddr: system physical address to map into pci space
+ * @byte_count: # bytes to map
+ *
+ * Simply call tioce_do_dma_map() to create a map with the barrier bit clear
+ * in the address.
+ */
+static u64
+tioce_dma(struct pci_dev *pdev, unsigned long paddr, size_t byte_count, int dma_flags)
+{
+ return tioce_do_dma_map(pdev, paddr, byte_count, 0, dma_flags);
+}
+
+/**
+ * tioce_dma_consistent - consistent pci dma map interface
+ * @pdev: pci device requesting the map
+ * @paddr: system physical address to map into pci space
+ * @byte_count: # bytes to map
+ *
+ * Simply call tioce_do_dma_map() to create a map with the barrier bit set
+ * in the address.
+ */
+static u64
+tioce_dma_consistent(struct pci_dev *pdev, unsigned long paddr, size_t byte_count, int dma_flags)
+{
+ return tioce_do_dma_map(pdev, paddr, byte_count, 1, dma_flags);
+}
+
+/**
+ * tioce_error_intr_handler - SGI TIO CE error interrupt handler
+ * @irq: unused
+ * @arg: pointer to tioce_common struct for the given CE
+ *
+ * Handle a CE error interrupt. Simply a wrapper around a SAL call which
+ * defers processing to the SGI prom.
+ */
+static irqreturn_t
+tioce_error_intr_handler(int irq, void *arg)
+{
+ struct tioce_common *soft = arg;
+ struct ia64_sal_retval ret_stuff;
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+
+ SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_ERROR_INTERRUPT,
+ soft->ce_pcibus.bs_persist_segment,
+ soft->ce_pcibus.bs_persist_busnum, 0, 0, 0, 0, 0);
+
+ if (ret_stuff.v0)
+ panic("tioce_error_intr_handler: Fatal TIOCE error");
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * tioce_reserve_m32 - reserve M32 ATEs for the indicated address range
+ * @ce_kern: TIOCE context to reserve ATEs for
+ * @base: starting bus address to reserve
+ * @limit: last bus address to reserve
+ *
+ * If base/limit falls within the range of bus space mapped through the
+ * M32 space, reserve the resources corresponding to the range.
+ */
+static void
+tioce_reserve_m32(struct tioce_kernel *ce_kern, u64 base, u64 limit)
+{
+ int ate_index, last_ate, ps;
+ struct tioce __iomem *ce_mmr;
+
+ ce_mmr = (struct tioce __iomem *)ce_kern->ce_common->ce_pcibus.bs_base;
+ ps = ce_kern->ce_ate3240_pagesize;
+ ate_index = ATE_PAGE(base, ps);
+ last_ate = ate_index + ATE_NPAGES(base, limit-base+1, ps) - 1;
+
+ if (ate_index < 64)
+ ate_index = 64;
+
+ if (last_ate >= TIOCE_NUM_M3240_ATES)
+ last_ate = TIOCE_NUM_M3240_ATES - 1;
+
+ while (ate_index <= last_ate) {
+ u64 ate;
+
+ ate = ATE_MAKE(0xdeadbeef, ps, 0);
+ ce_kern->ce_ate3240_shadow[ate_index] = ate;
+ tioce_mmr_storei(ce_kern, &ce_mmr->ce_ure_ate3240[ate_index],
+ ate);
+ ate_index++;
+ }
+}
+
+/**
+ * tioce_kern_init - init kernel structures related to a given TIOCE
+ * @tioce_common: ptr to a cached tioce_common struct that originated in prom
+ */
+static struct tioce_kernel *
+tioce_kern_init(struct tioce_common *tioce_common)
+{
+ int i;
+ int ps;
+ int dev;
+ u32 tmp;
+ unsigned int seg, bus;
+ struct tioce __iomem *tioce_mmr;
+ struct tioce_kernel *tioce_kern;
+
+ tioce_kern = kzalloc(sizeof(struct tioce_kernel), GFP_KERNEL);
+ if (!tioce_kern) {
+ return NULL;
+ }
+
+ tioce_kern->ce_common = tioce_common;
+ spin_lock_init(&tioce_kern->ce_lock);
+ INIT_LIST_HEAD(&tioce_kern->ce_dmamap_list);
+ tioce_common->ce_kernel_private = (u64) tioce_kern;
+
+ /*
+ * Determine the secondary bus number of the port2 logical PPB.
+ * This is used to decide whether a given pci device resides on
+ * port1 or port2. Note: We don't have enough plumbing set up
+ * here to use pci_read_config_xxx() so use raw_pci_read().
+ */
+
+ seg = tioce_common->ce_pcibus.bs_persist_segment;
+ bus = tioce_common->ce_pcibus.bs_persist_busnum;
+
+ raw_pci_read(seg, bus, PCI_DEVFN(2, 0), PCI_SECONDARY_BUS, 1, &tmp);
+ tioce_kern->ce_port1_secondary = (u8) tmp;
+
+ /*
+ * Set PMU pagesize to the largest size available, and zero out
+ * the ATEs.
+ */
+
+ tioce_mmr = (struct tioce __iomem *)tioce_common->ce_pcibus.bs_base;
+ tioce_mmr_clri(tioce_kern, &tioce_mmr->ce_ure_page_map,
+ CE_URE_PAGESIZE_MASK);
+ tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_ure_page_map,
+ CE_URE_256K_PAGESIZE);
+ ps = tioce_kern->ce_ate3240_pagesize = KB(256);
+
+ for (i = 0; i < TIOCE_NUM_M40_ATES; i++) {
+ tioce_kern->ce_ate40_shadow[i] = 0;
+ tioce_mmr_storei(tioce_kern, &tioce_mmr->ce_ure_ate40[i], 0);
+ }
+
+ for (i = 0; i < TIOCE_NUM_M3240_ATES; i++) {
+ tioce_kern->ce_ate3240_shadow[i] = 0;
+ tioce_mmr_storei(tioce_kern, &tioce_mmr->ce_ure_ate3240[i], 0);
+ }
+
+ /*
+ * Reserve ATEs corresponding to reserved address ranges. These
+ * include:
+ *
+ * Memory space covered by each PPB mem base/limit register
+ * Memory space covered by each PPB prefetch base/limit register
+ *
+ * These bus ranges are for pio (downstream) traffic only, and so
+ * cannot be used for DMA.
+ */
+
+ for (dev = 1; dev <= 2; dev++) {
+ u64 base, limit;
+
+ /* mem base/limit */
+
+ raw_pci_read(seg, bus, PCI_DEVFN(dev, 0),
+ PCI_MEMORY_BASE, 2, &tmp);
+ base = (u64)tmp << 16;
+
+ raw_pci_read(seg, bus, PCI_DEVFN(dev, 0),
+ PCI_MEMORY_LIMIT, 2, &tmp);
+ limit = (u64)tmp << 16;
+ limit |= 0xfffffUL;
+
+ if (base < limit)
+ tioce_reserve_m32(tioce_kern, base, limit);
+
+ /*
+ * prefetch mem base/limit. The tioce ppb's have 64-bit
+ * decoders, so read the upper portions w/o checking the
+ * attributes.
+ */
+
+ raw_pci_read(seg, bus, PCI_DEVFN(dev, 0),
+ PCI_PREF_MEMORY_BASE, 2, &tmp);
+ base = ((u64)tmp & PCI_PREF_RANGE_MASK) << 16;
+
+ raw_pci_read(seg, bus, PCI_DEVFN(dev, 0),
+ PCI_PREF_BASE_UPPER32, 4, &tmp);
+ base |= (u64)tmp << 32;
+
+ raw_pci_read(seg, bus, PCI_DEVFN(dev, 0),
+ PCI_PREF_MEMORY_LIMIT, 2, &tmp);
+
+ limit = ((u64)tmp & PCI_PREF_RANGE_MASK) << 16;
+ limit |= 0xfffffUL;
+
+ raw_pci_read(seg, bus, PCI_DEVFN(dev, 0),
+ PCI_PREF_LIMIT_UPPER32, 4, &tmp);
+ limit |= (u64)tmp << 32;
+
+ if ((base < limit) && TIOCE_M32_ADDR(base))
+ tioce_reserve_m32(tioce_kern, base, limit);
+ }
+
+ return tioce_kern;
+}
+
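As a worked example of the prefetch base/limit reconstruction performed in the loop above (sample register values are made up; PCI_PREF_RANGE_MASK matches its pci_regs.h definition of ~0x0f):

/*
 * Illustrative only: rebuilding a PPB 64-bit prefetch window from the
 * four config-space reads done in tioce_kern_init().
 */
#include <stdio.h>
#include <stdint.h>

#define PCI_PREF_RANGE_MASK	(~0x0fULL)	/* low 4 bits are capability flags */

int main(void)
{
	uint32_t pref_base   = 0x4001;	/* PCI_PREF_MEMORY_BASE (16 bits)  */
	uint32_t base_upper  = 0x40;	/* PCI_PREF_BASE_UPPER32           */
	uint32_t pref_limit  = 0x7ff1;	/* PCI_PREF_MEMORY_LIMIT (16 bits) */
	uint32_t limit_upper = 0x40;	/* PCI_PREF_LIMIT_UPPER32          */

	uint64_t base  = ((pref_base & PCI_PREF_RANGE_MASK) << 16)
			 | ((uint64_t)base_upper << 32);
	uint64_t limit = ((pref_limit & PCI_PREF_RANGE_MASK) << 16)
			 | 0xfffffULL | ((uint64_t)limit_upper << 32);

	/* prints 0x0000004040000000 - 0x000000407fffffff (a 1GB window) */
	printf("prefetch window: 0x%016llx - 0x%016llx\n",
	       (unsigned long long)base, (unsigned long long)limit);
	return 0;
}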
+/**
+ * tioce_force_interrupt - implement altix force_interrupt() backend for CE
+ * @sn_irq_info: sn asic irq that we need an interrupt generated for
+ *
+ * Given an sn_irq_info struct, set the proper bit in ce_adm_force_int to
+ * force a secondary interrupt to be generated. This is to work around an
+ * asic issue where there is a small window of opportunity for a legacy device
+ * interrupt to be lost.
+ */
+static void
+tioce_force_interrupt(struct sn_irq_info *sn_irq_info)
+{
+ struct pcidev_info *pcidev_info;
+ struct tioce_common *ce_common;
+ struct tioce_kernel *ce_kern;
+ struct tioce __iomem *ce_mmr;
+ u64 force_int_val;
+
+ if (!sn_irq_info->irq_bridge)
+ return;
+
+ if (sn_irq_info->irq_bridge_type != PCIIO_ASIC_TYPE_TIOCE)
+ return;
+
+ pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
+ if (!pcidev_info)
+ return;
+
+ ce_common = (struct tioce_common *)pcidev_info->pdi_pcibus_info;
+ ce_mmr = (struct tioce __iomem *)ce_common->ce_pcibus.bs_base;
+ ce_kern = (struct tioce_kernel *)ce_common->ce_kernel_private;
+
+ /*
+ * TIOCE Rev A workaround (PV 945826), force an interrupt by writing
+ * the TIO_INTx register directly (1/26/2006)
+ */
+ if (ce_common->ce_rev == TIOCE_REV_A) {
+ u64 int_bit_mask = (1ULL << sn_irq_info->irq_int_bit);
+ u64 status;
+
+ tioce_mmr_load(ce_kern, &ce_mmr->ce_adm_int_status, &status);
+ if (status & int_bit_mask) {
+ u64 force_irq = (1 << 8) | sn_irq_info->irq_irq;
+ u64 ctalk = sn_irq_info->irq_xtalkaddr;
+ u64 nasid, offset;
+
+ nasid = (ctalk & CTALK_NASID_MASK) >> CTALK_NASID_SHFT;
+ offset = (ctalk & CTALK_NODE_OFFSET);
+ HUB_S(TIO_IOSPACE_ADDR(nasid, offset), force_irq);
+ }
+
+ return;
+ }
+
+ /*
+ * irq_int_bit is originally set up by prom, and holds the interrupt
+ * bit shift (not mask) as defined by the bit definitions in the
+ * ce_adm_int mmr. These shifts are not the same for the
+ * ce_adm_force_int register, so do an explicit mapping here to make
+ * things clearer.
+ */
+
+ switch (sn_irq_info->irq_int_bit) {
+ case CE_ADM_INT_PCIE_PORT1_DEV_A_SHFT:
+ force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT1_DEV_A_SHFT;
+ break;
+ case CE_ADM_INT_PCIE_PORT1_DEV_B_SHFT:
+ force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT1_DEV_B_SHFT;
+ break;
+ case CE_ADM_INT_PCIE_PORT1_DEV_C_SHFT:
+ force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT1_DEV_C_SHFT;
+ break;
+ case CE_ADM_INT_PCIE_PORT1_DEV_D_SHFT:
+ force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT1_DEV_D_SHFT;
+ break;
+ case CE_ADM_INT_PCIE_PORT2_DEV_A_SHFT:
+ force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT2_DEV_A_SHFT;
+ break;
+ case CE_ADM_INT_PCIE_PORT2_DEV_B_SHFT:
+ force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT2_DEV_B_SHFT;
+ break;
+ case CE_ADM_INT_PCIE_PORT2_DEV_C_SHFT:
+ force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT2_DEV_C_SHFT;
+ break;
+ case CE_ADM_INT_PCIE_PORT2_DEV_D_SHFT:
+ force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT2_DEV_D_SHFT;
+ break;
+ default:
+ return;
+ }
+ tioce_mmr_storei(ce_kern, &ce_mmr->ce_adm_force_int, force_int_val);
+}
+
+/**
+ * tioce_target_interrupt - implement set_irq_affinity for tioce resident
+ * functions. Note: only applies to line interrupts, not MSI's.
+ *
+ * @sn_irq_info: SN IRQ context
+ *
+ * Given an sn_irq_info, set the associated CE device's interrupt destination
+ * register. Since the interrupt destination registers are on a per-ce-slot
+ * basis, this will retarget line interrupts for all functions downstream of
+ * the slot.
+ */
+static void
+tioce_target_interrupt(struct sn_irq_info *sn_irq_info)
+{
+ struct pcidev_info *pcidev_info;
+ struct tioce_common *ce_common;
+ struct tioce_kernel *ce_kern;
+ struct tioce __iomem *ce_mmr;
+ int bit;
+ u64 vector;
+
+ pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
+ if (!pcidev_info)
+ return;
+
+ ce_common = (struct tioce_common *)pcidev_info->pdi_pcibus_info;
+ ce_mmr = (struct tioce __iomem *)ce_common->ce_pcibus.bs_base;
+ ce_kern = (struct tioce_kernel *)ce_common->ce_kernel_private;
+
+ bit = sn_irq_info->irq_int_bit;
+
+ tioce_mmr_seti(ce_kern, &ce_mmr->ce_adm_int_mask, (1UL << bit));
+ vector = (u64)sn_irq_info->irq_irq << INTR_VECTOR_SHFT;
+ vector |= sn_irq_info->irq_xtalkaddr;
+ tioce_mmr_storei(ce_kern, &ce_mmr->ce_adm_int_dest[bit], vector);
+ tioce_mmr_clri(ce_kern, &ce_mmr->ce_adm_int_mask, (1UL << bit));
+
+ tioce_force_interrupt(sn_irq_info);
+}
+
+/**
+ * tioce_bus_fixup - perform final PCI fixup for a TIO CE bus
+ * @prom_bussoft: Common prom/kernel struct representing the bus
+ *
+ * Replicates the tioce_common pointed to by @prom_bussoft in kernel
+ * space. Allocates and initializes a kernel-only area for a given CE,
+ * and sets up an irq for handling CE error interrupts.
+ *
+ * On successful setup, returns the kernel version of tioce_common back to
+ * the caller.
+ */
+static void *
+tioce_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller)
+{
+ struct tioce_common *tioce_common;
+ struct tioce_kernel *tioce_kern;
+ struct tioce __iomem *tioce_mmr;
+
+ /*
+ * Allocate kernel bus soft and copy from prom.
+ */
+
+ tioce_common = kzalloc(sizeof(struct tioce_common), GFP_KERNEL);
+ if (!tioce_common)
+ return NULL;
+
+ memcpy(tioce_common, prom_bussoft, sizeof(struct tioce_common));
+ tioce_common->ce_pcibus.bs_base = (unsigned long)
+ ioremap(REGION_OFFSET(tioce_common->ce_pcibus.bs_base),
+ sizeof(struct tioce_common));
+
+ tioce_kern = tioce_kern_init(tioce_common);
+ if (tioce_kern == NULL) {
+ kfree(tioce_common);
+ return NULL;
+ }
+
+ /*
+ * Clear out any transient errors before registering the error
+ * interrupt handler.
+ */
+
+ tioce_mmr = (struct tioce __iomem *)tioce_common->ce_pcibus.bs_base;
+ tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_adm_int_status_alias, ~0ULL);
+ tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_adm_error_summary_alias,
+ ~0ULL);
+ tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_dre_comp_err_addr, 0ULL);
+
+ if (request_irq(SGI_PCIASIC_ERROR,
+ tioce_error_intr_handler,
+ IRQF_SHARED, "TIOCE error", (void *)tioce_common))
+ printk(KERN_WARNING
+ "%s: Unable to get irq %d. "
+ "Error interrupts won't be routed for "
+ "TIOCE bus %04x:%02x\n",
+ __func__, SGI_PCIASIC_ERROR,
+ tioce_common->ce_pcibus.bs_persist_segment,
+ tioce_common->ce_pcibus.bs_persist_busnum);
+
+ irq_set_handler(SGI_PCIASIC_ERROR, handle_level_irq);
+ sn_set_err_irq_affinity(SGI_PCIASIC_ERROR);
+ return tioce_common;
+}
+
+static struct sn_pcibus_provider tioce_pci_interfaces = {
+ .dma_map = tioce_dma,
+ .dma_map_consistent = tioce_dma_consistent,
+ .dma_unmap = tioce_dma_unmap,
+ .bus_fixup = tioce_bus_fixup,
+ .force_interrupt = tioce_force_interrupt,
+ .target_interrupt = tioce_target_interrupt
+};
+
+/**
+ * tioce_init_provider - init SN PCI provider ops for TIO CE
+ */
+int
+tioce_init_provider(void)
+{
+ sn_pci_provider[PCIIO_ASIC_TYPE_TIOCE] = &tioce_pci_interfaces;
+ return 0;
+}