Diffstat (limited to 'arch/ia64/sn')
-rw-r--r--  arch/ia64/sn/Makefile                       |    2
-rw-r--r--  arch/ia64/sn/kernel/Makefile                |   14
-rw-r--r--  arch/ia64/sn/kernel/bte.c                   |   54
-rw-r--r--  arch/ia64/sn/kernel/bte_error.c             |   18
-rw-r--r--  arch/ia64/sn/kernel/huberror.c              |   41
-rw-r--r--  arch/ia64/sn/kernel/io_acpi_init.c          |  510
-rw-r--r--  arch/ia64/sn/kernel/io_common.c             |  564
-rw-r--r--  arch/ia64/sn/kernel/io_init.c               |  688
-rw-r--r--  arch/ia64/sn/kernel/iomv.c                  |   18
-rw-r--r--  arch/ia64/sn/kernel/irq.c                   |  368
-rw-r--r--  arch/ia64/sn/kernel/klconflib.c             |   29
-rw-r--r--  arch/ia64/sn/kernel/mca.c                   |    8
-rw-r--r--  arch/ia64/sn/kernel/msi_sn.c                |  238
-rw-r--r--  arch/ia64/sn/kernel/pio_phys.S              |   71
-rw-r--r--  arch/ia64/sn/kernel/setup.c                 |  147
-rw-r--r--  arch/ia64/sn/kernel/sn2/Makefile            |    2
-rw-r--r--  arch/ia64/sn/kernel/sn2/cache.c             |   15
-rw-r--r--  arch/ia64/sn/kernel/sn2/prominfo_proc.c     |  154
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn2_smp.c           |  341
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn_hwperf.c         |  131
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn_proc_fs.c        |  122
-rw-r--r--  arch/ia64/sn/kernel/sn2/timer.c             |   42
-rw-r--r--  arch/ia64/sn/kernel/sn2/timer_interrupt.c   |    9
-rw-r--r--  arch/ia64/sn/kernel/tiocx.c                 |   37
-rw-r--r--  arch/ia64/sn/kernel/xp_main.c               |  290
-rw-r--r--  arch/ia64/sn/kernel/xp_nofault.S            |   31
-rw-r--r--  arch/ia64/sn/kernel/xpc_channel.c           | 2356
-rw-r--r--  arch/ia64/sn/kernel/xpc_main.c              | 1416
-rw-r--r--  arch/ia64/sn/kernel/xpc_partition.c         | 1227
-rw-r--r--  arch/ia64/sn/kernel/xpnet.c                 |  719
-rw-r--r--  arch/ia64/sn/pci/Makefile                   |    2
-rw-r--r--  arch/ia64/sn/pci/pci_dma.c                  |  214
-rw-r--r--  arch/ia64/sn/pci/pcibr/Makefile             |    2
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_ate.c          |   41
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_dma.c          |   90
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_provider.c     |   75
-rw-r--r--  arch/ia64/sn/pci/tioca_provider.c           |   75
-rw-r--r--  arch/ia64/sn/pci/tioce_provider.c           |  435
38 files changed, 3014 insertions, 7582 deletions
diff --git a/arch/ia64/sn/Makefile b/arch/ia64/sn/Makefile
index a269f6d84c2..79a7df02e81 100644
--- a/arch/ia64/sn/Makefile
+++ b/arch/ia64/sn/Makefile
@@ -9,6 +9,4 @@
# Makefile for the sn ia64 subplatform
#
-CPPFLAGS += -I$(srctree)/arch/ia64/sn/include
-
obj-y += kernel/ pci/
diff --git a/arch/ia64/sn/kernel/Makefile b/arch/ia64/sn/kernel/Makefile
index 4351c4ff984..d27df1d45da 100644
--- a/arch/ia64/sn/kernel/Makefile
+++ b/arch/ia64/sn/kernel/Makefile
@@ -4,15 +4,15 @@
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
-# Copyright (C) 1999,2001-2005 Silicon Graphics, Inc. All Rights Reserved.
+# Copyright (C) 1999,2001-2006,2008 Silicon Graphics, Inc. All Rights Reserved.
#
+ccflags-y := -Iarch/ia64/sn/include
+
obj-y += setup.o bte.o bte_error.o irq.o mca.o idle.o \
- huberror.o io_init.o iomv.o klconflib.o sn2/
+ huberror.o io_acpi_init.o io_common.o \
+ io_init.o iomv.o klconflib.o pio_phys.o \
+ sn2/
obj-$(CONFIG_IA64_GENERIC) += machvec.o
obj-$(CONFIG_SGI_TIOCX) += tiocx.o
-obj-$(CONFIG_IA64_SGI_SN_XP) += xp.o
-xp-y := xp_main.o xp_nofault.o
-obj-$(CONFIG_IA64_SGI_SN_XP) += xpc.o
-xpc-y := xpc_main.o xpc_channel.o xpc_partition.o
-obj-$(CONFIG_IA64_SGI_SN_XP) += xpnet.o
+obj-$(CONFIG_PCI_MSI) += msi_sn.o
diff --git a/arch/ia64/sn/kernel/bte.c b/arch/ia64/sn/kernel/bte.c
index dd73c0cb754..cad775a1a15 100644
--- a/arch/ia64/sn/kernel/bte.c
+++ b/arch/ia64/sn/kernel/bte.c
@@ -3,10 +3,9 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2000-2007 Silicon Graphics, Inc. All Rights Reserved.
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <asm/sn/nodepda.h>
#include <asm/sn/addrs.h>
@@ -20,6 +19,7 @@
#include <linux/bootmem.h>
#include <linux/string.h>
#include <linux/sched.h>
+#include <linux/slab.h>
#include <asm/sn/bte.h>
@@ -36,7 +36,7 @@ static struct bteinfo_s *bte_if_on_node(nasid_t nasid, int interface)
nodepda_t *tmp_nodepda;
if (nasid_to_cnodeid(nasid) == -1)
- return (struct bteinfo_s *)NULL;;
+ return (struct bteinfo_s *)NULL;
tmp_nodepda = NODEPDA(nasid_to_cnodeid(nasid));
return &tmp_nodepda->bte_if[interface];
@@ -64,7 +64,7 @@ static inline void bte_start_transfer(struct bteinfo_s *bte, u64 len, u64 mode)
* Use the block transfer engine to move kernel memory from src to dest
* using the assigned mode.
*
- * Paramaters:
+ * Parameters:
* src - physical address of the transfer source.
* dest - physical address of the transfer destination.
* len - number of bytes to transfer from source to dest.
@@ -98,9 +98,10 @@ bte_result_t bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
return BTE_SUCCESS;
}
- BUG_ON((len & L1_CACHE_MASK) ||
- (src & L1_CACHE_MASK) || (dest & L1_CACHE_MASK));
- BUG_ON(!(len < ((BTE_LEN_MASK + 1) << L1_CACHE_SHIFT)));
+ BUG_ON(len & L1_CACHE_MASK);
+ BUG_ON(src & L1_CACHE_MASK);
+ BUG_ON(dest & L1_CACHE_MASK);
+ BUG_ON(len > BTE_MAX_XFER);
/*
* Start with interface corresponding to cpu number
@@ -186,18 +187,13 @@ retry_bteop:
/* Initialize the notification to a known value. */
*bte->most_rcnt_na = BTE_WORD_BUSY;
- notif_phys_addr = TO_PHYS(ia64_tpa((unsigned long)bte->most_rcnt_na));
+ notif_phys_addr = (u64)bte->most_rcnt_na;
- if (is_shub2()) {
- src = SH2_TIO_PHYS_TO_DMA(src);
- dest = SH2_TIO_PHYS_TO_DMA(dest);
- notif_phys_addr = SH2_TIO_PHYS_TO_DMA(notif_phys_addr);
- }
/* Set the source and destination registers */
- BTE_PRINTKV(("IBSA = 0x%lx)\n", (TO_PHYS(src))));
- BTE_SRC_STORE(bte, TO_PHYS(src));
- BTE_PRINTKV(("IBDA = 0x%lx)\n", (TO_PHYS(dest))));
- BTE_DEST_STORE(bte, TO_PHYS(dest));
+ BTE_PRINTKV(("IBSA = 0x%lx)\n", src));
+ BTE_SRC_STORE(bte, src);
+ BTE_PRINTKV(("IBDA = 0x%lx)\n", dest));
+ BTE_DEST_STORE(bte, dest);
/* Set the notification register */
BTE_PRINTKV(("IBNA = 0x%lx)\n", notif_phys_addr));
@@ -233,7 +229,7 @@ retry_bteop:
BTE_LNSTAT_LOAD(bte), *bte->most_rcnt_na));
if (transfer_stat & IBLS_ERROR) {
- bte_status = transfer_stat & ~IBLS_ERROR;
+ bte_status = BTE_GET_ERROR_STATUS(transfer_stat);
} else {
bte_status = BTE_SUCCESS;
}
@@ -253,7 +249,7 @@ EXPORT_SYMBOL(bte_copy);
* use the block transfer engine to move kernel
* memory from src to dest using the assigned mode.
*
- * Paramaters:
+ * Parameters:
* src - physical address of the transfer source.
* dest - physical address of the transfer destination.
* len - number of bytes to transfer from source to dest.
@@ -261,7 +257,7 @@ EXPORT_SYMBOL(bte_copy);
* for IBCT0/1 in the SGI documentation.
*
* NOTE: If the source, dest, and len are all cache line aligned,
- * then it would be _FAR_ preferrable to use bte_copy instead.
+ * then it would be _FAR_ preferable to use bte_copy instead.
*/
bte_result_t bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
{
@@ -283,8 +279,7 @@ bte_result_t bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
}
/* temporary buffer used during unaligned transfers */
- bteBlock_unaligned = kmalloc(len + 3 * L1_CACHE_BYTES,
- GFP_KERNEL | GFP_DMA);
+ bteBlock_unaligned = kmalloc(len + 3 * L1_CACHE_BYTES, GFP_KERNEL);
if (bteBlock_unaligned == NULL) {
return BTEFAIL_NOTAVAIL;
}
@@ -307,7 +302,7 @@ bte_result_t bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
* a standard bte copy.
*
* One nasty exception to the above rule is when the
- * source and destination are not symetrically
+ * source and destination are not symmetrically
* mis-aligned. If the source offset from the first
* cache line is different from the destination offset,
* we make the first section be the entire transfer
@@ -344,7 +339,7 @@ bte_result_t bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
if (footBcopyDest == (headBcopyDest + headBcopyLen)) {
/*
- * We have two contigous bcopy
+ * We have two contiguous bcopy
* blocks. Merge them.
*/
headBcopyLen += footBcopyLen;
@@ -382,20 +377,19 @@ bte_result_t bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
} else {
/*
- * The transfer is not symetric, we will
+ * The transfer is not symmetric, we will
* allocate a buffer large enough for all the
* data, bte_copy into that buffer and then
* bcopy to the destination.
*/
- /* Add the leader from source */
- headBteLen = len + (src & L1_CACHE_MASK);
- /* Add the trailing bytes from footer. */
- headBteLen += L1_CACHE_BYTES - (headBteLen & L1_CACHE_MASK);
- headBteSource = src & ~L1_CACHE_MASK;
headBcopySrcOffset = src & L1_CACHE_MASK;
headBcopyDest = dest;
headBcopyLen = len;
+
+ headBteSource = src - headBcopySrcOffset;
+ /* Add the leading and trailing bytes from source */
+ headBteLen = L1_CACHE_ALIGN(len + headBcopySrcOffset);
}
if (headBcopyLen > 0) {
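[Note: the following sketch is commentary on the hunk above, not part of the patch.]
The rewritten tail of bte_unaligned_copy() replaces the hand-rolled head/trailer math
with a single rounding step: round the source down to a cache-line boundary and round
the total length (payload plus the leading offset) up with L1_CACHE_ALIGN(). A minimal
sketch of that arithmetic, assuming L1_CACHE_MASK == (L1_CACHE_BYTES - 1) as in
asm/sn/bte.h; the helper name is made up for illustration:

    #include <linux/cache.h>
    #include <linux/types.h>

    static void bte_align_sketch(u64 src, u64 len, u64 *bte_src, u64 *bte_len)
    {
            u64 head_off = src & (L1_CACHE_BYTES - 1);  /* offset into first line */

            *bte_src = src - head_off;                  /* line-aligned start   */
            *bte_len = L1_CACHE_ALIGN(len + head_off);  /* line-aligned length  */
            /* [*bte_src, *bte_src + *bte_len) now covers all of [src, src + len) */
    }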
diff --git a/arch/ia64/sn/kernel/bte_error.c b/arch/ia64/sn/kernel/bte_error.c
index f1ec1370b3e..4cb09f3f1ef 100644
--- a/arch/ia64/sn/kernel/bte_error.c
+++ b/arch/ia64/sn/kernel/bte_error.c
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2000-2007 Silicon Graphics, Inc. All Rights Reserved.
*/
#include <linux/types.h>
@@ -78,7 +78,7 @@ int shub1_bte_error_handler(unsigned long _nodepda)
* There are errors which still need to be cleaned up by
* hubiio_crb_error_handler
*/
- mod_timer(recovery_timer, HZ * 5);
+ mod_timer(recovery_timer, jiffies + (HZ * 5));
BTE_PRINTK(("eh:%p:%d Marked Giving up\n", err_nodepda,
smp_processor_id()));
return 1;
@@ -95,7 +95,7 @@ int shub1_bte_error_handler(unsigned long _nodepda)
icrbd.ii_icrb0_d_regval =
REMOTE_HUB_L(nasid, IIO_ICRB_D(i));
if (icrbd.d_bteop) {
- mod_timer(recovery_timer, HZ * 5);
+ mod_timer(recovery_timer, jiffies + (HZ * 5));
BTE_PRINTK(("eh:%p:%d Valid %d, Giving up\n",
err_nodepda, smp_processor_id(),
i));
@@ -105,7 +105,7 @@ int shub1_bte_error_handler(unsigned long _nodepda)
}
BTE_PRINTK(("eh:%p:%d Cleaning up\n", err_nodepda, smp_processor_id()));
- /* Reenable both bte interfaces */
+ /* Re-enable both bte interfaces */
imem.ii_imem_regval = REMOTE_HUB_L(nasid, IIO_IMEM);
imem.ii_imem_fld_s.i_b0_esd = imem.ii_imem_fld_s.i_b1_esd = 1;
REMOTE_HUB_S(nasid, IIO_IMEM, imem.ii_imem_regval);
@@ -148,9 +148,13 @@ int shub2_bte_error_handler(unsigned long _nodepda)
for (i = 0; i < BTES_PER_NODE; i++) {
bte = &err_nodepda->bte_if[i];
status = BTE_LNSTAT_LOAD(bte);
- if ((status & IBLS_ERROR) || !(status & IBLS_BUSY))
+ if (status & IBLS_ERROR) {
+ bte->bh_error = BTE_SHUB2_ERROR(status);
continue;
- mod_timer(recovery_timer, HZ * 5);
+ }
+ if (!(status & IBLS_BUSY))
+ continue;
+ mod_timer(recovery_timer, jiffies + (HZ * 5));
BTE_PRINTK(("eh:%p:%d Marked Giving up\n", err_nodepda,
smp_processor_id()));
return 1;
@@ -243,7 +247,7 @@ bte_crb_error_handler(cnodeid_t cnode, int btenum,
/*
* The caller has already figured out the error type, we save that
- * in the bte handle structure for the thread excercising the
+ * in the bte handle structure for the thread exercising the
* interface to consume.
*/
bte->bh_error = ioe->ie_errortype + BTEFAIL_OFFSET;
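[Note: illustrative sketch, not part of the patch.] The mod_timer() changes above fix
an absolute-versus-relative confusion: mod_timer() takes an absolute expiry time in
jiffies, so passing HZ * 5 asked for an instant roughly five seconds after boot rather
than five seconds from now. The corrected idiom, with a hypothetical timer name:

    #include <linux/jiffies.h>
    #include <linux/timer.h>

    static struct timer_list recovery_timer;    /* assumed to be initialized elsewhere */

    static void arm_recovery_timer(void)
    {
            /* expire roughly five seconds from the current time */
            mod_timer(&recovery_timer, jiffies + 5 * HZ);
    }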
diff --git a/arch/ia64/sn/kernel/huberror.c b/arch/ia64/sn/kernel/huberror.c
index 56ab6bae00e..f925dec2da9 100644
--- a/arch/ia64/sn/kernel/huberror.c
+++ b/arch/ia64/sn/kernel/huberror.c
@@ -3,12 +3,11 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000,2002-2005 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000,2002-2007 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <linux/interrupt.h>
-#include <linux/pci.h>
#include <asm/delay.h>
#include <asm/sn/sn_sal.h>
#include "ioerror.h"
@@ -22,7 +21,7 @@
void hubiio_crb_error_handler(struct hubdev_info *hubdev_info);
extern void bte_crb_error_handler(cnodeid_t, int, int, ioerror_t *,
int);
-static irqreturn_t hub_eint_handler(int irq, void *arg, struct pt_regs *ep)
+static irqreturn_t hub_eint_handler(int irq, void *arg)
{
struct hubdev_info *hubdev_info;
struct ia64_sal_retval ret_stuff;
@@ -38,12 +37,20 @@ static irqreturn_t hub_eint_handler(int irq, void *arg, struct pt_regs *ep)
(u64) nasid, 0, 0, 0, 0, 0, 0);
if ((int)ret_stuff.v0)
- panic("hubii_eint_handler(): Fatal TIO Error");
+ panic("%s: Fatal %s Error", __func__,
+ ((nasid & 1) ? "TIO" : "HUBII"));
if (!(nasid & 1)) /* Not a TIO, handle CRB errors */
(void)hubiio_crb_error_handler(hubdev_info);
- } else
- bte_error_handler((unsigned long)NODEPDA(nasid_to_cnodeid(nasid)));
+ } else
+ if (nasid & 1) { /* TIO errors */
+ SAL_CALL_NOLOCK(ret_stuff, SN_SAL_HUB_ERROR_INTERRUPT,
+ (u64) nasid, 0, 0, 0, 0, 0, 0);
+
+ if ((int)ret_stuff.v0)
+ panic("%s: Fatal TIO Error", __func__);
+ } else
+ bte_error_handler((unsigned long)NODEPDA(nasid_to_cnodeid(nasid)));
return IRQ_HANDLED;
}
@@ -178,11 +185,15 @@ void hubiio_crb_error_handler(struct hubdev_info *hubdev_info)
*/
void hub_error_init(struct hubdev_info *hubdev_info)
{
- if (request_irq(SGI_II_ERROR, (void *)hub_eint_handler, SA_SHIRQ,
- "SN_hub_error", (void *)hubdev_info))
- printk("hub_error_init: Failed to request_irq for 0x%p\n",
+
+ if (request_irq(SGI_II_ERROR, hub_eint_handler, IRQF_SHARED,
+ "SN_hub_error", hubdev_info)) {
+ printk(KERN_ERR "hub_error_init: Failed to request_irq for 0x%p\n",
hubdev_info);
- return;
+ return;
+ }
+ irq_set_handler(SGI_II_ERROR, handle_level_irq);
+ sn_set_err_irq_affinity(SGI_II_ERROR);
}
@@ -195,11 +206,15 @@ void hub_error_init(struct hubdev_info *hubdev_info)
*/
void ice_error_init(struct hubdev_info *hubdev_info)
{
+
if (request_irq
- (SGI_TIO_ERROR, (void *)hub_eint_handler, SA_SHIRQ, "SN_TIO_error",
- (void *)hubdev_info))
+ (SGI_TIO_ERROR, (void *)hub_eint_handler, IRQF_SHARED, "SN_TIO_error",
+ (void *)hubdev_info)) {
printk("ice_error_init: request_irq() error hubdev_info 0x%p\n",
hubdev_info);
- return;
+ return;
+ }
+ irq_set_handler(SGI_TIO_ERROR, handle_level_irq);
+ sn_set_err_irq_affinity(SGI_TIO_ERROR);
}
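[Note: illustrative sketch, not part of the patch.] The huberror.c hunks move to the
two-argument irq handler prototype (the struct pt_regs parameter is gone) and register
it with IRQF_SHARED instead of the old SA_SHIRQ flag. A condensed sketch of that idiom;
SGI_II_ERROR and handle_level_irq come from the SN and genirq headers, and the function
names here are placeholders:

    #include <linux/interrupt.h>
    #include <linux/irq.h>

    static irqreturn_t sample_eint_handler(int irq, void *arg)
    {
            /* arg is the hubdev_info cookie passed to request_irq() */
            return IRQ_HANDLED;
    }

    static int sample_error_init(void *hubdev_info)
    {
            if (request_irq(SGI_II_ERROR, sample_eint_handler, IRQF_SHARED,
                            "SN_hub_error", hubdev_info))
                    return -EBUSY;
            irq_set_handler(SGI_II_ERROR, handle_level_irq);
            return 0;
    }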
diff --git a/arch/ia64/sn/kernel/io_acpi_init.c b/arch/ia64/sn/kernel/io_acpi_init.c
new file mode 100644
index 00000000000..0640739cc20
--- /dev/null
+++ b/arch/ia64/sn/kernel/io_acpi_init.c
@@ -0,0 +1,510 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <asm/sn/types.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/pcidev.h>
+#include <asm/sn/pcibus_provider_defs.h>
+#include <asm/sn/sn_sal.h>
+#include "xtalk/hubdev.h"
+#include <linux/acpi.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+
+
+/*
+ * The code in this file will only be executed when running with
+ * a PROM that has ACPI IO support. (i.e., SN_ACPI_BASE_SUPPORT() == 1)
+ */
+
+
+/*
+ * This value must match the UUID the PROM uses
+ * (io/acpi/defblk.c) when building a vendor descriptor.
+ */
+struct acpi_vendor_uuid sn_uuid = {
+ .subtype = 0,
+ .data = { 0x2c, 0xc6, 0xa6, 0xfe, 0x9c, 0x44, 0xda, 0x11,
+ 0xa2, 0x7c, 0x08, 0x00, 0x69, 0x13, 0xea, 0x51 },
+};
+
+struct sn_pcidev_match {
+ u8 bus;
+ unsigned int devfn;
+ acpi_handle handle;
+};
+
+/*
+ * Perform the early IO init in PROM.
+ */
+static long
+sal_ioif_init(u64 *result)
+{
+ struct ia64_sal_retval isrv = {0,0,0,0};
+
+ SAL_CALL_NOLOCK(isrv,
+ SN_SAL_IOIF_INIT, 0, 0, 0, 0, 0, 0, 0);
+ *result = isrv.v0;
+ return isrv.status;
+}
+
+/*
+ * sn_acpi_hubdev_init() - This function is called by acpi_ns_get_device_callback()
+ * for all SGIHUB and SGITIO acpi devices defined in the
+ * DSDT. It obtains the hubdev_info pointer from the
+ * ACPI vendor resource, which the PROM setup, and sets up the
+ * hubdev_info in the pda.
+ */
+
+static acpi_status __init
+sn_acpi_hubdev_init(acpi_handle handle, u32 depth, void *context, void **ret)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ u64 addr;
+ struct hubdev_info *hubdev;
+ struct hubdev_info *hubdev_ptr;
+ int i;
+ u64 nasid;
+ struct acpi_resource *resource;
+ acpi_status status;
+ struct acpi_resource_vendor_typed *vendor;
+ extern void sn_common_hubdev_init(struct hubdev_info *);
+
+ status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS,
+ &sn_uuid, &buffer);
+ if (ACPI_FAILURE(status)) {
+ acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer);
+ printk(KERN_ERR
+ "sn_acpi_hubdev_init: acpi_get_vendor_resource() "
+ "(0x%x) failed for: %s\n", status,
+ (char *)name_buffer.pointer);
+ kfree(name_buffer.pointer);
+ return AE_OK; /* Continue walking namespace */
+ }
+
+ resource = buffer.pointer;
+ vendor = &resource->data.vendor_typed;
+ if ((vendor->byte_length - sizeof(struct acpi_vendor_uuid)) !=
+ sizeof(struct hubdev_info *)) {
+ acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer);
+ printk(KERN_ERR
+ "sn_acpi_hubdev_init: Invalid vendor data length: "
+ "%d for: %s\n",
+ vendor->byte_length, (char *)name_buffer.pointer);
+ kfree(name_buffer.pointer);
+ goto exit;
+ }
+
+ memcpy(&addr, vendor->byte_data, sizeof(struct hubdev_info *));
+ hubdev_ptr = __va((struct hubdev_info *) addr);
+
+ nasid = hubdev_ptr->hdi_nasid;
+ i = nasid_to_cnodeid(nasid);
+ hubdev = (struct hubdev_info *)(NODEPDA(i)->pdinfo);
+ *hubdev = *hubdev_ptr;
+ sn_common_hubdev_init(hubdev);
+
+exit:
+ kfree(buffer.pointer);
+ return AE_OK; /* Continue walking namespace */
+}
+
+/*
+ * sn_get_bussoft_ptr() - The pcibus_bussoft pointer is found in
+ * the ACPI Vendor resource for this bus.
+ */
+static struct pcibus_bussoft *
+sn_get_bussoft_ptr(struct pci_bus *bus)
+{
+ u64 addr;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_handle handle;
+ struct pcibus_bussoft *prom_bussoft_ptr;
+ struct acpi_resource *resource;
+ acpi_status status;
+ struct acpi_resource_vendor_typed *vendor;
+
+
+ handle = acpi_device_handle(PCI_CONTROLLER(bus)->companion);
+ status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS,
+ &sn_uuid, &buffer);
+ if (ACPI_FAILURE(status)) {
+ acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer);
+ printk(KERN_ERR "%s: "
+ "acpi_get_vendor_resource() failed (0x%x) for: %s\n",
+ __func__, status, (char *)name_buffer.pointer);
+ kfree(name_buffer.pointer);
+ return NULL;
+ }
+ resource = buffer.pointer;
+ vendor = &resource->data.vendor_typed;
+
+ if ((vendor->byte_length - sizeof(struct acpi_vendor_uuid)) !=
+ sizeof(struct pcibus_bussoft *)) {
+ printk(KERN_ERR
+ "%s: Invalid vendor data length %d\n",
+ __func__, vendor->byte_length);
+ kfree(buffer.pointer);
+ return NULL;
+ }
+ memcpy(&addr, vendor->byte_data, sizeof(struct pcibus_bussoft *));
+ prom_bussoft_ptr = __va((struct pcibus_bussoft *) addr);
+ kfree(buffer.pointer);
+
+ return prom_bussoft_ptr;
+}
+
+/*
+ * sn_extract_device_info - Extract the pcidev_info and the sn_irq_info
+ * pointers from the vendor resource using the
+ * provided acpi handle, and copy the structures
+ * into the argument buffers.
+ */
+static int
+sn_extract_device_info(acpi_handle handle, struct pcidev_info **pcidev_info,
+ struct sn_irq_info **sn_irq_info)
+{
+ u64 addr;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct sn_irq_info *irq_info, *irq_info_prom;
+ struct pcidev_info *pcidev_ptr, *pcidev_prom_ptr;
+ struct acpi_resource *resource;
+ int ret = 0;
+ acpi_status status;
+ struct acpi_resource_vendor_typed *vendor;
+
+ /*
+ * The pointer to this device's pcidev_info structure in
+ * the PROM, is in the vendor resource.
+ */
+ status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS,
+ &sn_uuid, &buffer);
+ if (ACPI_FAILURE(status)) {
+ acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer);
+ printk(KERN_ERR
+ "%s: acpi_get_vendor_resource() failed (0x%x) for: %s\n",
+ __func__, status, (char *)name_buffer.pointer);
+ kfree(name_buffer.pointer);
+ return 1;
+ }
+
+ resource = buffer.pointer;
+ vendor = &resource->data.vendor_typed;
+ if ((vendor->byte_length - sizeof(struct acpi_vendor_uuid)) !=
+ sizeof(struct pci_devdev_info *)) {
+ acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer);
+ printk(KERN_ERR
+ "%s: Invalid vendor data length: %d for: %s\n",
+ __func__, vendor->byte_length,
+ (char *)name_buffer.pointer);
+ kfree(name_buffer.pointer);
+ ret = 1;
+ goto exit;
+ }
+
+ pcidev_ptr = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
+ if (!pcidev_ptr)
+ panic("%s: Unable to alloc memory for pcidev_info", __func__);
+
+ memcpy(&addr, vendor->byte_data, sizeof(struct pcidev_info *));
+ pcidev_prom_ptr = __va(addr);
+ memcpy(pcidev_ptr, pcidev_prom_ptr, sizeof(struct pcidev_info));
+
+ /* Get the IRQ info */
+ irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
+ if (!irq_info)
+ panic("%s: Unable to alloc memory for sn_irq_info", __func__);
+
+ if (pcidev_ptr->pdi_sn_irq_info) {
+ irq_info_prom = __va(pcidev_ptr->pdi_sn_irq_info);
+ memcpy(irq_info, irq_info_prom, sizeof(struct sn_irq_info));
+ }
+
+ *pcidev_info = pcidev_ptr;
+ *sn_irq_info = irq_info;
+
+exit:
+ kfree(buffer.pointer);
+ return ret;
+}
+
+static unsigned int
+get_host_devfn(acpi_handle device_handle, acpi_handle rootbus_handle)
+{
+ unsigned long long adr;
+ acpi_handle child;
+ unsigned int devfn;
+ int function;
+ acpi_handle parent;
+ int slot;
+ acpi_status status;
+ struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+
+ acpi_get_name(device_handle, ACPI_FULL_PATHNAME, &name_buffer);
+
+ /*
+ * Do an upward search to find the root bus device, and
+ * obtain the host devfn from the previous child device.
+ */
+ child = device_handle;
+ while (child) {
+ status = acpi_get_parent(child, &parent);
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR "%s: acpi_get_parent() failed "
+ "(0x%x) for: %s\n", __func__, status,
+ (char *)name_buffer.pointer);
+ panic("%s: Unable to find host devfn\n", __func__);
+ }
+ if (parent == rootbus_handle)
+ break;
+ child = parent;
+ }
+ if (!child) {
+ printk(KERN_ERR "%s: Unable to find root bus for: %s\n",
+ __func__, (char *)name_buffer.pointer);
+ BUG();
+ }
+
+ status = acpi_evaluate_integer(child, METHOD_NAME__ADR, NULL, &adr);
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR "%s: Unable to get _ADR (0x%x) for: %s\n",
+ __func__, status, (char *)name_buffer.pointer);
+ panic("%s: Unable to find host devfn\n", __func__);
+ }
+
+ kfree(name_buffer.pointer);
+
+ slot = (adr >> 16) & 0xffff;
+ function = adr & 0xffff;
+ devfn = PCI_DEVFN(slot, function);
+ return devfn;
+}
+
+/*
+ * find_matching_device - Callback routine to find the ACPI device
+ * that matches up with our pci_dev device.
+ * Matching is done on bus number and devfn.
+ * To find the bus number for a particular
+ * ACPI device, we must look at the _BBN method
+ * of its parent.
+ */
+static acpi_status
+find_matching_device(acpi_handle handle, u32 lvl, void *context, void **rv)
+{
+ unsigned long long bbn = -1;
+ unsigned long long adr;
+ acpi_handle parent = NULL;
+ acpi_status status;
+ unsigned int devfn;
+ int function;
+ int slot;
+ struct sn_pcidev_match *info = context;
+ struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+
+ status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL,
+ &adr);
+ if (ACPI_SUCCESS(status)) {
+ status = acpi_get_parent(handle, &parent);
+ if (ACPI_FAILURE(status)) {
+ acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer);
+ printk(KERN_ERR
+ "%s: acpi_get_parent() failed (0x%x) for: %s\n",
+ __func__, status, (char *)name_buffer.pointer);
+ kfree(name_buffer.pointer);
+ return AE_OK;
+ }
+ status = acpi_evaluate_integer(parent, METHOD_NAME__BBN,
+ NULL, &bbn);
+ if (ACPI_FAILURE(status)) {
+ acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer);
+ printk(KERN_ERR
+ "%s: Failed to find _BBN in parent of: %s\n",
+ __func__, (char *)name_buffer.pointer);
+ kfree(name_buffer.pointer);
+ return AE_OK;
+ }
+
+ slot = (adr >> 16) & 0xffff;
+ function = adr & 0xffff;
+ devfn = PCI_DEVFN(slot, function);
+ if ((info->devfn == devfn) && (info->bus == bbn)) {
+ /* We have a match! */
+ info->handle = handle;
+ return 1;
+ }
+ }
+ return AE_OK;
+}
+
+/*
+ * sn_acpi_get_pcidev_info - Search ACPI namespace for the acpi
+ * device matching the specified pci_dev,
+ * and return the pcidev info and irq info.
+ */
+int
+sn_acpi_get_pcidev_info(struct pci_dev *dev, struct pcidev_info **pcidev_info,
+ struct sn_irq_info **sn_irq_info)
+{
+ unsigned int host_devfn;
+ struct sn_pcidev_match pcidev_match;
+ acpi_handle rootbus_handle;
+ unsigned long long segment;
+ acpi_status status;
+ struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+
+ rootbus_handle = acpi_device_handle(PCI_CONTROLLER(dev)->companion);
+ status = acpi_evaluate_integer(rootbus_handle, METHOD_NAME__SEG, NULL,
+ &segment);
+ if (ACPI_SUCCESS(status)) {
+ if (segment != pci_domain_nr(dev)) {
+ acpi_get_name(rootbus_handle, ACPI_FULL_PATHNAME,
+ &name_buffer);
+ printk(KERN_ERR
+ "%s: Segment number mismatch, 0x%llx vs 0x%x for: %s\n",
+ __func__, segment, pci_domain_nr(dev),
+ (char *)name_buffer.pointer);
+ kfree(name_buffer.pointer);
+ return 1;
+ }
+ } else {
+ acpi_get_name(rootbus_handle, ACPI_FULL_PATHNAME, &name_buffer);
+ printk(KERN_ERR "%s: Unable to get __SEG from: %s\n",
+ __func__, (char *)name_buffer.pointer);
+ kfree(name_buffer.pointer);
+ return 1;
+ }
+
+ /*
+ * We want to search all devices in this segment/domain
+ * of the ACPI namespace for the matching ACPI device,
+ * which holds the pcidev_info pointer in its vendor resource.
+ */
+ pcidev_match.bus = dev->bus->number;
+ pcidev_match.devfn = dev->devfn;
+ pcidev_match.handle = NULL;
+
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, rootbus_handle, ACPI_UINT32_MAX,
+ find_matching_device, NULL, &pcidev_match, NULL);
+
+ if (!pcidev_match.handle) {
+ printk(KERN_ERR
+ "%s: Could not find matching ACPI device for %s.\n",
+ __func__, pci_name(dev));
+ return 1;
+ }
+
+ if (sn_extract_device_info(pcidev_match.handle, pcidev_info, sn_irq_info))
+ return 1;
+
+ /* Build up the pcidev_info.pdi_slot_host_handle */
+ host_devfn = get_host_devfn(pcidev_match.handle, rootbus_handle);
+ (*pcidev_info)->pdi_slot_host_handle =
+ ((unsigned long) pci_domain_nr(dev) << 40) |
+ /* bus == 0 */
+ host_devfn;
+ return 0;
+}
+
+/*
+ * sn_acpi_slot_fixup - Obtain the pcidev_info and sn_irq_info.
+ * Perform any SN specific slot fixup.
+ * At present there does not appear to be
+ * any generic way to handle a ROM image
+ * that has been shadowed by the PROM, so
+ * we pass a pointer to it within the
+ * pcidev_info structure.
+ */
+
+void
+sn_acpi_slot_fixup(struct pci_dev *dev)
+{
+ void __iomem *addr;
+ struct pcidev_info *pcidev_info = NULL;
+ struct sn_irq_info *sn_irq_info = NULL;
+ size_t image_size, size;
+
+ if (sn_acpi_get_pcidev_info(dev, &pcidev_info, &sn_irq_info)) {
+ panic("%s: Failure obtaining pcidev_info for %s\n",
+ __func__, pci_name(dev));
+ }
+
+ if (pcidev_info->pdi_pio_mapped_addr[PCI_ROM_RESOURCE]) {
+ /*
+ * A valid ROM image exists and has been shadowed by the
+ * PROM. Setup the pci_dev ROM resource with the address
+ * of the shadowed copy, and the actual length of the ROM image.
+ */
+ size = pci_resource_len(dev, PCI_ROM_RESOURCE);
+ addr = ioremap(pcidev_info->pdi_pio_mapped_addr[PCI_ROM_RESOURCE],
+ size);
+ image_size = pci_get_rom_size(dev, addr, size);
+ dev->resource[PCI_ROM_RESOURCE].start = (unsigned long) addr;
+ dev->resource[PCI_ROM_RESOURCE].end =
+ (unsigned long) addr + image_size - 1;
+ dev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_BIOS_COPY;
+ }
+ sn_pci_fixup_slot(dev, pcidev_info, sn_irq_info);
+}
+
+EXPORT_SYMBOL(sn_acpi_slot_fixup);
+
+
+/*
+ * sn_acpi_bus_fixup - Perform SN specific setup of software structs
+ * (pcibus_bussoft, pcidev_info) and hardware
+ * registers, for the specified bus and devices under it.
+ */
+void
+sn_acpi_bus_fixup(struct pci_bus *bus)
+{
+ struct pci_dev *pci_dev = NULL;
+ struct pcibus_bussoft *prom_bussoft_ptr;
+
+ if (!bus->parent) { /* If root bus */
+ prom_bussoft_ptr = sn_get_bussoft_ptr(bus);
+ if (prom_bussoft_ptr == NULL) {
+ printk(KERN_ERR
+ "%s: 0x%04x:0x%02x Unable to "
+ "obtain prom_bussoft_ptr\n",
+ __func__, pci_domain_nr(bus), bus->number);
+ return;
+ }
+ sn_common_bus_fixup(bus, prom_bussoft_ptr);
+ }
+ list_for_each_entry(pci_dev, &bus->devices, bus_list) {
+ sn_acpi_slot_fixup(pci_dev);
+ }
+}
+
+/*
+ * sn_io_acpi_init - PROM has ACPI support for IO, defining at a minimum the
+ * nodes and root buses in the DSDT. As a result, bus scanning
+ * will be initiated by the Linux ACPI code.
+ */
+
+void __init
+sn_io_acpi_init(void)
+{
+ u64 result;
+ long status;
+
+ /* SN Altix does not follow the IOSAPIC IRQ routing model */
+ acpi_irq_model = ACPI_IRQ_MODEL_PLATFORM;
+
+ /* Setup hubdev_info for all SGIHUB/SGITIO devices */
+ acpi_get_devices("SGIHUB", sn_acpi_hubdev_init, NULL, NULL);
+ acpi_get_devices("SGITIO", sn_acpi_hubdev_init, NULL, NULL);
+
+ status = sal_ioif_init(&result);
+ if (status || result)
+ panic("sal_ioif_init failed: [%lx] %s\n",
+ status, ia64_sal_strerror(status));
+}
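[Note: illustrative sketch, not part of the patch.] The new io_acpi_init.c leans on one
pattern throughout: the PROM stashes a physical kernel pointer inside an ACPI vendor
descriptor tagged with sn_uuid, and the kernel reads it back out of _CRS with
acpi_get_vendor_resource() and converts it with __va(). A condensed sketch of that
pattern, with placeholder names and the error reporting omitted:

    #include <linux/acpi.h>
    #include <linux/slab.h>
    #include <linux/string.h>
    #include <asm/page.h>

    static void *sn_vendor_ptr_sketch(acpi_handle handle,
                                      struct acpi_vendor_uuid *uuid)
    {
            struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
            struct acpi_resource_vendor_typed *vendor;
            struct acpi_resource *resource;
            void *ptr = NULL;
            u64 addr;

            if (ACPI_FAILURE(acpi_get_vendor_resource(handle, METHOD_NAME__CRS,
                                                      uuid, &buffer)))
                    return NULL;

            resource = buffer.pointer;
            vendor = &resource->data.vendor_typed;
            if (vendor->byte_length - sizeof(struct acpi_vendor_uuid) ==
                sizeof(void *)) {
                    memcpy(&addr, vendor->byte_data, sizeof(void *));
                    ptr = __va(addr);   /* the PROM stored a physical address */
            }
            kfree(buffer.pointer);
            return ptr;
    }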
diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c
new file mode 100644
index 00000000000..11f2275570f
--- /dev/null
+++ b/arch/ia64/sn/kernel/io_common.c
@@ -0,0 +1,564 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/bootmem.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <asm/sn/types.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/sn_feature_sets.h>
+#include <asm/sn/geo.h>
+#include <asm/sn/io.h>
+#include <asm/sn/l1.h>
+#include <asm/sn/module.h>
+#include <asm/sn/pcibr_provider.h>
+#include <asm/sn/pcibus_provider_defs.h>
+#include <asm/sn/pcidev.h>
+#include <asm/sn/simulator.h>
+#include <asm/sn/sn_sal.h>
+#include <asm/sn/tioca_provider.h>
+#include <asm/sn/tioce_provider.h>
+#include "xtalk/hubdev.h"
+#include "xtalk/xwidgetdev.h"
+#include <linux/acpi.h>
+#include <asm/sn/sn2/sn_hwperf.h>
+#include <asm/sn/acpi.h>
+
+extern void sn_init_cpei_timer(void);
+extern void register_sn_procfs(void);
+extern void sn_io_acpi_init(void);
+extern void sn_io_init(void);
+
+
+static struct list_head sn_sysdata_list;
+
+/* sysdata list struct */
+struct sysdata_el {
+ struct list_head entry;
+ void *sysdata;
+};
+
+int sn_ioif_inited; /* SN I/O infrastructure initialized? */
+
+int sn_acpi_rev; /* SN ACPI revision */
+EXPORT_SYMBOL_GPL(sn_acpi_rev);
+
+struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES]; /* indexed by asic type */
+
+/*
+ * Hooks and struct for unsupported pci providers
+ */
+
+static dma_addr_t
+sn_default_pci_map(struct pci_dev *pdev, unsigned long paddr, size_t size, int type)
+{
+ return 0;
+}
+
+static void
+sn_default_pci_unmap(struct pci_dev *pdev, dma_addr_t addr, int direction)
+{
+ return;
+}
+
+static void *
+sn_default_pci_bus_fixup(struct pcibus_bussoft *soft, struct pci_controller *controller)
+{
+ return NULL;
+}
+
+static struct sn_pcibus_provider sn_pci_default_provider = {
+ .dma_map = sn_default_pci_map,
+ .dma_map_consistent = sn_default_pci_map,
+ .dma_unmap = sn_default_pci_unmap,
+ .bus_fixup = sn_default_pci_bus_fixup,
+};
+
+/*
+ * Retrieve the DMA Flush List given nasid, widget, and device.
+ * This list is needed to implement the WAR - Flush DMA data on PIO Reads.
+ */
+static inline u64
+sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num,
+ u64 address)
+{
+ struct ia64_sal_retval ret_stuff;
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+
+ SAL_CALL_NOLOCK(ret_stuff,
+ (u64) SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST,
+ (u64) nasid, (u64) widget_num,
+ (u64) device_num, (u64) address, 0, 0, 0);
+ return ret_stuff.status;
+}
+
+/*
+ * sn_pcidev_info_get() - Retrieve the pcidev_info struct for the specified
+ * device.
+ */
+inline struct pcidev_info *
+sn_pcidev_info_get(struct pci_dev *dev)
+{
+ struct pcidev_info *pcidev;
+
+ list_for_each_entry(pcidev,
+ &(SN_PLATFORM_DATA(dev)->pcidev_info), pdi_list) {
+ if (pcidev->pdi_linux_pcidev == dev)
+ return pcidev;
+ }
+ return NULL;
+}
+
+/* Older PROM flush WAR
+ *
+ * 01/16/06 -- This war will be in place until a new official PROM is released.
+ * Additionally note that the struct sn_flush_device_war also has to be
+ * removed from arch/ia64/sn/include/xtalk/hubdev.h
+ */
+
+static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device,
+ struct sn_flush_device_common *common)
+{
+ struct sn_flush_device_war *war_list;
+ struct sn_flush_device_war *dev_entry;
+ struct ia64_sal_retval isrv = {0,0,0,0};
+
+ printk_once(KERN_WARNING
+ "PROM version < 4.50 -- implementing old PROM flush WAR\n");
+
+ war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL);
+ BUG_ON(!war_list);
+
+ SAL_CALL_NOLOCK(isrv, SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST,
+ nasid, widget, __pa(war_list), 0, 0, 0 ,0);
+ if (isrv.status)
+ panic("sn_device_fixup_war failed: %s\n",
+ ia64_sal_strerror(isrv.status));
+
+ dev_entry = war_list + device;
+ memcpy(common,dev_entry, sizeof(*common));
+ kfree(war_list);
+
+ return isrv.status;
+}
+
+/*
+ * sn_common_hubdev_init() - This routine is called to initialize the HUB data
+ * structure for each node in the system.
+ */
+void __init
+sn_common_hubdev_init(struct hubdev_info *hubdev)
+{
+
+ struct sn_flush_device_kernel *sn_flush_device_kernel;
+ struct sn_flush_device_kernel *dev_entry;
+ s64 status;
+ int widget, device, size;
+
+ /* Attach the error interrupt handlers */
+ if (hubdev->hdi_nasid & 1) /* If TIO */
+ ice_error_init(hubdev);
+ else
+ hub_error_init(hubdev);
+
+ for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++)
+ hubdev->hdi_xwidget_info[widget].xwi_hubinfo = hubdev;
+
+ if (!hubdev->hdi_flush_nasid_list.widget_p)
+ return;
+
+ size = (HUB_WIDGET_ID_MAX + 1) *
+ sizeof(struct sn_flush_device_kernel *);
+ hubdev->hdi_flush_nasid_list.widget_p =
+ kzalloc(size, GFP_KERNEL);
+ BUG_ON(!hubdev->hdi_flush_nasid_list.widget_p);
+
+ for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) {
+ size = DEV_PER_WIDGET *
+ sizeof(struct sn_flush_device_kernel);
+ sn_flush_device_kernel = kzalloc(size, GFP_KERNEL);
+ BUG_ON(!sn_flush_device_kernel);
+
+ dev_entry = sn_flush_device_kernel;
+ for (device = 0; device < DEV_PER_WIDGET;
+ device++, dev_entry++) {
+ size = sizeof(struct sn_flush_device_common);
+ dev_entry->common = kzalloc(size, GFP_KERNEL);
+ BUG_ON(!dev_entry->common);
+ if (sn_prom_feature_available(PRF_DEVICE_FLUSH_LIST))
+ status = sal_get_device_dmaflush_list(
+ hubdev->hdi_nasid, widget, device,
+ (u64)(dev_entry->common));
+ else
+ status = sn_device_fixup_war(hubdev->hdi_nasid,
+ widget, device,
+ dev_entry->common);
+ if (status != SALRET_OK)
+ panic("SAL call failed: %s\n",
+ ia64_sal_strerror(status));
+
+ spin_lock_init(&dev_entry->sfdl_flush_lock);
+ }
+
+ if (sn_flush_device_kernel)
+ hubdev->hdi_flush_nasid_list.widget_p[widget] =
+ sn_flush_device_kernel;
+ }
+}
+
+void sn_pci_unfixup_slot(struct pci_dev *dev)
+{
+ struct pci_dev *host_pci_dev = SN_PCIDEV_INFO(dev)->host_pci_dev;
+
+ sn_irq_unfixup(dev);
+ pci_dev_put(host_pci_dev);
+ pci_dev_put(dev);
+}
+
+/*
+ * sn_pci_fixup_slot()
+ */
+void sn_pci_fixup_slot(struct pci_dev *dev, struct pcidev_info *pcidev_info,
+ struct sn_irq_info *sn_irq_info)
+{
+ int segment = pci_domain_nr(dev->bus);
+ struct pcibus_bussoft *bs;
+ struct pci_dev *host_pci_dev;
+ unsigned int bus_no, devfn;
+
+ pci_dev_get(dev); /* for the sysdata pointer */
+
+ /* Add pcidev_info to list in pci_controller.platform_data */
+ list_add_tail(&pcidev_info->pdi_list,
+ &(SN_PLATFORM_DATA(dev->bus)->pcidev_info));
+ /*
+ * Using the PROMs values for the PCI host bus, get the Linux
+ * PCI host_pci_dev struct and set up host bus linkages
+ */
+
+ bus_no = (pcidev_info->pdi_slot_host_handle >> 32) & 0xff;
+ devfn = pcidev_info->pdi_slot_host_handle & 0xffffffff;
+ host_pci_dev = pci_get_domain_bus_and_slot(segment, bus_no, devfn);
+
+ pcidev_info->host_pci_dev = host_pci_dev;
+ pcidev_info->pdi_linux_pcidev = dev;
+ pcidev_info->pdi_host_pcidev_info = SN_PCIDEV_INFO(host_pci_dev);
+ bs = SN_PCIBUS_BUSSOFT(dev->bus);
+ pcidev_info->pdi_pcibus_info = bs;
+
+ if (bs && bs->bs_asic_type < PCIIO_ASIC_MAX_TYPES) {
+ SN_PCIDEV_BUSPROVIDER(dev) = sn_pci_provider[bs->bs_asic_type];
+ } else {
+ SN_PCIDEV_BUSPROVIDER(dev) = &sn_pci_default_provider;
+ }
+
+ /* Only set up IRQ stuff if this device has a host bus context */
+ if (bs && sn_irq_info->irq_irq) {
+ pcidev_info->pdi_sn_irq_info = sn_irq_info;
+ dev->irq = pcidev_info->pdi_sn_irq_info->irq_irq;
+ sn_irq_fixup(dev, sn_irq_info);
+ } else {
+ pcidev_info->pdi_sn_irq_info = NULL;
+ kfree(sn_irq_info);
+ }
+}
+
+/*
+ * sn_common_bus_fixup - Perform platform specific bus fixup.
+ * Execute the ASIC specific fixup routine
+ * for this bus.
+ */
+void
+sn_common_bus_fixup(struct pci_bus *bus,
+ struct pcibus_bussoft *prom_bussoft_ptr)
+{
+ int cnode;
+ struct pci_controller *controller;
+ struct hubdev_info *hubdev_info;
+ int nasid;
+ void *provider_soft;
+ struct sn_pcibus_provider *provider;
+ struct sn_platform_data *sn_platform_data;
+
+ controller = PCI_CONTROLLER(bus);
+ /*
+ * Per-provider fixup. Copies the bus soft structure from prom
+ * to local area and links SN_PCIBUS_BUSSOFT().
+ */
+
+ if (prom_bussoft_ptr->bs_asic_type >= PCIIO_ASIC_MAX_TYPES) {
+ printk(KERN_WARNING "sn_common_bus_fixup: Unsupported asic type, %d",
+ prom_bussoft_ptr->bs_asic_type);
+ return;
+ }
+
+ if (prom_bussoft_ptr->bs_asic_type == PCIIO_ASIC_TYPE_PPB)
+ return; /* no further fixup necessary */
+
+ provider = sn_pci_provider[prom_bussoft_ptr->bs_asic_type];
+ if (provider == NULL)
+ panic("sn_common_bus_fixup: No provider registered for this asic type, %d",
+ prom_bussoft_ptr->bs_asic_type);
+
+ if (provider->bus_fixup)
+ provider_soft = (*provider->bus_fixup) (prom_bussoft_ptr,
+ controller);
+ else
+ provider_soft = NULL;
+
+ /*
+ * Generic bus fixup goes here. Don't reference prom_bussoft_ptr
+ * after this point.
+ */
+ controller->platform_data = kzalloc(sizeof(struct sn_platform_data),
+ GFP_KERNEL);
+ BUG_ON(controller->platform_data == NULL);
+ sn_platform_data =
+ (struct sn_platform_data *) controller->platform_data;
+ sn_platform_data->provider_soft = provider_soft;
+ INIT_LIST_HEAD(&((struct sn_platform_data *)
+ controller->platform_data)->pcidev_info);
+ nasid = NASID_GET(SN_PCIBUS_BUSSOFT(bus)->bs_base);
+ cnode = nasid_to_cnodeid(nasid);
+ hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
+ SN_PCIBUS_BUSSOFT(bus)->bs_xwidget_info =
+ &(hubdev_info->hdi_xwidget_info[SN_PCIBUS_BUSSOFT(bus)->bs_xid]);
+
+ /*
+ * If the node information we obtained during the fixup phase is
+ * invalid then set controller->node to -1 (undetermined)
+ */
+ if (controller->node >= num_online_nodes()) {
+ struct pcibus_bussoft *b = SN_PCIBUS_BUSSOFT(bus);
+
+ printk(KERN_WARNING "Device ASIC=%u XID=%u PBUSNUM=%u "
+ "L_IO=%llx L_MEM=%llx BASE=%llx\n",
+ b->bs_asic_type, b->bs_xid, b->bs_persist_busnum,
+ b->bs_legacy_io, b->bs_legacy_mem, b->bs_base);
+ printk(KERN_WARNING "on node %d but only %d nodes online."
+ "Association set to undetermined.\n",
+ controller->node, num_online_nodes());
+ controller->node = -1;
+ }
+}
+
+void sn_bus_store_sysdata(struct pci_dev *dev)
+{
+ struct sysdata_el *element;
+
+ element = kzalloc(sizeof(struct sysdata_el), GFP_KERNEL);
+ if (!element) {
+ dev_dbg(&dev->dev, "%s: out of memory!\n", __func__);
+ return;
+ }
+ element->sysdata = SN_PCIDEV_INFO(dev);
+ list_add(&element->entry, &sn_sysdata_list);
+}
+
+void sn_bus_free_sysdata(void)
+{
+ struct sysdata_el *element;
+ struct list_head *list, *safe;
+
+ list_for_each_safe(list, safe, &sn_sysdata_list) {
+ element = list_entry(list, struct sysdata_el, entry);
+ list_del(&element->entry);
+ list_del(&(((struct pcidev_info *)
+ (element->sysdata))->pdi_list));
+ kfree(element->sysdata);
+ kfree(element);
+ }
+ return;
+}
+
+/*
+ * hubdev_init_node() - Creates the HUB data structure and link them to it's
+ * own NODE specific data area.
+ */
+void __init hubdev_init_node(nodepda_t * npda, cnodeid_t node)
+{
+ struct hubdev_info *hubdev_info;
+ int size;
+ pg_data_t *pg;
+
+ size = sizeof(struct hubdev_info);
+
+ if (node >= num_online_nodes()) /* Headless/memless IO nodes */
+ pg = NODE_DATA(0);
+ else
+ pg = NODE_DATA(node);
+
+ hubdev_info = (struct hubdev_info *)alloc_bootmem_node(pg, size);
+
+ npda->pdinfo = (void *)hubdev_info;
+}
+
+geoid_t
+cnodeid_get_geoid(cnodeid_t cnode)
+{
+ struct hubdev_info *hubdev;
+
+ hubdev = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
+ return hubdev->hdi_geoid;
+}
+
+void sn_generate_path(struct pci_bus *pci_bus, char *address)
+{
+ nasid_t nasid;
+ cnodeid_t cnode;
+ geoid_t geoid;
+ moduleid_t moduleid;
+ u16 bricktype;
+
+ nasid = NASID_GET(SN_PCIBUS_BUSSOFT(pci_bus)->bs_base);
+ cnode = nasid_to_cnodeid(nasid);
+ geoid = cnodeid_get_geoid(cnode);
+ moduleid = geo_module(geoid);
+
+ sprintf(address, "module_%c%c%c%c%.2d",
+ '0'+RACK_GET_CLASS(MODULE_GET_RACK(moduleid)),
+ '0'+RACK_GET_GROUP(MODULE_GET_RACK(moduleid)),
+ '0'+RACK_GET_NUM(MODULE_GET_RACK(moduleid)),
+ MODULE_GET_BTCHAR(moduleid), MODULE_GET_BPOS(moduleid));
+
+ /* Tollhouse requires slot id to be displayed */
+ bricktype = MODULE_GET_BTYPE(moduleid);
+ if ((bricktype == L1_BRICKTYPE_191010) ||
+ (bricktype == L1_BRICKTYPE_1932))
+ sprintf(address + strlen(address), "^%d",
+ geo_slot(geoid));
+}
+
+void sn_pci_fixup_bus(struct pci_bus *bus)
+{
+
+ if (SN_ACPI_BASE_SUPPORT())
+ sn_acpi_bus_fixup(bus);
+ else
+ sn_bus_fixup(bus);
+}
+
+/*
+ * sn_io_early_init - Perform early IO (and some non-IO) initialization.
+ * In particular, setup the sn_pci_provider[] array.
+ * This needs to be done prior to any bus scanning
+ * (acpi_scan_init()) in the ACPI case, as the SN
+ * bus fixup code will reference the array.
+ */
+static int __init
+sn_io_early_init(void)
+{
+ int i;
+
+ if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM())
+ return 0;
+
+ /* we set the acpi revision to that of the DSDT table OEM rev. */
+ {
+ struct acpi_table_header *header = NULL;
+
+ acpi_get_table(ACPI_SIG_DSDT, 1, &header);
+ BUG_ON(header == NULL);
+ sn_acpi_rev = header->oem_revision;
+ }
+
+ /*
+ * prime sn_pci_provider[]. Individual provider init routines will
+ * override their respective default entries.
+ */
+
+ for (i = 0; i < PCIIO_ASIC_MAX_TYPES; i++)
+ sn_pci_provider[i] = &sn_pci_default_provider;
+
+ pcibr_init_provider();
+ tioca_init_provider();
+ tioce_init_provider();
+
+ /*
+ * This is needed to avoid bounce limit checks in the blk layer
+ */
+ ia64_max_iommu_merge_mask = ~PAGE_MASK;
+
+ sn_irq_lh_init();
+ INIT_LIST_HEAD(&sn_sysdata_list);
+ sn_init_cpei_timer();
+
+#ifdef CONFIG_PROC_FS
+ register_sn_procfs();
+#endif
+
+ {
+ struct acpi_table_header *header;
+ (void)acpi_get_table(ACPI_SIG_DSDT, 1, &header);
+ printk(KERN_INFO "ACPI DSDT OEM Rev 0x%x\n",
+ header->oem_revision);
+ }
+ if (SN_ACPI_BASE_SUPPORT())
+ sn_io_acpi_init();
+ else
+ sn_io_init();
+ return 0;
+}
+
+arch_initcall(sn_io_early_init);
+
+/*
+ * sn_io_late_init() - Perform any final platform specific IO initialization.
+ */
+
+int __init
+sn_io_late_init(void)
+{
+ struct pci_bus *bus;
+ struct pcibus_bussoft *bussoft;
+ cnodeid_t cnode;
+ nasid_t nasid;
+ cnodeid_t near_cnode;
+
+ if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM())
+ return 0;
+
+ /*
+ * Setup closest node in pci_controller->node for
+ * PIC, TIOCP, TIOCE (TIOCA does it during bus fixup using
+ * info from the PROM).
+ */
+ bus = NULL;
+ while ((bus = pci_find_next_bus(bus)) != NULL) {
+ bussoft = SN_PCIBUS_BUSSOFT(bus);
+ nasid = NASID_GET(bussoft->bs_base);
+ cnode = nasid_to_cnodeid(nasid);
+ if ((bussoft->bs_asic_type == PCIIO_ASIC_TYPE_TIOCP) ||
+ (bussoft->bs_asic_type == PCIIO_ASIC_TYPE_TIOCE) ||
+ (bussoft->bs_asic_type == PCIIO_ASIC_TYPE_PIC)) {
+ /* PCI Bridge: find nearest node with CPUs */
+ int e = sn_hwperf_get_nearest_node(cnode, NULL,
+ &near_cnode);
+ if (e < 0) {
+ near_cnode = (cnodeid_t)-1; /* use any node */
+ printk(KERN_WARNING "sn_io_late_init: failed "
+ "to find near node with CPUs for "
+ "node %d, err=%d\n", cnode, e);
+ }
+ PCI_CONTROLLER(bus)->node = near_cnode;
+ }
+ }
+
+ sn_ioif_inited = 1; /* SN I/O infrastructure now initialized */
+
+ return 0;
+}
+
+fs_initcall(sn_io_late_init);
+
+EXPORT_SYMBOL(sn_pci_unfixup_slot);
+EXPORT_SYMBOL(sn_bus_store_sysdata);
+EXPORT_SYMBOL(sn_bus_free_sysdata);
+EXPORT_SYMBOL(sn_generate_path);
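[Note: illustrative sketch, not part of the patch.] One detail shared by io_common.c and
io_acpi_init.c above: pdi_slot_host_handle packs the PCI domain, host bus number and
devfn into a single 64-bit value (domain above bit 40, bus in bits 39..32, devfn in the
low 32 bits), which sn_pci_fixup_slot() later unpacks for pci_get_domain_bus_and_slot().
A small sketch of the packing, with hypothetical helper names:

    #include <linux/types.h>

    static u64 sn_pack_host_handle(int domain, unsigned int bus, unsigned int devfn)
    {
            return ((u64)domain << 40) | ((u64)bus << 32) | devfn;
    }

    static void sn_unpack_host_handle(u64 handle, unsigned int *bus,
                                      unsigned int *devfn)
    {
            *bus   = (handle >> 32) & 0xff;
            *devfn = handle & 0xffffffff;
    }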
+
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index a4c78152b33..0b5ce82d203 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -3,106 +3,36 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000-2006 Silicon Graphics, Inc. All rights reserved.
*/
-#include <linux/bootmem.h>
-#include <linux/nodemask.h>
+#include <linux/slab.h>
+#include <linux/export.h>
#include <asm/sn/types.h>
#include <asm/sn/addrs.h>
-#include <asm/sn/sn_feature_sets.h>
-#include <asm/sn/geo.h>
#include <asm/sn/io.h>
-#include <asm/sn/pcibr_provider.h>
+#include <asm/sn/module.h>
+#include <asm/sn/intr.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
-#include <asm/sn/simulator.h>
#include <asm/sn/sn_sal.h>
-#include <asm/sn/tioca_provider.h>
-#include <asm/sn/tioce_provider.h>
#include "xtalk/hubdev.h"
-#include "xtalk/xwidgetdev.h"
-
-static struct list_head sn_sysdata_list;
-
-/* sysdata list struct */
-struct sysdata_el {
- struct list_head entry;
- void *sysdata;
-};
-
-struct slab_info {
- struct hubdev_info hubdev;
-};
-
-struct brick {
- moduleid_t id; /* Module ID of this module */
- struct slab_info slab_info[MAX_SLABS + 1];
-};
-
-int sn_ioif_inited = 0; /* SN I/O infrastructure initialized? */
-
-struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES]; /* indexed by asic type */
-
-static int max_segment_number = 0; /* Default highest segment number */
-static int max_pcibus_number = 255; /* Default highest pci bus number */
/*
- * Hooks and struct for unsupported pci providers
+ * The code in this file will only be executed when running with
+ * a PROM that does _not_ have base ACPI IO support.
+ * (i.e., SN_ACPI_BASE_SUPPORT() == 0)
*/
-static dma_addr_t
-sn_default_pci_map(struct pci_dev *pdev, unsigned long paddr, size_t size)
-{
- return 0;
-}
-
-static void
-sn_default_pci_unmap(struct pci_dev *pdev, dma_addr_t addr, int direction)
-{
- return;
-}
+static int max_segment_number; /* Default highest segment number */
+static int max_pcibus_number = 255; /* Default highest pci bus number */
-static void *
-sn_default_pci_bus_fixup(struct pcibus_bussoft *soft, struct pci_controller *controller)
-{
- return NULL;
-}
-
-static struct sn_pcibus_provider sn_pci_default_provider = {
- .dma_map = sn_default_pci_map,
- .dma_map_consistent = sn_default_pci_map,
- .dma_unmap = sn_default_pci_unmap,
- .bus_fixup = sn_default_pci_bus_fixup,
-};
-
-/*
- * Retrieve the DMA Flush List given nasid, widget, and device.
- * This list is needed to implement the WAR - Flush DMA data on PIO Reads.
- */
-static inline u64
-sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num,
- u64 address)
-{
-
- struct ia64_sal_retval ret_stuff;
- ret_stuff.status = 0;
- ret_stuff.v0 = 0;
-
- SAL_CALL_NOLOCK(ret_stuff,
- (u64) SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST,
- (u64) nasid, (u64) widget_num,
- (u64) device_num, (u64) address, 0, 0, 0);
- return ret_stuff.status;
-
-}
/*
* Retrieve the hub device info structure for the given nasid.
*/
static inline u64 sal_get_hubdev_info(u64 handle, u64 address)
{
-
struct ia64_sal_retval ret_stuff;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
@@ -118,7 +48,6 @@ static inline u64 sal_get_hubdev_info(u64 handle, u64 address)
*/
static inline u64 sal_get_pcibus_info(u64 segment, u64 busnum, u64 address)
{
-
struct ia64_sal_retval ret_stuff;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
@@ -148,74 +77,20 @@ sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
return ret_stuff.v0;
}
-/*
- * sn_pcidev_info_get() - Retrieve the pcidev_info struct for the specified
- * device.
- */
-inline struct pcidev_info *
-sn_pcidev_info_get(struct pci_dev *dev)
-{
- struct pcidev_info *pcidev;
-
- list_for_each_entry(pcidev,
- &(SN_PCI_CONTROLLER(dev)->pcidev_info), pdi_list) {
- if (pcidev->pdi_linux_pcidev == dev) {
- return pcidev;
- }
- }
- return NULL;
-}
-
-/* Older PROM flush WAR
- *
- * 01/16/06 -- This war will be in place until a new official PROM is released.
- * Additionally note that the struct sn_flush_device_war also has to be
- * removed from arch/ia64/sn/include/xtalk/hubdev.h
- */
-static u8 war_implemented = 0;
-
-static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device,
- struct sn_flush_device_common *common)
-{
- struct sn_flush_device_war *war_list;
- struct sn_flush_device_war *dev_entry;
- struct ia64_sal_retval isrv = {0,0,0,0};
-
- if (!war_implemented) {
- printk(KERN_WARNING "PROM version < 4.50 -- implementing old "
- "PROM flush WAR\n");
- war_implemented = 1;
- }
-
- war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL);
- if (!war_list)
- BUG();
-
- SAL_CALL_NOLOCK(isrv, SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST,
- nasid, widget, __pa(war_list), 0, 0, 0 ,0);
- if (isrv.status)
- panic("sn_device_fixup_war failed: %s\n",
- ia64_sal_strerror(isrv.status));
-
- dev_entry = war_list + device;
- memcpy(common,dev_entry, sizeof(*common));
- kfree(war_list);
-
- return isrv.status;
-}
/*
- * sn_fixup_ionodes() - This routine initializes the HUB data strcuture for
- * each node in the system.
+ * sn_fixup_ionodes() - This routine initializes the HUB data structure for
+ * each node in the system. This function is only
+ * executed when running with a non-ACPI capable PROM.
*/
-static void sn_fixup_ionodes(void)
+static void __init sn_fixup_ionodes(void)
{
- struct sn_flush_device_kernel *sn_flush_device_kernel;
- struct sn_flush_device_kernel *dev_entry;
+
struct hubdev_info *hubdev;
u64 status;
u64 nasid;
- int i, widget, device;
+ int i;
+ extern void sn_common_hubdev_init(struct hubdev_info *);
/*
* Get SGI Specific HUB chipset information.
@@ -238,177 +113,70 @@ static void sn_fixup_ionodes(void)
max_segment_number = hubdev->max_segment_number;
max_pcibus_number = hubdev->max_pcibus_number;
}
-
- /* Attach the error interrupt handlers */
- if (nasid & 1)
- ice_error_init(hubdev);
- else
- hub_error_init(hubdev);
-
- for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++)
- hubdev->hdi_xwidget_info[widget].xwi_hubinfo = hubdev;
-
- if (!hubdev->hdi_flush_nasid_list.widget_p)
- continue;
-
- hubdev->hdi_flush_nasid_list.widget_p =
- kmalloc((HUB_WIDGET_ID_MAX + 1) *
- sizeof(struct sn_flush_device_kernel *),
- GFP_KERNEL);
- memset(hubdev->hdi_flush_nasid_list.widget_p, 0x0,
- (HUB_WIDGET_ID_MAX + 1) *
- sizeof(struct sn_flush_device_kernel *));
-
- for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) {
- sn_flush_device_kernel = kmalloc(DEV_PER_WIDGET *
- sizeof(struct
- sn_flush_device_kernel),
- GFP_KERNEL);
- if (!sn_flush_device_kernel)
- BUG();
- memset(sn_flush_device_kernel, 0x0,
- DEV_PER_WIDGET *
- sizeof(struct sn_flush_device_kernel));
-
- dev_entry = sn_flush_device_kernel;
- for (device = 0; device < DEV_PER_WIDGET;
- device++,dev_entry++) {
- dev_entry->common = kmalloc(sizeof(struct
- sn_flush_device_common),
- GFP_KERNEL);
- if (!dev_entry->common)
- BUG();
- memset(dev_entry->common, 0x0, sizeof(struct
- sn_flush_device_common));
-
- if (sn_prom_feature_available(
- PRF_DEVICE_FLUSH_LIST))
- status = sal_get_device_dmaflush_list(
- nasid,
- widget,
- device,
- (u64)(dev_entry->common));
- else
- status = sn_device_fixup_war(nasid,
- widget,
- device,
- dev_entry->common);
- if (status != SALRET_OK)
- panic("SAL call failed: %s\n",
- ia64_sal_strerror(status));
-
- spin_lock_init(&dev_entry->sfdl_flush_lock);
- }
-
- if (sn_flush_device_kernel)
- hubdev->hdi_flush_nasid_list.widget_p[widget] =
- sn_flush_device_kernel;
- }
+ sn_common_hubdev_init(hubdev);
}
}
/*
- * sn_pci_window_fixup() - Create a pci_window for each device resource.
- * Until ACPI support is added, we need this code
- * to setup pci_windows for use by
- * pcibios_bus_to_resource(),
- * pcibios_resource_to_bus(), etc.
+ * sn_pci_legacy_window_fixup - Setup PCI resources for
+ * legacy IO and MEM space. This needs to
+ * be done here, as the PROM does not have
+ * ACPI support defining the root buses
+ * and their resources (_CRS),
*/
static void
-sn_pci_window_fixup(struct pci_dev *dev, unsigned int count,
- s64 * pci_addrs)
+sn_legacy_pci_window_fixup(struct resource *res,
+ u64 legacy_io, u64 legacy_mem)
{
- struct pci_controller *controller = PCI_CONTROLLER(dev->bus);
- unsigned int i;
- unsigned int idx;
- unsigned int new_count;
- struct pci_window *new_window;
-
- if (count == 0)
- return;
- idx = controller->windows;
- new_count = controller->windows + count;
- new_window = kcalloc(new_count, sizeof(struct pci_window), GFP_KERNEL);
- if (new_window == NULL)
- BUG();
- if (controller->window) {
- memcpy(new_window, controller->window,
- sizeof(struct pci_window) * controller->windows);
- kfree(controller->window);
- }
-
- /* Setup a pci_window for each device resource. */
- for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
- if (pci_addrs[i] == -1)
- continue;
-
- new_window[idx].offset = dev->resource[i].start - pci_addrs[i];
- new_window[idx].resource = dev->resource[i];
- idx++;
- }
-
- controller->windows = new_count;
- controller->window = new_window;
-}
-
-void sn_pci_unfixup_slot(struct pci_dev *dev)
-{
- struct pci_dev *host_pci_dev = SN_PCIDEV_INFO(dev)->host_pci_dev;
-
- sn_irq_unfixup(dev);
- pci_dev_put(host_pci_dev);
- pci_dev_put(dev);
+ res[0].name = "legacy_io";
+ res[0].flags = IORESOURCE_IO;
+ res[0].start = legacy_io;
+ res[0].end = res[0].start + 0xffff;
+ res[0].parent = &ioport_resource;
+ res[1].name = "legacy_mem";
+ res[1].flags = IORESOURCE_MEM;
+ res[1].start = legacy_mem;
+ res[1].end = res[1].start + (1024 * 1024) - 1;
+ res[1].parent = &iomem_resource;
}
/*
- * sn_pci_fixup_slot() - This routine sets up a slot's resources
- * consistent with the Linux PCI abstraction layer. Resources acquired
- * from our PCI provider include PIO maps to BAR space and interrupt
- * objects.
+ * sn_io_slot_fixup() - We are not running with an ACPI capable PROM,
+ * and need to convert the pci_dev->resource
+ * 'start' and 'end' addresses to mapped addresses,
+ * and setup the pci_controller->window array entries.
*/
-void sn_pci_fixup_slot(struct pci_dev *dev)
+void
+sn_io_slot_fixup(struct pci_dev *dev)
{
- unsigned int count = 0;
int idx;
- int segment = pci_domain_nr(dev->bus);
- int status = 0;
- struct pcibus_bussoft *bs;
- struct pci_bus *host_pci_bus;
- struct pci_dev *host_pci_dev;
+ unsigned long addr, end, size, start;
struct pcidev_info *pcidev_info;
- s64 pci_addrs[PCI_ROM_RESOURCE + 1];
- struct sn_irq_info *sn_irq_info;
- unsigned long size;
- unsigned int bus_no, devfn;
+ struct sn_irq_info *sn_irq_info;
+ int status;
- pci_dev_get(dev); /* for the sysdata pointer */
pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
- if (pcidev_info <= 0)
- BUG(); /* Cannot afford to run out of memory */
+ if (!pcidev_info)
+ panic("%s: Unable to alloc memory for pcidev_info", __func__);
- sn_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
- if (sn_irq_info <= 0)
- BUG(); /* Cannot afford to run out of memory */
- memset(sn_irq_info, 0, sizeof(struct sn_irq_info));
+ sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
+ if (!sn_irq_info)
+ panic("%s: Unable to alloc memory for sn_irq_info", __func__);
/* Call to retrieve pci device information needed by kernel. */
- status = sal_get_pcidev_info((u64) segment, (u64) dev->bus->number,
- dev->devfn,
- (u64) __pa(pcidev_info),
- (u64) __pa(sn_irq_info));
- if (status)
- BUG(); /* Cannot get platform pci device information */
+ status = sal_get_pcidev_info((u64) pci_domain_nr(dev),
+ (u64) dev->bus->number,
+ dev->devfn,
+ (u64) __pa(pcidev_info),
+ (u64) __pa(sn_irq_info));
+
+ BUG_ON(status); /* Cannot get platform pci device information */
- /* Add pcidev_info to list in sn_pci_controller struct */
- list_add_tail(&pcidev_info->pdi_list,
- &(SN_PCI_CONTROLLER(dev->bus)->pcidev_info));
/* Copy over PIO Mapped Addresses */
for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
- unsigned long start, end, addr;
if (!pcidev_info->pdi_pio_mapped_addr[idx]) {
- pci_addrs[idx] = -1;
continue;
}
@@ -416,73 +184,60 @@ void sn_pci_fixup_slot(struct pci_dev *dev)
end = dev->resource[idx].end;
size = end - start;
if (size == 0) {
- pci_addrs[idx] = -1;
continue;
}
- pci_addrs[idx] = start;
- count++;
addr = pcidev_info->pdi_pio_mapped_addr[idx];
addr = ((addr << 4) >> 4) | __IA64_UNCACHED_OFFSET;
dev->resource[idx].start = addr;
dev->resource[idx].end = addr + size;
+
+ /*
+ * if it's already in the device structure, remove it before
+ * inserting
+ */
+ if (dev->resource[idx].parent && dev->resource[idx].parent->child)
+ release_resource(&dev->resource[idx]);
+
if (dev->resource[idx].flags & IORESOURCE_IO)
- dev->resource[idx].parent = &ioport_resource;
+ insert_resource(&ioport_resource, &dev->resource[idx]);
else
- dev->resource[idx].parent = &iomem_resource;
- }
- /* Create a pci_window in the pci_controller struct for
- * each device resource.
- */
- if (count > 0)
- sn_pci_window_fixup(dev, count, pci_addrs);
-
- /*
- * Using the PROMs values for the PCI host bus, get the Linux
- * PCI host_pci_dev struct and set up host bus linkages
- */
-
- bus_no = (pcidev_info->pdi_slot_host_handle >> 32) & 0xff;
- devfn = pcidev_info->pdi_slot_host_handle & 0xffffffff;
- host_pci_bus = pci_find_bus(segment, bus_no);
- host_pci_dev = pci_get_slot(host_pci_bus, devfn);
-
- pcidev_info->host_pci_dev = host_pci_dev;
- pcidev_info->pdi_linux_pcidev = dev;
- pcidev_info->pdi_host_pcidev_info = SN_PCIDEV_INFO(host_pci_dev);
- bs = SN_PCIBUS_BUSSOFT(dev->bus);
- pcidev_info->pdi_pcibus_info = bs;
-
- if (bs && bs->bs_asic_type < PCIIO_ASIC_MAX_TYPES) {
- SN_PCIDEV_BUSPROVIDER(dev) = sn_pci_provider[bs->bs_asic_type];
- } else {
- SN_PCIDEV_BUSPROVIDER(dev) = &sn_pci_default_provider;
+ insert_resource(&iomem_resource, &dev->resource[idx]);
+ /*
+ * If ROM, set the actual ROM image size, and mark as
+ * shadowed in PROM.
+ */
+ if (idx == PCI_ROM_RESOURCE) {
+ size_t image_size;
+ void __iomem *rom;
+
+ rom = ioremap(pci_resource_start(dev, PCI_ROM_RESOURCE),
+ size + 1);
+ image_size = pci_get_rom_size(dev, rom, size + 1);
+ dev->resource[PCI_ROM_RESOURCE].end =
+ dev->resource[PCI_ROM_RESOURCE].start +
+ image_size - 1;
+ dev->resource[PCI_ROM_RESOURCE].flags |=
+ IORESOURCE_ROM_BIOS_COPY;
+ }
}
- /* Only set up IRQ stuff if this device has a host bus context */
- if (bs && sn_irq_info->irq_irq) {
- pcidev_info->pdi_sn_irq_info = sn_irq_info;
- dev->irq = pcidev_info->pdi_sn_irq_info->irq_irq;
- sn_irq_fixup(dev, sn_irq_info);
- } else {
- pcidev_info->pdi_sn_irq_info = NULL;
- kfree(sn_irq_info);
- }
+ sn_pci_fixup_slot(dev, pcidev_info, sn_irq_info);
}
+EXPORT_SYMBOL(sn_io_slot_fixup);
+
/*
* sn_pci_controller_fixup() - This routine sets up a bus's resources
- * consistent with the Linux PCI abstraction layer.
+ * consistent with the Linux PCI abstraction layer.
*/
-void sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus)
+static void __init
+sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus)
{
- int status = 0;
- int nasid, cnode;
+ s64 status = 0;
struct pci_controller *controller;
- struct sn_pci_controller *sn_controller;
struct pcibus_bussoft *prom_bussoft_ptr;
- struct hubdev_info *hubdev_info;
- void *provider_soft = NULL;
- struct sn_pcibus_provider *provider;
+ struct resource *res;
+ LIST_HEAD(resources);
status = sal_get_pcibus_info((u64) segment, (u64) busnum,
(u64) ia64_tpa(&prom_bussoft_ptr));
@@ -490,240 +245,75 @@ void sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus)
return; /*bus # does not exist */
prom_bussoft_ptr = __va(prom_bussoft_ptr);
- /* Allocate a sn_pci_controller, which has a pci_controller struct
- * as the first member.
- */
- sn_controller = kzalloc(sizeof(struct sn_pci_controller), GFP_KERNEL);
- if (!sn_controller)
- BUG();
- INIT_LIST_HEAD(&sn_controller->pcidev_info);
- controller = &sn_controller->pci_controller;
+ controller = kzalloc(sizeof(*controller), GFP_KERNEL);
+ BUG_ON(!controller);
controller->segment = segment;
- if (bus == NULL) {
- bus = pci_scan_bus(busnum, &pci_root_ops, controller);
- if (bus == NULL)
- goto error_return; /* error, or bus already scanned */
- bus->sysdata = NULL;
- }
-
- if (bus->sysdata)
- goto error_return; /* sysdata already alloc'd */
+ res = kcalloc(2, sizeof(struct resource), GFP_KERNEL);
+ BUG_ON(!res);
/*
- * Per-provider fixup. Copies the contents from prom to local
- * area and links SN_PCIBUS_BUSSOFT().
+ * Temporarily save the prom_bussoft_ptr for use by sn_bus_fixup().
+ * (platform_data will be overwritten later in sn_common_bus_fixup())
*/
-
- if (prom_bussoft_ptr->bs_asic_type >= PCIIO_ASIC_MAX_TYPES)
- goto error_return; /* unsupported asic type */
-
- if (prom_bussoft_ptr->bs_asic_type == PCIIO_ASIC_TYPE_PPB)
- goto error_return; /* no further fixup necessary */
-
- provider = sn_pci_provider[prom_bussoft_ptr->bs_asic_type];
- if (provider == NULL)
- goto error_return; /* no provider registerd for this asic */
-
- bus->sysdata = controller;
- if (provider->bus_fixup)
- provider_soft = (*provider->bus_fixup) (prom_bussoft_ptr, controller);
-
- if (provider_soft == NULL) {
- /* fixup failed or not applicable */
- bus->sysdata = NULL;
- goto error_return;
+ controller->platform_data = prom_bussoft_ptr;
+
+ sn_legacy_pci_window_fixup(res,
+ prom_bussoft_ptr->bs_legacy_io,
+ prom_bussoft_ptr->bs_legacy_mem);
+ pci_add_resource_offset(&resources, &res[0],
+ prom_bussoft_ptr->bs_legacy_io);
+ pci_add_resource_offset(&resources, &res[1],
+ prom_bussoft_ptr->bs_legacy_mem);
+
+ bus = pci_scan_root_bus(NULL, busnum, &pci_root_ops, controller,
+ &resources);
+ if (bus == NULL) {
+ kfree(res);
+ kfree(controller);
}
-
- /*
- * Setup pci_windows for legacy IO and MEM space.
- * (Temporary until ACPI support is in place.)
- */
- controller->window = kcalloc(2, sizeof(struct pci_window), GFP_KERNEL);
- if (controller->window == NULL)
- BUG();
- controller->window[0].offset = prom_bussoft_ptr->bs_legacy_io;
- controller->window[0].resource.name = "legacy_io";
- controller->window[0].resource.flags = IORESOURCE_IO;
- controller->window[0].resource.start = prom_bussoft_ptr->bs_legacy_io;
- controller->window[0].resource.end =
- controller->window[0].resource.start + 0xffff;
- controller->window[0].resource.parent = &ioport_resource;
- controller->window[1].offset = prom_bussoft_ptr->bs_legacy_mem;
- controller->window[1].resource.name = "legacy_mem";
- controller->window[1].resource.flags = IORESOURCE_MEM;
- controller->window[1].resource.start = prom_bussoft_ptr->bs_legacy_mem;
- controller->window[1].resource.end =
- controller->window[1].resource.start + (1024 * 1024) - 1;
- controller->window[1].resource.parent = &iomem_resource;
- controller->windows = 2;
-
- /*
- * Generic bus fixup goes here. Don't reference prom_bussoft_ptr
- * after this point.
- */
-
- PCI_CONTROLLER(bus)->platform_data = provider_soft;
- nasid = NASID_GET(SN_PCIBUS_BUSSOFT(bus)->bs_base);
- cnode = nasid_to_cnodeid(nasid);
- hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
- SN_PCIBUS_BUSSOFT(bus)->bs_xwidget_info =
- &(hubdev_info->hdi_xwidget_info[SN_PCIBUS_BUSSOFT(bus)->bs_xid]);
-
- /*
- * If the node information we obtained during the fixup phase is invalid
- * then set controller->node to -1 (undetermined)
- */
- if (controller->node >= num_online_nodes()) {
- struct pcibus_bussoft *b = SN_PCIBUS_BUSSOFT(bus);
-
- printk(KERN_WARNING "Device ASIC=%u XID=%u PBUSNUM=%u"
- "L_IO=%lx L_MEM=%lx BASE=%lx\n",
- b->bs_asic_type, b->bs_xid, b->bs_persist_busnum,
- b->bs_legacy_io, b->bs_legacy_mem, b->bs_base);
- printk(KERN_WARNING "on node %d but only %d nodes online."
- "Association set to undetermined.\n",
- controller->node, num_online_nodes());
- controller->node = -1;
- }
- return;
-
-error_return:
-
- kfree(sn_controller);
- return;
}
-void sn_bus_store_sysdata(struct pci_dev *dev)
+/*
+ * sn_bus_fixup
+ */
+void
+sn_bus_fixup(struct pci_bus *bus)
{
- struct sysdata_el *element;
+ struct pci_dev *pci_dev = NULL;
+ struct pcibus_bussoft *prom_bussoft_ptr;
- element = kzalloc(sizeof(struct sysdata_el), GFP_KERNEL);
- if (!element) {
- dev_dbg(dev, "%s: out of memory!\n", __FUNCTION__);
- return;
- }
- element->sysdata = SN_PCIDEV_INFO(dev);
- list_add(&element->entry, &sn_sysdata_list);
-}
+ if (!bus->parent) { /* If root bus */
+ prom_bussoft_ptr = PCI_CONTROLLER(bus)->platform_data;
+ if (prom_bussoft_ptr == NULL) {
+ printk(KERN_ERR
+ "sn_bus_fixup: 0x%04x:0x%02x Unable to "
+ "obtain prom_bussoft_ptr\n",
+ pci_domain_nr(bus), bus->number);
+ return;
+ }
+ sn_common_bus_fixup(bus, prom_bussoft_ptr);
+ }
+ list_for_each_entry(pci_dev, &bus->devices, bus_list) {
+ sn_io_slot_fixup(pci_dev);
+ }
-void sn_bus_free_sysdata(void)
-{
- struct sysdata_el *element;
- struct list_head *list;
-
-sn_sysdata_free_start:
- list_for_each(list, &sn_sysdata_list) {
- element = list_entry(list, struct sysdata_el, entry);
- list_del(&element->entry);
- kfree(element->sysdata);
- kfree(element);
- goto sn_sysdata_free_start;
- }
- return;
}
/*
- * Ugly hack to get PCI setup until we have a proper ACPI namespace.
+ * sn_io_init - PROM does not have ACPI support to define nodes or root buses,
+ * so we need to do things the hard way, including initiating the
+ * bus scanning ourselves.
*/
-#define PCI_BUSES_TO_SCAN 256
-
-static int __init sn_pci_init(void)
+void __init sn_io_init(void)
{
- int i = 0;
- int j = 0;
- struct pci_dev *pci_dev = NULL;
- extern void sn_init_cpei_timer(void);
-#ifdef CONFIG_PROC_FS
- extern void register_sn_procfs(void);
-#endif
-
- if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM())
- return 0;
-
- /*
- * prime sn_pci_provider[]. Individial provider init routines will
- * override their respective default entries.
- */
-
- for (i = 0; i < PCIIO_ASIC_MAX_TYPES; i++)
- sn_pci_provider[i] = &sn_pci_default_provider;
-
- pcibr_init_provider();
- tioca_init_provider();
- tioce_init_provider();
+ int i, j;
- /*
- * This is needed to avoid bounce limit checks in the blk layer
- */
- ia64_max_iommu_merge_mask = ~PAGE_MASK;
sn_fixup_ionodes();
- sn_irq_lh_init();
- INIT_LIST_HEAD(&sn_sysdata_list);
- sn_init_cpei_timer();
-
-#ifdef CONFIG_PROC_FS
- register_sn_procfs();
-#endif
/* busses are not known yet ... */
for (i = 0; i <= max_segment_number; i++)
for (j = 0; j <= max_pcibus_number; j++)
sn_pci_controller_fixup(i, j, NULL);
-
- /*
- * Generic Linux PCI Layer has created the pci_bus and pci_dev
- * structures - time for us to add our SN PLatform specific
- * information.
- */
-
- while ((pci_dev =
- pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pci_dev)) != NULL)
- sn_pci_fixup_slot(pci_dev);
-
- sn_ioif_inited = 1; /* sn I/O infrastructure now initialized */
-
- return 0;
-}
-
-/*
- * hubdev_init_node() - Creates the HUB data structure and link them to it's
- * own NODE specific data area.
- */
-void hubdev_init_node(nodepda_t * npda, cnodeid_t node)
-{
-
- struct hubdev_info *hubdev_info;
-
- if (node >= num_online_nodes()) /* Headless/memless IO nodes */
- hubdev_info =
- (struct hubdev_info *)alloc_bootmem_node(NODE_DATA(0),
- sizeof(struct
- hubdev_info));
- else
- hubdev_info =
- (struct hubdev_info *)alloc_bootmem_node(NODE_DATA(node),
- sizeof(struct
- hubdev_info));
- npda->pdinfo = (void *)hubdev_info;
-
}
-
-geoid_t
-cnodeid_get_geoid(cnodeid_t cnode)
-{
-
- struct hubdev_info *hubdev;
-
- hubdev = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
- return hubdev->hdi_geoid;
-
-}
-
-subsys_initcall(sn_pci_init);
-EXPORT_SYMBOL(sn_pci_fixup_slot);
-EXPORT_SYMBOL(sn_pci_unfixup_slot);
-EXPORT_SYMBOL(sn_pci_controller_fixup);
-EXPORT_SYMBOL(sn_bus_store_sysdata);
-EXPORT_SYMBOL(sn_bus_free_sysdata);
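
A minimal sketch (not part of this patch) of the BAR re-homing pattern that sn_io_slot_fixup() follows above: drop any stale placement, then insert the translated resource under the proper root tree. The helper name and signature are illustrative only.

/* Illustrative helper mirroring the resource loop in sn_io_slot_fixup(). */
static void rehome_bar(struct pci_dev *dev, int idx,
                       unsigned long new_start, unsigned long size)
{
        struct resource *r = &dev->resource[idx];

        /* Forget the old placement before re-inserting at the mapped address. */
        if (r->parent && r->parent->child)
                release_resource(r);

        r->start = new_start;
        r->end = new_start + size;

        if (r->flags & IORESOURCE_IO)
                insert_resource(&ioport_resource, r);
        else
                insert_resource(&iomem_resource, r);
}
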
diff --git a/arch/ia64/sn/kernel/iomv.c b/arch/ia64/sn/kernel/iomv.c
index 7ce3cdad627..c77ebdf9811 100644
--- a/arch/ia64/sn/kernel/iomv.c
+++ b/arch/ia64/sn/kernel/iomv.c
@@ -1,12 +1,13 @@
-/*
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2000-2003, 2006 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/module.h>
+#include <linux/acpi.h>
#include <asm/io.h>
#include <asm/delay.h>
#include <asm/vga.h>
@@ -15,6 +16,7 @@
#include <asm/sn/pda.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/shub_mmr.h>
+#include <asm/sn/acpi.h>
#define IS_LEGACY_VGA_IOPORT(p) \
(((p) >= 0x3b0 && (p) <= 0x3bb) || ((p) >= 0x3c0 && (p) <= 0x3df))
@@ -24,18 +26,22 @@
* @port: port to convert
*
* Legacy in/out instructions are converted to ld/st instructions
- * on IA64. This routine will convert a port number into a valid
+ * on IA64. This routine will convert a port number into a valid
* SN i/o address. Used by sn_in*() and sn_out*().
*/
+
void *sn_io_addr(unsigned long port)
{
if (!IS_RUNNING_ON_SIMULATOR()) {
if (IS_LEGACY_VGA_IOPORT(port))
- port += vga_console_iobase;
+ return (__ia64_mk_io_addr(port));
/* On sn2, legacy I/O ports don't point at anything */
if (port < (64 * 1024))
return NULL;
- return ((void *)(port | __IA64_UNCACHED_OFFSET));
+ if (SN_ACPI_BASE_SUPPORT())
+ return (__ia64_mk_io_addr(port));
+ else
+ return ((void *)(port | __IA64_UNCACHED_OFFSET));
} else {
/* but the simulator uses them... */
unsigned long addr;
@@ -57,7 +63,7 @@ EXPORT_SYMBOL(sn_io_addr);
/**
* __sn_mmiowb - I/O space memory barrier
*
- * See include/asm-ia64/io.h and Documentation/DocBook/deviceiobook.tmpl
+ * See arch/ia64/include/asm/io.h and Documentation/DocBook/deviceiobook.tmpl
* for details.
*
* On SN2, we wait for the PIO_WRITE_STATUS SHub register to clear.
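
A usage sketch (an assumed caller, not from the patch) for sn_io_addr() above: the returned address is dereferenced directly by the sn_in*()/sn_out*() accessors, and a NULL return marks a legacy port with nothing behind it. The all-ones fallback value is an assumption for illustration.

/* Hypothetical caller showing how the translated address would be used. */
static unsigned char example_inb(unsigned long port)
{
        volatile unsigned char *addr = sn_io_addr(port);

        return addr ? *addr : 0xff;     /* assume unbacked ports read as all-ones */
}
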
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index ec37084bdc1..85d09515490 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -5,11 +5,14 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2000-2008 Silicon Graphics, Inc. All Rights Reserved.
*/
#include <linux/irq.h>
#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/rculist.h>
+#include <linux/slab.h>
#include <asm/sn/addrs.h>
#include <asm/sn/arch.h>
#include <asm/sn/intr.h>
@@ -18,18 +21,17 @@
#include <asm/sn/pcidev.h>
#include <asm/sn/shub_mmr.h>
#include <asm/sn/sn_sal.h>
+#include <asm/sn/sn_feature_sets.h>
-static void force_interrupt(int irq);
static void register_intr_pda(struct sn_irq_info *sn_irq_info);
static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);
-int sn_force_interrupt_flag = 1;
extern int sn_ioif_inited;
-static struct list_head **sn_irq_lh;
-static spinlock_t sn_irq_info_lock = SPIN_LOCK_UNLOCKED; /* non-IRQ lock */
+struct list_head **sn_irq_lh;
+static DEFINE_SPINLOCK(sn_irq_info_lock); /* non-IRQ lock */
-static inline u64 sn_intr_alloc(nasid_t local_nasid, int local_widget,
- u64 sn_irq_info,
+u64 sn_intr_alloc(nasid_t local_nasid, int local_widget,
+ struct sn_irq_info *sn_irq_info,
int req_irq, nasid_t req_nasid,
int req_slice)
{
@@ -39,12 +41,13 @@ static inline u64 sn_intr_alloc(nasid_t local_nasid, int local_widget,
SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
(u64) SAL_INTR_ALLOC, (u64) local_nasid,
- (u64) local_widget, (u64) sn_irq_info, (u64) req_irq,
+ (u64) local_widget, __pa(sn_irq_info), (u64) req_irq,
(u64) req_nasid, (u64) req_slice);
+
return ret_stuff.status;
}
-static inline void sn_intr_free(nasid_t local_nasid, int local_widget,
+void sn_intr_free(nasid_t local_nasid, int local_widget,
struct sn_irq_info *sn_irq_info)
{
struct ia64_sal_retval ret_stuff;
@@ -57,142 +60,208 @@ static inline void sn_intr_free(nasid_t local_nasid, int local_widget,
(u64) sn_irq_info->irq_cookie, 0, 0);
}
-static unsigned int sn_startup_irq(unsigned int irq)
+u64 sn_intr_redirect(nasid_t local_nasid, int local_widget,
+ struct sn_irq_info *sn_irq_info,
+ nasid_t req_nasid, int req_slice)
+{
+ struct ia64_sal_retval ret_stuff;
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+
+ SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
+ (u64) SAL_INTR_REDIRECT, (u64) local_nasid,
+ (u64) local_widget, __pa(sn_irq_info),
+ (u64) req_nasid, (u64) req_slice, 0);
+
+ return ret_stuff.status;
+}
+
+static unsigned int sn_startup_irq(struct irq_data *data)
{
return 0;
}
-static void sn_shutdown_irq(unsigned int irq)
+static void sn_shutdown_irq(struct irq_data *data)
{
}
-static void sn_disable_irq(unsigned int irq)
+extern void ia64_mca_register_cpev(int);
+
+static void sn_disable_irq(struct irq_data *data)
{
+ if (data->irq == local_vector_to_irq(IA64_CPE_VECTOR))
+ ia64_mca_register_cpev(0);
}
-static void sn_enable_irq(unsigned int irq)
+static void sn_enable_irq(struct irq_data *data)
{
+ if (data->irq == local_vector_to_irq(IA64_CPE_VECTOR))
+ ia64_mca_register_cpev(data->irq);
}
-static void sn_ack_irq(unsigned int irq)
+static void sn_ack_irq(struct irq_data *data)
{
- u64 event_occurred, mask = 0;
+ u64 event_occurred, mask;
+ unsigned int irq = data->irq & 0xff;
- irq = irq & 0xff;
- event_occurred =
- HUB_L((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED));
+ event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED));
mask = event_occurred & SH_ALL_INT_MASK;
- HUB_S((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS),
- mask);
+ HUB_S((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), mask);
__set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);
- move_irq(irq);
+ irq_move_irq(data);
}
-static void sn_end_irq(unsigned int irq)
+struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
+ nasid_t nasid, int slice)
{
- int ivec;
- u64 event_occurred;
-
- ivec = irq & 0xff;
- if (ivec == SGI_UART_VECTOR) {
- event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR (SH_EVENT_OCCURRED));
- /* If the UART bit is set here, we may have received an
- * interrupt from the UART that the driver missed. To
- * make sure, we IPI ourselves to force us to look again.
- */
- if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
- platform_send_ipi(smp_processor_id(), SGI_UART_VECTOR,
- IA64_IPI_DM_INT, 0);
- }
+ int vector;
+ int cpuid;
+#ifdef CONFIG_SMP
+ int cpuphys;
+#endif
+ int64_t bridge;
+ int local_widget, status;
+ nasid_t local_nasid;
+ struct sn_irq_info *new_irq_info;
+ struct sn_pcibus_provider *pci_provider;
+
+ bridge = (u64) sn_irq_info->irq_bridge;
+ if (!bridge) {
+ return NULL; /* irq is not a device interrupt */
}
- __clear_bit(ivec, (volatile void *)pda->sn_in_service_ivecs);
- if (sn_force_interrupt_flag)
- force_interrupt(irq);
-}
-static void sn_irq_info_free(struct rcu_head *head);
+ local_nasid = NASID_GET(bridge);
-static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
-{
- struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
- int cpuid, cpuphys;
+ if (local_nasid & 1)
+ local_widget = TIO_SWIN_WIDGETNUM(bridge);
+ else
+ local_widget = SWIN_WIDGETNUM(bridge);
+ vector = sn_irq_info->irq_irq;
- cpuid = first_cpu(mask);
- cpuphys = cpu_physical_id(cpuid);
+ /* Make use of SAL_INTR_REDIRECT if PROM supports it */
+ status = sn_intr_redirect(local_nasid, local_widget, sn_irq_info, nasid, slice);
+ if (!status) {
+ new_irq_info = sn_irq_info;
+ goto finish_up;
+ }
- list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
- sn_irq_lh[irq], list) {
- u64 bridge;
- int local_widget, status;
- nasid_t local_nasid;
- struct sn_irq_info *new_irq_info;
- struct sn_pcibus_provider *pci_provider;
-
- new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC);
- if (new_irq_info == NULL)
- break;
- memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info));
-
- bridge = (u64) new_irq_info->irq_bridge;
- if (!bridge) {
- kfree(new_irq_info);
- break; /* irq is not a device interrupt */
- }
+ /*
+ * PROM does not support SAL_INTR_REDIRECT, or it failed.
+ * Revert to old method.
+ */
+ new_irq_info = kmemdup(sn_irq_info, sizeof(struct sn_irq_info),
+ GFP_ATOMIC);
+ if (new_irq_info == NULL)
+ return NULL;
+
+ /* Free the old PROM new_irq_info structure */
+ sn_intr_free(local_nasid, local_widget, new_irq_info);
+ unregister_intr_pda(new_irq_info);
+
+ /* allocate a new PROM new_irq_info struct */
+ status = sn_intr_alloc(local_nasid, local_widget,
+ new_irq_info, vector,
+ nasid, slice);
+
+ /* SAL call failed */
+ if (status) {
+ kfree(new_irq_info);
+ return NULL;
+ }
- local_nasid = NASID_GET(bridge);
+ register_intr_pda(new_irq_info);
+ spin_lock(&sn_irq_info_lock);
+ list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
+ spin_unlock(&sn_irq_info_lock);
+ kfree_rcu(sn_irq_info, rcu);
- if (local_nasid & 1)
- local_widget = TIO_SWIN_WIDGETNUM(bridge);
- else
- local_widget = SWIN_WIDGETNUM(bridge);
- /* Free the old PROM new_irq_info structure */
- sn_intr_free(local_nasid, local_widget, new_irq_info);
- /* Update kernels new_irq_info with new target info */
- unregister_intr_pda(new_irq_info);
+finish_up:
+ /* Update kernels new_irq_info with new target info */
+ cpuid = nasid_slice_to_cpuid(new_irq_info->irq_nasid,
+ new_irq_info->irq_slice);
+ new_irq_info->irq_cpuid = cpuid;
- /* allocate a new PROM new_irq_info struct */
- status = sn_intr_alloc(local_nasid, local_widget,
- __pa(new_irq_info), irq,
- cpuid_to_nasid(cpuid),
- cpuid_to_slice(cpuid));
+ pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type];
- /* SAL call failed */
- if (status) {
- kfree(new_irq_info);
- break;
- }
+ /*
+ * If this represents a line interrupt, target it. If it's
+ * an msi (irq_int_bit < 0), it's already targeted.
+ */
+ if (new_irq_info->irq_int_bit >= 0 &&
+ pci_provider && pci_provider->target_interrupt)
+ (pci_provider->target_interrupt)(new_irq_info);
- new_irq_info->irq_cpuid = cpuid;
- register_intr_pda(new_irq_info);
+#ifdef CONFIG_SMP
+ cpuphys = cpu_physical_id(cpuid);
+ set_irq_affinity_info((vector & 0xff), cpuphys, 0);
+#endif
+
+ return new_irq_info;
+}
- pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type];
- if (pci_provider && pci_provider->target_interrupt)
- (pci_provider->target_interrupt)(new_irq_info);
+static int sn_set_affinity_irq(struct irq_data *data,
+ const struct cpumask *mask, bool force)
+{
+ struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
+ unsigned int irq = data->irq;
+ nasid_t nasid;
+ int slice;
+
+ nasid = cpuid_to_nasid(cpumask_first_and(mask, cpu_online_mask));
+ slice = cpuid_to_slice(cpumask_first_and(mask, cpu_online_mask));
+
+ list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
+ sn_irq_lh[irq], list)
+ (void)sn_retarget_vector(sn_irq_info, nasid, slice);
- spin_lock(&sn_irq_info_lock);
- list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
- spin_unlock(&sn_irq_info_lock);
- call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
+ return 0;
+}
#ifdef CONFIG_SMP
- set_irq_affinity_info((irq & 0xff), cpuphys, 0);
+void sn_set_err_irq_affinity(unsigned int irq)
+{
+ /*
+ * On systems which support CPU disabling (SHub2), all error interrupts
+ * are targeted at the boot CPU.
+ */
+ if (is_shub2() && sn_prom_feature_available(PRF_CPU_DISABLE_SUPPORT))
+ set_irq_affinity_info(irq, cpu_physical_id(0), 0);
+}
+#else
+void sn_set_err_irq_affinity(unsigned int irq) { }
#endif
- }
+
+static void
+sn_mask_irq(struct irq_data *data)
+{
}
-struct hw_interrupt_type irq_type_sn = {
- .typename = "SN hub",
- .startup = sn_startup_irq,
- .shutdown = sn_shutdown_irq,
- .enable = sn_enable_irq,
- .disable = sn_disable_irq,
- .ack = sn_ack_irq,
- .end = sn_end_irq,
- .set_affinity = sn_set_affinity_irq
+static void
+sn_unmask_irq(struct irq_data *data)
+{
+}
+
+struct irq_chip irq_type_sn = {
+ .name = "SN hub",
+ .irq_startup = sn_startup_irq,
+ .irq_shutdown = sn_shutdown_irq,
+ .irq_enable = sn_enable_irq,
+ .irq_disable = sn_disable_irq,
+ .irq_ack = sn_ack_irq,
+ .irq_mask = sn_mask_irq,
+ .irq_unmask = sn_unmask_irq,
+ .irq_set_affinity = sn_set_affinity_irq
};
+ia64_vector sn_irq_to_vector(int irq)
+{
+ if (irq >= IA64_NUM_VECTORS)
+ return 0;
+ return (ia64_vector)irq;
+}
+
unsigned int sn_local_vector_to_irq(u8 vector)
{
return (CPU_VECTOR_TO_IRQ(smp_processor_id(), vector));
@@ -201,12 +270,13 @@ unsigned int sn_local_vector_to_irq(u8 vector)
void sn_irq_init(void)
{
int i;
- irq_desc_t *base_desc = irq_desc;
+
+ ia64_first_device_vector = IA64_SN2_FIRST_DEVICE_VECTOR;
+ ia64_last_device_vector = IA64_SN2_LAST_DEVICE_VECTOR;
for (i = 0; i < NR_IRQS; i++) {
- if (base_desc[i].handler == &no_irq_type) {
- base_desc[i].handler = &irq_type_sn;
- }
+ if (irq_get_chip(i) == &no_irq_chip)
+ irq_set_chip(i, &irq_type_sn);
}
}
@@ -219,9 +289,8 @@ static void register_intr_pda(struct sn_irq_info *sn_irq_info)
pdacpu(cpu)->sn_last_irq = irq;
}
- if (pdacpu(cpu)->sn_first_irq == 0 || pdacpu(cpu)->sn_first_irq > irq) {
+ if (pdacpu(cpu)->sn_first_irq == 0 || pdacpu(cpu)->sn_first_irq > irq)
pdacpu(cpu)->sn_first_irq = irq;
- }
}
static void unregister_intr_pda(struct sn_irq_info *sn_irq_info)
@@ -266,19 +335,14 @@ static void unregister_intr_pda(struct sn_irq_info *sn_irq_info)
rcu_read_unlock();
}
-static void sn_irq_info_free(struct rcu_head *head)
-{
- struct sn_irq_info *sn_irq_info;
-
- sn_irq_info = container_of(head, struct sn_irq_info, rcu);
- kfree(sn_irq_info);
-}
-
void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
{
nasid_t nasid = sn_irq_info->irq_nasid;
int slice = sn_irq_info->irq_slice;
int cpu = nasid_slice_to_cpuid(nasid, slice);
+#ifdef CONFIG_SMP
+ int cpuphys;
+#endif
pci_dev_get(pci_dev);
sn_irq_info->irq_cpuid = cpu;
@@ -287,9 +351,21 @@ void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
/* link it into the sn_irq[irq] list */
spin_lock(&sn_irq_info_lock);
list_add_rcu(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]);
+ reserve_irq_vector(sn_irq_info->irq_irq);
+ if (sn_irq_info->irq_int_bit != -1)
+ irq_set_handler(sn_irq_info->irq_irq, handle_level_irq);
spin_unlock(&sn_irq_info_lock);
- (void)register_intr_pda(sn_irq_info);
+ register_intr_pda(sn_irq_info);
+#ifdef CONFIG_SMP
+ cpuphys = cpu_physical_id(cpu);
+ set_irq_affinity_info(sn_irq_info->irq_irq, cpuphys, 0);
+ /*
+ * Affinity was set by the PROM, prevent it from
+ * being reset by the request_irq() path.
+ */
+ irqd_mark_affinity_was_set(irq_get_irq_data(sn_irq_info->irq_irq));
+#endif
}
void sn_irq_unfixup(struct pci_dev *pci_dev)
@@ -301,7 +377,9 @@ void sn_irq_unfixup(struct pci_dev *pci_dev)
return;
sn_irq_info = SN_PCIDEV_INFO(pci_dev)->pdi_sn_irq_info;
- if (!sn_irq_info || !sn_irq_info->irq_irq) {
+ if (!sn_irq_info)
+ return;
+ if (!sn_irq_info->irq_irq) {
kfree(sn_irq_info);
return;
}
@@ -310,8 +388,11 @@ void sn_irq_unfixup(struct pci_dev *pci_dev)
spin_lock(&sn_irq_info_lock);
list_del_rcu(&sn_irq_info->list);
spin_unlock(&sn_irq_info_lock);
- call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
+ if (list_empty(sn_irq_lh[sn_irq_info->irq_irq]))
+ free_irq_vector(sn_irq_info->irq_irq);
+ kfree_rcu(sn_irq_info, rcu);
pci_dev_put(pci_dev);
+
}
static inline void
@@ -320,22 +401,11 @@ sn_call_force_intr_provider(struct sn_irq_info *sn_irq_info)
struct sn_pcibus_provider *pci_provider;
pci_provider = sn_pci_provider[sn_irq_info->irq_bridge_type];
- if (pci_provider && pci_provider->force_interrupt)
- (*pci_provider->force_interrupt)(sn_irq_info);
-}
-static void force_interrupt(int irq)
-{
- struct sn_irq_info *sn_irq_info;
-
- if (!sn_ioif_inited)
- return;
-
- rcu_read_lock();
- list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[irq], list)
- sn_call_force_intr_provider(sn_irq_info);
-
- rcu_read_unlock();
+ /* Don't force an interrupt if the irq has been disabled */
+ if (!irqd_irq_disabled(irq_get_irq_data(sn_irq_info->irq_irq)) &&
+ pci_provider && pci_provider->force_interrupt)
+ (*pci_provider->force_interrupt)(sn_irq_info);
}
/*
@@ -350,16 +420,13 @@ static void force_interrupt(int irq)
static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info)
{
u64 regval;
- int irr_reg_num;
- int irr_bit;
- u64 irr_reg;
struct pcidev_info *pcidev_info;
struct pcibus_info *pcibus_info;
/*
* Bridge types attached to TIO (anything but PIC) do not need this WAR
* since they do not target Shub II interrupt registers. If that
- * ever changes, this check needs to accomodate.
+ * ever changes, this check needs to accommodate.
*/
if (sn_irq_info->irq_bridge_type != PCIIO_ASIC_TYPE_PIC)
return;
@@ -373,23 +440,7 @@ static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info)
pdi_pcibus_info;
regval = pcireg_intr_status_get(pcibus_info);
- irr_reg_num = irq_to_vector(irq) / 64;
- irr_bit = irq_to_vector(irq) % 64;
- switch (irr_reg_num) {
- case 0:
- irr_reg = ia64_getreg(_IA64_REG_CR_IRR0);
- break;
- case 1:
- irr_reg = ia64_getreg(_IA64_REG_CR_IRR1);
- break;
- case 2:
- irr_reg = ia64_getreg(_IA64_REG_CR_IRR2);
- break;
- case 3:
- irr_reg = ia64_getreg(_IA64_REG_CR_IRR3);
- break;
- }
- if (!test_bit(irr_bit, &irr_reg)) {
+ if (!ia64_get_irr(irq_to_vector(irq))) {
if (!test_bit(irq, pda->sn_in_service_ivecs)) {
regval &= 0xff;
if (sn_irq_info->irq_int_bit & regval &
@@ -419,7 +470,7 @@ void sn_lb_int_war_check(void)
rcu_read_unlock();
}
-void sn_irq_lh_init(void)
+void __init sn_irq_lh_init(void)
{
int i;
@@ -434,5 +485,4 @@ void sn_irq_lh_init(void)
INIT_LIST_HEAD(sn_irq_lh[i]);
}
-
}
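
The irq.c changes above convert the old hw_interrupt_type to a struct irq_chip whose callbacks take struct irq_data, installed only where the descriptor still holds no_irq_chip. A stripped-down sketch of that registration pattern, with illustrative names:

static void example_ack(struct irq_data *data)
{
        /* acknowledge hardware here; data->irq carries the Linux irq number */
}

static struct irq_chip example_chip = {
        .name           = "example",
        .irq_ack        = example_ack,
};

static void example_install(unsigned int irq)
{
        if (irq_get_chip(irq) == &no_irq_chip)
                irq_set_chip(irq, &example_chip);
}
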
diff --git a/arch/ia64/sn/kernel/klconflib.c b/arch/ia64/sn/kernel/klconflib.c
index 0f11a3299cd..87682b48ef8 100644
--- a/arch/ia64/sn/kernel/klconflib.c
+++ b/arch/ia64/sn/kernel/klconflib.c
@@ -78,31 +78,30 @@ format_module_id(char *buffer, moduleid_t m, int fmt)
position = MODULE_GET_BPOS(m);
if ((fmt == MODULE_FORMAT_BRIEF) || (fmt == MODULE_FORMAT_LCD)) {
- /* Brief module number format, eg. 002c15 */
+ /* Brief module number format, eg. 002c15 */
- /* Decompress the rack number */
- *buffer++ = '0' + RACK_GET_CLASS(rack);
- *buffer++ = '0' + RACK_GET_GROUP(rack);
- *buffer++ = '0' + RACK_GET_NUM(rack);
+ /* Decompress the rack number */
+ *buffer++ = '0' + RACK_GET_CLASS(rack);
+ *buffer++ = '0' + RACK_GET_GROUP(rack);
+ *buffer++ = '0' + RACK_GET_NUM(rack);
- /* Add the brick type */
- *buffer++ = brickchar;
+ /* Add the brick type */
+ *buffer++ = brickchar;
}
else if (fmt == MODULE_FORMAT_LONG) {
- /* Fuller hwgraph format, eg. rack/002/bay/15 */
+ /* Fuller hwgraph format, eg. rack/002/bay/15 */
- strcpy(buffer, "rack" "/"); buffer += strlen(buffer);
+ strcpy(buffer, "rack" "/"); buffer += strlen(buffer);
- *buffer++ = '0' + RACK_GET_CLASS(rack);
- *buffer++ = '0' + RACK_GET_GROUP(rack);
- *buffer++ = '0' + RACK_GET_NUM(rack);
+ *buffer++ = '0' + RACK_GET_CLASS(rack);
+ *buffer++ = '0' + RACK_GET_GROUP(rack);
+ *buffer++ = '0' + RACK_GET_NUM(rack);
- strcpy(buffer, "/" "bay" "/"); buffer += strlen(buffer);
+ strcpy(buffer, "/" "bay" "/"); buffer += strlen(buffer);
}
/* Add the bay position, using at least two digits */
if (position < 10)
- *buffer++ = '0';
+ *buffer++ = '0';
sprintf(buffer, "%d", position);
-
}
diff --git a/arch/ia64/sn/kernel/mca.c b/arch/ia64/sn/kernel/mca.c
index 9ab684d1bb5..27793f7aa99 100644
--- a/arch/ia64/sn/kernel/mca.c
+++ b/arch/ia64/sn/kernel/mca.c
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc. All Rights Reserved.
*/
#include <linux/types.h>
@@ -98,8 +98,9 @@ sn_platform_plat_specific_err_print(const u8 * sect_header, u8 ** oemdata,
while (*sn_oemdata_size > sn_oemdata_bufsize) {
u8 *newbuf = vmalloc(*sn_oemdata_size);
if (!newbuf) {
+ mutex_unlock(&sn_oemdata_mutex);
printk(KERN_ERR "%s: unable to extend sn_oemdata\n",
- __FUNCTION__);
+ __func__);
return 1;
}
vfree(*sn_oemdata);
@@ -137,7 +138,8 @@ int sn_salinfo_platform_oemdata(const u8 *sect_header, u8 **oemdata, u64 *oemdat
static int __init sn_salinfo_init(void)
{
- salinfo_platform_oemdata = &sn_salinfo_platform_oemdata;
+ if (ia64_platform_is("sn2"))
+ salinfo_platform_oemdata = &sn_salinfo_platform_oemdata;
return 0;
}
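
The first mca.c hunk above plugs a locked-mutex leak on the vmalloc() failure path. The general shape of that fix, as a sketch with placeholder names (the real code holds sn_oemdata_mutex across a retry loop):

static int grow_oemdata(struct mutex *lock, u8 **buf, u64 size)
{
        u8 *newbuf;

        mutex_lock(lock);
        newbuf = vmalloc(size);
        if (!newbuf) {
                mutex_unlock(lock);     /* the unlock the patch adds */
                return 1;
        }
        vfree(*buf);
        *buf = newbuf;
        mutex_unlock(lock);
        return 0;
}
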
diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c
new file mode 100644
index 00000000000..afc58d2799a
--- /dev/null
+++ b/arch/ia64/sn/kernel/msi_sn.c
@@ -0,0 +1,238 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/irq.h>
+#include <linux/pci.h>
+#include <linux/cpumask.h>
+#include <linux/msi.h>
+#include <linux/slab.h>
+
+#include <asm/sn/addrs.h>
+#include <asm/sn/intr.h>
+#include <asm/sn/pcibus_provider_defs.h>
+#include <asm/sn/pcidev.h>
+#include <asm/sn/nodepda.h>
+
+struct sn_msi_info {
+ u64 pci_addr;
+ struct sn_irq_info *sn_irq_info;
+};
+
+static struct sn_msi_info sn_msi_info[NR_IRQS];
+
+static struct irq_chip sn_msi_chip;
+
+void sn_teardown_msi_irq(unsigned int irq)
+{
+ nasid_t nasid;
+ int widget;
+ struct pci_dev *pdev;
+ struct pcidev_info *sn_pdev;
+ struct sn_irq_info *sn_irq_info;
+ struct pcibus_bussoft *bussoft;
+ struct sn_pcibus_provider *provider;
+
+ sn_irq_info = sn_msi_info[irq].sn_irq_info;
+ if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0)
+ return;
+
+ sn_pdev = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
+ pdev = sn_pdev->pdi_linux_pcidev;
+ provider = SN_PCIDEV_BUSPROVIDER(pdev);
+
+ (*provider->dma_unmap)(pdev,
+ sn_msi_info[irq].pci_addr,
+ PCI_DMA_FROMDEVICE);
+ sn_msi_info[irq].pci_addr = 0;
+
+ bussoft = SN_PCIDEV_BUSSOFT(pdev);
+ nasid = NASID_GET(bussoft->bs_base);
+ widget = (nasid & 1) ?
+ TIO_SWIN_WIDGETNUM(bussoft->bs_base) :
+ SWIN_WIDGETNUM(bussoft->bs_base);
+
+ sn_intr_free(nasid, widget, sn_irq_info);
+ sn_msi_info[irq].sn_irq_info = NULL;
+
+ destroy_irq(irq);
+}
+
+int sn_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *entry)
+{
+ struct msi_msg msg;
+ int widget;
+ int status;
+ nasid_t nasid;
+ u64 bus_addr;
+ struct sn_irq_info *sn_irq_info;
+ struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(pdev);
+ struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
+ int irq;
+
+ if (!entry->msi_attrib.is_64)
+ return -EINVAL;
+
+ if (bussoft == NULL)
+ return -EINVAL;
+
+ if (provider == NULL || provider->dma_map_consistent == NULL)
+ return -EINVAL;
+
+ irq = create_irq();
+ if (irq < 0)
+ return irq;
+
+ /*
+ * Set up the vector plumbing. Let the prom (via sn_intr_alloc)
+ * decide which cpu to direct this msi at by default.
+ */
+
+ nasid = NASID_GET(bussoft->bs_base);
+ widget = (nasid & 1) ?
+ TIO_SWIN_WIDGETNUM(bussoft->bs_base) :
+ SWIN_WIDGETNUM(bussoft->bs_base);
+
+ sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
+ if (! sn_irq_info) {
+ destroy_irq(irq);
+ return -ENOMEM;
+ }
+
+ status = sn_intr_alloc(nasid, widget, sn_irq_info, irq, -1, -1);
+ if (status) {
+ kfree(sn_irq_info);
+ destroy_irq(irq);
+ return -ENOMEM;
+ }
+
+ sn_irq_info->irq_int_bit = -1; /* mark this as an MSI irq */
+ sn_irq_fixup(pdev, sn_irq_info);
+
+ /* Prom probably should fill these in, but doesn't ... */
+ sn_irq_info->irq_bridge_type = bussoft->bs_asic_type;
+ sn_irq_info->irq_bridge = (void *)bussoft->bs_base;
+
+ /*
+ * Map the xio address into bus space
+ */
+ bus_addr = (*provider->dma_map_consistent)(pdev,
+ sn_irq_info->irq_xtalkaddr,
+ sizeof(sn_irq_info->irq_xtalkaddr),
+ SN_DMA_MSI|SN_DMA_ADDR_XIO);
+ if (! bus_addr) {
+ sn_intr_free(nasid, widget, sn_irq_info);
+ kfree(sn_irq_info);
+ destroy_irq(irq);
+ return -ENOMEM;
+ }
+
+ sn_msi_info[irq].sn_irq_info = sn_irq_info;
+ sn_msi_info[irq].pci_addr = bus_addr;
+
+ msg.address_hi = (u32)(bus_addr >> 32);
+ msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);
+
+ /*
+ * In the SN platform, bit 16 is a "send vector" bit which
+ * must be present in order to move the vector through the system.
+ */
+ msg.data = 0x100 + irq;
+
+ irq_set_msi_desc(irq, entry);
+ write_msi_msg(irq, &msg);
+ irq_set_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq);
+
+ return 0;
+}
+
+#ifdef CONFIG_SMP
+static int sn_set_msi_irq_affinity(struct irq_data *data,
+ const struct cpumask *cpu_mask, bool force)
+{
+ struct msi_msg msg;
+ int slice;
+ nasid_t nasid;
+ u64 bus_addr;
+ struct pci_dev *pdev;
+ struct pcidev_info *sn_pdev;
+ struct sn_irq_info *sn_irq_info;
+ struct sn_irq_info *new_irq_info;
+ struct sn_pcibus_provider *provider;
+ unsigned int cpu, irq = data->irq;
+
+ cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
+ sn_irq_info = sn_msi_info[irq].sn_irq_info;
+ if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0)
+ return -1;
+
+ /*
+ * Release XIO resources for the old MSI PCI address
+ */
+
+ get_cached_msi_msg(irq, &msg);
+ sn_pdev = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
+ pdev = sn_pdev->pdi_linux_pcidev;
+ provider = SN_PCIDEV_BUSPROVIDER(pdev);
+
+ bus_addr = (u64)(msg.address_hi) << 32 | (u64)(msg.address_lo);
+ (*provider->dma_unmap)(pdev, bus_addr, PCI_DMA_FROMDEVICE);
+ sn_msi_info[irq].pci_addr = 0;
+
+ nasid = cpuid_to_nasid(cpu);
+ slice = cpuid_to_slice(cpu);
+
+ new_irq_info = sn_retarget_vector(sn_irq_info, nasid, slice);
+ sn_msi_info[irq].sn_irq_info = new_irq_info;
+ if (new_irq_info == NULL)
+ return -1;
+
+ /*
+ * Map the xio address into bus space
+ */
+
+ bus_addr = (*provider->dma_map_consistent)(pdev,
+ new_irq_info->irq_xtalkaddr,
+ sizeof(new_irq_info->irq_xtalkaddr),
+ SN_DMA_MSI|SN_DMA_ADDR_XIO);
+
+ sn_msi_info[irq].pci_addr = bus_addr;
+ msg.address_hi = (u32)(bus_addr >> 32);
+ msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);
+
+ write_msi_msg(irq, &msg);
+ cpumask_copy(data->affinity, cpu_mask);
+
+ return 0;
+}
+#endif /* CONFIG_SMP */
+
+static void sn_ack_msi_irq(struct irq_data *data)
+{
+ irq_move_irq(data);
+ ia64_eoi();
+}
+
+static int sn_msi_retrigger_irq(struct irq_data *data)
+{
+ unsigned int vector = data->irq;
+ ia64_resend_irq(vector);
+
+ return 1;
+}
+
+static struct irq_chip sn_msi_chip = {
+ .name = "PCI-MSI",
+ .irq_mask = mask_msi_irq,
+ .irq_unmask = unmask_msi_irq,
+ .irq_ack = sn_ack_msi_irq,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = sn_set_msi_irq_affinity,
+#endif
+ .irq_retrigger = sn_msi_retrigger_irq,
+};
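
A sketch (not part of the new file) of how the MSI message is composed from the bus address returned by the provider's dma_map_consistent() hook, mirroring sn_setup_msi_irq() above; the helper name is illustrative.

static void example_compose_msg(struct msi_msg *msg, u64 bus_addr,
                                unsigned int irq)
{
        msg->address_hi = (u32)(bus_addr >> 32);
        msg->address_lo = (u32)(bus_addr & 0xffffffff);
        /* extra "send vector" bit noted above must be set to move the vector */
        msg->data = 0x100 + irq;
}
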
diff --git a/arch/ia64/sn/kernel/pio_phys.S b/arch/ia64/sn/kernel/pio_phys.S
new file mode 100644
index 00000000000..3c7d48d6ecb
--- /dev/null
+++ b/arch/ia64/sn/kernel/pio_phys.S
@@ -0,0 +1,71 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ *
+ * This file contains macros used to access MMR registers via
+ * uncached physical addresses.
+ * pio_phys_read_mmr - read an MMR
+ * pio_phys_write_mmr - write an MMR
+ * pio_atomic_phys_write_mmrs - atomically write 1 or 2 MMRs with psr.ic=0
+ * Second MMR will be skipped if address is NULL
+ *
+ * Addresses passed to these routines should be uncached physical addresses
+ * ie., 0x80000....
+ */
+
+
+
+#include <asm/asmmacro.h>
+#include <asm/page.h>
+
+GLOBAL_ENTRY(pio_phys_read_mmr)
+ .prologue
+ .regstk 1,0,0,0
+ .body
+ mov r2=psr
+ rsm psr.i | psr.dt
+ ;;
+ srlz.d
+ ld8.acq r8=[r32]
+ ;;
+ mov psr.l=r2;;
+ srlz.d
+ br.ret.sptk.many rp
+END(pio_phys_read_mmr)
+
+GLOBAL_ENTRY(pio_phys_write_mmr)
+ .prologue
+ .regstk 2,0,0,0
+ .body
+ mov r2=psr
+ rsm psr.i | psr.dt
+ ;;
+ srlz.d
+ st8.rel [r32]=r33
+ ;;
+ mov psr.l=r2;;
+ srlz.d
+ br.ret.sptk.many rp
+END(pio_phys_write_mmr)
+
+GLOBAL_ENTRY(pio_atomic_phys_write_mmrs)
+ .prologue
+ .regstk 4,0,0,0
+ .body
+ mov r2=psr
+ cmp.ne p9,p0=r34,r0;
+ rsm psr.i | psr.dt | psr.ic
+ ;;
+ srlz.d
+ st8.rel [r32]=r33
+(p9) st8.rel [r34]=r35
+ ;;
+ mov psr.l=r2;;
+ srlz.d
+ br.ret.sptk.many rp
+END(pio_atomic_phys_write_mmrs)
+
+
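
A hedged C-side sketch of how these new entry points get called. The prototypes shown are approximations inferred from the .regstk directives above, not copied from the SN headers, and the caller is illustrative.

/* Prototypes approximated from the register usage in pio_phys.S. */
extern u64 pio_phys_read_mmr(volatile u64 *mmr);
extern void pio_phys_write_mmr(volatile u64 *mmr, u64 val);

static void example_mmr_update(volatile u64 *uncached_phys_mmr)
{
        u64 v = pio_phys_read_mmr(uncached_phys_mmr);

        pio_phys_write_mmr(uncached_phys_mmr, v | 1UL);
}
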
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index e510dce9971..53b01b8e2f1 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -3,17 +3,16 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1999,2001-2005 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1999,2001-2006 Silicon Graphics, Inc. All rights reserved.
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/kdev_t.h>
#include <linux/string.h>
-#include <linux/tty.h>
+#include <linux/screen_info.h>
#include <linux/console.h>
#include <linux/timex.h>
#include <linux/sched.h>
@@ -26,7 +25,6 @@
#include <linux/interrupt.h>
#include <linux/acpi.h>
#include <linux/compiler.h>
-#include <linux/sched.h>
#include <linux/root_dev.h>
#include <linux/nodemask.h>
#include <linux/pm.h>
@@ -35,9 +33,9 @@
#include <asm/io.h>
#include <asm/sal.h>
#include <asm/machvec.h>
-#include <asm/system.h>
#include <asm/processor.h>
#include <asm/vga.h>
+#include <asm/setup.h>
#include <asm/sn/arch.h>
#include <asm/sn/addrs.h>
#include <asm/sn/pda.h>
@@ -66,7 +64,6 @@ extern void sn_timer_init(void);
extern unsigned long last_time_offset;
extern void (*ia64_mark_idle) (int);
extern void snidle(int);
-extern unsigned char acpi_kbd_controller_present;
unsigned long sn_rtc_cycles_per_second;
EXPORT_SYMBOL(sn_rtc_cycles_per_second);
@@ -74,7 +71,7 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second);
DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
EXPORT_PER_CPU_SYMBOL(__sn_hub_info);
-DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_NUMNODES]);
+DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]);
EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid);
DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda);
@@ -125,20 +122,6 @@ struct screen_info sn_screen_info = {
};
/*
- * This is here so we can use the CMOS detection in ide-probe.c to
- * determine what drives are present. In theory, we don't need this
- * as the auto-detection could be done via ide-probe.c:do_probe() but
- * in practice that would be much slower, which is painful when
- * running in the simulator. Note that passing zeroes in DRIVE_INFO
- * is sufficient (the IDE driver will autodetect the drive geometry).
- */
-#ifdef CONFIG_IA64_GENERIC
-extern char drive_info[4 * 16];
-#else
-char drive_info[4 * 16];
-#endif
-
-/*
* This routine can only be used during init, since
* smp_boot_data is an init data structure.
* We have to use smp_boot_data.cpu_phys_id to find
@@ -152,7 +135,7 @@ static int __init pxm_to_nasid(int pxm)
int i;
int nid;
- nid = pxm_to_nid_map[pxm];
+ nid = pxm_to_node(pxm);
for (i = 0; i < num_node_memblks; i++) {
if (node_memblk[i].nid == nid) {
return NASID_GET(node_memblk[i].start_paddr);
@@ -182,7 +165,7 @@ void __init early_sn_setup(void)
* IO on SN2 is done via SAL calls, early_printk won't work without this.
*
* This code duplicates some of the ACPI table parsing that is in efi.c & sal.c.
- * Any changes to those file may have to be made hereas well.
+ * Any changes to those file may have to be made here as well.
*/
efi_systab = (efi_system_table_t *) __va(ia64_boot_param->efi_systab);
config_tables = __va(efi_systab->tables);
@@ -209,7 +192,7 @@ void __init early_sn_setup(void)
}
extern int platform_intr_list[];
-static int __initdata shub_1_1_found = 0;
+static int shub_1_1_found;
/*
* sn_check_for_wars
@@ -217,7 +200,7 @@ static int __initdata shub_1_1_found = 0;
* Set flag for enabling shub specific wars
*/
-static inline int __init is_shub_1_1(int nasid)
+static inline int is_shub_1_1(int nasid)
{
unsigned long id;
int rev;
@@ -229,7 +212,7 @@ static inline int __init is_shub_1_1(int nasid)
return rev <= 2;
}
-static void __init sn_check_for_wars(void)
+static void sn_check_for_wars(void)
{
int cnode;
@@ -258,7 +241,7 @@ static void __init sn_check_for_wars(void)
* Note: This stuff is duped here because Altix requires the PCDP to
* locate a usable VGA device due to lack of proper ACPI support. Structures
* could be used from drivers/firmware/pcdp.h, but it was decided that moving
- * this file to a more public location just for Altix use was undesireable.
+ * this file to a more public location just for Altix use was undesirable.
*/
struct hcdp_uart_desc {
@@ -330,6 +313,7 @@ struct pcdp_vga_device {
#define PCDP_PCI_TRANS_IOPORT 0x02
#define PCDP_PCI_TRANS_MMIO 0x01
+#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
static void
sn_scan_pcdp(void)
{
@@ -339,10 +323,11 @@ sn_scan_pcdp(void)
struct pcdp_interface_pci if_pci;
extern struct efi efi;
- pcdp = efi.hcdp;
- if (! pcdp)
+ if (efi.hcdp == EFI_INVALID_TABLE_ADDR)
return; /* no hcdp/pcdp table */
+ pcdp = __va(efi.hcdp);
+
if (pcdp->rev < 3)
return; /* only support PCDP (rev >= 3) */
@@ -361,8 +346,7 @@ sn_scan_pcdp(void)
continue; /* not PCI interconnect */
if (if_pci.translation & PCDP_PCI_TRANS_IOPORT)
- vga_console_iobase =
- if_pci.ioport_tra | __IA64_UNCACHED_OFFSET;
+ vga_console_iobase = if_pci.ioport_tra;
if (if_pci.translation & PCDP_PCI_TRANS_MMIO)
vga_console_membase =
@@ -371,6 +355,9 @@ sn_scan_pcdp(void)
break; /* once we find the primary, we're done */
}
}
+#endif
+
+static unsigned long sn2_rtc_initial;
/**
* sn_setup - SN platform setup routine
@@ -386,10 +373,21 @@ void __init sn_setup(char **cmdline_p)
u32 version = sn_sal_rev();
extern void sn_cpu_init(void);
+ sn2_rtc_initial = rtc_time();
ia64_sn_plat_set_error_handling_features(); // obsolete
ia64_sn_set_os_feature(OSF_MCA_SLV_TO_OS_INIT_SLV);
ia64_sn_set_os_feature(OSF_FEAT_LOG_SBES);
+ /*
+ * Note: The calls to notify the PROM of ACPI and PCI Segment
+ * support must be done prior to acpi_load_tables(), as
+ * an ACPI capable PROM will rebuild the DSDT as result
+ * of the call.
+ */
+ ia64_sn_set_os_feature(OSF_PCISEGMENT_ENABLE);
+ ia64_sn_set_os_feature(OSF_ACPI_ENABLE);
+ /* Load the new DSDT and SSDT tables into the global table list. */
+ acpi_table_init();
#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
/*
@@ -414,6 +412,17 @@ void __init sn_setup(char **cmdline_p)
if (! vga_console_membase)
sn_scan_pcdp();
+ /*
+ * Setup legacy IO space.
+ * vga_console_iobase maps to PCI IO Space address 0 on the
+ * bus containing the VGA console.
+ */
+ if (vga_console_iobase) {
+ io_space[0].mmio_base =
+ (unsigned long) ioremap(vga_console_iobase, 0);
+ io_space[0].sparse = 0;
+ }
+
if (vga_console_membase) {
/* usable vga ... make tty0 the preferred default console */
if (!strstr(*cmdline_p, "console="))
@@ -437,19 +446,6 @@ void __init sn_setup(char **cmdline_p)
*/
build_cnode_tables();
- /*
- * Old PROMs do not provide an ACPI FADT. Disable legacy keyboard
- * support here so we don't have to listen to failed keyboard probe
- * messages.
- */
- if (version <= 0x0209 && acpi_kbd_controller_present) {
- printk(KERN_INFO "Disabling legacy keyboard support as prom "
- "is too old and doesn't provide FADT\n");
- acpi_kbd_controller_present = 0;
- }
-
- printk("SGI SAL version %x.%02x\n", version >> 8, version & 0x00FF);
-
status =
ia64_sal_freq_base(SAL_FREQ_BASE_REALTIME_CLOCK, &ticks_per_sec,
&drift);
@@ -463,6 +459,8 @@ void __init sn_setup(char **cmdline_p)
platform_intr_list[ACPI_INTERRUPT_CPEI] = IA64_CPE_VECTOR;
+ printk("SGI SAL version %x.%02x\n", version >> 8, version & 0x00FF);
+
/*
* we set the default root device to /dev/hda
* to make simulation easy
@@ -496,6 +494,7 @@ void __init sn_setup(char **cmdline_p)
* for sn.
*/
pm_power_off = ia64_sn_power_down;
+ current->thread.flags |= IA64_THREAD_MIGRATION;
}
/**
@@ -508,12 +507,11 @@ static void __init sn_init_pdas(char **cmdline_p)
cnodeid_t cnode;
/*
- * Allocate & initalize the nodepda for each node.
+ * Allocate & initialize the nodepda for each node.
*/
for_each_online_node(cnode) {
nodepdaindr[cnode] =
alloc_bootmem_node(NODE_DATA(cnode), sizeof(nodepda_t));
- memset(nodepdaindr[cnode], 0, sizeof(nodepda_t));
memset(nodepdaindr[cnode]->phys_cpuid, -1,
sizeof(nodepdaindr[cnode]->phys_cpuid));
spin_lock_init(&nodepdaindr[cnode]->ptc_lock);
@@ -522,11 +520,9 @@ static void __init sn_init_pdas(char **cmdline_p)
/*
* Allocate & initialize nodepda for TIOs. For now, put them on node 0.
*/
- for (cnode = num_online_nodes(); cnode < num_cnodes; cnode++) {
+ for (cnode = num_online_nodes(); cnode < num_cnodes; cnode++)
nodepdaindr[cnode] =
alloc_bootmem_node(NODE_DATA(0), sizeof(nodepda_t));
- memset(nodepdaindr[cnode], 0, sizeof(nodepda_t));
- }
/*
* Now copy the array of nodepda pointers to each nodepda.
@@ -562,7 +558,7 @@ static void __init sn_init_pdas(char **cmdline_p)
* Also sets up a few fields in the nodepda. Also known as
* platform_cpu_init() by the ia64 machvec code.
*/
-void __init sn_cpu_init(void)
+void sn_cpu_init(void)
{
int cpuid;
int cpuphyid;
@@ -571,24 +567,43 @@ void __init sn_cpu_init(void)
int slice;
int cnode;
int i;
- static int wars_have_been_checked;
+ static int wars_have_been_checked, set_cpu0_number;
- if (smp_processor_id() == 0 && IS_MEDUSA()) {
+ cpuid = smp_processor_id();
+ if (cpuid == 0 && IS_MEDUSA()) {
if (ia64_sn_is_fake_prom())
sn_prom_type = 2;
else
sn_prom_type = 1;
- printk("Running on medusa with %s PROM\n", (sn_prom_type == 1) ? "real" : "fake");
+ printk(KERN_INFO "Running on medusa with %s PROM\n",
+ (sn_prom_type == 1) ? "real" : "fake");
}
memset(pda, 0, sizeof(pda));
- if (ia64_sn_get_sn_info(0, &sn_hub_info->shub2, &sn_hub_info->nasid_bitmask, &sn_hub_info->nasid_shift,
- &sn_system_size, &sn_sharing_domain_size, &sn_partition_id,
- &sn_coherency_id, &sn_region_size))
+ if (ia64_sn_get_sn_info(0, &sn_hub_info->shub2,
+ &sn_hub_info->nasid_bitmask,
+ &sn_hub_info->nasid_shift,
+ &sn_system_size, &sn_sharing_domain_size,
+ &sn_partition_id, &sn_coherency_id,
+ &sn_region_size))
BUG();
sn_hub_info->as_shift = sn_hub_info->nasid_shift - 2;
/*
+ * Don't check status. The SAL call is not supported on all PROMs
+ * but a failure is harmless.
+ * Architecturally, cpu_init is always called twice on cpu 0. We
+ * should set cpu_number on cpu 0 once.
+ */
+ if (cpuid == 0) {
+ if (!set_cpu0_number) {
+ (void) ia64_sn_set_cpu_number(cpuid);
+ set_cpu0_number = 1;
+ }
+ } else
+ (void) ia64_sn_set_cpu_number(cpuid);
+
+ /*
* The boot cpu makes this call again after platform initialization is
* complete.
*/
@@ -599,7 +614,6 @@ void __init sn_cpu_init(void)
if (ia64_sn_get_prom_feature_set(i, &sn_prom_features[i]) != 0)
break;
- cpuid = smp_processor_id();
cpuphyid = get_sapicid();
if (ia64_sn_get_sapic_info(cpuphyid, &nasid, &subnode, &slice))
@@ -654,7 +668,8 @@ void __init sn_cpu_init(void)
SH2_PIO_WRITE_STATUS_1, SH2_PIO_WRITE_STATUS_3};
u64 *pio;
pio = is_shub1() ? pio1 : pio2;
- pda->pio_write_status_addr = (volatile unsigned long *) LOCAL_MMR_ADDR(pio[slice]);
+ pda->pio_write_status_addr =
+ (volatile unsigned long *)GLOBAL_MMR_ADDR(nasid, pio[slice]);
pda->pio_write_status_val = is_shub1() ? SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK : 0;
}
@@ -695,7 +710,7 @@ void __init build_cnode_tables(void)
* cnode == node for all C & M bricks.
*/
for_each_online_node(node) {
- nasid = pxm_to_nasid(nid_to_pxm_map[node]);
+ nasid = pxm_to_nasid(node_to_pxm(node));
sn_cnodeid_to_nasid[node] = nasid;
physical_node_map[nasid] = node;
}
@@ -716,8 +731,8 @@ void __init build_cnode_tables(void)
for_each_online_node(node) {
kl_config_hdr_t *klgraph_header;
nasid = cnodeid_to_nasid(node);
- if ((klgraph_header = ia64_sn_get_klconfig_addr(nasid)) == NULL)
- BUG();
+ klgraph_header = ia64_sn_get_klconfig_addr(nasid);
+ BUG_ON(klgraph_header == NULL);
brd = NODE_OFFSET_TO_LBOARD(nasid, klgraph_header->ch_board_info);
while (brd) {
if (board_needs_cnode(brd->brd_type) && physical_node_map[brd->brd_nasid] < 0) {
@@ -734,7 +749,7 @@ nasid_slice_to_cpuid(int nasid, int slice)
{
long cpu;
- for (cpu=0; cpu < NR_CPUS; cpu++)
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++)
if (cpuid_to_nasid(cpu) == nasid &&
cpuid_to_slice(cpu) == slice)
return cpu;
@@ -748,5 +763,13 @@ int sn_prom_feature_available(int id)
return 0;
return test_bit(id, sn_prom_features);
}
+
+void
+sn_kernel_launch_event(void)
+{
+ /* ignore status until we understand possible failure, if any*/
+ if (ia64_sn_kernel_launch_event())
+ printk(KERN_ERR "KEXEC is not supported in this PROM, Please update the PROM.\n");
+}
EXPORT_SYMBOL(sn_prom_feature_available);
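
An illustrative caller (an assumption, not in the patch) of the feature-set query path that sn_cpu_init() fills in and sn_prom_feature_available() exposes; the feature id and header come from the irq.c hunk earlier in this diff.

#include <asm/sn/sn_feature_sets.h>

static void example_feature_check(void)
{
        if (sn_prom_feature_available(PRF_CPU_DISABLE_SUPPORT))
                printk(KERN_INFO "PROM supports CPU disable\n");
}
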
diff --git a/arch/ia64/sn/kernel/sn2/Makefile b/arch/ia64/sn/kernel/sn2/Makefile
index 170bde4549d..3d09108d427 100644
--- a/arch/ia64/sn/kernel/sn2/Makefile
+++ b/arch/ia64/sn/kernel/sn2/Makefile
@@ -9,5 +9,7 @@
# sn2 specific kernel files
#
+ccflags-y := -Iarch/ia64/sn/include
+
obj-y += cache.o io.o ptc_deadlock.o sn2_smp.o sn_proc_fs.o \
prominfo_proc.o timer.o timer_interrupt.o sn_hwperf.o
diff --git a/arch/ia64/sn/kernel/sn2/cache.c b/arch/ia64/sn/kernel/sn2/cache.c
index bc3cfa17cd0..2862cb33026 100644
--- a/arch/ia64/sn/kernel/sn2/cache.c
+++ b/arch/ia64/sn/kernel/sn2/cache.c
@@ -3,11 +3,12 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001-2003 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2001-2003, 2006 Silicon Graphics, Inc. All rights reserved.
*
*/
#include <linux/module.h>
#include <asm/pgalloc.h>
+#include <asm/sn/arch.h>
/**
* sn_flush_all_caches - flush a range of address from all caches (incl. L4)
@@ -17,18 +18,24 @@
* Flush a range of addresses from all caches including L4.
* All addresses fully or partially contained within
* @flush_addr to @flush_addr + @bytes are flushed
- * from the all caches.
+ * from all caches.
*/
void
sn_flush_all_caches(long flush_addr, long bytes)
{
- flush_icache_range(flush_addr, flush_addr+bytes);
+ unsigned long addr = flush_addr;
+
+ /* SHub1 requires a cached address */
+ if (is_shub1() && (addr & RGN_BITS) == RGN_BASE(RGN_UNCACHED))
+ addr = (addr - RGN_BASE(RGN_UNCACHED)) + RGN_BASE(RGN_KERNEL);
+
+ flush_icache_range(addr, addr + bytes);
/*
* The last call may have returned before the caches
* were actually flushed, so we call it again to make
* sure.
*/
- flush_icache_range(flush_addr, flush_addr+bytes);
+ flush_icache_range(addr, addr + bytes);
mb();
}
EXPORT_SYMBOL(sn_flush_all_caches);
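
The cache.c hunk above rewrites an uncached-region address to its cached kernel-region alias before flushing on SHub1. Factored out as a sketch (helper name illustrative, macros as used in the hunk):

static unsigned long cached_alias(unsigned long addr)
{
        /* Translate a RGN_UNCACHED address to the equivalent RGN_KERNEL address. */
        if ((addr & RGN_BITS) == RGN_BASE(RGN_UNCACHED))
                addr = (addr - RGN_BASE(RGN_UNCACHED)) + RGN_BASE(RGN_KERNEL);
        return addr;
}
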
diff --git a/arch/ia64/sn/kernel/sn2/prominfo_proc.c b/arch/ia64/sn/kernel/sn2/prominfo_proc.c
index 81c63b2f8ae..ec4de2b0965 100644
--- a/arch/ia64/sn/kernel/sn2/prominfo_proc.c
+++ b/arch/ia64/sn/kernel/sn2/prominfo_proc.c
@@ -3,17 +3,16 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1999,2001-2004 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (C) 1999,2001-2004, 2006 Silicon Graphics, Inc. All Rights Reserved.
*
* Module to export the system's Firmware Interface Tables, including
* PROM revision numbers and banners, in /proc
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/nodemask.h>
-#include <asm/system.h>
#include <asm/io.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/sn_cpuid.h>
@@ -103,18 +102,18 @@ get_fit_entry(unsigned long nasid, int index, unsigned long *fentry,
/*
* These two routines display the FIT table for each node.
*/
-static int dump_fit_entry(char *page, unsigned long *fentry)
+static void dump_fit_entry(struct seq_file *m, unsigned long *fentry)
{
unsigned type;
type = FIT_TYPE(fentry[1]);
- return sprintf(page, "%02x %-25s %x.%02x %016lx %u\n",
- type,
- fit_type_name(type),
- FIT_MAJOR(fentry[1]), FIT_MINOR(fentry[1]),
- fentry[0],
- /* mult by sixteen to get size in bytes */
- (unsigned)(fentry[1] & 0xffffff) * 16);
+ seq_printf(m, "%02x %-25s %x.%02x %016lx %u\n",
+ type,
+ fit_type_name(type),
+ FIT_MAJOR(fentry[1]), FIT_MINOR(fentry[1]),
+ fentry[0],
+ /* mult by sixteen to get size in bytes */
+ (unsigned)(fentry[1] & 0xffffff) * 16);
}
@@ -126,31 +125,39 @@ static int dump_fit_entry(char *page, unsigned long *fentry)
* OK except for 4kB pages (and no one is going to do that on SN
* anyway).
*/
-static int
-dump_fit(char *page, unsigned long nasid)
+static int proc_fit_show(struct seq_file *m, void *v)
{
+ unsigned long nasid = (unsigned long)m->private;
unsigned long fentry[2];
int index;
- char *p;
- p = page;
for (index=0;;index++) {
BUG_ON(index * 60 > PAGE_SIZE);
if (get_fit_entry(nasid, index, fentry, NULL, 0))
break;
- p += dump_fit_entry(p, fentry);
+ dump_fit_entry(m, fentry);
}
+ return 0;
+}
- return p - page;
+static int proc_fit_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, proc_fit_show, PDE_DATA(inode));
}
-static int
-dump_version(char *page, unsigned long nasid)
+static const struct file_operations proc_fit_fops = {
+ .open = proc_fit_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int proc_version_show(struct seq_file *m, void *v)
{
+ unsigned long nasid = (unsigned long)m->private;
unsigned long fentry[2];
char banner[128];
int index;
- int len;
for (index = 0; ; index++) {
if (get_fit_entry(nasid, index, fentry, banner,
@@ -160,56 +167,24 @@ dump_version(char *page, unsigned long nasid)
break;
}
- len = sprintf(page, "%x.%02x\n", FIT_MAJOR(fentry[1]),
- FIT_MINOR(fentry[1]));
- page += len;
+ seq_printf(m, "%x.%02x\n", FIT_MAJOR(fentry[1]), FIT_MINOR(fentry[1]));
if (banner[0])
- len += snprintf(page, PAGE_SIZE-len, "%s\n", banner);
-
- return len;
-}
-
-/* same as in proc_misc.c */
-static int
-proc_calc_metrics(char *page, char **start, off_t off, int count, int *eof,
- int len)
-{
- if (len <= off + count)
- *eof = 1;
- *start = page + off;
- len -= off;
- if (len > count)
- len = count;
- if (len < 0)
- len = 0;
- return len;
+ seq_printf(m, "%s\n", banner);
+ return 0;
}
-static int
-read_version_entry(char *page, char **start, off_t off, int count, int *eof,
- void *data)
+static int proc_version_open(struct inode *inode, struct file *file)
{
- int len = 0;
-
- /* data holds the NASID of the node */
- len = dump_version(page, (unsigned long)data);
- len = proc_calc_metrics(page, start, off, count, eof, len);
- return len;
+ return single_open(file, proc_version_show, PDE_DATA(inode));
}
-static int
-read_fit_entry(char *page, char **start, off_t off, int count, int *eof,
- void *data)
-{
- int len = 0;
-
- /* data holds the NASID of the node */
- len = dump_fit(page, (unsigned long)data);
- len = proc_calc_metrics(page, start, off, count, eof, len);
-
- return len;
-}
+static const struct file_operations proc_version_fops = {
+ .open = proc_version_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
/* module entry points */
int __init prominfo_init(void);
@@ -218,62 +193,39 @@ void __exit prominfo_exit(void);
module_init(prominfo_init);
module_exit(prominfo_exit);
-static struct proc_dir_entry **proc_entries;
-static struct proc_dir_entry *sgi_prominfo_entry;
-
#define NODE_NAME_LEN 11
int __init prominfo_init(void)
{
- struct proc_dir_entry **entp;
- struct proc_dir_entry *p;
+ struct proc_dir_entry *sgi_prominfo_entry;
cnodeid_t cnodeid;
- unsigned long nasid;
- char name[NODE_NAME_LEN];
if (!ia64_platform_is("sn2"))
return 0;
- proc_entries = kmalloc(num_online_nodes() * sizeof(struct proc_dir_entry *),
- GFP_KERNEL);
-
sgi_prominfo_entry = proc_mkdir("sgi_prominfo", NULL);
+ if (!sgi_prominfo_entry)
+ return -ENOMEM;
- entp = proc_entries;
for_each_online_node(cnodeid) {
+ struct proc_dir_entry *dir;
+ unsigned long nasid;
+ char name[NODE_NAME_LEN];
+
sprintf(name, "node%d", cnodeid);
- *entp = proc_mkdir(name, sgi_prominfo_entry);
+ dir = proc_mkdir(name, sgi_prominfo_entry);
+ if (!dir)
+ continue;
nasid = cnodeid_to_nasid(cnodeid);
- p = create_proc_read_entry(
- "fit", 0, *entp, read_fit_entry,
- (void *)nasid);
- if (p)
- p->owner = THIS_MODULE;
- p = create_proc_read_entry(
- "version", 0, *entp, read_version_entry,
- (void *)nasid);
- if (p)
- p->owner = THIS_MODULE;
- entp++;
+ proc_create_data("fit", 0, dir,
+ &proc_fit_fops, (void *)nasid);
+ proc_create_data("version", 0, dir,
+ &proc_version_fops, (void *)nasid);
}
-
return 0;
}
void __exit prominfo_exit(void)
{
- struct proc_dir_entry **entp;
- unsigned cnodeid;
- char name[NODE_NAME_LEN];
-
- entp = proc_entries;
- for_each_online_node(cnodeid) {
- remove_proc_entry("fit", *entp);
- remove_proc_entry("version", *entp);
- sprintf(name, "node%d", cnodeid);
- remove_proc_entry(name, sgi_prominfo_entry);
- entp++;
- }
- remove_proc_entry("sgi_prominfo", NULL);
- kfree(proc_entries);
+ remove_proc_subtree("sgi_prominfo", NULL);
}
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c
index 471bbaa65d1..68c84541162 100644
--- a/arch/ia64/sn/kernel/sn2/sn2_smp.c
+++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c
@@ -5,7 +5,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2000-2006 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/init.h>
@@ -26,7 +26,6 @@
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/sal.h>
-#include <asm/system.h>
#include <asm/delay.h>
#include <asm/io.h>
#include <asm/smp.h>
@@ -40,110 +39,34 @@
#include <asm/sn/shub_mmr.h>
#include <asm/sn/nodepda.h>
#include <asm/sn/rw_mmr.h>
+#include <asm/sn/sn_feature_sets.h>
DEFINE_PER_CPU(struct ptc_stats, ptcstats);
DECLARE_PER_CPU(struct ptc_stats, ptcstats);
static __cacheline_aligned DEFINE_SPINLOCK(sn2_global_ptc_lock);
-void sn2_ptc_deadlock_recovery(short *, short, int, volatile unsigned long *, unsigned long data0,
- volatile unsigned long *, unsigned long data1);
+/* 0 = old algorithm (no IPI flushes), 1 = ipi deadlock flush, 2 = ipi instead of SHUB ptc, >2 = always ipi */
+static int sn2_flush_opt = 0;
+
+extern unsigned long
+sn2_ptc_deadlock_recovery_core(volatile unsigned long *, unsigned long,
+ volatile unsigned long *, unsigned long,
+ volatile unsigned long *, unsigned long);
+void
+sn2_ptc_deadlock_recovery(short *, short, short, int,
+ volatile unsigned long *, unsigned long,
+ volatile unsigned long *, unsigned long);
-#ifdef DEBUG_PTC
/*
- * ptctest:
- *
- * xyz - 3 digit hex number:
- * x - Force PTC purges to use shub:
- * 0 - no force
- * 1 - force
- * y - interupt enable
- * 0 - disable interrupts
- * 1 - leave interuupts enabled
- * z - type of lock:
- * 0 - global lock
- * 1 - node local lock
- * 2 - no lock
- *
- * Note: on shub1, only ptctest == 0 is supported. Don't try other values!
+ * Note: some of the following is captured here to make debugging easier
+ * (the macros make more sense if you see the debug patch - not posted)
*/
-
-static unsigned int sn2_ptctest = 0;
-
-static int __init ptc_test(char *str)
-{
- get_option(&str, &sn2_ptctest);
- return 1;
-}
-__setup("ptctest=", ptc_test);
-
-static inline int ptc_lock(unsigned long *flagp)
-{
- unsigned long opt = sn2_ptctest & 255;
-
- switch (opt) {
- case 0x00:
- spin_lock_irqsave(&sn2_global_ptc_lock, *flagp);
- break;
- case 0x01:
- spin_lock_irqsave(&sn_nodepda->ptc_lock, *flagp);
- break;
- case 0x02:
- local_irq_save(*flagp);
- break;
- case 0x10:
- spin_lock(&sn2_global_ptc_lock);
- break;
- case 0x11:
- spin_lock(&sn_nodepda->ptc_lock);
- break;
- case 0x12:
- break;
- default:
- BUG();
- }
- return opt;
-}
-
-static inline void ptc_unlock(unsigned long flags, int opt)
-{
- switch (opt) {
- case 0x00:
- spin_unlock_irqrestore(&sn2_global_ptc_lock, flags);
- break;
- case 0x01:
- spin_unlock_irqrestore(&sn_nodepda->ptc_lock, flags);
- break;
- case 0x02:
- local_irq_restore(flags);
- break;
- case 0x10:
- spin_unlock(&sn2_global_ptc_lock);
- break;
- case 0x11:
- spin_unlock(&sn_nodepda->ptc_lock);
- break;
- case 0x12:
- break;
- default:
- BUG();
- }
-}
-#else
-
#define sn2_ptctest 0
-
-static inline int ptc_lock(unsigned long *flagp)
-{
- spin_lock_irqsave(&sn2_global_ptc_lock, *flagp);
- return 0;
-}
-
-static inline void ptc_unlock(unsigned long flags, int opt)
-{
- spin_unlock_irqrestore(&sn2_global_ptc_lock, flags);
-}
-#endif
+#define local_node_uses_ptc_ga(sh1) ((sh1) ? 1 : 0)
+#define max_active_pio(sh1) ((sh1) ? 32 : 7)
+#define reset_max_active_on_deadlock() 1
+#define PTC_LOCK(sh1) ((sh1) ? &sn2_global_ptc_lock : &sn_nodepda->ptc_lock)
struct ptc_stats {
unsigned long ptc_l;
@@ -151,30 +74,70 @@ struct ptc_stats {
unsigned long shub_ptc_flushes;
unsigned long nodes_flushed;
unsigned long deadlocks;
+ unsigned long deadlocks2;
unsigned long lock_itc_clocks;
unsigned long shub_itc_clocks;
unsigned long shub_itc_clocks_max;
+ unsigned long shub_ptc_flushes_not_my_mm;
+ unsigned long shub_ipi_flushes;
+ unsigned long shub_ipi_flushes_itc_clocks;
};
+#define sn2_ptctest 0
+
static inline unsigned long wait_piowc(void)
{
- volatile unsigned long *piows, zeroval;
- unsigned long ws;
+ volatile unsigned long *piows;
+ unsigned long zeroval, ws;
piows = pda->pio_write_status_addr;
zeroval = pda->pio_write_status_val;
do {
cpu_relax();
} while (((ws = *piows) & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK) != zeroval);
- return ws;
+ return (ws & SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK) != 0;
+}
+
+/**
+ * sn_migrate - SN-specific task migration actions
+ * @task: Task being migrated to new CPU
+ *
+ * SN2 PIO writes from separate CPUs are not guaranteed to arrive in order.
+ * Context switching user threads which have memory-mapped MMIO may cause
+ * PIOs to issue from separate CPUs, thus the PIO writes must be drained
+ * from the previous CPU's Shub before execution resumes on the new CPU.
+ */
+void sn_migrate(struct task_struct *task)
+{
+ pda_t *last_pda = pdacpu(task_thread_info(task)->last_cpu);
+ volatile unsigned long *adr = last_pda->pio_write_status_addr;
+ unsigned long val = last_pda->pio_write_status_val;
+
+ /* Drain PIO writes from old CPU's Shub */
+ while (unlikely((*adr & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK)
+ != val))
+ cpu_relax();
}
void sn_tlb_migrate_finish(struct mm_struct *mm)
{
- if (mm == current->mm)
+ /* flush_tlb_mm is inefficient if there is more than one user of the mm */
+ if (mm == current->mm && mm && atomic_read(&mm->mm_users) == 1)
flush_tlb_mm(mm);
}
+static void
+sn2_ipi_flush_all_tlb(struct mm_struct *mm)
+{
+ unsigned long itc;
+
+ itc = ia64_get_itc();
+ smp_flush_tlb_cpumask(*mm_cpumask(mm));
+ itc = ia64_get_itc() - itc;
+ __get_cpu_var(ptcstats).shub_ipi_flushes_itc_clocks += itc;
+ __get_cpu_var(ptcstats).shub_ipi_flushes++;
+}
+
/**
* sn2_global_tlb_purge - globally purge translation cache of virtual address range
* @mm: mm_struct containing virtual address range
@@ -201,17 +164,24 @@ void
sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
unsigned long end, unsigned long nbits)
{
- int i, opt, shub1, cnode, mynasid, cpu, lcpu = 0, nasid, flushed = 0;
- int mymm = (mm == current->active_mm && current->mm);
+ int i, ibegin, shub1, cnode, mynasid, cpu, lcpu = 0, nasid;
+ int mymm = (mm == current->active_mm && mm == current->mm);
+ int use_cpu_ptcga;
volatile unsigned long *ptc0, *ptc1;
- unsigned long itc, itc2, flags, data0 = 0, data1 = 0, rr_value;
+ unsigned long itc, itc2, flags, data0 = 0, data1 = 0, rr_value, old_rr = 0;
short nasids[MAX_NUMNODES], nix;
nodemask_t nodes_flushed;
+ int active, max_active, deadlock, flush_opt = sn2_flush_opt;
+
+ if (flush_opt > 2) {
+ sn2_ipi_flush_all_tlb(mm);
+ return;
+ }
nodes_clear(nodes_flushed);
i = 0;
- for_each_cpu_mask(cpu, mm->cpu_vm_mask) {
+ for_each_cpu(cpu, mm_cpumask(mm)) {
cnode = cpu_to_node(cpu);
node_set(cnode, nodes_flushed);
lcpu = cpu;
@@ -241,6 +211,12 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
return;
}
+ if (flush_opt == 2) {
+ sn2_ipi_flush_all_tlb(mm);
+ preempt_enable();
+ return;
+ }
+
itc = ia64_get_itc();
nix = 0;
for_each_node_mask(cnode, nodes_flushed)
@@ -267,49 +243,77 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
mynasid = get_nasid();
+ use_cpu_ptcga = local_node_uses_ptc_ga(shub1);
+ max_active = max_active_pio(shub1);
itc = ia64_get_itc();
- opt = ptc_lock(&flags);
+ spin_lock_irqsave(PTC_LOCK(shub1), flags);
itc2 = ia64_get_itc();
+
__get_cpu_var(ptcstats).lock_itc_clocks += itc2 - itc;
__get_cpu_var(ptcstats).shub_ptc_flushes++;
__get_cpu_var(ptcstats).nodes_flushed += nix;
+ if (!mymm)
+ __get_cpu_var(ptcstats).shub_ptc_flushes_not_my_mm++;
+
+ if (use_cpu_ptcga && !mymm) {
+ old_rr = ia64_get_rr(start);
+ ia64_set_rr(start, (old_rr & 0xff) | (rr_value << 8));
+ ia64_srlz_d();
+ }
+ wait_piowc();
do {
if (shub1)
data1 = start | (1UL << SH1_PTC_1_START_SHFT);
else
data0 = (data0 & ~SH2_PTC_ADDR_MASK) | (start & SH2_PTC_ADDR_MASK);
- for (i = 0; i < nix; i++) {
+ deadlock = 0;
+ active = 0;
+ for (ibegin = 0, i = 0; i < nix; i++) {
nasid = nasids[i];
- if ((!(sn2_ptctest & 3)) && unlikely(nasid == mynasid && mymm)) {
+ if (use_cpu_ptcga && unlikely(nasid == mynasid)) {
ia64_ptcga(start, nbits << 2);
ia64_srlz_i();
} else {
ptc0 = CHANGE_NASID(nasid, ptc0);
if (ptc1)
ptc1 = CHANGE_NASID(nasid, ptc1);
- pio_atomic_phys_write_mmrs(ptc0, data0, ptc1,
- data1);
- flushed = 1;
+ pio_atomic_phys_write_mmrs(ptc0, data0, ptc1, data1);
+ active++;
+ }
+ if (active >= max_active || i == (nix - 1)) {
+ if ((deadlock = wait_piowc())) {
+ if (flush_opt == 1)
+ goto done;
+ sn2_ptc_deadlock_recovery(nasids, ibegin, i, mynasid, ptc0, data0, ptc1, data1);
+ if (reset_max_active_on_deadlock())
+ max_active = 1;
+ }
+ active = 0;
+ ibegin = i + 1;
}
}
- if (flushed
- && (wait_piowc() &
- (SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK))) {
- sn2_ptc_deadlock_recovery(nasids, nix, mynasid, ptc0, data0, ptc1, data1);
- }
-
start += (1UL << nbits);
-
} while (start < end);
+done:
itc2 = ia64_get_itc() - itc2;
__get_cpu_var(ptcstats).shub_itc_clocks += itc2;
if (itc2 > __get_cpu_var(ptcstats).shub_itc_clocks_max)
__get_cpu_var(ptcstats).shub_itc_clocks_max = itc2;
- ptc_unlock(flags, opt);
+ if (old_rr) {
+ ia64_set_rr(start, old_rr);
+ ia64_srlz_d();
+ }
+
+ spin_unlock_irqrestore(PTC_LOCK(shub1), flags);
+
+ if (flush_opt == 1 && deadlock) {
+ __get_cpu_var(ptcstats).deadlocks++;
+ sn2_ipi_flush_all_tlb(mm);
+ }
preempt_enable();
}
@@ -321,27 +325,31 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
* TLB flush transaction. The recovery sequence is somewhat tricky & is
* coded in assembly language.
*/
-void sn2_ptc_deadlock_recovery(short *nasids, short nix, int mynasid, volatile unsigned long *ptc0, unsigned long data0,
- volatile unsigned long *ptc1, unsigned long data1)
+
+void
+sn2_ptc_deadlock_recovery(short *nasids, short ib, short ie, int mynasid,
+ volatile unsigned long *ptc0, unsigned long data0,
+ volatile unsigned long *ptc1, unsigned long data1)
{
- extern void sn2_ptc_deadlock_recovery_core(volatile unsigned long *, unsigned long,
- volatile unsigned long *, unsigned long, volatile unsigned long *, unsigned long);
short nasid, i;
- unsigned long *piows, zeroval;
+ unsigned long *piows, zeroval, n;
__get_cpu_var(ptcstats).deadlocks++;
piows = (unsigned long *) pda->pio_write_status_addr;
zeroval = pda->pio_write_status_val;
- for (i=0; i < nix; i++) {
+
+ for (i=ib; i <= ie; i++) {
nasid = nasids[i];
- if (!(sn2_ptctest & 3) && nasid == mynasid)
+ if (local_node_uses_ptc_ga(is_shub1()) && nasid == mynasid)
continue;
ptc0 = CHANGE_NASID(nasid, ptc0);
if (ptc1)
ptc1 = CHANGE_NASID(nasid, ptc1);
- sn2_ptc_deadlock_recovery_core(ptc0, data0, ptc1, data1, piows, zeroval);
+
+ n = sn2_ptc_deadlock_recovery_core(ptc0, data0, ptc1, data1, piows, zeroval);
+ __get_cpu_var(ptcstats).deadlocks2 += n;
}
}
@@ -421,13 +429,38 @@ void sn2_send_IPI(int cpuid, int vector, int delivery_mode, int redirect)
sn_send_IPI_phys(nasid, physid, vector, delivery_mode);
}
+#ifdef CONFIG_HOTPLUG_CPU
+/**
+ * sn_cpu_disable_allowed - Determine if a CPU can be disabled.
+ * @cpu - CPU that is requested to be disabled.
+ *
+ * CPU disable is only allowed on SHub2 systems running with a PROM
+ * that supports CPU disable. It is not permitted to disable the boot processor.
+ */
+bool sn_cpu_disable_allowed(int cpu)
+{
+ if (is_shub2() && sn_prom_feature_available(PRF_CPU_DISABLE_SUPPORT)) {
+ if (cpu != 0)
+ return true;
+ else
+ printk(KERN_WARNING
+ "Disabling the boot processor is not allowed.\n");
+
+ } else
+ printk(KERN_WARNING
+ "CPU disable is not supported on this system.\n");
+
+ return false;
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
#ifdef CONFIG_PROC_FS
#define PTC_BASENAME "sgi_sn/ptc_statistics"
static void *sn2_ptc_seq_start(struct seq_file *file, loff_t * offset)
{
- if (*offset < NR_CPUS)
+ if (*offset < nr_cpu_ids)
return offset;
return NULL;
}
@@ -435,7 +468,7 @@ static void *sn2_ptc_seq_start(struct seq_file *file, loff_t * offset)
static void *sn2_ptc_seq_next(struct seq_file *file, void *data, loff_t * offset)
{
(*offset)++;
- if (*offset < NR_CPUS)
+ if (*offset < nr_cpu_ids)
return offset;
return NULL;
}
@@ -452,38 +485,61 @@ static int sn2_ptc_seq_show(struct seq_file *file, void *data)
cpu = *(loff_t *) data;
if (!cpu) {
- seq_printf(file, "# ptc_l change_rid shub_ptc_flushes shub_nodes_flushed deadlocks lock_nsec shub_nsec shub_nsec_max\n");
- seq_printf(file, "# ptctest %d\n", sn2_ptctest);
+ seq_printf(file,
+ "# cpu ptc_l newrid ptc_flushes nodes_flushed deadlocks lock_nsec shub_nsec shub_nsec_max not_my_mm deadlock2 ipi_fluches ipi_nsec\n");
+ seq_printf(file, "# ptctest %d, flushopt %d\n", sn2_ptctest, sn2_flush_opt);
}
- if (cpu < NR_CPUS && cpu_online(cpu)) {
+ if (cpu < nr_cpu_ids && cpu_online(cpu)) {
stat = &per_cpu(ptcstats, cpu);
- seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l,
+ seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l,
stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed,
stat->deadlocks,
- 1000 * stat->lock_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
- 1000 * stat->shub_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
- 1000 * stat->shub_itc_clocks_max / per_cpu(cpu_info, cpu).cyc_per_usec);
+ 1000 * stat->lock_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
+ 1000 * stat->shub_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
+ 1000 * stat->shub_itc_clocks_max / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
+ stat->shub_ptc_flushes_not_my_mm,
+ stat->deadlocks2,
+ stat->shub_ipi_flushes,
+ 1000 * stat->shub_ipi_flushes_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec);
}
-
return 0;
}
-static struct seq_operations sn2_ptc_seq_ops = {
+static ssize_t sn2_ptc_proc_write(struct file *file, const char __user *user, size_t count, loff_t *data)
+{
+ int cpu;
+ char optstr[64];
+
+ if (count == 0 || count > sizeof(optstr))
+ return -EINVAL;
+ if (copy_from_user(optstr, user, count))
+ return -EFAULT;
+ optstr[count - 1] = '\0';
+ sn2_flush_opt = simple_strtoul(optstr, NULL, 0);
+
+ for_each_online_cpu(cpu)
+ memset(&per_cpu(ptcstats, cpu), 0, sizeof(struct ptc_stats));
+
+ return count;
+}
+
+static const struct seq_operations sn2_ptc_seq_ops = {
.start = sn2_ptc_seq_start,
.next = sn2_ptc_seq_next,
.stop = sn2_ptc_seq_stop,
.show = sn2_ptc_seq_show
};
-int sn2_ptc_proc_open(struct inode *inode, struct file *file)
+static int sn2_ptc_proc_open(struct inode *inode, struct file *file)
{
return seq_open(file, &sn2_ptc_seq_ops);
}
-static struct file_operations proc_sn2_ptc_operations = {
+static const struct file_operations proc_sn2_ptc_operations = {
.open = sn2_ptc_proc_open,
.read = seq_read,
+ .write = sn2_ptc_proc_write,
.llseek = seq_lseek,
.release = seq_release,
};
@@ -493,13 +549,14 @@ static struct proc_dir_entry *proc_sn2_ptc;
static int __init sn2_ptc_init(void)
{
if (!ia64_platform_is("sn2"))
- return -ENOSYS;
+ return 0;
- if (!(proc_sn2_ptc = create_proc_entry(PTC_BASENAME, 0444, NULL))) {
+ proc_sn2_ptc = proc_create(PTC_BASENAME, 0444,
+ NULL, &proc_sn2_ptc_operations);
+ if (!proc_sn2_ptc) {
printk(KERN_ERR "unable to create %s proc entry", PTC_BASENAME);
return -EINVAL;
}
- proc_sn2_ptc->proc_fops = &proc_sn2_ptc_operations;
spin_lock_init(&sn2_global_ptc_lock);
return 0;
}
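sn2_flush_opt is only documented by the one-line comment in the hunk above, so a compact model of how sn2_global_tlb_purge() reads it may help: values above 2 always take the IPI flush, 2 takes the IPI flush instead of the SHUB PTC writes, 1 uses the SHUB PTC writes but aborts to an IPI flush once a write deadlock is seen, and 0 runs the assembly deadlock-recovery sequence and keeps going. The following is a self-contained toy model of that policy in plain C, not kernel code.

    #include <stdio.h>

    enum flush_path { FLUSH_SHUB, FLUSH_SHUB_RECOVER, FLUSH_IPI };

    /*
     * Toy model of the sn2_flush_opt policy in sn2_global_tlb_purge():
     *   >2 : always flush via IPIs
     *    2 : flush via IPIs instead of SHUB PTC writes
     *    1 : use SHUB PTC writes, fall back to an IPI flush on deadlock
     *    0 : use SHUB PTC writes and run the deadlock recovery sequence
     */
    static enum flush_path choose_path(int flush_opt, int saw_deadlock)
    {
            if (flush_opt >= 2)
                    return FLUSH_IPI;
            if (!saw_deadlock)
                    return FLUSH_SHUB;
            return flush_opt == 1 ? FLUSH_IPI : FLUSH_SHUB_RECOVER;
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   choose_path(0, 1),   /* recover and keep using PTC  */
                   choose_path(1, 1),   /* abort PTC, flush via IPI    */
                   choose_path(3, 0));  /* never touch the PTC MMRs    */
            return 0;
    }

At runtime the option is selected by writing the number to /proc/sgi_sn/ptc_statistics through the new sn2_ptc_proc_write() above, which also zeroes the per-CPU ptcstats counters.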
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
index 19b54fbcd7e..b9992571c03 100644
--- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2004-2005 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2004-2006 Silicon Graphics, Inc. All rights reserved.
*
* SGI Altix topology and hardware performance monitoring API.
* Mark Goodwin <markgw@sgi.com>.
@@ -25,17 +25,18 @@
#include <linux/fs.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/utsname.h>
#include <linux/cpumask.h>
-#include <linux/smp_lock.h>
#include <linux/nodemask.h>
+#include <linux/smp.h>
+#include <linux/mutex.h>
+
#include <asm/processor.h>
#include <asm/topology.h>
-#include <asm/smp.h>
-#include <asm/semaphore.h>
#include <asm/uaccess.h>
#include <asm/sal.h>
#include <asm/sn/io.h>
@@ -49,7 +50,9 @@ static void *sn_hwperf_salheap = NULL;
static int sn_hwperf_obj_cnt = 0;
static nasid_t sn_hwperf_master_nasid = INVALID_NASID;
static int sn_hwperf_init(void);
-static DECLARE_MUTEX(sn_hwperf_init_mutex);
+static DEFINE_MUTEX(sn_hwperf_init_mutex);
+
+#define cnode_possible(n) ((n) < num_cnodes)
static int sn_hwperf_enum_objects(int *nobj, struct sn_hwperf_object_info **ret)
{
@@ -63,7 +66,8 @@ static int sn_hwperf_enum_objects(int *nobj, struct sn_hwperf_object_info **ret)
}
sz = sn_hwperf_obj_cnt * sizeof(struct sn_hwperf_object_info);
- if ((objbuf = (struct sn_hwperf_object_info *) vmalloc(sz)) == NULL) {
+ objbuf = vmalloc(sz);
+ if (objbuf == NULL) {
printk("sn_hwperf_enum_objects: vmalloc(%d) failed\n", (int)sz);
e = -ENOMEM;
goto out;
@@ -110,7 +114,11 @@ static int sn_hwperf_geoid_to_cnode(char *location)
if (sn_hwperf_location_to_bpos(location, &rack, &bay, &slot, &slab))
return -1;
- for_each_node(cnode) {
+ /*
+ * FIXME: replace with cleaner for_each_XXX macro which addresses
+ * both compute and IO nodes once ACPI3.0 is available.
+ */
+ for (cnode = 0; cnode < num_cnodes; cnode++) {
geoid = cnodeid_get_geoid(cnode);
module_id = geo_module(geoid);
this_rack = MODULE_GET_RACK(module_id);
@@ -123,14 +131,14 @@ static int sn_hwperf_geoid_to_cnode(char *location)
}
}
- return node_possible(cnode) ? cnode : -1;
+ return cnode_possible(cnode) ? cnode : -1;
}
static int sn_hwperf_obj_to_cnode(struct sn_hwperf_object_info * obj)
{
if (!SN_HWPERF_IS_NODE(obj) && !SN_HWPERF_IS_IONODE(obj))
BUG();
- if (!obj->sn_hwp_this_part)
+ if (SN_HWPERF_FOREIGN(obj))
return -1;
return sn_hwperf_geoid_to_cnode(obj->location);
}
@@ -182,7 +190,7 @@ static void print_pci_topology(struct seq_file *s)
int e;
for (sz = PAGE_SIZE; sz < 16 * PAGE_SIZE; sz += PAGE_SIZE) {
- if (!(p = (char *)kmalloc(sz, GFP_KERNEL)))
+ if (!(p = kmalloc(sz, GFP_KERNEL)))
break;
e = ia64_sn_ioif_get_pci_topology(__pa(p), sz);
if (e == SALRET_OK)
@@ -195,12 +203,12 @@ static void print_pci_topology(struct seq_file *s)
static inline int sn_hwperf_has_cpus(cnodeid_t node)
{
- return node_online(node) && nr_cpus_node(node);
+ return node < MAX_NUMNODES && node_online(node) && nr_cpus_node(node);
}
static inline int sn_hwperf_has_mem(cnodeid_t node)
{
- return node_online(node) && NODE_DATA(node)->node_present_pages;
+ return node < MAX_NUMNODES && node_online(node) && NODE_DATA(node)->node_present_pages;
}
static struct sn_hwperf_object_info *
@@ -233,7 +241,7 @@ static int sn_hwperf_get_nearest_node_objdata(struct sn_hwperf_object_info *objb
int found_mem = 0;
int found_cpu = 0;
- if (!node_possible(node))
+ if (!cnode_possible(node))
return -EINVAL;
if (sn_hwperf_has_cpus(node)) {
@@ -267,8 +275,7 @@ static int sn_hwperf_get_nearest_node_objdata(struct sn_hwperf_object_info *objb
/* get its interconnect topology */
sz = op->ports * sizeof(struct sn_hwperf_port_info);
- if (sz > sizeof(ptdata))
- BUG();
+ BUG_ON(sz > sizeof(ptdata));
e = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
SN_HWPERF_ENUM_PORTS, nodeobj->id, sz,
(u64)&ptdata, 0, 0, NULL);
@@ -280,6 +287,8 @@ static int sn_hwperf_get_nearest_node_objdata(struct sn_hwperf_object_info *objb
/* find nearest node with cpus and nearest memory */
for (router=NULL, j=0; j < op->ports; j++) {
dest = sn_hwperf_findobj_id(objbuf, nobj, ptdata[j].conn_id);
+ if (dest && SN_HWPERF_IS_ROUTER(dest))
+ router = dest;
if (!dest || SN_HWPERF_FOREIGN(dest) ||
!SN_HWPERF_IS_NODE(dest) || SN_HWPERF_IS_IONODE(dest)) {
continue;
@@ -295,15 +304,12 @@ static int sn_hwperf_get_nearest_node_objdata(struct sn_hwperf_object_info *objb
*near_mem_node = c;
found_mem++;
}
- if (SN_HWPERF_IS_ROUTER(dest))
- router = dest;
}
if (router && (!found_cpu || !found_mem)) {
/* search for a node connected to the same router */
sz = router->ports * sizeof(struct sn_hwperf_port_info);
- if (sz > sizeof(ptdata))
- BUG();
+ BUG_ON(sz > sizeof(ptdata));
e = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
SN_HWPERF_ENUM_PORTS, router->id, sz,
(u64)&ptdata, 0, 0, NULL);
@@ -377,7 +383,6 @@ static int sn_topology_show(struct seq_file *s, void *d)
int j;
const char *slabname;
int ordinal;
- cpumask_t cpumask;
char slice;
struct cpuinfo_ia64 *c;
struct sn_hwperf_port_info *ptdata;
@@ -409,14 +414,14 @@ static int sn_topology_show(struct seq_file *s, void *d)
}
seq_printf(s, "partition %u %s local "
"shubtype %s, "
- "nasid_mask 0x%016lx, "
+ "nasid_mask 0x%016llx, "
"nasid_bits %d:%d, "
"system_size %d, "
"sharing_size %d, "
"coherency_domain %d, "
"region_size %d\n",
- partid, system_utsname.nodename,
+ partid, utsname()->nodename,
shubtype ? "shub2" : "shub1",
(u64)nasid_mask << nasid_shift, nasid_msb, nasid_shift,
system_size, sharing_size, coher, region_size);
@@ -438,7 +443,7 @@ static int sn_topology_show(struct seq_file *s, void *d)
seq_printf(s, "%s %d %s %s asic %s", slabname, ordinal, obj->location,
obj->sn_hwp_this_part ? "local" : "shared", obj->name);
- if (!SN_HWPERF_IS_NODE(obj) && !SN_HWPERF_IS_IONODE(obj))
+ if (ordinal < 0 || (!SN_HWPERF_IS_NODE(obj) && !SN_HWPERF_IS_IONODE(obj)))
seq_putc(s, '\n');
else {
cnodeid_t near_mem = -1;
@@ -464,20 +469,20 @@ static int sn_topology_show(struct seq_file *s, void *d)
/*
* CPUs on this node, if any
*/
- cpumask = node_to_cpumask(ordinal);
- for_each_online_cpu(i) {
- if (cpu_isset(i, cpumask)) {
+ if (!SN_HWPERF_IS_IONODE(obj)) {
+ for_each_cpu_and(i, cpu_online_mask,
+ cpumask_of_node(ordinal)) {
slice = 'a' + cpuid_to_slice(i);
c = cpu_data(i);
seq_printf(s, "cpu %d %s%c local"
- " freq %luMHz, arch ia64",
- i, obj->location, slice,
- c->proc_freq / 1000000);
+ " freq %luMHz, arch ia64",
+ i, obj->location, slice,
+ c->proc_freq / 1000000);
for_each_online_cpu(j) {
seq_printf(s, j ? ":%d" : ", dist %d",
- node_distance(
- cpu_to_node(i),
- cpu_to_node(j)));
+ node_distance(
+ cpu_to_node(i),
+ cpu_to_node(j)));
}
seq_putc(s, '\n');
}
@@ -489,7 +494,7 @@ static int sn_topology_show(struct seq_file *s, void *d)
* numalink ports
*/
sz = obj->ports * sizeof(struct sn_hwperf_port_info);
- if ((ptdata = vmalloc(sz)) == NULL)
+ if ((ptdata = kmalloc(sz, GFP_KERNEL)) == NULL)
return -ENOMEM;
e = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
SN_HWPERF_ENUM_PORTS, obj->id, sz,
@@ -519,7 +524,7 @@ static int sn_topology_show(struct seq_file *s, void *d)
if (obj->sn_hwp_this_part && p->sn_hwp_this_part)
/* both ends local to this partition */
seq_puts(s, " local");
- else if (!obj->sn_hwp_this_part && !p->sn_hwp_this_part)
+ else if (SN_HWPERF_FOREIGN(p))
/* both ends of the link in foreign partition */
seq_puts(s, " foreign");
else
@@ -537,7 +542,7 @@ static int sn_topology_show(struct seq_file *s, void *d)
(SN_HWPERF_IS_NL3ROUTER(obj) ||
SN_HWPERF_IS_NL3ROUTER(p)) ? "LLP3" : "LLP4");
}
- vfree(ptdata);
+ kfree(ptdata);
}
return 0;
@@ -567,7 +572,7 @@ static void sn_topology_stop(struct seq_file *m, void *v)
/*
* /proc/sgi_sn/sn_topology, read-only using seq_file
*/
-static struct seq_operations sn_topology_seq_ops = {
+static const struct seq_operations sn_topology_seq_ops = {
.start = sn_topology_start,
.next = sn_topology_next,
.stop = sn_topology_stop,
@@ -605,28 +610,32 @@ static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info)
op_info->a->arg &= SN_HWPERF_ARG_OBJID_MASK;
if (cpu != SN_HWPERF_ARG_ANY_CPU) {
- if (cpu >= num_online_cpus() || !cpu_online(cpu)) {
+ if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
r = -EINVAL;
goto out;
}
}
- if (cpu == SN_HWPERF_ARG_ANY_CPU || cpu == get_cpu()) {
- /* don't care, or already on correct cpu */
+ if (cpu == SN_HWPERF_ARG_ANY_CPU) {
+ /* don't care which cpu */
sn_hwperf_call_sal(op_info);
- }
- else {
+ } else if (cpu == get_cpu()) {
+ /* already on correct cpu */
+ sn_hwperf_call_sal(op_info);
+ put_cpu();
+ } else {
+ put_cpu();
if (use_ipi) {
/* use an interprocessor interrupt to call SAL */
smp_call_function_single(cpu, sn_hwperf_call_sal,
- op_info, 1, 1);
+ op_info, 1);
}
else {
/* migrate the task before calling SAL */
save_allowed = current->cpus_allowed;
- set_cpus_allowed(current, cpumask_of_cpu(cpu));
+ set_cpus_allowed_ptr(current, cpumask_of(cpu));
sn_hwperf_call_sal(op_info);
- set_cpus_allowed(current, save_allowed);
+ set_cpus_allowed_ptr(current, &save_allowed);
}
}
r = op_info->ret;
@@ -677,8 +686,7 @@ static int sn_hwperf_map_err(int hwperf_err)
/*
* ioctl for "sn_hwperf" misc device
*/
-static int
-sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, u64 arg)
+static long sn_hwperf_ioctl(struct file *fp, u32 op, unsigned long arg)
{
struct sn_hwperf_ioctl_args a;
struct cpuinfo_ia64 *cdata;
@@ -694,8 +702,6 @@ sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, u64 arg)
int i;
int j;
- unlock_kernel();
-
/* only user requests are allowed here */
if ((op & SN_HWPERF_OP_MASK) < 10) {
r = -EINVAL;
@@ -741,9 +747,10 @@ sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, u64 arg)
goto error;
} else
if ((r = sn_hwperf_enum_objects(&nobj, &objs)) == 0) {
+ int cpuobj_index = 0;
+
memset(p, 0, a.sz);
for (i = 0; i < nobj; i++) {
- int cpuobj_index = 0;
if (!SN_HWPERF_IS_NODE(objs + i))
continue;
node = sn_hwperf_obj_to_cnode(objs + i);
@@ -772,7 +779,7 @@ sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, u64 arg)
case SN_HWPERF_GET_NODE_NASID:
if (a.sz != sizeof(u64) ||
- (node = a.arg) < 0 || !node_possible(node)) {
+ (node = a.arg) < 0 || !cnode_possible(node)) {
r = -EINVAL;
goto error;
}
@@ -780,17 +787,18 @@ sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, u64 arg)
break;
case SN_HWPERF_GET_OBJ_NODE:
- if (a.sz != sizeof(u64) || a.arg < 0) {
+ i = a.arg;
+ if (a.sz != sizeof(u64) || i < 0) {
r = -EINVAL;
goto error;
}
if ((r = sn_hwperf_enum_objects(&nobj, &objs)) == 0) {
- if (a.arg >= nobj) {
+ if (i >= nobj) {
r = -EINVAL;
vfree(objs);
goto error;
}
- if (objs[(i = a.arg)].id != a.arg) {
+ if (objs[i].id != a.arg) {
for (i = 0; i < nobj; i++) {
if (objs[i].id == a.arg)
break;
@@ -852,12 +860,12 @@ sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, u64 arg)
error:
vfree(p);
- lock_kernel();
return r;
}
-static struct file_operations sn_hwperf_fops = {
- .ioctl = sn_hwperf_ioctl,
+static const struct file_operations sn_hwperf_fops = {
+ .unlocked_ioctl = sn_hwperf_ioctl,
+ .llseek = noop_llseek,
};
static struct miscdevice sn_hwperf_dev = {
@@ -873,10 +881,10 @@ static int sn_hwperf_init(void)
int e = 0;
/* single threaded, once-only initialization */
- down(&sn_hwperf_init_mutex);
+ mutex_lock(&sn_hwperf_init_mutex);
if (sn_hwperf_salheap) {
- up(&sn_hwperf_init_mutex);
+ mutex_unlock(&sn_hwperf_init_mutex);
return e;
}
@@ -925,7 +933,7 @@ out:
sn_hwperf_salheap = NULL;
sn_hwperf_obj_cnt = 0;
}
- up(&sn_hwperf_init_mutex);
+ mutex_unlock(&sn_hwperf_init_mutex);
return e;
}
@@ -969,10 +977,13 @@ int sn_hwperf_get_nearest_node(cnodeid_t node,
return e;
}
-static int __devinit sn_hwperf_misc_register_init(void)
+static int sn_hwperf_misc_register_init(void)
{
int e;
+ if (!ia64_platform_is("sn2"))
+ return 0;
+
sn_hwperf_init();
/*
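The sn_hwperf changes above also drop the big kernel lock: the old .ioctl entry point (which ran under the BKL and bracketed its body with unlock_kernel()/lock_kernel()) becomes .unlocked_ioctl, and the DECLARE_MUTEX() semaphore becomes a DEFINE_MUTEX(). The sketch below shows the generic shape of that conversion with hypothetical names (demo_ioctl, demo_lock); unlike sn_hwperf_ioctl(), which needs no extra serialization, it takes a mutex simply to illustrate that the caller no longer provides one.

    #include <linux/fs.h>
    #include <linux/types.h>
    #include <linux/mutex.h>
    #include <linux/uaccess.h>

    static DEFINE_MUTEX(demo_lock);          /* replaces DECLARE_MUTEX()   */

    /* unlocked_ioctl: the BKL is not taken for us, so serialize explicitly. */
    static long demo_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
    {
            long ret = 0;
            u64 val;

            mutex_lock(&demo_lock);
            if (copy_from_user(&val, (void __user *)arg, sizeof(val)))
                    ret = -EFAULT;
            mutex_unlock(&demo_lock);
            return ret;
    }

    static const struct file_operations demo_fops = {
            .unlocked_ioctl = demo_ioctl,
            .llseek         = noop_llseek,
    };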
diff --git a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c
index a06719d752a..7aab87f4806 100644
--- a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c
+++ b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c
@@ -5,12 +5,11 @@
*
* Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved.
*/
-#include <linux/config.h>
-#include <asm/uaccess.h>
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <asm/uaccess.h>
#include <asm/sn/sn_sal.h>
static int partition_id_show(struct seq_file *s, void *p)
@@ -37,7 +36,7 @@ static int system_serial_number_open(struct inode *inode, struct file *file)
static int licenseID_show(struct seq_file *s, void *p)
{
- seq_printf(s, "0x%lx\n", sn_partition_serial_number_val());
+ seq_printf(s, "0x%llx\n", sn_partition_serial_number_val());
return 0;
}
@@ -46,38 +45,6 @@ static int licenseID_open(struct inode *inode, struct file *file)
return single_open(file, licenseID_show, NULL);
}
-/*
- * Enable forced interrupt by default.
- * When set, the sn interrupt handler writes the force interrupt register on
- * the bridge chip. The hardware will then send an interrupt message if the
- * interrupt line is active. This mimics a level sensitive interrupt.
- */
-extern int sn_force_interrupt_flag;
-
-static int sn_force_interrupt_show(struct seq_file *s, void *p)
-{
- seq_printf(s, "Force interrupt is %s\n",
- sn_force_interrupt_flag ? "enabled" : "disabled");
- return 0;
-}
-
-static ssize_t sn_force_interrupt_write_proc(struct file *file,
- const char __user *buffer, size_t count, loff_t *data)
-{
- char val;
-
- if (copy_from_user(&val, buffer, 1))
- return -EFAULT;
-
- sn_force_interrupt_flag = (val == '0') ? 0 : 1;
- return count;
-}
-
-static int sn_force_interrupt_open(struct inode *inode, struct file *file)
-{
- return single_open(file, sn_force_interrupt_show, NULL);
-}
-
static int coherence_id_show(struct seq_file *s, void *p)
{
seq_printf(s, "%d\n", partition_coherence_id());
@@ -90,60 +57,61 @@ static int coherence_id_open(struct inode *inode, struct file *file)
return single_open(file, coherence_id_show, NULL);
}
-static struct proc_dir_entry *sn_procfs_create_entry(
- const char *name, struct proc_dir_entry *parent,
- int (*openfunc)(struct inode *, struct file *),
- int (*releasefunc)(struct inode *, struct file *))
-{
- struct proc_dir_entry *e = create_proc_entry(name, 0444, parent);
-
- if (e) {
- e->proc_fops = (struct file_operations *)kmalloc(
- sizeof(struct file_operations), GFP_KERNEL);
- if (e->proc_fops) {
- memset(e->proc_fops, 0, sizeof(struct file_operations));
- e->proc_fops->open = openfunc;
- e->proc_fops->read = seq_read;
- e->proc_fops->llseek = seq_lseek;
- e->proc_fops->release = releasefunc;
- }
- }
-
- return e;
-}
-
/* /proc/sgi_sn/sn_topology uses seq_file, see sn_hwperf.c */
extern int sn_topology_open(struct inode *, struct file *);
extern int sn_topology_release(struct inode *, struct file *);
+static const struct file_operations proc_partition_id_fops = {
+ .open = partition_id_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations proc_system_sn_fops = {
+ .open = system_serial_number_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations proc_license_id_fops = {
+ .open = licenseID_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations proc_coherence_id_fops = {
+ .open = coherence_id_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations proc_sn_topo_fops = {
+ .open = sn_topology_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = sn_topology_release,
+};
+
void register_sn_procfs(void)
{
static struct proc_dir_entry *sgi_proc_dir = NULL;
- struct proc_dir_entry *e;
BUG_ON(sgi_proc_dir != NULL);
if (!(sgi_proc_dir = proc_mkdir("sgi_sn", NULL)))
return;
- sn_procfs_create_entry("partition_id", sgi_proc_dir,
- partition_id_open, single_release);
-
- sn_procfs_create_entry("system_serial_number", sgi_proc_dir,
- system_serial_number_open, single_release);
-
- sn_procfs_create_entry("licenseID", sgi_proc_dir,
- licenseID_open, single_release);
-
- e = sn_procfs_create_entry("sn_force_interrupt", sgi_proc_dir,
- sn_force_interrupt_open, single_release);
- if (e)
- e->proc_fops->write = sn_force_interrupt_write_proc;
-
- sn_procfs_create_entry("coherence_id", sgi_proc_dir,
- coherence_id_open, single_release);
-
- sn_procfs_create_entry("sn_topology", sgi_proc_dir,
- sn_topology_open, sn_topology_release);
+ proc_create("partition_id", 0444, sgi_proc_dir,
+ &proc_partition_id_fops);
+ proc_create("system_serial_number", 0444, sgi_proc_dir,
+ &proc_system_sn_fops);
+ proc_create("licenseID", 0444, sgi_proc_dir, &proc_license_id_fops);
+ proc_create("coherence_id", 0444, sgi_proc_dir,
+ &proc_coherence_id_fops);
+ proc_create("sn_topology", 0444, sgi_proc_dir, &proc_sn_topo_fops);
}
#endif /* CONFIG_PROC_FS */
diff --git a/arch/ia64/sn/kernel/sn2/timer.c b/arch/ia64/sn/kernel/sn2/timer.c
index deb9baf4d47..abab8f99e91 100644
--- a/arch/ia64/sn/kernel/sn2/timer.c
+++ b/arch/ia64/sn/kernel/sn2/timer.c
@@ -11,9 +11,10 @@
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/interrupt.h>
+#include <linux/clocksource.h>
#include <asm/hw_irq.h>
-#include <asm/system.h>
+#include <asm/timex.h>
#include <asm/sn/leds.h>
#include <asm/sn/shub_mmr.h>
@@ -21,16 +22,39 @@
extern unsigned long sn_rtc_cycles_per_second;
-static struct time_interpolator sn2_interpolator = {
- .drift = -1,
- .shift = 10,
- .mask = (1LL << 55) - 1,
- .source = TIME_SOURCE_MMIO64
+static cycle_t read_sn2(struct clocksource *cs)
+{
+ return (cycle_t)readq(RTC_COUNTER_ADDR);
+}
+
+static struct clocksource clocksource_sn2 = {
+ .name = "sn2_rtc",
+ .rating = 450,
+ .read = read_sn2,
+ .mask = (1LL << 55) - 1,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
+/*
+ * sn udelay uses the RTC instead of the ITC because the ITC is not
+ * synchronized across all CPUs, and the thread may migrate to another CPU
+ * if preemption is enabled.
+ */
+static void
+ia64_sn_udelay (unsigned long usecs)
+{
+ unsigned long start = rtc_time();
+ unsigned long end = start +
+ usecs * sn_rtc_cycles_per_second / 1000000;
+
+ while (time_before((unsigned long)rtc_time(), end))
+ cpu_relax();
+}
+
void __init sn_timer_init(void)
{
- sn2_interpolator.frequency = sn_rtc_cycles_per_second;
- sn2_interpolator.addr = RTC_COUNTER_ADDR;
- register_time_interpolator(&sn2_interpolator);
+ clocksource_sn2.archdata.fsys_mmio = RTC_COUNTER_ADDR;
+ clocksource_register_hz(&clocksource_sn2, sn_rtc_cycles_per_second);
+
+ ia64_udelay = &ia64_sn_udelay;
}
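The timer.c hunk replaces the removed time_interpolator with a clocksource whose mult/shift factors are derived by the core from the RTC frequency. A minimal sketch of that registration pattern follows; the names (demo_rtc_read, demo_clocksource, demo_counter) are hypothetical, and the MMIO address and rate would come from the platform just as RTC_COUNTER_ADDR and sn_rtc_cycles_per_second do above.

    #include <linux/clocksource.h>
    #include <linux/io.h>

    static void __iomem *demo_counter;      /* mapped MMIO counter register */

    static cycle_t demo_rtc_read(struct clocksource *cs)
    {
            return (cycle_t)readq(demo_counter);
    }

    static struct clocksource demo_clocksource = {
            .name   = "demo_rtc",
            .rating = 450,                  /* prefer this over the ITC     */
            .read   = demo_rtc_read,
            .mask   = CLOCKSOURCE_MASK(55), /* counter is 55 bits wide      */
            .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
    };

    static int __init demo_timer_init(unsigned long counter_hz)
    {
            /* The core computes mult/shift from the frequency passed here. */
            return clocksource_register_hz(&demo_clocksource, counter_hz);
    }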
diff --git a/arch/ia64/sn/kernel/sn2/timer_interrupt.c b/arch/ia64/sn/kernel/sn2/timer_interrupt.c
index adf5db2e2af..103d6ea8e94 100644
--- a/arch/ia64/sn/kernel/sn2/timer_interrupt.c
+++ b/arch/ia64/sn/kernel/sn2/timer_interrupt.c
@@ -1,7 +1,7 @@
/*
*
*
- * Copyright (c) 2005 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2005, 2006 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
@@ -22,11 +22,6 @@
* License along with this program; if not, write the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
- * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
- * Mountain View, CA 94043, or:
- *
- * http://www.sgi.com
- *
* For further information regarding this notice, see:
*
* http://oss.sgi.com/projects/GenInfo/NoticeExplan
@@ -41,7 +36,7 @@ extern irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs);
#define SN_LB_INT_WAR_INTERVAL 100
-void sn_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+void sn_timer_interrupt(int irq, void *dev_id)
{
/* LED blinking */
if (!pda->hb_count--) {
diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c
index d263d3e8fbb..e35f6485c1f 100644
--- a/arch/ia64/sn/kernel/tiocx.c
+++ b/arch/ia64/sn/kernel/tiocx.c
@@ -14,7 +14,6 @@
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/delay.h>
-#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/addrs.h>
@@ -66,8 +65,7 @@ static int tiocx_match(struct device *dev, struct device_driver *drv)
}
-static int tiocx_uevent(struct device *dev, char **envp, int num_envp,
- char *buffer, int buffer_size)
+static int tiocx_uevent(struct device *dev, struct kobj_uevent_env *env)
{
return -ENODEV;
}
@@ -192,6 +190,7 @@ cx_device_register(nasid_t nasid, int part_num, int mfg_num,
struct hubdev_info *hubdev, int bt)
{
struct cx_dev *cx_dev;
+ int r;
cx_dev = kzalloc(sizeof(struct cx_dev), GFP_KERNEL);
DBG("cx_dev= 0x%p\n", cx_dev);
@@ -207,9 +206,12 @@ cx_device_register(nasid_t nasid, int part_num, int mfg_num,
cx_dev->dev.parent = NULL;
cx_dev->dev.bus = &tiocx_bus_type;
cx_dev->dev.release = tiocx_bus_release;
- snprintf(cx_dev->dev.bus_id, BUS_ID_SIZE, "%d",
- cx_dev->cx_id.nasid);
- device_register(&cx_dev->dev);
+ dev_set_name(&cx_dev->dev, "%d", cx_dev->cx_id.nasid);
+ r = device_register(&cx_dev->dev);
+ if (r) {
+ kfree(cx_dev);
+ return r;
+ }
get_device(&cx_dev->dev);
device_create_file(&cx_dev->dev, &dev_attr_cxdev_control);
@@ -284,12 +286,10 @@ struct sn_irq_info *tiocx_irq_alloc(nasid_t nasid, int widget, int irq,
if ((nasid & 1) == 0)
return NULL;
- sn_irq_info = kmalloc(sn_irq_size, GFP_KERNEL);
+ sn_irq_info = kzalloc(sn_irq_size, GFP_KERNEL);
if (sn_irq_info == NULL)
return NULL;
- memset(sn_irq_info, 0x0, sn_irq_size);
-
status = tiocx_intr_alloc(nasid, widget, __pa(sn_irq_info), irq,
req_nasid, slice);
if (status) {
@@ -371,9 +371,15 @@ static void tio_corelet_reset(nasid_t nasid, int corelet)
static int is_fpga_tio(int nasid, int *bt)
{
- int ioboard_type;
+ u16 uninitialized_var(ioboard_type); /* GCC be quiet */
+ long rc;
- ioboard_type = ia64_sn_sysctl_ioboard_get(nasid);
+ rc = ia64_sn_sysctl_ioboard_get(nasid, &ioboard_type);
+ if (rc) {
+ printk(KERN_WARNING "ia64_sn_sysctl_ioboard_get failed: %ld\n",
+ rc);
+ return 0;
+ }
switch (ioboard_type) {
case L1_BRICKTYPE_SA:
@@ -484,11 +490,14 @@ static int __init tiocx_init(void)
{
cnodeid_t cnodeid;
int found_tiocx_device = 0;
+ int err;
if (!ia64_platform_is("sn2"))
- return -ENODEV;
+ return 0;
- bus_register(&tiocx_bus_type);
+ err = bus_register(&tiocx_bus_type);
+ if (err)
+ return err;
for (cnodeid = 0; cnodeid < num_cnodes; cnodeid++) {
nasid_t nasid;
@@ -548,7 +557,7 @@ static void __exit tiocx_exit(void)
bus_unregister(&tiocx_bus_type);
}
-subsys_initcall(tiocx_init);
+fs_initcall(tiocx_init);
module_exit(tiocx_exit);
/************************************************************************
diff --git a/arch/ia64/sn/kernel/xp_main.c b/arch/ia64/sn/kernel/xp_main.c
deleted file mode 100644
index b7ea46645e1..00000000000
--- a/arch/ia64/sn/kernel/xp_main.c
+++ /dev/null
@@ -1,290 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved.
- */
-
-
-/*
- * Cross Partition (XP) base.
- *
- * XP provides a base from which its users can interact
- * with XPC, yet not be dependent on XPC.
- *
- */
-
-
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <asm/sn/intr.h>
-#include <asm/sn/sn_sal.h>
-#include <asm/sn/xp.h>
-
-
-/*
- * Target of nofault PIO read.
- */
-u64 xp_nofault_PIOR_target;
-
-
-/*
- * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level
- * users of XPC.
- */
-struct xpc_registration xpc_registrations[XPC_NCHANNELS];
-
-
-/*
- * Initialize the XPC interface to indicate that XPC isn't loaded.
- */
-static enum xpc_retval xpc_notloaded(void) { return xpcNotLoaded; }
-
-struct xpc_interface xpc_interface = {
- (void (*)(int)) xpc_notloaded,
- (void (*)(int)) xpc_notloaded,
- (enum xpc_retval (*)(partid_t, int, u32, void **)) xpc_notloaded,
- (enum xpc_retval (*)(partid_t, int, void *)) xpc_notloaded,
- (enum xpc_retval (*)(partid_t, int, void *, xpc_notify_func, void *))
- xpc_notloaded,
- (void (*)(partid_t, int, void *)) xpc_notloaded,
- (enum xpc_retval (*)(partid_t, void *)) xpc_notloaded
-};
-
-
-/*
- * XPC calls this when it (the XPC module) has been loaded.
- */
-void
-xpc_set_interface(void (*connect)(int),
- void (*disconnect)(int),
- enum xpc_retval (*allocate)(partid_t, int, u32, void **),
- enum xpc_retval (*send)(partid_t, int, void *),
- enum xpc_retval (*send_notify)(partid_t, int, void *,
- xpc_notify_func, void *),
- void (*received)(partid_t, int, void *),
- enum xpc_retval (*partid_to_nasids)(partid_t, void *))
-{
- xpc_interface.connect = connect;
- xpc_interface.disconnect = disconnect;
- xpc_interface.allocate = allocate;
- xpc_interface.send = send;
- xpc_interface.send_notify = send_notify;
- xpc_interface.received = received;
- xpc_interface.partid_to_nasids = partid_to_nasids;
-}
-
-
-/*
- * XPC calls this when it (the XPC module) is being unloaded.
- */
-void
-xpc_clear_interface(void)
-{
- xpc_interface.connect = (void (*)(int)) xpc_notloaded;
- xpc_interface.disconnect = (void (*)(int)) xpc_notloaded;
- xpc_interface.allocate = (enum xpc_retval (*)(partid_t, int, u32,
- void **)) xpc_notloaded;
- xpc_interface.send = (enum xpc_retval (*)(partid_t, int, void *))
- xpc_notloaded;
- xpc_interface.send_notify = (enum xpc_retval (*)(partid_t, int, void *,
- xpc_notify_func, void *)) xpc_notloaded;
- xpc_interface.received = (void (*)(partid_t, int, void *))
- xpc_notloaded;
- xpc_interface.partid_to_nasids = (enum xpc_retval (*)(partid_t, void *))
- xpc_notloaded;
-}
-
-
-/*
- * Register for automatic establishment of a channel connection whenever
- * a partition comes up.
- *
- * Arguments:
- *
- * ch_number - channel # to register for connection.
- * func - function to call for asynchronous notification of channel
- * state changes (i.e., connection, disconnection, error) and
- * the arrival of incoming messages.
- * key - pointer to optional user-defined value that gets passed back
- * to the user on any callouts made to func.
- * payload_size - size in bytes of the XPC message's payload area which
- * contains a user-defined message. The user should make
- * this large enough to hold their largest message.
- * nentries - max #of XPC message entries a message queue can contain.
- * The actual number, which is determined when a connection
- * is established and may be less then requested, will be
- * passed to the user via the xpcConnected callout.
- * assigned_limit - max number of kthreads allowed to be processing
- * messages (per connection) at any given instant.
- * idle_limit - max number of kthreads allowed to be idle at any given
- * instant.
- */
-enum xpc_retval
-xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
- u16 nentries, u32 assigned_limit, u32 idle_limit)
-{
- struct xpc_registration *registration;
-
-
- DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
- DBUG_ON(payload_size == 0 || nentries == 0);
- DBUG_ON(func == NULL);
- DBUG_ON(assigned_limit == 0 || idle_limit > assigned_limit);
-
- registration = &xpc_registrations[ch_number];
-
- if (mutex_lock_interruptible(&registration->mutex) != 0) {
- return xpcInterrupted;
- }
-
- /* if XPC_CHANNEL_REGISTERED(ch_number) */
- if (registration->func != NULL) {
- mutex_unlock(&registration->mutex);
- return xpcAlreadyRegistered;
- }
-
- /* register the channel for connection */
- registration->msg_size = XPC_MSG_SIZE(payload_size);
- registration->nentries = nentries;
- registration->assigned_limit = assigned_limit;
- registration->idle_limit = idle_limit;
- registration->key = key;
- registration->func = func;
-
- mutex_unlock(&registration->mutex);
-
- xpc_interface.connect(ch_number);
-
- return xpcSuccess;
-}
-
-
-/*
- * Remove the registration for automatic connection of the specified channel
- * when a partition comes up.
- *
- * Before returning this xpc_disconnect() will wait for all connections on the
- * specified channel have been closed/torndown. So the caller can be assured
- * that they will not be receiving any more callouts from XPC to their
- * function registered via xpc_connect().
- *
- * Arguments:
- *
- * ch_number - channel # to unregister.
- */
-void
-xpc_disconnect(int ch_number)
-{
- struct xpc_registration *registration;
-
-
- DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
-
- registration = &xpc_registrations[ch_number];
-
- /*
- * We've decided not to make this a down_interruptible(), since we
- * figured XPC's users will just turn around and call xpc_disconnect()
- * again anyways, so we might as well wait, if need be.
- */
- mutex_lock(&registration->mutex);
-
- /* if !XPC_CHANNEL_REGISTERED(ch_number) */
- if (registration->func == NULL) {
- mutex_unlock(&registration->mutex);
- return;
- }
-
- /* remove the connection registration for the specified channel */
- registration->func = NULL;
- registration->key = NULL;
- registration->nentries = 0;
- registration->msg_size = 0;
- registration->assigned_limit = 0;
- registration->idle_limit = 0;
-
- xpc_interface.disconnect(ch_number);
-
- mutex_unlock(&registration->mutex);
-
- return;
-}
-
-
-int __init
-xp_init(void)
-{
- int ret, ch_number;
- u64 func_addr = *(u64 *) xp_nofault_PIOR;
- u64 err_func_addr = *(u64 *) xp_error_PIOR;
-
-
- if (!ia64_platform_is("sn2")) {
- return -ENODEV;
- }
-
- /*
- * Register a nofault code region which performs a cross-partition
- * PIO read. If the PIO read times out, the MCA handler will consume
- * the error and return to a kernel-provided instruction to indicate
- * an error. This PIO read exists because it is guaranteed to timeout
- * if the destination is down (AMO operations do not timeout on at
- * least some CPUs on Shubs <= v1.2, which unfortunately we have to
- * work around).
- */
- if ((ret = sn_register_nofault_code(func_addr, err_func_addr,
- err_func_addr, 1, 1)) != 0) {
- printk(KERN_ERR "XP: can't register nofault code, error=%d\n",
- ret);
- }
- /*
- * Setup the nofault PIO read target. (There is no special reason why
- * SH_IPI_ACCESS was selected.)
- */
- if (is_shub2()) {
- xp_nofault_PIOR_target = SH2_IPI_ACCESS0;
- } else {
- xp_nofault_PIOR_target = SH1_IPI_ACCESS;
- }
-
- /* initialize the connection registration mutex */
- for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++) {
- mutex_init(&xpc_registrations[ch_number].mutex);
- }
-
- return 0;
-}
-module_init(xp_init);
-
-
-void __exit
-xp_exit(void)
-{
- u64 func_addr = *(u64 *) xp_nofault_PIOR;
- u64 err_func_addr = *(u64 *) xp_error_PIOR;
-
-
- /* unregister the PIO read nofault code region */
- (void) sn_register_nofault_code(func_addr, err_func_addr,
- err_func_addr, 1, 0);
-}
-module_exit(xp_exit);
-
-
-MODULE_AUTHOR("Silicon Graphics, Inc.");
-MODULE_DESCRIPTION("Cross Partition (XP) base");
-MODULE_LICENSE("GPL");
-
-EXPORT_SYMBOL(xp_nofault_PIOR);
-EXPORT_SYMBOL(xp_nofault_PIOR_target);
-EXPORT_SYMBOL(xpc_registrations);
-EXPORT_SYMBOL(xpc_interface);
-EXPORT_SYMBOL(xpc_clear_interface);
-EXPORT_SYMBOL(xpc_set_interface);
-EXPORT_SYMBOL(xpc_connect);
-EXPORT_SYMBOL(xpc_disconnect);
-
diff --git a/arch/ia64/sn/kernel/xp_nofault.S b/arch/ia64/sn/kernel/xp_nofault.S
deleted file mode 100644
index b772543053c..00000000000
--- a/arch/ia64/sn/kernel/xp_nofault.S
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved.
- */
-
-
-/*
- * The xp_nofault_PIOR function takes a pointer to a remote PIO register
- * and attempts to load and consume a value from it. This function
- * will be registered as a nofault code block. In the event that the
- * PIO read fails, the MCA handler will force the error to look
- * corrected and vector to the xp_error_PIOR which will return an error.
- *
- * extern int xp_nofault_PIOR(void *remote_register);
- */
-
- .global xp_nofault_PIOR
-xp_nofault_PIOR:
- mov r8=r0 // Stage a success return value
- ld8.acq r9=[r32];; // PIO Read the specified register
- adds r9=1,r9 // Add to force a consume
- br.ret.sptk.many b0;; // Return success
-
- .global xp_error_PIOR
-xp_error_PIOR:
- mov r8=1 // Return value of 1
- br.ret.sptk.many b0;; // Return failure
-
diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c
deleted file mode 100644
index 36e5437a0fb..00000000000
--- a/arch/ia64/sn/kernel/xpc_channel.c
+++ /dev/null
@@ -1,2356 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (c) 2004-2006 Silicon Graphics, Inc. All Rights Reserved.
- */
-
-
-/*
- * Cross Partition Communication (XPC) channel support.
- *
- * This is the part of XPC that manages the channels and
- * sends/receives messages across them to/from other partitions.
- *
- */
-
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/cache.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <linux/completion.h>
-#include <asm/sn/bte.h>
-#include <asm/sn/sn_sal.h>
-#include <asm/sn/xpc.h>
-
-
-/*
- * Set up the initial values for the XPartition Communication channels.
- */
-static void
-xpc_initialize_channels(struct xpc_partition *part, partid_t partid)
-{
- int ch_number;
- struct xpc_channel *ch;
-
-
- for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
- ch = &part->channels[ch_number];
-
- ch->partid = partid;
- ch->number = ch_number;
- ch->flags = XPC_C_DISCONNECTED;
-
- ch->local_GP = &part->local_GPs[ch_number];
- ch->local_openclose_args =
- &part->local_openclose_args[ch_number];
-
- atomic_set(&ch->kthreads_assigned, 0);
- atomic_set(&ch->kthreads_idle, 0);
- atomic_set(&ch->kthreads_active, 0);
-
- atomic_set(&ch->references, 0);
- atomic_set(&ch->n_to_notify, 0);
-
- spin_lock_init(&ch->lock);
- mutex_init(&ch->msg_to_pull_mutex);
- init_completion(&ch->wdisconnect_wait);
-
- atomic_set(&ch->n_on_msg_allocate_wq, 0);
- init_waitqueue_head(&ch->msg_allocate_wq);
- init_waitqueue_head(&ch->idle_wq);
- }
-}
-
-
-/*
- * Setup the infrastructure necessary to support XPartition Communication
- * between the specified remote partition and the local one.
- */
-enum xpc_retval
-xpc_setup_infrastructure(struct xpc_partition *part)
-{
- int ret, cpuid;
- struct timer_list *timer;
- partid_t partid = XPC_PARTID(part);
-
-
- /*
- * Zero out MOST of the entry for this partition. Only the fields
- * starting with `nchannels' will be zeroed. The preceding fields must
- * remain `viable' across partition ups and downs, since they may be
- * referenced during this memset() operation.
- */
- memset(&part->nchannels, 0, sizeof(struct xpc_partition) -
- offsetof(struct xpc_partition, nchannels));
-
- /*
- * Allocate all of the channel structures as a contiguous chunk of
- * memory.
- */
- part->channels = kmalloc(sizeof(struct xpc_channel) * XPC_NCHANNELS,
- GFP_KERNEL);
- if (part->channels == NULL) {
- dev_err(xpc_chan, "can't get memory for channels\n");
- return xpcNoMemory;
- }
- memset(part->channels, 0, sizeof(struct xpc_channel) * XPC_NCHANNELS);
-
- part->nchannels = XPC_NCHANNELS;
-
-
- /* allocate all the required GET/PUT values */
-
- part->local_GPs = xpc_kmalloc_cacheline_aligned(XPC_GP_SIZE,
- GFP_KERNEL, &part->local_GPs_base);
- if (part->local_GPs == NULL) {
- kfree(part->channels);
- part->channels = NULL;
- dev_err(xpc_chan, "can't get memory for local get/put "
- "values\n");
- return xpcNoMemory;
- }
- memset(part->local_GPs, 0, XPC_GP_SIZE);
-
- part->remote_GPs = xpc_kmalloc_cacheline_aligned(XPC_GP_SIZE,
- GFP_KERNEL, &part->remote_GPs_base);
- if (part->remote_GPs == NULL) {
- kfree(part->channels);
- part->channels = NULL;
- kfree(part->local_GPs_base);
- part->local_GPs = NULL;
- dev_err(xpc_chan, "can't get memory for remote get/put "
- "values\n");
- return xpcNoMemory;
- }
- memset(part->remote_GPs, 0, XPC_GP_SIZE);
-
-
- /* allocate all the required open and close args */
-
- part->local_openclose_args = xpc_kmalloc_cacheline_aligned(
- XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
- &part->local_openclose_args_base);
- if (part->local_openclose_args == NULL) {
- kfree(part->channels);
- part->channels = NULL;
- kfree(part->local_GPs_base);
- part->local_GPs = NULL;
- kfree(part->remote_GPs_base);
- part->remote_GPs = NULL;
- dev_err(xpc_chan, "can't get memory for local connect args\n");
- return xpcNoMemory;
- }
- memset(part->local_openclose_args, 0, XPC_OPENCLOSE_ARGS_SIZE);
-
- part->remote_openclose_args = xpc_kmalloc_cacheline_aligned(
- XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
- &part->remote_openclose_args_base);
- if (part->remote_openclose_args == NULL) {
- kfree(part->channels);
- part->channels = NULL;
- kfree(part->local_GPs_base);
- part->local_GPs = NULL;
- kfree(part->remote_GPs_base);
- part->remote_GPs = NULL;
- kfree(part->local_openclose_args_base);
- part->local_openclose_args = NULL;
- dev_err(xpc_chan, "can't get memory for remote connect args\n");
- return xpcNoMemory;
- }
- memset(part->remote_openclose_args, 0, XPC_OPENCLOSE_ARGS_SIZE);
-
-
- xpc_initialize_channels(part, partid);
-
- atomic_set(&part->nchannels_active, 0);
- atomic_set(&part->nchannels_engaged, 0);
-
-
- /* local_IPI_amo were set to 0 by an earlier memset() */
-
-	/* Initialize this partition's AMO_t structure */
- part->local_IPI_amo_va = xpc_IPI_init(partid);
-
- spin_lock_init(&part->IPI_lock);
-
- atomic_set(&part->channel_mgr_requests, 1);
- init_waitqueue_head(&part->channel_mgr_wq);
-
- sprintf(part->IPI_owner, "xpc%02d", partid);
- ret = request_irq(SGI_XPC_NOTIFY, xpc_notify_IRQ_handler, SA_SHIRQ,
- part->IPI_owner, (void *) (u64) partid);
- if (ret != 0) {
- kfree(part->channels);
- part->channels = NULL;
- kfree(part->local_GPs_base);
- part->local_GPs = NULL;
- kfree(part->remote_GPs_base);
- part->remote_GPs = NULL;
- kfree(part->local_openclose_args_base);
- part->local_openclose_args = NULL;
- kfree(part->remote_openclose_args_base);
- part->remote_openclose_args = NULL;
- dev_err(xpc_chan, "can't register NOTIFY IRQ handler, "
- "errno=%d\n", -ret);
- return xpcLackOfResources;
- }
-
- /* Setup a timer to check for dropped IPIs */
- timer = &part->dropped_IPI_timer;
- init_timer(timer);
- timer->function = (void (*)(unsigned long)) xpc_dropped_IPI_check;
- timer->data = (unsigned long) part;
- timer->expires = jiffies + XPC_P_DROPPED_IPI_WAIT;
- add_timer(timer);
-
- /*
- * With the setting of the partition setup_state to XPC_P_SETUP, we're
- * declaring that this partition is ready to go.
- */
- part->setup_state = XPC_P_SETUP;
-
-
- /*
- * Setup the per partition specific variables required by the
- * remote partition to establish channel connections with us.
- *
- * The setting of the magic # indicates that these per partition
- * specific variables are ready to be used.
- */
- xpc_vars_part[partid].GPs_pa = __pa(part->local_GPs);
- xpc_vars_part[partid].openclose_args_pa =
- __pa(part->local_openclose_args);
- xpc_vars_part[partid].IPI_amo_pa = __pa(part->local_IPI_amo_va);
- cpuid = raw_smp_processor_id(); /* any CPU in this partition will do */
- xpc_vars_part[partid].IPI_nasid = cpuid_to_nasid(cpuid);
- xpc_vars_part[partid].IPI_phys_cpuid = cpu_physical_id(cpuid);
- xpc_vars_part[partid].nchannels = part->nchannels;
- xpc_vars_part[partid].magic = XPC_VP_MAGIC1;
-
- return xpcSuccess;
-}
-
-
-/*
- * Create a wrapper that hides the underlying mechanism for pulling a cacheline
- * (or multiple cachelines) from a remote partition.
- *
- * src must be a cacheline aligned physical address on the remote partition.
- * dst must be a cacheline aligned virtual address on this partition.
- * cnt must be a multiple of the cacheline size
- */
-static enum xpc_retval
-xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst,
- const void *src, size_t cnt)
-{
- bte_result_t bte_ret;
-
-
- DBUG_ON((u64) src != L1_CACHE_ALIGN((u64) src));
- DBUG_ON((u64) dst != L1_CACHE_ALIGN((u64) dst));
- DBUG_ON(cnt != L1_CACHE_ALIGN(cnt));
-
- if (part->act_state == XPC_P_DEACTIVATING) {
- return part->reason;
- }
-
- bte_ret = xp_bte_copy((u64) src, (u64) ia64_tpa((u64) dst),
- (u64) cnt, (BTE_NORMAL | BTE_WACQUIRE), NULL);
- if (bte_ret == BTE_SUCCESS) {
- return xpcSuccess;
- }
-
- dev_dbg(xpc_chan, "xp_bte_copy() from partition %d failed, ret=%d\n",
- XPC_PARTID(part), bte_ret);
-
- return xpc_map_bte_errors(bte_ret);
-}
-
-
-/*
- * Pull the remote per partition specific variables from the specified
- * partition.
- */
-enum xpc_retval
-xpc_pull_remote_vars_part(struct xpc_partition *part)
-{
- u8 buffer[L1_CACHE_BYTES * 2];
- struct xpc_vars_part *pulled_entry_cacheline =
- (struct xpc_vars_part *) L1_CACHE_ALIGN((u64) buffer);
- struct xpc_vars_part *pulled_entry;
- u64 remote_entry_cacheline_pa, remote_entry_pa;
- partid_t partid = XPC_PARTID(part);
- enum xpc_retval ret;
-
-
- /* pull the cacheline that contains the variables we're interested in */
-
- DBUG_ON(part->remote_vars_part_pa !=
- L1_CACHE_ALIGN(part->remote_vars_part_pa));
- DBUG_ON(sizeof(struct xpc_vars_part) != L1_CACHE_BYTES / 2);
-
- remote_entry_pa = part->remote_vars_part_pa +
- sn_partition_id * sizeof(struct xpc_vars_part);
-
- remote_entry_cacheline_pa = (remote_entry_pa & ~(L1_CACHE_BYTES - 1));
-
- pulled_entry = (struct xpc_vars_part *) ((u64) pulled_entry_cacheline +
- (remote_entry_pa & (L1_CACHE_BYTES - 1)));
-
- ret = xpc_pull_remote_cachelines(part, pulled_entry_cacheline,
- (void *) remote_entry_cacheline_pa,
- L1_CACHE_BYTES);
- if (ret != xpcSuccess) {
- dev_dbg(xpc_chan, "failed to pull XPC vars_part from "
- "partition %d, ret=%d\n", partid, ret);
- return ret;
- }
-
-
- /* see if they've been set up yet */
-
- if (pulled_entry->magic != XPC_VP_MAGIC1 &&
- pulled_entry->magic != XPC_VP_MAGIC2) {
-
- if (pulled_entry->magic != 0) {
- dev_dbg(xpc_chan, "partition %d's XPC vars_part for "
- "partition %d has bad magic value (=0x%lx)\n",
- partid, sn_partition_id, pulled_entry->magic);
- return xpcBadMagic;
- }
-
- /* they've not been initialized yet */
- return xpcRetry;
- }
-
- if (xpc_vars_part[partid].magic == XPC_VP_MAGIC1) {
-
- /* validate the variables */
-
- if (pulled_entry->GPs_pa == 0 ||
- pulled_entry->openclose_args_pa == 0 ||
- pulled_entry->IPI_amo_pa == 0) {
-
- dev_err(xpc_chan, "partition %d's XPC vars_part for "
- "partition %d are not valid\n", partid,
- sn_partition_id);
- return xpcInvalidAddress;
- }
-
- /* the variables we imported look to be valid */
-
- part->remote_GPs_pa = pulled_entry->GPs_pa;
- part->remote_openclose_args_pa =
- pulled_entry->openclose_args_pa;
- part->remote_IPI_amo_va =
- (AMO_t *) __va(pulled_entry->IPI_amo_pa);
- part->remote_IPI_nasid = pulled_entry->IPI_nasid;
- part->remote_IPI_phys_cpuid = pulled_entry->IPI_phys_cpuid;
-
- if (part->nchannels > pulled_entry->nchannels) {
- part->nchannels = pulled_entry->nchannels;
- }
-
- /* let the other side know that we've pulled their variables */
-
- xpc_vars_part[partid].magic = XPC_VP_MAGIC2;
- }
-
- if (pulled_entry->magic == XPC_VP_MAGIC1) {
- return xpcRetry;
- }
-
- return xpcSuccess;
-}
-
-
-/*
- * Get the IPI flags and pull the openclose args and/or remote GPs as needed.
- */
-static u64
-xpc_get_IPI_flags(struct xpc_partition *part)
-{
- unsigned long irq_flags;
- u64 IPI_amo;
- enum xpc_retval ret;
-
-
- /*
- * See if there are any IPI flags to be handled.
- */
-
- spin_lock_irqsave(&part->IPI_lock, irq_flags);
- if ((IPI_amo = part->local_IPI_amo) != 0) {
- part->local_IPI_amo = 0;
- }
- spin_unlock_irqrestore(&part->IPI_lock, irq_flags);
-
-
- if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_amo)) {
- ret = xpc_pull_remote_cachelines(part,
- part->remote_openclose_args,
- (void *) part->remote_openclose_args_pa,
- XPC_OPENCLOSE_ARGS_SIZE);
- if (ret != xpcSuccess) {
- XPC_DEACTIVATE_PARTITION(part, ret);
-
- dev_dbg(xpc_chan, "failed to pull openclose args from "
- "partition %d, ret=%d\n", XPC_PARTID(part),
- ret);
-
- /* don't bother processing IPIs anymore */
- IPI_amo = 0;
- }
- }
-
- if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_amo)) {
- ret = xpc_pull_remote_cachelines(part, part->remote_GPs,
- (void *) part->remote_GPs_pa,
- XPC_GP_SIZE);
- if (ret != xpcSuccess) {
- XPC_DEACTIVATE_PARTITION(part, ret);
-
- dev_dbg(xpc_chan, "failed to pull GPs from partition "
- "%d, ret=%d\n", XPC_PARTID(part), ret);
-
- /* don't bother processing IPIs anymore */
- IPI_amo = 0;
- }
- }
-
- return IPI_amo;
-}
-
-
-/*
- * Allocate the local message queue and the notify queue.
- */
-static enum xpc_retval
-xpc_allocate_local_msgqueue(struct xpc_channel *ch)
-{
- unsigned long irq_flags;
- int nentries;
- size_t nbytes;
-
-
- // >>> may want to check for ch->flags & XPC_C_DISCONNECTING between
- // >>> iterations of the for-loop, bail if set?
-
-	// >>> should we impose a minimum # of entries? like 4 or 8?
- for (nentries = ch->local_nentries; nentries > 0; nentries--) {
-
- nbytes = nentries * ch->msg_size;
- ch->local_msgqueue = xpc_kmalloc_cacheline_aligned(nbytes,
- GFP_KERNEL,
- &ch->local_msgqueue_base);
- if (ch->local_msgqueue == NULL) {
- continue;
- }
- memset(ch->local_msgqueue, 0, nbytes);
-
- nbytes = nentries * sizeof(struct xpc_notify);
- ch->notify_queue = kmalloc(nbytes, GFP_KERNEL);
- if (ch->notify_queue == NULL) {
- kfree(ch->local_msgqueue_base);
- ch->local_msgqueue = NULL;
- continue;
- }
- memset(ch->notify_queue, 0, nbytes);
-
- spin_lock_irqsave(&ch->lock, irq_flags);
- if (nentries < ch->local_nentries) {
- dev_dbg(xpc_chan, "nentries=%d local_nentries=%d, "
- "partid=%d, channel=%d\n", nentries,
- ch->local_nentries, ch->partid, ch->number);
-
- ch->local_nentries = nentries;
- }
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- return xpcSuccess;
- }
-
- dev_dbg(xpc_chan, "can't get memory for local message queue and notify "
- "queue, partid=%d, channel=%d\n", ch->partid, ch->number);
- return xpcNoMemory;
-}
-
-
-/*
- * Allocate the cached remote message queue.
- */
-static enum xpc_retval
-xpc_allocate_remote_msgqueue(struct xpc_channel *ch)
-{
- unsigned long irq_flags;
- int nentries;
- size_t nbytes;
-
-
- DBUG_ON(ch->remote_nentries <= 0);
-
- // >>> may want to check for ch->flags & XPC_C_DISCONNECTING between
- // >>> iterations of the for-loop, bail if set?
-
-	// >>> should we impose a minimum # of entries? like 4 or 8?
- for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
-
- nbytes = nentries * ch->msg_size;
- ch->remote_msgqueue = xpc_kmalloc_cacheline_aligned(nbytes,
- GFP_KERNEL,
- &ch->remote_msgqueue_base);
- if (ch->remote_msgqueue == NULL) {
- continue;
- }
- memset(ch->remote_msgqueue, 0, nbytes);
-
- spin_lock_irqsave(&ch->lock, irq_flags);
- if (nentries < ch->remote_nentries) {
- dev_dbg(xpc_chan, "nentries=%d remote_nentries=%d, "
- "partid=%d, channel=%d\n", nentries,
- ch->remote_nentries, ch->partid, ch->number);
-
- ch->remote_nentries = nentries;
- }
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- return xpcSuccess;
- }
-
- dev_dbg(xpc_chan, "can't get memory for cached remote message queue, "
- "partid=%d, channel=%d\n", ch->partid, ch->number);
- return xpcNoMemory;
-}
-
-
-/*
- * Allocate message queues and other stuff associated with a channel.
- *
- * Note: Assumes all of the channel sizes are filled in.
- */
-static enum xpc_retval
-xpc_allocate_msgqueues(struct xpc_channel *ch)
-{
- unsigned long irq_flags;
- enum xpc_retval ret;
-
-
- DBUG_ON(ch->flags & XPC_C_SETUP);
-
- if ((ret = xpc_allocate_local_msgqueue(ch)) != xpcSuccess) {
- return ret;
- }
-
- if ((ret = xpc_allocate_remote_msgqueue(ch)) != xpcSuccess) {
- kfree(ch->local_msgqueue_base);
- ch->local_msgqueue = NULL;
- kfree(ch->notify_queue);
- ch->notify_queue = NULL;
- return ret;
- }
-
- spin_lock_irqsave(&ch->lock, irq_flags);
- ch->flags |= XPC_C_SETUP;
- spin_unlock_irqrestore(&ch->lock, irq_flags);
-
- return xpcSuccess;
-}
-
-
-/*
- * Process a connect message from a remote partition.
- *
- * Note: xpc_process_connect() is expecting to be called with the
- * spin_lock_irqsave held and will leave it locked upon return.
- */
-static void
-xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
-{
- enum xpc_retval ret;
-
-
- DBUG_ON(!spin_is_locked(&ch->lock));
-
- if (!(ch->flags & XPC_C_OPENREQUEST) ||
- !(ch->flags & XPC_C_ROPENREQUEST)) {
- /* nothing more to do for now */
- return;
- }
- DBUG_ON(!(ch->flags & XPC_C_CONNECTING));
-
- if (!(ch->flags & XPC_C_SETUP)) {
- spin_unlock_irqrestore(&ch->lock, *irq_flags);
- ret = xpc_allocate_msgqueues(ch);
- spin_lock_irqsave(&ch->lock, *irq_flags);
-
- if (ret != xpcSuccess) {
- XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags);
- }
- if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING)) {
- return;
- }
-
- DBUG_ON(!(ch->flags & XPC_C_SETUP));
- DBUG_ON(ch->local_msgqueue == NULL);
- DBUG_ON(ch->remote_msgqueue == NULL);
- }
-
- if (!(ch->flags & XPC_C_OPENREPLY)) {
- ch->flags |= XPC_C_OPENREPLY;
- xpc_IPI_send_openreply(ch, irq_flags);
- }
-
- if (!(ch->flags & XPC_C_ROPENREPLY)) {
- return;
- }
-
- DBUG_ON(ch->remote_msgqueue_pa == 0);
-
- ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP); /* clear all else */
-
- dev_info(xpc_chan, "channel %d to partition %d connected\n",
- ch->number, ch->partid);
-
- spin_unlock_irqrestore(&ch->lock, *irq_flags);
- xpc_create_kthreads(ch, 1);
- spin_lock_irqsave(&ch->lock, *irq_flags);
-}
-
-
-/*
- * Notify those who wanted to be notified upon delivery of their message.
- */
-static void
-xpc_notify_senders(struct xpc_channel *ch, enum xpc_retval reason, s64 put)
-{
- struct xpc_notify *notify;
- u8 notify_type;
- s64 get = ch->w_remote_GP.get - 1;
-
-
- while (++get < put && atomic_read(&ch->n_to_notify) > 0) {
-
- notify = &ch->notify_queue[get % ch->local_nentries];
-
- /*
- * See if the notify entry indicates it was associated with
-		 * a message whose sender wants to be notified. It is possible
- * that it is, but someone else is doing or has done the
- * notification.
- */
- notify_type = notify->type;
- if (notify_type == 0 ||
- cmpxchg(&notify->type, notify_type, 0) !=
- notify_type) {
- continue;
- }
-
- DBUG_ON(notify_type != XPC_N_CALL);
-
- atomic_dec(&ch->n_to_notify);
-
- if (notify->func != NULL) {
- dev_dbg(xpc_chan, "notify->func() called, notify=0x%p, "
- "msg_number=%ld, partid=%d, channel=%d\n",
- (void *) notify, get, ch->partid, ch->number);
-
- notify->func(reason, ch->partid, ch->number,
- notify->key);
-
- dev_dbg(xpc_chan, "notify->func() returned, "
- "notify=0x%p, msg_number=%ld, partid=%d, "
- "channel=%d\n", (void *) notify, get,
- ch->partid, ch->number);
- }
- }
-}
-
-
-/*
- * Free up message queues and other stuff that were allocated for the specified
- * channel.
- *
- * Note: ch->reason and ch->reason_line are left set for debugging purposes;
- * they're cleared when XPC_C_DISCONNECTED is cleared.
- */
-static void
-xpc_free_msgqueues(struct xpc_channel *ch)
-{
- DBUG_ON(!spin_is_locked(&ch->lock));
- DBUG_ON(atomic_read(&ch->n_to_notify) != 0);
-
- ch->remote_msgqueue_pa = 0;
- ch->func = NULL;
- ch->key = NULL;
- ch->msg_size = 0;
- ch->local_nentries = 0;
- ch->remote_nentries = 0;
- ch->kthreads_assigned_limit = 0;
- ch->kthreads_idle_limit = 0;
-
- ch->local_GP->get = 0;
- ch->local_GP->put = 0;
- ch->remote_GP.get = 0;
- ch->remote_GP.put = 0;
- ch->w_local_GP.get = 0;
- ch->w_local_GP.put = 0;
- ch->w_remote_GP.get = 0;
- ch->w_remote_GP.put = 0;
- ch->next_msg_to_pull = 0;
-
- if (ch->flags & XPC_C_SETUP) {
- ch->flags &= ~XPC_C_SETUP;
-
- dev_dbg(xpc_chan, "ch->flags=0x%x, partid=%d, channel=%d\n",
- ch->flags, ch->partid, ch->number);
-
- kfree(ch->local_msgqueue_base);
- ch->local_msgqueue = NULL;
- kfree(ch->remote_msgqueue_base);
- ch->remote_msgqueue = NULL;
- kfree(ch->notify_queue);
- ch->notify_queue = NULL;
- }
-}
-
-
-/*
- * spin_lock_irqsave() is expected to be held on entry.
- */
-static void
-xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
-{
- struct xpc_partition *part = &xpc_partitions[ch->partid];
- u32 channel_was_connected = (ch->flags & XPC_C_WASCONNECTED);
-
-
- DBUG_ON(!spin_is_locked(&ch->lock));
-
- if (!(ch->flags & XPC_C_DISCONNECTING)) {
- return;
- }
-
- DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
-
- /* make sure all activity has settled down first */
-
- if (atomic_read(&ch->references) > 0) {
- return;
- }
- DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0);
-
- if (part->act_state == XPC_P_DEACTIVATING) {
- /* can't proceed until the other side disengages from us */
- if (xpc_partition_engaged(1UL << ch->partid)) {
- return;
- }
-
- } else {
-
- /* as long as the other side is up do the full protocol */
-
- if (!(ch->flags & XPC_C_RCLOSEREQUEST)) {
- return;
- }
-
- if (!(ch->flags & XPC_C_CLOSEREPLY)) {
- ch->flags |= XPC_C_CLOSEREPLY;
- xpc_IPI_send_closereply(ch, irq_flags);
- }
-
- if (!(ch->flags & XPC_C_RCLOSEREPLY)) {
- return;
- }
- }
-
- /* wake those waiting for notify completion */
- if (atomic_read(&ch->n_to_notify) > 0) {
- /* >>> we do callout while holding ch->lock */
- xpc_notify_senders(ch, ch->reason, ch->w_local_GP.put);
- }
-
- /* both sides are disconnected now */
-
- if (ch->flags & XPC_C_CONNECTCALLOUT) {
- spin_unlock_irqrestore(&ch->lock, *irq_flags);
- xpc_disconnect_callout(ch, xpcDisconnected);
- spin_lock_irqsave(&ch->lock, *irq_flags);
- }
-
- /* it's now safe to free the channel's message queues */
- xpc_free_msgqueues(ch);
-
- /* mark disconnected, clear all other flags except XPC_C_WDISCONNECT */
- ch->flags = (XPC_C_DISCONNECTED | (ch->flags & XPC_C_WDISCONNECT));
-
- atomic_dec(&part->nchannels_active);
-
- if (channel_was_connected) {
- dev_info(xpc_chan, "channel %d to partition %d disconnected, "
- "reason=%d\n", ch->number, ch->partid, ch->reason);
- }
-
- if (ch->flags & XPC_C_WDISCONNECT) {
- /* we won't lose the CPU since we're holding ch->lock */
- complete(&ch->wdisconnect_wait);
- } else if (ch->delayed_IPI_flags) {
- if (part->act_state != XPC_P_DEACTIVATING) {
- /* time to take action on any delayed IPI flags */
- spin_lock(&part->IPI_lock);
- XPC_SET_IPI_FLAGS(part->local_IPI_amo, ch->number,
- ch->delayed_IPI_flags);
- spin_unlock(&part->IPI_lock);
- }
- ch->delayed_IPI_flags = 0;
- }
-}
-
-
-/*
- * Process a change in the channel's remote connection state.
- */
-static void
-xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,
- u8 IPI_flags)
-{
- unsigned long irq_flags;
- struct xpc_openclose_args *args =
- &part->remote_openclose_args[ch_number];
- struct xpc_channel *ch = &part->channels[ch_number];
- enum xpc_retval reason;
-
-
-
- spin_lock_irqsave(&ch->lock, irq_flags);
-
-again:
-
- if ((ch->flags & XPC_C_DISCONNECTED) &&
- (ch->flags & XPC_C_WDISCONNECT)) {
- /*
- * Delay processing IPI flags until thread waiting disconnect
- * has had a chance to see that the channel is disconnected.
- */
- ch->delayed_IPI_flags |= IPI_flags;
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- return;
- }
-
-
- if (IPI_flags & XPC_IPI_CLOSEREQUEST) {
-
- dev_dbg(xpc_chan, "XPC_IPI_CLOSEREQUEST (reason=%d) received "
- "from partid=%d, channel=%d\n", args->reason,
- ch->partid, ch->number);
-
- /*
- * If RCLOSEREQUEST is set, we're probably waiting for
- * RCLOSEREPLY. We should find it and a ROPENREQUEST packed
- * with this RCLOSEREQUEST in the IPI_flags.
- */
-
- if (ch->flags & XPC_C_RCLOSEREQUEST) {
- DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));
- DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
- DBUG_ON(!(ch->flags & XPC_C_CLOSEREPLY));
- DBUG_ON(ch->flags & XPC_C_RCLOSEREPLY);
-
- DBUG_ON(!(IPI_flags & XPC_IPI_CLOSEREPLY));
- IPI_flags &= ~XPC_IPI_CLOSEREPLY;
- ch->flags |= XPC_C_RCLOSEREPLY;
-
- /* both sides have finished disconnecting */
- xpc_process_disconnect(ch, &irq_flags);
- DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
- goto again;
- }
-
- if (ch->flags & XPC_C_DISCONNECTED) {
- if (!(IPI_flags & XPC_IPI_OPENREQUEST)) {
- if ((XPC_GET_IPI_FLAGS(part->local_IPI_amo,
- ch_number) & XPC_IPI_OPENREQUEST)) {
-
- DBUG_ON(ch->delayed_IPI_flags != 0);
- spin_lock(&part->IPI_lock);
- XPC_SET_IPI_FLAGS(part->local_IPI_amo,
- ch_number,
- XPC_IPI_CLOSEREQUEST);
- spin_unlock(&part->IPI_lock);
- }
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- return;
- }
-
- XPC_SET_REASON(ch, 0, 0);
- ch->flags &= ~XPC_C_DISCONNECTED;
-
- atomic_inc(&part->nchannels_active);
- ch->flags |= (XPC_C_CONNECTING | XPC_C_ROPENREQUEST);
- }
-
- IPI_flags &= ~(XPC_IPI_OPENREQUEST | XPC_IPI_OPENREPLY);
-
- /*
- * The meaningful CLOSEREQUEST connection state fields are:
- * reason = reason connection is to be closed
- */
-
- ch->flags |= XPC_C_RCLOSEREQUEST;
-
- if (!(ch->flags & XPC_C_DISCONNECTING)) {
- reason = args->reason;
- if (reason <= xpcSuccess || reason > xpcUnknownReason) {
- reason = xpcUnknownReason;
- } else if (reason == xpcUnregistering) {
- reason = xpcOtherUnregistering;
- }
-
- XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);
-
- DBUG_ON(IPI_flags & XPC_IPI_CLOSEREPLY);
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- return;
- }
-
- xpc_process_disconnect(ch, &irq_flags);
- }
-
-
- if (IPI_flags & XPC_IPI_CLOSEREPLY) {
-
- dev_dbg(xpc_chan, "XPC_IPI_CLOSEREPLY received from partid=%d,"
- " channel=%d\n", ch->partid, ch->number);
-
- if (ch->flags & XPC_C_DISCONNECTED) {
- DBUG_ON(part->act_state != XPC_P_DEACTIVATING);
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- return;
- }
-
- DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
-
- if (!(ch->flags & XPC_C_RCLOSEREQUEST)) {
- if ((XPC_GET_IPI_FLAGS(part->local_IPI_amo, ch_number)
- & XPC_IPI_CLOSEREQUEST)) {
-
- DBUG_ON(ch->delayed_IPI_flags != 0);
- spin_lock(&part->IPI_lock);
- XPC_SET_IPI_FLAGS(part->local_IPI_amo,
- ch_number, XPC_IPI_CLOSEREPLY);
- spin_unlock(&part->IPI_lock);
- }
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- return;
- }
-
- ch->flags |= XPC_C_RCLOSEREPLY;
-
- if (ch->flags & XPC_C_CLOSEREPLY) {
- /* both sides have finished disconnecting */
- xpc_process_disconnect(ch, &irq_flags);
- }
- }
-
-
- if (IPI_flags & XPC_IPI_OPENREQUEST) {
-
- dev_dbg(xpc_chan, "XPC_IPI_OPENREQUEST (msg_size=%d, "
- "local_nentries=%d) received from partid=%d, "
- "channel=%d\n", args->msg_size, args->local_nentries,
- ch->partid, ch->number);
-
- if (part->act_state == XPC_P_DEACTIVATING ||
- (ch->flags & XPC_C_ROPENREQUEST)) {
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- return;
- }
-
- if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_WDISCONNECT)) {
- ch->delayed_IPI_flags |= XPC_IPI_OPENREQUEST;
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- return;
- }
- DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED |
- XPC_C_OPENREQUEST)));
- DBUG_ON(ch->flags & (XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
- XPC_C_OPENREPLY | XPC_C_CONNECTED));
-
- /*
- * The meaningful OPENREQUEST connection state fields are:
- * msg_size = size of channel's messages in bytes
- * local_nentries = remote partition's local_nentries
- */
- if (args->msg_size == 0 || args->local_nentries == 0) {
- /* assume OPENREQUEST was delayed by mistake */
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- return;
- }
-
- ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING);
- ch->remote_nentries = args->local_nentries;
-
-
- if (ch->flags & XPC_C_OPENREQUEST) {
- if (args->msg_size != ch->msg_size) {
- XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes,
- &irq_flags);
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- return;
- }
- } else {
- ch->msg_size = args->msg_size;
-
- XPC_SET_REASON(ch, 0, 0);
- ch->flags &= ~XPC_C_DISCONNECTED;
-
- atomic_inc(&part->nchannels_active);
- }
-
- xpc_process_connect(ch, &irq_flags);
- }
-
-
- if (IPI_flags & XPC_IPI_OPENREPLY) {
-
- dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY (local_msgqueue_pa=0x%lx, "
- "local_nentries=%d, remote_nentries=%d) received from "
- "partid=%d, channel=%d\n", args->local_msgqueue_pa,
- args->local_nentries, args->remote_nentries,
- ch->partid, ch->number);
-
- if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) {
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- return;
- }
- if (!(ch->flags & XPC_C_OPENREQUEST)) {
- XPC_DISCONNECT_CHANNEL(ch, xpcOpenCloseError,
- &irq_flags);
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- return;
- }
-
- DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST));
- DBUG_ON(ch->flags & XPC_C_CONNECTED);
-
- /*
- * The meaningful OPENREPLY connection state fields are:
- * local_msgqueue_pa = physical address of remote
- * partition's local_msgqueue
- * local_nentries = remote partition's local_nentries
- * remote_nentries = remote partition's remote_nentries
- */
- DBUG_ON(args->local_msgqueue_pa == 0);
- DBUG_ON(args->local_nentries == 0);
- DBUG_ON(args->remote_nentries == 0);
-
- ch->flags |= XPC_C_ROPENREPLY;
- ch->remote_msgqueue_pa = args->local_msgqueue_pa;
-
- if (args->local_nentries < ch->remote_nentries) {
- dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY: new "
- "remote_nentries=%d, old remote_nentries=%d, "
- "partid=%d, channel=%d\n",
- args->local_nentries, ch->remote_nentries,
- ch->partid, ch->number);
-
- ch->remote_nentries = args->local_nentries;
- }
- if (args->remote_nentries < ch->local_nentries) {
- dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY: new "
- "local_nentries=%d, old local_nentries=%d, "
- "partid=%d, channel=%d\n",
- args->remote_nentries, ch->local_nentries,
- ch->partid, ch->number);
-
- ch->local_nentries = args->remote_nentries;
- }
-
- xpc_process_connect(ch, &irq_flags);
- }
-
- spin_unlock_irqrestore(&ch->lock, irq_flags);
-}
-
-
-/*
- * Attempt to establish a channel connection to a remote partition.
- */
-static enum xpc_retval
-xpc_connect_channel(struct xpc_channel *ch)
-{
- unsigned long irq_flags;
- struct xpc_registration *registration = &xpc_registrations[ch->number];
-
-
- if (mutex_trylock(&registration->mutex) == 0) {
- return xpcRetry;
- }
-
- if (!XPC_CHANNEL_REGISTERED(ch->number)) {
- mutex_unlock(&registration->mutex);
- return xpcUnregistered;
- }
-
- spin_lock_irqsave(&ch->lock, irq_flags);
-
- DBUG_ON(ch->flags & XPC_C_CONNECTED);
- DBUG_ON(ch->flags & XPC_C_OPENREQUEST);
-
- if (ch->flags & XPC_C_DISCONNECTING) {
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- mutex_unlock(&registration->mutex);
- return ch->reason;
- }
-
-
- /* add info from the channel connect registration to the channel */
-
- ch->kthreads_assigned_limit = registration->assigned_limit;
- ch->kthreads_idle_limit = registration->idle_limit;
- DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0);
- DBUG_ON(atomic_read(&ch->kthreads_idle) != 0);
- DBUG_ON(atomic_read(&ch->kthreads_active) != 0);
-
- ch->func = registration->func;
- DBUG_ON(registration->func == NULL);
- ch->key = registration->key;
-
- ch->local_nentries = registration->nentries;
-
- if (ch->flags & XPC_C_ROPENREQUEST) {
- if (registration->msg_size != ch->msg_size) {
- /* the local and remote sides aren't the same */
-
- /*
- * Because XPC_DISCONNECT_CHANNEL() can block we're
-			 * forced to release the registration mutex before we
-			 * unlock the channel lock. But that's okay here because
-			 * we're done with the part that required the registration
-			 * mutex. XPC_DISCONNECT_CHANNEL() requires that the
- * channel lock be locked and will unlock and relock
- * the channel lock as needed.
- */
- mutex_unlock(&registration->mutex);
- XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes,
- &irq_flags);
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- return xpcUnequalMsgSizes;
- }
- } else {
- ch->msg_size = registration->msg_size;
-
- XPC_SET_REASON(ch, 0, 0);
- ch->flags &= ~XPC_C_DISCONNECTED;
-
- atomic_inc(&xpc_partitions[ch->partid].nchannels_active);
- }
-
- mutex_unlock(&registration->mutex);
-
-
- /* initiate the connection */
-
- ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING);
- xpc_IPI_send_openrequest(ch, &irq_flags);
-
- xpc_process_connect(ch, &irq_flags);
-
- spin_unlock_irqrestore(&ch->lock, irq_flags);
-
- return xpcSuccess;
-}
-
-
-/*
- * Clear some of the msg flags in the local message queue.
- */
-static inline void
-xpc_clear_local_msgqueue_flags(struct xpc_channel *ch)
-{
- struct xpc_msg *msg;
- s64 get;
-
-
- get = ch->w_remote_GP.get;
- do {
- msg = (struct xpc_msg *) ((u64) ch->local_msgqueue +
- (get % ch->local_nentries) * ch->msg_size);
- msg->flags = 0;
- } while (++get < (volatile s64) ch->remote_GP.get);
-}
-
-
-/*
- * Clear some of the msg flags in the remote message queue.
- */
-static inline void
-xpc_clear_remote_msgqueue_flags(struct xpc_channel *ch)
-{
- struct xpc_msg *msg;
- s64 put;
-
-
- put = ch->w_remote_GP.put;
- do {
- msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue +
- (put % ch->remote_nentries) * ch->msg_size);
- msg->flags = 0;
- } while (++put < (volatile s64) ch->remote_GP.put);
-}
-
-
-static void
-xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
-{
- struct xpc_channel *ch = &part->channels[ch_number];
- int nmsgs_sent;
-
-
- ch->remote_GP = part->remote_GPs[ch_number];
-
-
- /* See what, if anything, has changed for each connected channel */
-
- xpc_msgqueue_ref(ch);
-
- if (ch->w_remote_GP.get == ch->remote_GP.get &&
- ch->w_remote_GP.put == ch->remote_GP.put) {
- /* nothing changed since GPs were last pulled */
- xpc_msgqueue_deref(ch);
- return;
- }
-
- if (!(ch->flags & XPC_C_CONNECTED)){
- xpc_msgqueue_deref(ch);
- return;
- }
-
-
- /*
- * First check to see if messages recently sent by us have been
- * received by the other side. (The remote GET value will have
- * changed since we last looked at it.)
- */
-
- if (ch->w_remote_GP.get != ch->remote_GP.get) {
-
- /*
- * We need to notify any senders that want to be notified
- * that their sent messages have been received by their
- * intended recipients. We need to do this before updating
- * w_remote_GP.get so that we don't allocate the same message
- * queue entries prematurely (see xpc_allocate_msg()).
- */
- if (atomic_read(&ch->n_to_notify) > 0) {
- /*
- * Notify senders that messages sent have been
- * received and delivered by the other side.
- */
- xpc_notify_senders(ch, xpcMsgDelivered,
- ch->remote_GP.get);
- }
-
- /*
- * Clear msg->flags in previously sent messages, so that
- * they're ready for xpc_allocate_msg().
- */
- xpc_clear_local_msgqueue_flags(ch);
-
- ch->w_remote_GP.get = ch->remote_GP.get;
-
- dev_dbg(xpc_chan, "w_remote_GP.get changed to %ld, partid=%d, "
- "channel=%d\n", ch->w_remote_GP.get, ch->partid,
- ch->number);
-
- /*
- * If anyone was waiting for message queue entries to become
- * available, wake them up.
- */
- if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) {
- wake_up(&ch->msg_allocate_wq);
- }
- }
-
-
- /*
- * Now check for newly sent messages by the other side. (The remote
- * PUT value will have changed since we last looked at it.)
- */
-
- if (ch->w_remote_GP.put != ch->remote_GP.put) {
- /*
- * Clear msg->flags in previously received messages, so that
- * they're ready for xpc_get_deliverable_msg().
- */
- xpc_clear_remote_msgqueue_flags(ch);
-
- ch->w_remote_GP.put = ch->remote_GP.put;
-
- dev_dbg(xpc_chan, "w_remote_GP.put changed to %ld, partid=%d, "
- "channel=%d\n", ch->w_remote_GP.put, ch->partid,
- ch->number);
-
- nmsgs_sent = ch->w_remote_GP.put - ch->w_local_GP.get;
- if (nmsgs_sent > 0) {
- dev_dbg(xpc_chan, "msgs waiting to be copied and "
- "delivered=%d, partid=%d, channel=%d\n",
- nmsgs_sent, ch->partid, ch->number);
-
- if (ch->flags & XPC_C_CONNECTCALLOUT) {
- xpc_activate_kthreads(ch, nmsgs_sent);
- }
- }
- }
-
- xpc_msgqueue_deref(ch);
-}
-
-
-void
-xpc_process_channel_activity(struct xpc_partition *part)
-{
- unsigned long irq_flags;
- u64 IPI_amo, IPI_flags;
- struct xpc_channel *ch;
- int ch_number;
- u32 ch_flags;
-
-
- IPI_amo = xpc_get_IPI_flags(part);
-
- /*
- * Initiate channel connections for registered channels.
- *
- * For each connected channel that has pending messages activate idle
- * kthreads and/or create new kthreads as needed.
- */
-
- for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
- ch = &part->channels[ch_number];
-
-
- /*
- * Process any open or close related IPI flags, and then deal
- * with connecting or disconnecting the channel as required.
- */
-
- IPI_flags = XPC_GET_IPI_FLAGS(IPI_amo, ch_number);
-
- if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_flags)) {
- xpc_process_openclose_IPI(part, ch_number, IPI_flags);
- }
-
- ch_flags = ch->flags; /* need an atomic snapshot of flags */
-
- if (ch_flags & XPC_C_DISCONNECTING) {
- spin_lock_irqsave(&ch->lock, irq_flags);
- xpc_process_disconnect(ch, &irq_flags);
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- continue;
- }
-
- if (part->act_state == XPC_P_DEACTIVATING) {
- continue;
- }
-
- if (!(ch_flags & XPC_C_CONNECTED)) {
- if (!(ch_flags & XPC_C_OPENREQUEST)) {
- DBUG_ON(ch_flags & XPC_C_SETUP);
- (void) xpc_connect_channel(ch);
- } else {
- spin_lock_irqsave(&ch->lock, irq_flags);
- xpc_process_connect(ch, &irq_flags);
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- }
- continue;
- }
-
-
- /*
- * Process any message related IPI flags, this may involve the
- * activation of kthreads to deliver any pending messages sent
- * from the other partition.
- */
-
- if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_flags)) {
- xpc_process_msg_IPI(part, ch_number);
- }
- }
-}
-
-
-/*
- * XPC's heartbeat code calls this function to inform XPC that a partition is
- * going down. XPC responds by tearing down the XPartition Communication
- * infrastructure used for the just downed partition.
- *
- * XPC's heartbeat code will never call this function and xpc_partition_up()
- * at the same time. Nor will it ever make multiple calls to either function
- * at the same time.
- */
-void
-xpc_partition_going_down(struct xpc_partition *part, enum xpc_retval reason)
-{
- unsigned long irq_flags;
- int ch_number;
- struct xpc_channel *ch;
-
-
- dev_dbg(xpc_chan, "deactivating partition %d, reason=%d\n",
- XPC_PARTID(part), reason);
-
- if (!xpc_part_ref(part)) {
- /* infrastructure for this partition isn't currently set up */
- return;
- }
-
-
- /* disconnect channels associated with the partition going down */
-
- for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
- ch = &part->channels[ch_number];
-
- xpc_msgqueue_ref(ch);
- spin_lock_irqsave(&ch->lock, irq_flags);
-
- XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);
-
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- xpc_msgqueue_deref(ch);
- }
-
- xpc_wakeup_channel_mgr(part);
-
- xpc_part_deref(part);
-}
-
-
-/*
- * Teardown the infrastructure necessary to support XPartition Communication
- * between the specified remote partition and the local one.
- */
-void
-xpc_teardown_infrastructure(struct xpc_partition *part)
-{
- partid_t partid = XPC_PARTID(part);
-
-
- /*
- * We start off by making this partition inaccessible to local
- * processes by marking it as no longer setup. Then we make it
- * inaccessible to remote processes by clearing the XPC per partition
- * specific variable's magic # (which indicates that these variables
- * are no longer valid) and by ignoring all XPC notify IPIs sent to
- * this partition.
- */
-
- DBUG_ON(atomic_read(&part->nchannels_engaged) != 0);
- DBUG_ON(atomic_read(&part->nchannels_active) != 0);
- DBUG_ON(part->setup_state != XPC_P_SETUP);
- part->setup_state = XPC_P_WTEARDOWN;
-
- xpc_vars_part[partid].magic = 0;
-
-
- free_irq(SGI_XPC_NOTIFY, (void *) (u64) partid);
-
-
- /*
-	 * Before proceeding with the teardown we have to wait until all
- * existing references cease.
- */
- wait_event(part->teardown_wq, (atomic_read(&part->references) == 0));
-
-
- /* now we can begin tearing down the infrastructure */
-
- part->setup_state = XPC_P_TORNDOWN;
-
- /* in case we've still got outstanding timers registered... */
- del_timer_sync(&part->dropped_IPI_timer);
-
- kfree(part->remote_openclose_args_base);
- part->remote_openclose_args = NULL;
- kfree(part->local_openclose_args_base);
- part->local_openclose_args = NULL;
- kfree(part->remote_GPs_base);
- part->remote_GPs = NULL;
- kfree(part->local_GPs_base);
- part->local_GPs = NULL;
- kfree(part->channels);
- part->channels = NULL;
- part->local_IPI_amo_va = NULL;
-}
-
-
-/*
- * Called by XP at the time of channel connection registration to cause
- * XPC to establish connections to all currently active partitions.
- */
-void
-xpc_initiate_connect(int ch_number)
-{
- partid_t partid;
- struct xpc_partition *part;
- struct xpc_channel *ch;
-
-
- DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
-
- for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
- part = &xpc_partitions[partid];
-
- if (xpc_part_ref(part)) {
- ch = &part->channels[ch_number];
-
- /*
- * Initiate the establishment of a connection on the
- * newly registered channel to the remote partition.
- */
- xpc_wakeup_channel_mgr(part);
- xpc_part_deref(part);
- }
- }
-}
-
-
-void
-xpc_connected_callout(struct xpc_channel *ch)
-{
- /* let the registerer know that a connection has been established */
-
- if (ch->func != NULL) {
- dev_dbg(xpc_chan, "ch->func() called, reason=xpcConnected, "
- "partid=%d, channel=%d\n", ch->partid, ch->number);
-
- ch->func(xpcConnected, ch->partid, ch->number,
- (void *) (u64) ch->local_nentries, ch->key);
-
- dev_dbg(xpc_chan, "ch->func() returned, reason=xpcConnected, "
- "partid=%d, channel=%d\n", ch->partid, ch->number);
- }
-}
-
-
-/*
- * Called by XP at the time of channel connection unregistration to cause
- * XPC to tear down all current connections for the specified channel.
- *
- * Before returning xpc_initiate_disconnect() will wait until all connections
- * on the specified channel have been closed/torn down. So the caller can be
- * assured that they will not be receiving any more callouts from XPC to the
- * function they registered via xpc_connect().
- *
- * Arguments:
- *
- * ch_number - channel # to unregister.
- */
-void
-xpc_initiate_disconnect(int ch_number)
-{
- unsigned long irq_flags;
- partid_t partid;
- struct xpc_partition *part;
- struct xpc_channel *ch;
-
-
- DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
-
- /* initiate the channel disconnect for every active partition */
- for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
- part = &xpc_partitions[partid];
-
- if (xpc_part_ref(part)) {
- ch = &part->channels[ch_number];
- xpc_msgqueue_ref(ch);
-
- spin_lock_irqsave(&ch->lock, irq_flags);
-
- if (!(ch->flags & XPC_C_DISCONNECTED)) {
- ch->flags |= XPC_C_WDISCONNECT;
-
- XPC_DISCONNECT_CHANNEL(ch, xpcUnregistering,
- &irq_flags);
- }
-
- spin_unlock_irqrestore(&ch->lock, irq_flags);
-
- xpc_msgqueue_deref(ch);
- xpc_part_deref(part);
- }
- }
-
- xpc_disconnect_wait(ch_number);
-}
-
-
-/*
- * Disconnect a channel and reflect it back to all who may be waiting.
- *
- * An OPEN is not allowed until XPC_C_DISCONNECTING is cleared by
- * xpc_process_disconnect(), and if set, XPC_C_WDISCONNECT is cleared by
- * xpc_disconnect_wait().
- *
- * THE CHANNEL IS TO BE LOCKED BY THE CALLER AND WILL REMAIN LOCKED UPON RETURN.
- */
-void
-xpc_disconnect_channel(const int line, struct xpc_channel *ch,
- enum xpc_retval reason, unsigned long *irq_flags)
-{
- u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED);
-
-
- DBUG_ON(!spin_is_locked(&ch->lock));
-
- if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) {
- return;
- }
- DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED)));
-
- dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n",
- reason, line, ch->partid, ch->number);
-
- XPC_SET_REASON(ch, reason, line);
-
- ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING);
- /* some of these may not have been set */
- ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY |
- XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
- XPC_C_CONNECTING | XPC_C_CONNECTED);
-
- xpc_IPI_send_closerequest(ch, irq_flags);
-
- if (channel_was_connected) {
- ch->flags |= XPC_C_WASCONNECTED;
- }
-
- spin_unlock_irqrestore(&ch->lock, *irq_flags);
-
- /* wake all idle kthreads so they can exit */
- if (atomic_read(&ch->kthreads_idle) > 0) {
- wake_up_all(&ch->idle_wq);
- }
-
- /* wake those waiting to allocate an entry from the local msg queue */
- if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) {
- wake_up(&ch->msg_allocate_wq);
- }
-
- spin_lock_irqsave(&ch->lock, *irq_flags);
-}
-
-
-void
-xpc_disconnect_callout(struct xpc_channel *ch, enum xpc_retval reason)
-{
- /*
- * Let the channel's registerer know that the channel is being
- * disconnected. We don't want to do this if the registerer was never
- * informed of a connection being made.
- */
-
- if (ch->func != NULL) {
- dev_dbg(xpc_chan, "ch->func() called, reason=%d, partid=%d, "
- "channel=%d\n", reason, ch->partid, ch->number);
-
- ch->func(reason, ch->partid, ch->number, NULL, ch->key);
-
- dev_dbg(xpc_chan, "ch->func() returned, reason=%d, partid=%d, "
- "channel=%d\n", reason, ch->partid, ch->number);
- }
-}
-
-
-/*
- * Wait for a message entry to become available for the specified channel,
- * but don't wait any longer than 1 jiffy.
- */
-static enum xpc_retval
-xpc_allocate_msg_wait(struct xpc_channel *ch)
-{
- enum xpc_retval ret;
-
-
- if (ch->flags & XPC_C_DISCONNECTING) {
- DBUG_ON(ch->reason == xpcInterrupted); // >>> Is this true?
- return ch->reason;
- }
-
- atomic_inc(&ch->n_on_msg_allocate_wq);
- ret = interruptible_sleep_on_timeout(&ch->msg_allocate_wq, 1);
- atomic_dec(&ch->n_on_msg_allocate_wq);
-
- if (ch->flags & XPC_C_DISCONNECTING) {
- ret = ch->reason;
- DBUG_ON(ch->reason == xpcInterrupted); // >>> Is this true?
- } else if (ret == 0) {
- ret = xpcTimeout;
- } else {
- ret = xpcInterrupted;
- }
-
- return ret;
-}
-
-
-/*
- * Allocate an entry for a message from the message queue associated with the
- * specified channel.
- */
-static enum xpc_retval
-xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
- struct xpc_msg **address_of_msg)
-{
- struct xpc_msg *msg;
- enum xpc_retval ret;
- s64 put;
-
-
- /* this reference will be dropped in xpc_send_msg() */
- xpc_msgqueue_ref(ch);
-
- if (ch->flags & XPC_C_DISCONNECTING) {
- xpc_msgqueue_deref(ch);
- return ch->reason;
- }
- if (!(ch->flags & XPC_C_CONNECTED)) {
- xpc_msgqueue_deref(ch);
- return xpcNotConnected;
- }
-
-
- /*
- * Get the next available message entry from the local message queue.
- * If none are available, we'll make sure that we grab the latest
- * GP values.
- */
- ret = xpcTimeout;
-
- while (1) {
-
- put = (volatile s64) ch->w_local_GP.put;
- if (put - (volatile s64) ch->w_remote_GP.get <
- ch->local_nentries) {
-
- /* There are available message entries. We need to try
- * to secure one for ourselves. We'll do this by trying
- * to increment w_local_GP.put as long as someone else
- * doesn't beat us to it. If they do, we'll have to
- * try again.
- */
- if (cmpxchg(&ch->w_local_GP.put, put, put + 1) ==
- put) {
- /* we got the entry referenced by put */
- break;
- }
- continue; /* try again */
- }
-
-
- /*
- * There aren't any available msg entries at this time.
- *
- * In waiting for a message entry to become available,
- * we set a timeout in case the other side is not
- * sending completion IPIs. This lets us fake an IPI
- * that will cause the IPI handler to fetch the latest
- * GP values as if an IPI was sent by the other side.
- */
- if (ret == xpcTimeout) {
- xpc_IPI_send_local_msgrequest(ch);
- }
-
- if (flags & XPC_NOWAIT) {
- xpc_msgqueue_deref(ch);
- return xpcNoWait;
- }
-
- ret = xpc_allocate_msg_wait(ch);
- if (ret != xpcInterrupted && ret != xpcTimeout) {
- xpc_msgqueue_deref(ch);
- return ret;
- }
- }
-
-
- /* get the message's address and initialize it */
- msg = (struct xpc_msg *) ((u64) ch->local_msgqueue +
- (put % ch->local_nentries) * ch->msg_size);
-
-
- DBUG_ON(msg->flags != 0);
- msg->number = put;
-
- dev_dbg(xpc_chan, "w_local_GP.put changed to %ld; msg=0x%p, "
- "msg_number=%ld, partid=%d, channel=%d\n", put + 1,
- (void *) msg, msg->number, ch->partid, ch->number);
-
- *address_of_msg = msg;
-
- return xpcSuccess;
-}
-
-
-/*
- * Allocate an entry for a message from the message queue associated with the
- * specified channel. NOTE that this routine can sleep waiting for a message
- * entry to become available. To not sleep, pass in the XPC_NOWAIT flag.
- *
- * Arguments:
- *
- * partid - ID of partition to which the channel is connected.
- * ch_number - channel #.
- * flags - see xpc.h for valid flags.
- * payload - address of the allocated payload area pointer (filled in on
- * return) in which the user-defined message is constructed.
- */
-enum xpc_retval
-xpc_initiate_allocate(partid_t partid, int ch_number, u32 flags, void **payload)
-{
- struct xpc_partition *part = &xpc_partitions[partid];
- enum xpc_retval ret = xpcUnknownReason;
- struct xpc_msg *msg;
-
-
- DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
- DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
-
- *payload = NULL;
-
- if (xpc_part_ref(part)) {
- ret = xpc_allocate_msg(&part->channels[ch_number], flags, &msg);
- xpc_part_deref(part);
-
- if (msg != NULL) {
- *payload = &msg->payload;
- }
- }
-
- return ret;
-}
-
-
-/*
- * Now we actually send the messages that are ready to be sent by advancing
- * the local message queue's Put value and then send an IPI to the recipient
- * partition.
- */
-static void
-xpc_send_msgs(struct xpc_channel *ch, s64 initial_put)
-{
- struct xpc_msg *msg;
- s64 put = initial_put + 1;
- int send_IPI = 0;
-
-
- while (1) {
-
- while (1) {
- if (put == (volatile s64) ch->w_local_GP.put) {
- break;
- }
-
- msg = (struct xpc_msg *) ((u64) ch->local_msgqueue +
- (put % ch->local_nentries) * ch->msg_size);
-
- if (!(msg->flags & XPC_M_READY)) {
- break;
- }
-
- put++;
- }
-
- if (put == initial_put) {
- /* nothing's changed */
- break;
- }
-
- if (cmpxchg_rel(&ch->local_GP->put, initial_put, put) !=
- initial_put) {
- /* someone else beat us to it */
- DBUG_ON((volatile s64) ch->local_GP->put < initial_put);
- break;
- }
-
- /* we just set the new value of local_GP->put */
-
- dev_dbg(xpc_chan, "local_GP->put changed to %ld, partid=%d, "
- "channel=%d\n", put, ch->partid, ch->number);
-
- send_IPI = 1;
-
- /*
- * We need to ensure that the message referenced by
- * local_GP->put is not XPC_M_READY or that local_GP->put
- * equals w_local_GP.put, so we'll go have a look.
- */
- initial_put = put;
- }
-
- if (send_IPI) {
- xpc_IPI_send_msgrequest(ch);
- }
-}
-
-
-/*
- * Common code that does the actual sending of the message by advancing the
- * local message queue's Put value and sends an IPI to the partition the
- * message is being sent to.
- */
-static enum xpc_retval
-xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
- xpc_notify_func func, void *key)
-{
- enum xpc_retval ret = xpcSuccess;
- struct xpc_notify *notify = notify;
- s64 put, msg_number = msg->number;
-
-
- DBUG_ON(notify_type == XPC_N_CALL && func == NULL);
- DBUG_ON((((u64) msg - (u64) ch->local_msgqueue) / ch->msg_size) !=
- msg_number % ch->local_nentries);
- DBUG_ON(msg->flags & XPC_M_READY);
-
- if (ch->flags & XPC_C_DISCONNECTING) {
- /* drop the reference grabbed in xpc_allocate_msg() */
- xpc_msgqueue_deref(ch);
- return ch->reason;
- }
-
- if (notify_type != 0) {
- /*
- * Tell the remote side to send an ACK interrupt when the
- * message has been delivered.
- */
- msg->flags |= XPC_M_INTERRUPT;
-
- atomic_inc(&ch->n_to_notify);
-
- notify = &ch->notify_queue[msg_number % ch->local_nentries];
- notify->func = func;
- notify->key = key;
- notify->type = notify_type;
-
- // >>> is a mb() needed here?
-
- if (ch->flags & XPC_C_DISCONNECTING) {
- /*
- * An error occurred between our last error check and
- * this one. We will try to clear the type field from
- * the notify entry. If we succeed then
- * xpc_disconnect_channel() didn't already process
- * the notify entry.
- */
- if (cmpxchg(&notify->type, notify_type, 0) ==
- notify_type) {
- atomic_dec(&ch->n_to_notify);
- ret = ch->reason;
- }
-
- /* drop the reference grabbed in xpc_allocate_msg() */
- xpc_msgqueue_deref(ch);
- return ret;
- }
- }
-
- msg->flags |= XPC_M_READY;
-
- /*
- * The preceding store of msg->flags must occur before the following
- * load of ch->local_GP->put.
- */
- mb();
-
- /* see if the message is next in line to be sent, if so send it */
-
- put = ch->local_GP->put;
- if (put == msg_number) {
- xpc_send_msgs(ch, put);
- }
-
- /* drop the reference grabbed in xpc_allocate_msg() */
- xpc_msgqueue_deref(ch);
- return ret;
-}
-
-
-/*
- * Send a message previously allocated using xpc_initiate_allocate() on the
- * specified channel connected to the specified partition.
- *
- * This routine will not wait for the message to be received, nor will
- * notification be given when it does happen. Once this routine has returned
- * the message entry allocated via xpc_initiate_allocate() is no longer
- * accessible to the caller.
- *
- * This routine, although called by users, does not call xpc_part_ref() to
- * ensure that the partition infrastructure is in place. It relies on the
- * fact that we called xpc_msgqueue_ref() in xpc_allocate_msg().
- *
- * Arguments:
- *
- * partid - ID of partition to which the channel is connected.
- * ch_number - channel # to send message on.
- * payload - pointer to the payload area allocated via
- * xpc_initiate_allocate().
- */
-enum xpc_retval
-xpc_initiate_send(partid_t partid, int ch_number, void *payload)
-{
- struct xpc_partition *part = &xpc_partitions[partid];
- struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
- enum xpc_retval ret;
-
-
- dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *) msg,
- partid, ch_number);
-
- DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
- DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
- DBUG_ON(msg == NULL);
-
- ret = xpc_send_msg(&part->channels[ch_number], msg, 0, NULL, NULL);
-
- return ret;
-}
-
-
-/*
- * Send a message previously allocated using xpc_initiate_allocate on the
- * specified channel connected to the specified partition.
- *
- * This routine will not wait for the message to be sent. Once this routine
- * has returned the message entry allocated via xpc_initiate_allocate() is no
- * longer accessible to the caller.
- *
- * Once the remote end of the channel has received the message, the function
- * passed as an argument to xpc_initiate_send_notify() will be called. This
- * allows the sender to free up or re-use any buffers referenced by the
- * message, but does NOT mean the message has been processed at the remote
- * end by a receiver.
- *
- * If this routine returns an error, the caller's function will NOT be called.
- *
- * This routine, although called by users, does not call xpc_part_ref() to
- * ensure that the partition infrastructure is in place. It relies on the
- * fact that we called xpc_msgqueue_ref() in xpc_allocate_msg().
- *
- * Arguments:
- *
- * partid - ID of partition to which the channel is connected.
- * ch_number - channel # to send message on.
- * payload - pointer to the payload area allocated via
- * xpc_initiate_allocate().
- * func - function to call with asynchronous notification of message
- * receipt. THIS FUNCTION MUST BE NON-BLOCKING.
- * key - user-defined key to be passed to the function when it's called.
- */
-enum xpc_retval
-xpc_initiate_send_notify(partid_t partid, int ch_number, void *payload,
- xpc_notify_func func, void *key)
-{
- struct xpc_partition *part = &xpc_partitions[partid];
- struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
- enum xpc_retval ret;
-
-
- dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *) msg,
- partid, ch_number);
-
- DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
- DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
- DBUG_ON(msg == NULL);
- DBUG_ON(func == NULL);
-
- ret = xpc_send_msg(&part->channels[ch_number], msg, XPC_N_CALL,
- func, key);
- return ret;
-}
-
-
-static struct xpc_msg *
-xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
-{
- struct xpc_partition *part = &xpc_partitions[ch->partid];
- struct xpc_msg *remote_msg, *msg;
- u32 msg_index, nmsgs;
- u64 msg_offset;
- enum xpc_retval ret;
-
-
- if (mutex_lock_interruptible(&ch->msg_to_pull_mutex) != 0) {
- /* we were interrupted by a signal */
- return NULL;
- }
-
- while (get >= ch->next_msg_to_pull) {
-
- /* pull as many messages as are ready and able to be pulled */
-
- msg_index = ch->next_msg_to_pull % ch->remote_nentries;
-
- DBUG_ON(ch->next_msg_to_pull >=
- (volatile s64) ch->w_remote_GP.put);
- nmsgs = (volatile s64) ch->w_remote_GP.put -
- ch->next_msg_to_pull;
- if (msg_index + nmsgs > ch->remote_nentries) {
- /* ignore the ones that wrap the msg queue for now */
- nmsgs = ch->remote_nentries - msg_index;
- }
-
- msg_offset = msg_index * ch->msg_size;
- msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue +
- msg_offset);
- remote_msg = (struct xpc_msg *) (ch->remote_msgqueue_pa +
- msg_offset);
-
- if ((ret = xpc_pull_remote_cachelines(part, msg, remote_msg,
- nmsgs * ch->msg_size)) != xpcSuccess) {
-
- dev_dbg(xpc_chan, "failed to pull %d msgs starting with"
- " msg %ld from partition %d, channel=%d, "
- "ret=%d\n", nmsgs, ch->next_msg_to_pull,
- ch->partid, ch->number, ret);
-
- XPC_DEACTIVATE_PARTITION(part, ret);
-
- mutex_unlock(&ch->msg_to_pull_mutex);
- return NULL;
- }
-
- mb(); /* >>> this may not be needed, we're not sure */
-
- ch->next_msg_to_pull += nmsgs;
- }
-
- mutex_unlock(&ch->msg_to_pull_mutex);
-
- /* return the message we were looking for */
- msg_offset = (get % ch->remote_nentries) * ch->msg_size;
- msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue + msg_offset);
-
- return msg;
-}
-
-
-/*
- * Get a message to be delivered.
- */
-static struct xpc_msg *
-xpc_get_deliverable_msg(struct xpc_channel *ch)
-{
- struct xpc_msg *msg = NULL;
- s64 get;
-
-
- do {
- if ((volatile u32) ch->flags & XPC_C_DISCONNECTING) {
- break;
- }
-
- get = (volatile s64) ch->w_local_GP.get;
- if (get == (volatile s64) ch->w_remote_GP.put) {
- break;
- }
-
- /* There are messages waiting to be pulled and delivered.
- * We need to try to secure one for ourselves. We'll do this
- * by trying to increment w_local_GP.get and hope that no one
-		 * else beats us to it. If they do, we'll simply have
- * to try again for the next one.
- */
-
- if (cmpxchg(&ch->w_local_GP.get, get, get + 1) == get) {
- /* we got the entry referenced by get */
-
- dev_dbg(xpc_chan, "w_local_GP.get changed to %ld, "
- "partid=%d, channel=%d\n", get + 1,
- ch->partid, ch->number);
-
- /* pull the message from the remote partition */
-
- msg = xpc_pull_remote_msg(ch, get);
-
- DBUG_ON(msg != NULL && msg->number != get);
- DBUG_ON(msg != NULL && (msg->flags & XPC_M_DONE));
- DBUG_ON(msg != NULL && !(msg->flags & XPC_M_READY));
-
- break;
- }
-
- } while (1);
-
- return msg;
-}
-
-
-/*
- * Deliver a message to its intended recipient.
- */
-void
-xpc_deliver_msg(struct xpc_channel *ch)
-{
- struct xpc_msg *msg;
-
-
- if ((msg = xpc_get_deliverable_msg(ch)) != NULL) {
-
- /*
- * This ref is taken to protect the payload itself from being
- * freed before the user is finished with it, which the user
- * indicates by calling xpc_initiate_received().
- */
- xpc_msgqueue_ref(ch);
-
- atomic_inc(&ch->kthreads_active);
-
- if (ch->func != NULL) {
- dev_dbg(xpc_chan, "ch->func() called, msg=0x%p, "
- "msg_number=%ld, partid=%d, channel=%d\n",
- (void *) msg, msg->number, ch->partid,
- ch->number);
-
- /* deliver the message to its intended recipient */
- ch->func(xpcMsgReceived, ch->partid, ch->number,
- &msg->payload, ch->key);
-
- dev_dbg(xpc_chan, "ch->func() returned, msg=0x%p, "
- "msg_number=%ld, partid=%d, channel=%d\n",
- (void *) msg, msg->number, ch->partid,
- ch->number);
- }
-
- atomic_dec(&ch->kthreads_active);
- }
-}
-
-
-/*
- * Now we actually acknowledge the messages that have been delivered and ack'd
- * by advancing the cached remote message queue's Get value and if requested
- * send an IPI to the message sender's partition.
- */
-static void
-xpc_acknowledge_msgs(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
-{
- struct xpc_msg *msg;
- s64 get = initial_get + 1;
- int send_IPI = 0;
-
-
- while (1) {
-
- while (1) {
- if (get == (volatile s64) ch->w_local_GP.get) {
- break;
- }
-
- msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue +
- (get % ch->remote_nentries) * ch->msg_size);
-
- if (!(msg->flags & XPC_M_DONE)) {
- break;
- }
-
- msg_flags |= msg->flags;
- get++;
- }
-
- if (get == initial_get) {
- /* nothing's changed */
- break;
- }
-
- if (cmpxchg_rel(&ch->local_GP->get, initial_get, get) !=
- initial_get) {
- /* someone else beat us to it */
- DBUG_ON((volatile s64) ch->local_GP->get <=
- initial_get);
- break;
- }
-
- /* we just set the new value of local_GP->get */
-
- dev_dbg(xpc_chan, "local_GP->get changed to %ld, partid=%d, "
- "channel=%d\n", get, ch->partid, ch->number);
-
- send_IPI = (msg_flags & XPC_M_INTERRUPT);
-
- /*
- * We need to ensure that the message referenced by
- * local_GP->get is not XPC_M_DONE or that local_GP->get
- * equals w_local_GP.get, so we'll go have a look.
- */
- initial_get = get;
- }
-
- if (send_IPI) {
- xpc_IPI_send_msgrequest(ch);
- }
-}
-
-
-/*
- * Acknowledge receipt of a delivered message.
- *
- * If a message has XPC_M_INTERRUPT set, send an interrupt to the partition
- * that sent the message.
- *
- * This function, although called by users, does not call xpc_part_ref() to
- * ensure that the partition infrastructure is in place. It relies on the
- * fact that we called xpc_msgqueue_ref() in xpc_deliver_msg().
- *
- * Arguments:
- *
- * partid - ID of partition to which the channel is connected.
- * ch_number - channel # message received on.
- * payload - pointer to the payload area allocated via
- * xpc_initiate_allocate().
- */
-void
-xpc_initiate_received(partid_t partid, int ch_number, void *payload)
-{
- struct xpc_partition *part = &xpc_partitions[partid];
- struct xpc_channel *ch;
- struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
- s64 get, msg_number = msg->number;
-
-
- DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
- DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
-
- ch = &part->channels[ch_number];
-
- dev_dbg(xpc_chan, "msg=0x%p, msg_number=%ld, partid=%d, channel=%d\n",
- (void *) msg, msg_number, ch->partid, ch->number);
-
- DBUG_ON((((u64) msg - (u64) ch->remote_msgqueue) / ch->msg_size) !=
- msg_number % ch->remote_nentries);
- DBUG_ON(msg->flags & XPC_M_DONE);
-
- msg->flags |= XPC_M_DONE;
-
- /*
- * The preceding store of msg->flags must occur before the following
- * load of ch->local_GP->get.
- */
- mb();
-
- /*
- * See if this message is next in line to be acknowledged as having
- * been delivered.
- */
- get = ch->local_GP->get;
- if (get == msg_number) {
- xpc_acknowledge_msgs(ch, get, msg->flags);
- }
-
- /* the call to xpc_msgqueue_ref() was done by xpc_deliver_msg() */
- xpc_msgqueue_deref(ch);
-}
-
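
A note on the mb() in xpc_initiate_received() above: the XPC_M_DONE store has to be visible before local_GP->get is sampled, otherwise a receiver could miss its turn to acknowledge. Roughly the same store/fence/load ordering, sketched with C11 fences rather than the kernel primitives (names are placeholders):

    #include <stdatomic.h>
    #include <stdint.h>

    extern _Atomic int64_t local_get;           /* shared acknowledge pointer */
    extern void ack_advance(int64_t from);      /* advances local_get over DONE slots */

    void mark_received(_Atomic uint8_t *flags, int64_t msg_number)
    {
        atomic_fetch_or(flags, 0x01 /* DONE */);

        /* The DONE store must be globally visible before local_get is
         * sampled, mirroring the mb() in the original code. */
        atomic_thread_fence(memory_order_seq_cst);

        if (atomic_load(&local_get) == msg_number)
            ack_advance(msg_number);            /* this message is next in line */
    }
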
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c
deleted file mode 100644
index c75f8aeefc2..00000000000
--- a/arch/ia64/sn/kernel/xpc_main.c
+++ /dev/null
@@ -1,1416 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (c) 2004-2006 Silicon Graphics, Inc. All Rights Reserved.
- */
-
-
-/*
- * Cross Partition Communication (XPC) support - standard version.
- *
- * XPC provides a message passing capability that crosses partition
- * boundaries. This module is made up of two parts:
- *
- * partition This part detects the presence/absence of other
- * partitions. It provides a heartbeat and monitors
- * the heartbeats of other partitions.
- *
- * channel This part manages the channels and sends/receives
- * messages across them to/from other partitions.
- *
- * There are a couple of additional functions residing in XP, which
- * provide an interface to XPC for its users.
- *
- *
- * Caveats:
- *
- * . We currently have no way to determine which nasid an IPI came
- * from. Thus, xpc_IPI_send() does a remote AMO write followed by
- * an IPI. The AMO indicates where data is to be pulled from, so
- * after the IPI arrives, the remote partition checks the AMO word.
- * The IPI can actually arrive before the AMO however, so other code
- * must periodically check for this case. Also, remote AMO operations
- * do not reliably time out. Thus we do a remote PIO read solely to
- * know whether the remote partition is down and whether we should
- * stop sending IPIs to it. This remote PIO read operation is set up
- * in a special nofault region so SAL knows to ignore (and cleanup)
- * any errors due to the remote AMO write, PIO read, and/or PIO
- * write operations.
- *
- * If/when new hardware solves this IPI problem, we should abandon
- * the current approach.
- *
- */
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/syscalls.h>
-#include <linux/cache.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/reboot.h>
-#include <linux/completion.h>
-#include <asm/sn/intr.h>
-#include <asm/sn/sn_sal.h>
-#include <asm/kdebug.h>
-#include <asm/uaccess.h>
-#include <asm/sn/xpc.h>
-
-
-/* define two XPC debug device structures to be used with dev_dbg() et al */
-
-struct device_driver xpc_dbg_name = {
- .name = "xpc"
-};
-
-struct device xpc_part_dbg_subname = {
- .bus_id = {0}, /* set to "part" at xpc_init() time */
- .driver = &xpc_dbg_name
-};
-
-struct device xpc_chan_dbg_subname = {
- .bus_id = {0}, /* set to "chan" at xpc_init() time */
- .driver = &xpc_dbg_name
-};
-
-struct device *xpc_part = &xpc_part_dbg_subname;
-struct device *xpc_chan = &xpc_chan_dbg_subname;
-
-
-static int xpc_kdebug_ignore;
-
-
-/* systune related variables for /proc/sys directories */
-
-static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL;
-static int xpc_hb_min_interval = 1;
-static int xpc_hb_max_interval = 10;
-
-static int xpc_hb_check_interval = XPC_HB_CHECK_DEFAULT_INTERVAL;
-static int xpc_hb_check_min_interval = 10;
-static int xpc_hb_check_max_interval = 120;
-
-int xpc_disengage_request_timelimit = XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT;
-static int xpc_disengage_request_min_timelimit = 0;
-static int xpc_disengage_request_max_timelimit = 120;
-
-static ctl_table xpc_sys_xpc_hb_dir[] = {
- {
- 1,
- "hb_interval",
- &xpc_hb_interval,
- sizeof(int),
- 0644,
- NULL,
- &proc_dointvec_minmax,
- &sysctl_intvec,
- NULL,
- &xpc_hb_min_interval,
- &xpc_hb_max_interval
- },
- {
- 2,
- "hb_check_interval",
- &xpc_hb_check_interval,
- sizeof(int),
- 0644,
- NULL,
- &proc_dointvec_minmax,
- &sysctl_intvec,
- NULL,
- &xpc_hb_check_min_interval,
- &xpc_hb_check_max_interval
- },
- {0}
-};
-static ctl_table xpc_sys_xpc_dir[] = {
- {
- 1,
- "hb",
- NULL,
- 0,
- 0555,
- xpc_sys_xpc_hb_dir
- },
- {
- 2,
- "disengage_request_timelimit",
- &xpc_disengage_request_timelimit,
- sizeof(int),
- 0644,
- NULL,
- &proc_dointvec_minmax,
- &sysctl_intvec,
- NULL,
- &xpc_disengage_request_min_timelimit,
- &xpc_disengage_request_max_timelimit
- },
- {0}
-};
-static ctl_table xpc_sys_dir[] = {
- {
- 1,
- "xpc",
- NULL,
- 0,
- 0555,
- xpc_sys_xpc_dir
- },
- {0}
-};
-static struct ctl_table_header *xpc_sysctl;
-
-/* non-zero if any remote partition disengage request was timed out */
-int xpc_disengage_request_timedout;
-
-/* #of IRQs received */
-static atomic_t xpc_act_IRQ_rcvd;
-
-/* IRQ handler notifies this wait queue on receipt of an IRQ */
-static DECLARE_WAIT_QUEUE_HEAD(xpc_act_IRQ_wq);
-
-static unsigned long xpc_hb_check_timeout;
-
-/* notification that the xpc_hb_checker thread has exited */
-static DECLARE_COMPLETION(xpc_hb_checker_exited);
-
-/* notification that the xpc_discovery thread has exited */
-static DECLARE_COMPLETION(xpc_discovery_exited);
-
-
-static struct timer_list xpc_hb_timer;
-
-
-static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *);
-
-
-static int xpc_system_reboot(struct notifier_block *, unsigned long, void *);
-static struct notifier_block xpc_reboot_notifier = {
- .notifier_call = xpc_system_reboot,
-};
-
-static int xpc_system_die(struct notifier_block *, unsigned long, void *);
-static struct notifier_block xpc_die_notifier = {
- .notifier_call = xpc_system_die,
-};
-
-
-/*
- * Timer function to enforce the timelimit on the partition disengage request.
- */
-static void
-xpc_timeout_partition_disengage_request(unsigned long data)
-{
- struct xpc_partition *part = (struct xpc_partition *) data;
-
-
- DBUG_ON(jiffies < part->disengage_request_timeout);
-
- (void) xpc_partition_disengaged(part);
-
- DBUG_ON(part->disengage_request_timeout != 0);
- DBUG_ON(xpc_partition_engaged(1UL << XPC_PARTID(part)) != 0);
-}
-
-
-/*
- * Notify the heartbeat check thread that an IRQ has been received.
- */
-static irqreturn_t
-xpc_act_IRQ_handler(int irq, void *dev_id, struct pt_regs *regs)
-{
- atomic_inc(&xpc_act_IRQ_rcvd);
- wake_up_interruptible(&xpc_act_IRQ_wq);
- return IRQ_HANDLED;
-}
-
-
-/*
- * Timer to produce the heartbeat. The timer structure's function is
- * already set when this is initially called. A tunable is used to
- * specify when the next timeout should occur.
- */
-static void
-xpc_hb_beater(unsigned long dummy)
-{
- xpc_vars->heartbeat++;
-
- if (jiffies >= xpc_hb_check_timeout) {
- wake_up_interruptible(&xpc_act_IRQ_wq);
- }
-
- xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ);
- add_timer(&xpc_hb_timer);
-}
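
xpc_hb_beater() is a self-rearming timer: each expiry bumps the heartbeat word, wakes the checker when the check deadline has passed, and schedules the next expiry. A rough pthread equivalent of that shape, assuming a fixed check cadence (not the kernel timer API):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <unistd.h>

    static _Atomic long heartbeat;
    static pthread_mutex_t check_mtx = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  check_wq  = PTHREAD_COND_INITIALIZER;

    static void *hb_beater(void *arg)
    {
        int interval = *(int *)arg;             /* seconds between beats */
        const int beats_per_check = 5;          /* assumed check cadence */

        for (;;) {
            long hb = atomic_fetch_add(&heartbeat, 1) + 1;

            if (hb % beats_per_check == 0) {
                /* time to wake the heartbeat checker thread */
                pthread_mutex_lock(&check_mtx);
                pthread_cond_signal(&check_wq);
                pthread_mutex_unlock(&check_mtx);
            }
            sleep(interval);                    /* re-arm for the next beat */
        }
        return NULL;
    }
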
-
-
-/*
- * This thread is responsible for nearly all of the partition
- * activation/deactivation.
- */
-static int
-xpc_hb_checker(void *ignore)
-{
- int last_IRQ_count = 0;
- int new_IRQ_count;
- int force_IRQ=0;
-
-
- /* this thread was marked active by xpc_hb_init() */
-
- daemonize(XPC_HB_CHECK_THREAD_NAME);
-
- set_cpus_allowed(current, cpumask_of_cpu(XPC_HB_CHECK_CPU));
-
- xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
-
- while (!(volatile int) xpc_exiting) {
-
- dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have "
- "been received\n",
- (int) (xpc_hb_check_timeout - jiffies),
- atomic_read(&xpc_act_IRQ_rcvd) - last_IRQ_count);
-
-
- /* checking of remote heartbeats is skewed by IRQ handling */
- if (jiffies >= xpc_hb_check_timeout) {
- dev_dbg(xpc_part, "checking remote heartbeats\n");
- xpc_check_remote_hb();
-
- /*
- * We need to periodically recheck to ensure no
- * IPI/AMO pairs have been missed. That check
- * must always reset xpc_hb_check_timeout.
- */
- force_IRQ = 1;
- }
-
-
- /* check for outstanding IRQs */
- new_IRQ_count = atomic_read(&xpc_act_IRQ_rcvd);
- if (last_IRQ_count < new_IRQ_count || force_IRQ != 0) {
- force_IRQ = 0;
-
- dev_dbg(xpc_part, "found an IRQ to process; will be "
- "resetting xpc_hb_check_timeout\n");
-
- last_IRQ_count += xpc_identify_act_IRQ_sender();
- if (last_IRQ_count < new_IRQ_count) {
- /* retry once to help avoid missing AMO */
- (void) xpc_identify_act_IRQ_sender();
- }
- last_IRQ_count = new_IRQ_count;
-
- xpc_hb_check_timeout = jiffies +
- (xpc_hb_check_interval * HZ);
- }
-
- /* wait for IRQ or timeout */
- (void) wait_event_interruptible(xpc_act_IRQ_wq,
- (last_IRQ_count < atomic_read(&xpc_act_IRQ_rcvd) ||
- jiffies >= xpc_hb_check_timeout ||
- (volatile int) xpc_exiting));
- }
-
- dev_dbg(xpc_part, "heartbeat checker is exiting\n");
-
-
- /* mark this thread as having exited */
- complete(&xpc_hb_checker_exited);
- return 0;
-}
-
-
-/*
- * This thread will attempt to discover other partitions to activate
- * based on info provided by SAL. This new thread is short lived and
- * will exit once discovery is complete.
- */
-static int
-xpc_initiate_discovery(void *ignore)
-{
- daemonize(XPC_DISCOVERY_THREAD_NAME);
-
- xpc_discovery();
-
- dev_dbg(xpc_part, "discovery thread is exiting\n");
-
- /* mark this thread as having exited */
- complete(&xpc_discovery_exited);
- return 0;
-}
-
-
-/*
- * Establish first contact with the remote partition. This involves pulling
- * the XPC per partition variables from the remote partition and waiting for
- * the remote partition to pull ours.
- */
-static enum xpc_retval
-xpc_make_first_contact(struct xpc_partition *part)
-{
- enum xpc_retval ret;
-
-
- while ((ret = xpc_pull_remote_vars_part(part)) != xpcSuccess) {
- if (ret != xpcRetry) {
- XPC_DEACTIVATE_PARTITION(part, ret);
- return ret;
- }
-
- dev_dbg(xpc_chan, "waiting to make first contact with "
- "partition %d\n", XPC_PARTID(part));
-
- /* wait a 1/4 of a second or so */
- (void) msleep_interruptible(250);
-
- if (part->act_state == XPC_P_DEACTIVATING) {
- return part->reason;
- }
- }
-
- return xpc_mark_partition_active(part);
-}
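
The first-contact loop above is a plain retry pattern: keep pulling the remote variables while the other side reports retry, sleep about a quarter second between attempts, and bail out if the partition starts deactivating. Condensed into a user-space sketch with made-up return codes and helpers:

    #include <stdbool.h>
    #include <unistd.h>

    enum retval { RET_SUCCESS, RET_RETRY, RET_FAILED };

    extern enum retval pull_remote_vars(int partid);    /* hypothetical helper */
    extern bool partition_deactivating(int partid);     /* hypothetical helper */

    enum retval make_first_contact(int partid)
    {
        enum retval ret;

        while ((ret = pull_remote_vars(partid)) != RET_SUCCESS) {
            if (ret != RET_RETRY)
                return ret;                 /* hard failure: give up */

            usleep(250 * 1000);             /* wait ~1/4 second and retry */

            if (partition_deactivating(partid))
                return RET_FAILED;          /* the other side is going away */
        }
        return RET_SUCCESS;
    }
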
-
-
-/*
- * The first kthread assigned to a newly activated partition is the one
- * created by XPC HB with which it calls xpc_partition_up(). XPC hangs on to
- * that kthread until the partition is brought down, at which time that kthread
- * returns back to XPC HB. (The return of that kthread will signify to XPC HB
- * that XPC has dismantled all communication infrastructure for the associated
- * partition.) This kthread becomes the channel manager for that partition.
- *
- * Each active partition has a channel manager, who, besides connecting and
- * disconnecting channels, will ensure that each of the partition's connected
- * channels has the required number of assigned kthreads to get the work done.
- */
-static void
-xpc_channel_mgr(struct xpc_partition *part)
-{
- while (part->act_state != XPC_P_DEACTIVATING ||
- atomic_read(&part->nchannels_active) > 0 ||
- !xpc_partition_disengaged(part)) {
-
- xpc_process_channel_activity(part);
-
-
- /*
- * Wait until we've been requested to activate kthreads or
- * all of the channel's message queues have been torn down or
- * a signal is pending.
- *
- * The channel_mgr_requests is set to 1 after being awakened.
- * This is done to prevent the channel mgr from making one pass
- * through the loop for each request, since it will
- * be servicing all the requests in one pass. The reason it's
- * set to 1 instead of 0 is so that other kthreads will know
- * that the channel mgr is running and won't bother trying to
- * wake it up.
- */
- atomic_dec(&part->channel_mgr_requests);
- (void) wait_event_interruptible(part->channel_mgr_wq,
- (atomic_read(&part->channel_mgr_requests) > 0 ||
- (volatile u64) part->local_IPI_amo != 0 ||
- ((volatile u8) part->act_state ==
- XPC_P_DEACTIVATING &&
- atomic_read(&part->nchannels_active) == 0 &&
- xpc_partition_disengaged(part))));
- atomic_set(&part->channel_mgr_requests, 1);
-
- // >>> Does it need to wakeup periodically as well? In case we
- // >>> miscalculated the #of kthreads to wakeup or create?
- }
-}
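
The request-coalescing trick described in the comment above can be isolated: the manager services everything in one pass, so after waking it resets the pending counter to 1 rather than 0, which tells other threads it is already running and does not need another wakeup. A small pthread sketch of that counter discipline (illustrative only):

    #include <pthread.h>

    static int requests = 1;                 /* 1 = manager running or armed */
    static pthread_mutex_t mgr_mtx = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  mgr_wq  = PTHREAD_COND_INITIALIZER;

    void post_request(void)
    {
        pthread_mutex_lock(&mgr_mtx);
        if (++requests == 1)                 /* was 0: manager is asleep */
            pthread_cond_signal(&mgr_wq);
        pthread_mutex_unlock(&mgr_mtx);
    }

    void manager_loop(void (*service_all)(void))
    {
        for (;;) {
            service_all();                   /* one pass handles every queued request */

            pthread_mutex_lock(&mgr_mtx);
            requests--;                      /* consume the wakeup we acted on */
            while (requests <= 0)
                pthread_cond_wait(&mgr_wq, &mgr_mtx);
            requests = 1;                    /* coalesce whatever piled up meanwhile */
            pthread_mutex_unlock(&mgr_mtx);
        }
    }
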
-
-
-/*
- * When XPC HB determines that a partition has come up, it will create a new
- * kthread and that kthread will call this function to attempt to set up the
- * basic infrastructure used for Cross Partition Communication with the newly
- * upped partition.
- *
- * The kthread that was created by XPC HB and which set up the XPC
- * infrastructure will remain assigned to the partition until the partition
- * goes down, at which time the kthread will tear down the XPC infrastructure
- * and then exit.
- *
- * XPC HB will put the remote partition's XPC per partition specific variables
- * physical address into xpc_partitions[partid].remote_vars_part_pa prior to
- * calling xpc_partition_up().
- */
-static void
-xpc_partition_up(struct xpc_partition *part)
-{
- DBUG_ON(part->channels != NULL);
-
- dev_dbg(xpc_chan, "activating partition %d\n", XPC_PARTID(part));
-
- if (xpc_setup_infrastructure(part) != xpcSuccess) {
- return;
- }
-
- /*
- * The kthread that XPC HB called us with will become the
- * channel manager for this partition. It will not return
- * back to XPC HB until the partition's XPC infrastructure
- * has been dismantled.
- */
-
- (void) xpc_part_ref(part); /* this will always succeed */
-
- if (xpc_make_first_contact(part) == xpcSuccess) {
- xpc_channel_mgr(part);
- }
-
- xpc_part_deref(part);
-
- xpc_teardown_infrastructure(part);
-}
-
-
-static int
-xpc_activating(void *__partid)
-{
- partid_t partid = (u64) __partid;
- struct xpc_partition *part = &xpc_partitions[partid];
- unsigned long irq_flags;
- struct sched_param param = { sched_priority: MAX_RT_PRIO - 1 };
- int ret;
-
-
- DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
-
- spin_lock_irqsave(&part->act_lock, irq_flags);
-
- if (part->act_state == XPC_P_DEACTIVATING) {
- part->act_state = XPC_P_INACTIVE;
- spin_unlock_irqrestore(&part->act_lock, irq_flags);
- part->remote_rp_pa = 0;
- return 0;
- }
-
- /* indicate the thread is activating */
- DBUG_ON(part->act_state != XPC_P_ACTIVATION_REQ);
- part->act_state = XPC_P_ACTIVATING;
-
- XPC_SET_REASON(part, 0, 0);
- spin_unlock_irqrestore(&part->act_lock, irq_flags);
-
- dev_dbg(xpc_part, "bringing partition %d up\n", partid);
-
- daemonize("xpc%02d", partid);
-
- /*
- * This thread needs to run at a realtime priority to prevent a
- * significant performance degradation.
- */
- ret = sched_setscheduler(current, SCHED_FIFO, &param);
- if (ret != 0) {
- dev_warn(xpc_part, "unable to set pid %d to a realtime "
- "priority, ret=%d\n", current->pid, ret);
- }
-
- /* allow this thread and its children to run on any CPU */
- set_cpus_allowed(current, CPU_MASK_ALL);
-
- /*
- * Register the remote partition's AMOs with SAL so it can handle
- * and cleanup errors within that address range should the remote
- * partition go down. We don't unregister this range because it is
- * difficult to tell when outstanding writes to the remote partition
- * are finished and thus when it is safe to unregister. This should
- * not result in wasted space in the SAL xp_addr_region table because
- * we should get the same page for remote_amos_page_pa after module
- * reloads and system reboots.
- */
- if (sn_register_xp_addr_region(part->remote_amos_page_pa,
- PAGE_SIZE, 1) < 0) {
- dev_warn(xpc_part, "xpc_partition_up(%d) failed to register "
- "xp_addr region\n", partid);
-
- spin_lock_irqsave(&part->act_lock, irq_flags);
- part->act_state = XPC_P_INACTIVE;
- XPC_SET_REASON(part, xpcPhysAddrRegFailed, __LINE__);
- spin_unlock_irqrestore(&part->act_lock, irq_flags);
- part->remote_rp_pa = 0;
- return 0;
- }
-
- xpc_allow_hb(partid, xpc_vars);
- xpc_IPI_send_activated(part);
-
-
- /*
- * xpc_partition_up() holds this thread and marks this partition as
- * XPC_P_ACTIVE by calling xpc_hb_mark_active().
- */
- (void) xpc_partition_up(part);
-
- xpc_disallow_hb(partid, xpc_vars);
- xpc_mark_partition_inactive(part);
-
- if (part->reason == xpcReactivating) {
- /* interrupting ourselves results in activating partition */
- xpc_IPI_send_reactivate(part);
- }
-
- return 0;
-}
-
-
-void
-xpc_activate_partition(struct xpc_partition *part)
-{
- partid_t partid = XPC_PARTID(part);
- unsigned long irq_flags;
- pid_t pid;
-
-
- spin_lock_irqsave(&part->act_lock, irq_flags);
-
- pid = kernel_thread(xpc_activating, (void *) ((u64) partid), 0);
-
- DBUG_ON(part->act_state != XPC_P_INACTIVE);
-
- if (pid > 0) {
- part->act_state = XPC_P_ACTIVATION_REQ;
- XPC_SET_REASON(part, xpcCloneKThread, __LINE__);
- } else {
- XPC_SET_REASON(part, xpcCloneKThreadFailed, __LINE__);
- }
-
- spin_unlock_irqrestore(&part->act_lock, irq_flags);
-}
-
-
-/*
- * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified
- * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more
- * than one partition, we use an AMO_t structure per partition to indicate
- * whether a partition has sent an IPI or not. >>> If it has, then wake up the
- * associated kthread to handle it.
- *
- * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IPIs sent by XPC
- * running on other partitions.
- *
- * Noteworthy Arguments:
- *
- * irq - Interrupt ReQuest number. NOT USED.
- *
- * dev_id - partid of IPI's potential sender.
- *
- * regs - processor's context before the processor entered
- * interrupt code. NOT USED.
- */
-irqreturn_t
-xpc_notify_IRQ_handler(int irq, void *dev_id, struct pt_regs *regs)
-{
- partid_t partid = (partid_t) (u64) dev_id;
- struct xpc_partition *part = &xpc_partitions[partid];
-
-
- DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
-
- if (xpc_part_ref(part)) {
- xpc_check_for_channel_activity(part);
-
- xpc_part_deref(part);
- }
- return IRQ_HANDLED;
-}
-
-
-/*
- * Check to see if xpc_notify_IRQ_handler() dropped any IPIs on the floor
- * because the write to their associated IPI amo completed after the IRQ/IPI
- * was received.
- */
-void
-xpc_dropped_IPI_check(struct xpc_partition *part)
-{
- if (xpc_part_ref(part)) {
- xpc_check_for_channel_activity(part);
-
- part->dropped_IPI_timer.expires = jiffies +
- XPC_P_DROPPED_IPI_WAIT;
- add_timer(&part->dropped_IPI_timer);
- xpc_part_deref(part);
- }
-}
-
-
-void
-xpc_activate_kthreads(struct xpc_channel *ch, int needed)
-{
- int idle = atomic_read(&ch->kthreads_idle);
- int assigned = atomic_read(&ch->kthreads_assigned);
- int wakeup;
-
-
- DBUG_ON(needed <= 0);
-
- if (idle > 0) {
- wakeup = (needed > idle) ? idle : needed;
- needed -= wakeup;
-
- dev_dbg(xpc_chan, "wakeup %d idle kthreads, partid=%d, "
- "channel=%d\n", wakeup, ch->partid, ch->number);
-
- /* only wakeup the requested number of kthreads */
- wake_up_nr(&ch->idle_wq, wakeup);
- }
-
- if (needed <= 0) {
- return;
- }
-
- if (needed + assigned > ch->kthreads_assigned_limit) {
- needed = ch->kthreads_assigned_limit - assigned;
- // >>>should never be less than 0
- if (needed <= 0) {
- return;
- }
- }
-
- dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n",
- needed, ch->partid, ch->number);
-
- xpc_create_kthreads(ch, needed);
-}
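
xpc_activate_kthreads() prefers waking idle workers over creating new ones and clamps creation to the channel's assignment limit. The arithmetic reduces to something like the following helper (names are placeholders):

    /* Decide how many workers to wake and how many to create for a channel
     * that needs 'needed' additional workers. */
    struct activation { int wake; int create; };

    struct activation plan_workers(int needed, int idle, int assigned, int limit)
    {
        struct activation a = { 0, 0 };

        a.wake = needed < idle ? needed : idle;     /* reuse idle workers first */
        needed -= a.wake;

        if (needed > 0 && assigned + needed > limit)
            needed = limit - assigned;              /* respect the assignment cap */

        a.create = needed > 0 ? needed : 0;
        return a;
    }
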
-
-
-/*
- * This function is where XPC's kthreads wait for messages to deliver.
- */
-static void
-xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
-{
- do {
- /* deliver messages to their intended recipients */
-
- while ((volatile s64) ch->w_local_GP.get <
- (volatile s64) ch->w_remote_GP.put &&
- !((volatile u32) ch->flags &
- XPC_C_DISCONNECTING)) {
- xpc_deliver_msg(ch);
- }
-
- if (atomic_inc_return(&ch->kthreads_idle) >
- ch->kthreads_idle_limit) {
- /* too many idle kthreads on this channel */
- atomic_dec(&ch->kthreads_idle);
- break;
- }
-
- dev_dbg(xpc_chan, "idle kthread calling "
- "wait_event_interruptible_exclusive()\n");
-
- (void) wait_event_interruptible_exclusive(ch->idle_wq,
- ((volatile s64) ch->w_local_GP.get <
- (volatile s64) ch->w_remote_GP.put ||
- ((volatile u32) ch->flags &
- XPC_C_DISCONNECTING)));
-
- atomic_dec(&ch->kthreads_idle);
-
- } while (!((volatile u32) ch->flags & XPC_C_DISCONNECTING));
-}
-
-
-static int
-xpc_daemonize_kthread(void *args)
-{
- partid_t partid = XPC_UNPACK_ARG1(args);
- u16 ch_number = XPC_UNPACK_ARG2(args);
- struct xpc_partition *part = &xpc_partitions[partid];
- struct xpc_channel *ch;
- int n_needed;
- unsigned long irq_flags;
-
-
- daemonize("xpc%02dc%d", partid, ch_number);
-
- dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
- partid, ch_number);
-
- ch = &part->channels[ch_number];
-
- if (!(ch->flags & XPC_C_DISCONNECTING)) {
-
- /* let registerer know that connection has been established */
-
- spin_lock_irqsave(&ch->lock, irq_flags);
- if (!(ch->flags & XPC_C_CONNECTCALLOUT)) {
- ch->flags |= XPC_C_CONNECTCALLOUT;
- spin_unlock_irqrestore(&ch->lock, irq_flags);
-
- xpc_connected_callout(ch);
-
- /*
- * It is possible that while the callout was being
- * made, the remote partition sent some messages.
- * If that is the case, we may need to activate
- * additional kthreads to help deliver them. We only
- * need one less than total #of messages to deliver.
- */
- n_needed = ch->w_remote_GP.put - ch->w_local_GP.get - 1;
- if (n_needed > 0 &&
- !(ch->flags & XPC_C_DISCONNECTING)) {
- xpc_activate_kthreads(ch, n_needed);
- }
- } else {
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- }
-
- xpc_kthread_waitmsgs(part, ch);
- }
-
- if (atomic_dec_return(&ch->kthreads_assigned) == 0) {
- spin_lock_irqsave(&ch->lock, irq_flags);
- if ((ch->flags & XPC_C_CONNECTCALLOUT) &&
- !(ch->flags & XPC_C_DISCONNECTCALLOUT)) {
- ch->flags |= XPC_C_DISCONNECTCALLOUT;
- spin_unlock_irqrestore(&ch->lock, irq_flags);
-
- xpc_disconnect_callout(ch, xpcDisconnecting);
- } else {
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- }
- if (atomic_dec_return(&part->nchannels_engaged) == 0) {
- xpc_mark_partition_disengaged(part);
- xpc_IPI_send_disengage(part);
- }
- }
-
-
- xpc_msgqueue_deref(ch);
-
- dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n",
- partid, ch_number);
-
- xpc_part_deref(part);
- return 0;
-}
-
-
-/*
- * For each partition that XPC has established communications with, there is
- * a minimum of one kernel thread assigned to perform any operation that
- * may potentially sleep or block (basically the callouts to the asynchronous
- * functions registered via xpc_connect()).
- *
- * Additional kthreads are created and destroyed by XPC as the workload
- * demands.
- *
- * A kthread is assigned to one of the active channels that exists for a given
- * partition.
- */
-void
-xpc_create_kthreads(struct xpc_channel *ch, int needed)
-{
- unsigned long irq_flags;
- pid_t pid;
- u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
- struct xpc_partition *part = &xpc_partitions[ch->partid];
-
-
- while (needed-- > 0) {
-
- /*
- * The following is done on behalf of the newly created
- * kthread. That kthread is responsible for doing the
- * counterpart to the following before it exits.
- */
- (void) xpc_part_ref(part);
- xpc_msgqueue_ref(ch);
- if (atomic_inc_return(&ch->kthreads_assigned) == 1 &&
- atomic_inc_return(&part->nchannels_engaged) == 1) {
- xpc_mark_partition_engaged(part);
- }
-
- pid = kernel_thread(xpc_daemonize_kthread, (void *) args, 0);
- if (pid < 0) {
- /* the fork failed */
- if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
- atomic_dec_return(&part->nchannels_engaged) == 0) {
- xpc_mark_partition_disengaged(part);
- xpc_IPI_send_disengage(part);
- }
- xpc_msgqueue_deref(ch);
- xpc_part_deref(part);
-
- if (atomic_read(&ch->kthreads_assigned) <
- ch->kthreads_idle_limit) {
- /*
- * Flag this as an error only if we have an
- * insufficient #of kthreads for the channel
- * to function.
- *
- * No xpc_msgqueue_ref() is needed here since
- * the channel mgr is doing this.
- */
- spin_lock_irqsave(&ch->lock, irq_flags);
- XPC_DISCONNECT_CHANNEL(ch, xpcLackOfResources,
- &irq_flags);
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- }
- break;
- }
-
- ch->kthreads_created++; // >>> temporary debug only!!!
- }
-}
-
-
-void
-xpc_disconnect_wait(int ch_number)
-{
- unsigned long irq_flags;
- partid_t partid;
- struct xpc_partition *part;
- struct xpc_channel *ch;
- int wakeup_channel_mgr;
-
-
- /* now wait for all callouts to the caller's function to cease */
- for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
- part = &xpc_partitions[partid];
-
- if (!xpc_part_ref(part)) {
- continue;
- }
-
- ch = &part->channels[ch_number];
-
- if (!(ch->flags & XPC_C_WDISCONNECT)) {
- xpc_part_deref(part);
- continue;
- }
-
- wait_for_completion(&ch->wdisconnect_wait);
-
- spin_lock_irqsave(&ch->lock, irq_flags);
- DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
- wakeup_channel_mgr = 0;
-
- if (ch->delayed_IPI_flags) {
- if (part->act_state != XPC_P_DEACTIVATING) {
- spin_lock(&part->IPI_lock);
- XPC_SET_IPI_FLAGS(part->local_IPI_amo,
- ch->number, ch->delayed_IPI_flags);
- spin_unlock(&part->IPI_lock);
- wakeup_channel_mgr = 1;
- }
- ch->delayed_IPI_flags = 0;
- }
-
- ch->flags &= ~XPC_C_WDISCONNECT;
- spin_unlock_irqrestore(&ch->lock, irq_flags);
-
- if (wakeup_channel_mgr) {
- xpc_wakeup_channel_mgr(part);
- }
-
- xpc_part_deref(part);
- }
-}
-
-
-static void
-xpc_do_exit(enum xpc_retval reason)
-{
- partid_t partid;
- int active_part_count, printed_waiting_msg = 0;
- struct xpc_partition *part;
- unsigned long printmsg_time, disengage_request_timeout = 0;
-
-
- /* a 'rmmod XPC' and a 'reboot' cannot both end up here together */
- DBUG_ON(xpc_exiting == 1);
-
- /*
- * Let the heartbeat checker thread and the discovery thread
- * (if one is running) know that they should exit. Also wake up
- * the heartbeat checker thread in case it's sleeping.
- */
- xpc_exiting = 1;
- wake_up_interruptible(&xpc_act_IRQ_wq);
-
- /* ignore all incoming interrupts */
- free_irq(SGI_XPC_ACTIVATE, NULL);
-
- /* wait for the discovery thread to exit */
- wait_for_completion(&xpc_discovery_exited);
-
- /* wait for the heartbeat checker thread to exit */
- wait_for_completion(&xpc_hb_checker_exited);
-
-
- /* sleep for a 1/3 of a second or so */
- (void) msleep_interruptible(300);
-
-
- /* wait for all partitions to become inactive */
-
- printmsg_time = jiffies + (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ);
- xpc_disengage_request_timedout = 0;
-
- do {
- active_part_count = 0;
-
- for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
- part = &xpc_partitions[partid];
-
- if (xpc_partition_disengaged(part) &&
- part->act_state == XPC_P_INACTIVE) {
- continue;
- }
-
- active_part_count++;
-
- XPC_DEACTIVATE_PARTITION(part, reason);
-
- if (part->disengage_request_timeout >
- disengage_request_timeout) {
- disengage_request_timeout =
- part->disengage_request_timeout;
- }
- }
-
- if (xpc_partition_engaged(-1UL)) {
- if (time_after(jiffies, printmsg_time)) {
- dev_info(xpc_part, "waiting for remote "
- "partitions to disengage, timeout in "
- "%ld seconds\n",
- (disengage_request_timeout - jiffies)
- / HZ);
- printmsg_time = jiffies +
- (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ);
- printed_waiting_msg = 1;
- }
-
- } else if (active_part_count > 0) {
- if (printed_waiting_msg) {
- dev_info(xpc_part, "waiting for local partition"
- " to disengage\n");
- printed_waiting_msg = 0;
- }
-
- } else {
- if (!xpc_disengage_request_timedout) {
- dev_info(xpc_part, "all partitions have "
- "disengaged\n");
- }
- break;
- }
-
- /* sleep for a 1/3 of a second or so */
- (void) msleep_interruptible(300);
-
- } while (1);
-
- DBUG_ON(xpc_partition_engaged(-1UL));
-
-
- /* indicate to others that our reserved page is uninitialized */
- xpc_rsvd_page->vars_pa = 0;
-
- /* now it's time to eliminate our heartbeat */
- del_timer_sync(&xpc_hb_timer);
- DBUG_ON(xpc_vars->heartbeating_to_mask != 0);
-
- if (reason == xpcUnloading) {
- /* take ourselves off of the reboot_notifier_list */
- (void) unregister_reboot_notifier(&xpc_reboot_notifier);
-
- /* take ourselves off of the die_notifier list */
- (void) unregister_die_notifier(&xpc_die_notifier);
- }
-
- /* close down protections for IPI operations */
- xpc_restrict_IPI_ops();
-
-
- /* clear the interface to XPC's functions */
- xpc_clear_interface();
-
- if (xpc_sysctl) {
- unregister_sysctl_table(xpc_sysctl);
- }
-}
-
-
-/*
- * This function is called when the system is being rebooted.
- */
-static int
-xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
-{
- enum xpc_retval reason;
-
-
- switch (event) {
- case SYS_RESTART:
- reason = xpcSystemReboot;
- break;
- case SYS_HALT:
- reason = xpcSystemHalt;
- break;
- case SYS_POWER_OFF:
- reason = xpcSystemPoweroff;
- break;
- default:
- reason = xpcSystemGoingDown;
- }
-
- xpc_do_exit(reason);
- return NOTIFY_DONE;
-}
-
-
-/*
- * Notify other partitions to disengage from all references to our memory.
- */
-static void
-xpc_die_disengage(void)
-{
- struct xpc_partition *part;
- partid_t partid;
- unsigned long engaged;
- long time, printmsg_time, disengage_request_timeout;
-
-
- /* keep xpc_hb_checker thread from doing anything (just in case) */
- xpc_exiting = 1;
-
- xpc_vars->heartbeating_to_mask = 0; /* indicate we're deactivated */
-
- for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
- part = &xpc_partitions[partid];
-
- if (!XPC_SUPPORTS_DISENGAGE_REQUEST(part->
- remote_vars_version)) {
-
- /* just in case it was left set by an earlier XPC */
- xpc_clear_partition_engaged(1UL << partid);
- continue;
- }
-
- if (xpc_partition_engaged(1UL << partid) ||
- part->act_state != XPC_P_INACTIVE) {
- xpc_request_partition_disengage(part);
- xpc_mark_partition_disengaged(part);
- xpc_IPI_send_disengage(part);
- }
- }
-
- time = rtc_time();
- printmsg_time = time +
- (XPC_DISENGAGE_PRINTMSG_INTERVAL * sn_rtc_cycles_per_second);
- disengage_request_timeout = time +
- (xpc_disengage_request_timelimit * sn_rtc_cycles_per_second);
-
- /* wait for all other partitions to disengage from us */
-
- while (1) {
- engaged = xpc_partition_engaged(-1UL);
- if (!engaged) {
- dev_info(xpc_part, "all partitions have disengaged\n");
- break;
- }
-
- time = rtc_time();
- if (time >= disengage_request_timeout) {
- for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
- if (engaged & (1UL << partid)) {
- dev_info(xpc_part, "disengage from "
- "remote partition %d timed "
- "out\n", partid);
- }
- }
- break;
- }
-
- if (time >= printmsg_time) {
- dev_info(xpc_part, "waiting for remote partitions to "
- "disengage, timeout in %ld seconds\n",
- (disengage_request_timeout - time) /
- sn_rtc_cycles_per_second);
- printmsg_time = time +
- (XPC_DISENGAGE_PRINTMSG_INTERVAL *
- sn_rtc_cycles_per_second);
- }
- }
-}
-
-
-/*
- * This function is called when the system is being restarted or halted due
- * to some sort of system failure. If this is the case we need to notify the
- * other partitions to disengage from all references to our memory.
- * This function can also be called when our heartbeater may be offlined
- * for a time. In this case we need to notify other partitions not to worry
- * about the lack of a heartbeat.
- */
-static int
-xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
-{
- switch (event) {
- case DIE_MACHINE_RESTART:
- case DIE_MACHINE_HALT:
- xpc_die_disengage();
- break;
-
- case DIE_KDEBUG_ENTER:
- /* Should lack of heartbeat be ignored by other partitions? */
- if (!xpc_kdebug_ignore) {
- break;
- }
- /* fall through */
- case DIE_MCA_MONARCH_ENTER:
- case DIE_INIT_MONARCH_ENTER:
- xpc_vars->heartbeat++;
- xpc_vars->heartbeat_offline = 1;
- break;
-
- case DIE_KDEBUG_LEAVE:
- /* Is lack of heartbeat being ignored by other partitions? */
- if (!xpc_kdebug_ignore) {
- break;
- }
- /* fall through */
- case DIE_MCA_MONARCH_LEAVE:
- case DIE_INIT_MONARCH_LEAVE:
- xpc_vars->heartbeat++;
- xpc_vars->heartbeat_offline = 0;
- break;
- }
-
- return NOTIFY_DONE;
-}
-
-
-int __init
-xpc_init(void)
-{
- int ret;
- partid_t partid;
- struct xpc_partition *part;
- pid_t pid;
-
-
- if (!ia64_platform_is("sn2")) {
- return -ENODEV;
- }
-
- /*
- * xpc_remote_copy_buffer is used as a temporary buffer for bte_copy'ng
- * various portions of a partition's reserved page. Its size is based
- * on the size of the reserved page header and part_nasids mask. So we
- * need to ensure that the other items will fit as well.
- */
- if (XPC_RP_VARS_SIZE > XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES) {
- dev_err(xpc_part, "xpc_remote_copy_buffer is not big enough\n");
- return -EPERM;
- }
- DBUG_ON((u64) xpc_remote_copy_buffer !=
- L1_CACHE_ALIGN((u64) xpc_remote_copy_buffer));
-
- snprintf(xpc_part->bus_id, BUS_ID_SIZE, "part");
- snprintf(xpc_chan->bus_id, BUS_ID_SIZE, "chan");
-
- xpc_sysctl = register_sysctl_table(xpc_sys_dir, 1);
-
- /*
- * The first few fields of each entry of xpc_partitions[] need to
- * be initialized now so that calls to xpc_connect() and
- * xpc_disconnect() can be made prior to the activation of any remote
- * partition. NOTE THAT NONE OF THE OTHER FIELDS BELONGING TO THESE
- * ENTRIES ARE MEANINGFUL UNTIL AFTER AN ENTRY'S CORRESPONDING
- * PARTITION HAS BEEN ACTIVATED.
- */
- for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
- part = &xpc_partitions[partid];
-
- DBUG_ON((u64) part != L1_CACHE_ALIGN((u64) part));
-
- part->act_IRQ_rcvd = 0;
- spin_lock_init(&part->act_lock);
- part->act_state = XPC_P_INACTIVE;
- XPC_SET_REASON(part, 0, 0);
-
- init_timer(&part->disengage_request_timer);
- part->disengage_request_timer.function =
- xpc_timeout_partition_disengage_request;
- part->disengage_request_timer.data = (unsigned long) part;
-
- part->setup_state = XPC_P_UNSET;
- init_waitqueue_head(&part->teardown_wq);
- atomic_set(&part->references, 0);
- }
-
- /*
- * Open up protections for IPI operations (and AMO operations on
- * Shub 1.1 systems).
- */
- xpc_allow_IPI_ops();
-
- /*
- * Interrupts being processed will increment this atomic variable and
- * awaken the heartbeat thread which will process the interrupts.
- */
- atomic_set(&xpc_act_IRQ_rcvd, 0);
-
- /*
- * This is safe to do before the xpc_hb_checker thread has started
- * because the handler releases a wait queue. If an interrupt is
- * received before the thread is waiting, it will not go to sleep,
- * but rather immediately process the interrupt.
- */
- ret = request_irq(SGI_XPC_ACTIVATE, xpc_act_IRQ_handler, 0,
- "xpc hb", NULL);
- if (ret != 0) {
- dev_err(xpc_part, "can't register ACTIVATE IRQ handler, "
- "errno=%d\n", -ret);
-
- xpc_restrict_IPI_ops();
-
- if (xpc_sysctl) {
- unregister_sysctl_table(xpc_sysctl);
- }
- return -EBUSY;
- }
-
- /*
- * Fill the partition reserved page with the information needed by
- * other partitions to discover we are alive and establish initial
- * communications.
- */
- xpc_rsvd_page = xpc_rsvd_page_init();
- if (xpc_rsvd_page == NULL) {
- dev_err(xpc_part, "could not setup our reserved page\n");
-
- free_irq(SGI_XPC_ACTIVATE, NULL);
- xpc_restrict_IPI_ops();
-
- if (xpc_sysctl) {
- unregister_sysctl_table(xpc_sysctl);
- }
- return -EBUSY;
- }
-
-
- /* add ourselves to the reboot_notifier_list */
- ret = register_reboot_notifier(&xpc_reboot_notifier);
- if (ret != 0) {
- dev_warn(xpc_part, "can't register reboot notifier\n");
- }
-
- /* add ourselves to the die_notifier list (i.e., ia64die_chain) */
- ret = register_die_notifier(&xpc_die_notifier);
- if (ret != 0) {
- dev_warn(xpc_part, "can't register die notifier\n");
- }
-
-
- /*
- * Set the beating to other partitions into motion. This is
- * the last requirement for other partitions' discovery to
- * initiate communications with us.
- */
- init_timer(&xpc_hb_timer);
- xpc_hb_timer.function = xpc_hb_beater;
- xpc_hb_beater(0);
-
-
- /*
- * The real work-horse behind xpc. This processes incoming
- * interrupts and monitors remote heartbeats.
- */
- pid = kernel_thread(xpc_hb_checker, NULL, 0);
- if (pid < 0) {
- dev_err(xpc_part, "failed while forking hb check thread\n");
-
- /* indicate to others that our reserved page is uninitialized */
- xpc_rsvd_page->vars_pa = 0;
-
- /* take ourselves off of the reboot_notifier_list */
- (void) unregister_reboot_notifier(&xpc_reboot_notifier);
-
- /* take ourselves off of the die_notifier list */
- (void) unregister_die_notifier(&xpc_die_notifier);
-
- del_timer_sync(&xpc_hb_timer);
- free_irq(SGI_XPC_ACTIVATE, NULL);
- xpc_restrict_IPI_ops();
-
- if (xpc_sysctl) {
- unregister_sysctl_table(xpc_sysctl);
- }
- return -EBUSY;
- }
-
-
- /*
- * Startup a thread that will attempt to discover other partitions to
- * activate based on info provided by SAL. This new thread is short
- * lived and will exit once discovery is complete.
- */
- pid = kernel_thread(xpc_initiate_discovery, NULL, 0);
- if (pid < 0) {
- dev_err(xpc_part, "failed while forking discovery thread\n");
-
- /* mark this new thread as a non-starter */
- complete(&xpc_discovery_exited);
-
- xpc_do_exit(xpcUnloading);
- return -EBUSY;
- }
-
-
- /* set the interface to point at XPC's functions */
- xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect,
- xpc_initiate_allocate, xpc_initiate_send,
- xpc_initiate_send_notify, xpc_initiate_received,
- xpc_initiate_partid_to_nasids);
-
- return 0;
-}
-module_init(xpc_init);
-
-
-void __exit
-xpc_exit(void)
-{
- xpc_do_exit(xpcUnloading);
-}
-module_exit(xpc_exit);
-
-
-MODULE_AUTHOR("Silicon Graphics, Inc.");
-MODULE_DESCRIPTION("Cross Partition Communication (XPC) support");
-MODULE_LICENSE("GPL");
-
-module_param(xpc_hb_interval, int, 0);
-MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between "
- "heartbeat increments.");
-
-module_param(xpc_hb_check_interval, int, 0);
-MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between "
- "heartbeat checks.");
-
-module_param(xpc_disengage_request_timelimit, int, 0);
-MODULE_PARM_DESC(xpc_disengage_request_timelimit, "Number of seconds to wait "
- "for disengage request to complete.");
-
-module_param(xpc_kdebug_ignore, int, 0);
-MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by "
- "other partitions when dropping into kdebug.");
-
diff --git a/arch/ia64/sn/kernel/xpc_partition.c b/arch/ia64/sn/kernel/xpc_partition.c
deleted file mode 100644
index 88a730e6cfd..00000000000
--- a/arch/ia64/sn/kernel/xpc_partition.c
+++ /dev/null
@@ -1,1227 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (c) 2004-2006 Silicon Graphics, Inc. All Rights Reserved.
- */
-
-
-/*
- * Cross Partition Communication (XPC) partition support.
- *
- * This is the part of XPC that detects the presence/absence of
- * other partitions. It provides a heartbeat and monitors the
- * heartbeats of other partitions.
- *
- */
-
-
-#include <linux/kernel.h>
-#include <linux/sysctl.h>
-#include <linux/cache.h>
-#include <linux/mmzone.h>
-#include <linux/nodemask.h>
-#include <asm/uncached.h>
-#include <asm/sn/bte.h>
-#include <asm/sn/intr.h>
-#include <asm/sn/sn_sal.h>
-#include <asm/sn/nodepda.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/xpc.h>
-
-
-/* XPC is exiting flag */
-int xpc_exiting;
-
-
-/* SH_IPI_ACCESS shub register value on startup */
-static u64 xpc_sh1_IPI_access;
-static u64 xpc_sh2_IPI_access0;
-static u64 xpc_sh2_IPI_access1;
-static u64 xpc_sh2_IPI_access2;
-static u64 xpc_sh2_IPI_access3;
-
-
-/* original protection values for each node */
-u64 xpc_prot_vec[MAX_NUMNODES];
-
-
-/* this partition's reserved page pointers */
-struct xpc_rsvd_page *xpc_rsvd_page;
-static u64 *xpc_part_nasids;
-static u64 *xpc_mach_nasids;
-struct xpc_vars *xpc_vars;
-struct xpc_vars_part *xpc_vars_part;
-
-static int xp_nasid_mask_bytes; /* actual size in bytes of nasid mask */
-static int xp_nasid_mask_words; /* actual size in words of nasid mask */
-
-
-/*
- * For performance reasons, each entry of xpc_partitions[] is cacheline
- * aligned. And xpc_partitions[] is padded with an additional entry at the
- * end so that the last legitimate entry doesn't share its cacheline with
- * another variable.
- */
-struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];
-
-
-/*
- * Generic buffer used to store a local copy of portions of a remote
- * partition's reserved page (either its header and part_nasids mask,
- * or its vars).
- *
- * xpc_discovery runs only once and is a separate thread that is
- * very likely going to be processing in parallel with receiving
- * interrupts.
- */
-char ____cacheline_aligned xpc_remote_copy_buffer[XPC_RP_HEADER_SIZE +
- XP_NASID_MASK_BYTES];
-
-
-/*
- * Given a nasid, get the physical address of the partition's reserved page
- * for that nasid. This function returns 0 on any error.
- */
-static u64
-xpc_get_rsvd_page_pa(int nasid)
-{
- bte_result_t bte_res;
- s64 status;
- u64 cookie = 0;
- u64 rp_pa = nasid; /* seed with nasid */
- u64 len = 0;
- u64 buf = buf;
- u64 buf_len = 0;
- void *buf_base = NULL;
-
-
- while (1) {
-
- status = sn_partition_reserved_page_pa(buf, &cookie, &rp_pa,
- &len);
-
- dev_dbg(xpc_part, "SAL returned with status=%li, cookie="
- "0x%016lx, address=0x%016lx, len=0x%016lx\n",
- status, cookie, rp_pa, len);
-
- if (status != SALRET_MORE_PASSES) {
- break;
- }
-
- if (L1_CACHE_ALIGN(len) > buf_len) {
- if (buf_base != NULL) {
- kfree(buf_base);
- }
- buf_len = L1_CACHE_ALIGN(len);
- buf = (u64) xpc_kmalloc_cacheline_aligned(buf_len,
- GFP_KERNEL, &buf_base);
- if (buf_base == NULL) {
- dev_err(xpc_part, "unable to kmalloc "
- "len=0x%016lx\n", buf_len);
- status = SALRET_ERROR;
- break;
- }
- }
-
- bte_res = xp_bte_copy(rp_pa, ia64_tpa(buf), buf_len,
- (BTE_NOTIFY | BTE_WACQUIRE), NULL);
- if (bte_res != BTE_SUCCESS) {
- dev_dbg(xpc_part, "xp_bte_copy failed %i\n", bte_res);
- status = SALRET_ERROR;
- break;
- }
- }
-
- if (buf_base != NULL) {
- kfree(buf_base);
- }
-
- if (status != SALRET_OK) {
- rp_pa = 0;
- }
- dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", rp_pa);
- return rp_pa;
-}
-
-
-/*
- * Fill the partition reserved page with the information needed by
- * other partitions to discover we are alive and establish initial
- * communications.
- */
-struct xpc_rsvd_page *
-xpc_rsvd_page_init(void)
-{
- struct xpc_rsvd_page *rp;
- AMO_t *amos_page;
- u64 rp_pa, nasid_array = 0;
- int i, ret;
-
-
- /* get the local reserved page's address */
-
- preempt_disable();
- rp_pa = xpc_get_rsvd_page_pa(cpuid_to_nasid(smp_processor_id()));
- preempt_enable();
- if (rp_pa == 0) {
- dev_err(xpc_part, "SAL failed to locate the reserved page\n");
- return NULL;
- }
- rp = (struct xpc_rsvd_page *) __va(rp_pa);
-
- if (rp->partid != sn_partition_id) {
- dev_err(xpc_part, "the reserved page's partid of %d should be "
- "%d\n", rp->partid, sn_partition_id);
- return NULL;
- }
-
- rp->version = XPC_RP_VERSION;
-
- /* establish the actual sizes of the nasid masks */
- if (rp->SAL_version == 1) {
- /* SAL_version 1 didn't set the nasids_size field */
- rp->nasids_size = 128;
- }
- xp_nasid_mask_bytes = rp->nasids_size;
- xp_nasid_mask_words = xp_nasid_mask_bytes / 8;
-
- /* setup the pointers to the various items in the reserved page */
- xpc_part_nasids = XPC_RP_PART_NASIDS(rp);
- xpc_mach_nasids = XPC_RP_MACH_NASIDS(rp);
- xpc_vars = XPC_RP_VARS(rp);
- xpc_vars_part = XPC_RP_VARS_PART(rp);
-
- /*
- * Before clearing xpc_vars, see if a page of AMOs had been previously
- * allocated. If not we'll need to allocate one and set permissions
- * so that cross-partition AMOs are allowed.
- *
- * The allocated AMO page needs MCA reporting to remain disabled after
- * XPC has unloaded. To make this work, we keep a copy of the pointer
- * to this page (i.e., amos_page) in the struct xpc_vars structure,
- * which is pointed to by the reserved page, and re-use that saved copy
- * on subsequent loads of XPC. This AMO page is never freed, and its
- * memory protections are never restricted.
- */
- if ((amos_page = xpc_vars->amos_page) == NULL) {
- amos_page = (AMO_t *) TO_AMO(uncached_alloc_page(0));
- if (amos_page == NULL) {
- dev_err(xpc_part, "can't allocate page of AMOs\n");
- return NULL;
- }
-
- /*
- * Open up AMO-R/W to cpu. This is done for Shub 1.1 systems
- * when xpc_allow_IPI_ops() is called via xpc_hb_init().
- */
- if (!enable_shub_wars_1_1()) {
- ret = sn_change_memprotect(ia64_tpa((u64) amos_page),
- PAGE_SIZE, SN_MEMPROT_ACCESS_CLASS_1,
- &nasid_array);
- if (ret != 0) {
- dev_err(xpc_part, "can't change memory "
- "protections\n");
- uncached_free_page(__IA64_UNCACHED_OFFSET |
- TO_PHYS((u64) amos_page));
- return NULL;
- }
- }
- } else if (!IS_AMO_ADDRESS((u64) amos_page)) {
- /*
- * EFI's XPBOOT can also set amos_page in the reserved page,
- * but it happens to leave it as an uncached physical address
- * and we need it to be an uncached virtual, so we'll have to
- * convert it.
- */
- if (!IS_AMO_PHYS_ADDRESS((u64) amos_page)) {
- dev_err(xpc_part, "previously used amos_page address "
- "is bad = 0x%p\n", (void *) amos_page);
- return NULL;
- }
- amos_page = (AMO_t *) TO_AMO((u64) amos_page);
- }
-
- /* clear xpc_vars */
- memset(xpc_vars, 0, sizeof(struct xpc_vars));
-
- xpc_vars->version = XPC_V_VERSION;
- xpc_vars->act_nasid = cpuid_to_nasid(0);
- xpc_vars->act_phys_cpuid = cpu_physical_id(0);
- xpc_vars->vars_part_pa = __pa(xpc_vars_part);
- xpc_vars->amos_page_pa = ia64_tpa((u64) amos_page);
- xpc_vars->amos_page = amos_page; /* save for next load of XPC */
-
-
- /* clear xpc_vars_part */
- memset((u64 *) xpc_vars_part, 0, sizeof(struct xpc_vars_part) *
- XP_MAX_PARTITIONS);
-
- /* initialize the activate IRQ related AMO variables */
- for (i = 0; i < xp_nasid_mask_words; i++) {
- (void) xpc_IPI_init(XPC_ACTIVATE_IRQ_AMOS + i);
- }
-
- /* initialize the engaged remote partitions related AMO variables */
- (void) xpc_IPI_init(XPC_ENGAGED_PARTITIONS_AMO);
- (void) xpc_IPI_init(XPC_DISENGAGE_REQUEST_AMO);
-
- /* timestamp of when reserved page was setup by XPC */
- rp->stamp = CURRENT_TIME;
-
- /*
- * This signifies to the remote partition that our reserved
- * page is initialized.
- */
- rp->vars_pa = __pa(xpc_vars);
-
- return rp;
-}
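
Note the ordering in xpc_rsvd_page_init(): every field of the reserved page and of xpc_vars is filled in first, and rp->vars_pa is written last, because a non-zero vars_pa is what tells remote partitions the page is ready to use. A generic C11 sketch of that publish-last idiom (illustrative layout, not the SN2 reserved page):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <string.h>

    struct shared_vars { long heartbeat; uint64_t amos_page_pa; };

    struct rsvd_page {
        struct shared_vars vars;
        _Atomic uint64_t vars_pa;       /* 0 = uninitialized; remote side polls this */
    };

    void publish_rsvd_page(struct rsvd_page *rp, uint64_t vars_phys_addr)
    {
        memset(&rp->vars, 0, sizeof(rp->vars));
        rp->vars.heartbeat = 1;                     /* ...initialize everything... */

        /* Release store: all prior initialization becomes visible before a
         * remote observer can see a non-zero vars_pa. */
        atomic_store_explicit(&rp->vars_pa, vars_phys_addr, memory_order_release);
    }
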
-
-
-/*
- * Change protections to allow IPI operations (and AMO operations on
- * Shub 1.1 systems).
- */
-void
-xpc_allow_IPI_ops(void)
-{
- int node;
- int nasid;
-
-
- // >>> Change SH_IPI_ACCESS code to use SAL call once it is available.
-
- if (is_shub2()) {
- xpc_sh2_IPI_access0 =
- (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS0));
- xpc_sh2_IPI_access1 =
- (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS1));
- xpc_sh2_IPI_access2 =
- (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS2));
- xpc_sh2_IPI_access3 =
- (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS3));
-
- for_each_online_node(node) {
- nasid = cnodeid_to_nasid(node);
- HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
- -1UL);
- HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
- -1UL);
- HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
- -1UL);
- HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
- -1UL);
- }
-
- } else {
- xpc_sh1_IPI_access =
- (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH1_IPI_ACCESS));
-
- for_each_online_node(node) {
- nasid = cnodeid_to_nasid(node);
- HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
- -1UL);
-
- /*
- * Since the BIST collides with memory operations on
- * SHUB 1.1, sn_change_memprotect() cannot be used.
- */
- if (enable_shub_wars_1_1()) {
- /* open up everything */
- xpc_prot_vec[node] = (u64) HUB_L((u64 *)
- GLOBAL_MMR_ADDR(nasid,
- SH1_MD_DQLP_MMR_DIR_PRIVEC0));
- HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
- SH1_MD_DQLP_MMR_DIR_PRIVEC0),
- -1UL);
- HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
- SH1_MD_DQRP_MMR_DIR_PRIVEC0),
- -1UL);
- }
- }
- }
-}
-
-
-/*
- * Restrict protections to disallow IPI operations (and AMO operations on
- * Shub 1.1 systems).
- */
-void
-xpc_restrict_IPI_ops(void)
-{
- int node;
- int nasid;
-
-
- // >>> Change SH_IPI_ACCESS code to use SAL call once it is available.
-
- if (is_shub2()) {
-
- for_each_online_node(node) {
- nasid = cnodeid_to_nasid(node);
- HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
- xpc_sh2_IPI_access0);
- HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
- xpc_sh2_IPI_access1);
- HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
- xpc_sh2_IPI_access2);
- HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
- xpc_sh2_IPI_access3);
- }
-
- } else {
-
- for_each_online_node(node) {
- nasid = cnodeid_to_nasid(node);
- HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
- xpc_sh1_IPI_access);
-
- if (enable_shub_wars_1_1()) {
- HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
- SH1_MD_DQLP_MMR_DIR_PRIVEC0),
- xpc_prot_vec[node]);
- HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
- SH1_MD_DQRP_MMR_DIR_PRIVEC0),
- xpc_prot_vec[node]);
- }
- }
- }
-}
-
-
-/*
- * At periodic intervals, scan through all active partitions and ensure
- * their heartbeat is still active. If not, the partition is deactivated.
- */
-void
-xpc_check_remote_hb(void)
-{
- struct xpc_vars *remote_vars;
- struct xpc_partition *part;
- partid_t partid;
- bte_result_t bres;
-
-
- remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer;
-
- for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
-
- if (xpc_exiting) {
- break;
- }
-
- if (partid == sn_partition_id) {
- continue;
- }
-
- part = &xpc_partitions[partid];
-
- if (part->act_state == XPC_P_INACTIVE ||
- part->act_state == XPC_P_DEACTIVATING) {
- continue;
- }
-
- /* pull the remote_hb cache line */
- bres = xp_bte_copy(part->remote_vars_pa,
- ia64_tpa((u64) remote_vars),
- XPC_RP_VARS_SIZE,
- (BTE_NOTIFY | BTE_WACQUIRE), NULL);
- if (bres != BTE_SUCCESS) {
- XPC_DEACTIVATE_PARTITION(part,
- xpc_map_bte_errors(bres));
- continue;
- }
-
- dev_dbg(xpc_part, "partid = %d, heartbeat = %ld, last_heartbeat"
- " = %ld, heartbeat_offline = %ld, HB_mask = 0x%lx\n",
- partid, remote_vars->heartbeat, part->last_heartbeat,
- remote_vars->heartbeat_offline,
- remote_vars->heartbeating_to_mask);
-
- if (((remote_vars->heartbeat == part->last_heartbeat) &&
- (remote_vars->heartbeat_offline == 0)) ||
- !xpc_hb_allowed(sn_partition_id, remote_vars)) {
-
- XPC_DEACTIVATE_PARTITION(part, xpcNoHeartbeat);
- continue;
- }
-
- part->last_heartbeat = remote_vars->heartbeat;
- }
-}
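
The staleness test in xpc_check_remote_hb() reduces to: deactivate the partition if its heartbeat has not advanced since the last scan while it is not marked offline, or if it is no longer heartbeating to us. As a standalone predicate (a sketch with assumed field names):

    #include <stdbool.h>

    struct remote_hb {
        long heartbeat;              /* counter bumped by the remote beater */
        long heartbeat_offline;      /* non-zero while the remote is in MCA/kdebug */
        bool heartbeating_to_us;     /* derived from the remote's heartbeat mask */
    };

    /* Return true if the partition should be deactivated for lack of heartbeat. */
    bool heartbeat_stale(const struct remote_hb *hb, long last_seen)
    {
        if (!hb->heartbeating_to_us)
            return true;
        return hb->heartbeat == last_seen && hb->heartbeat_offline == 0;
    }
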
-
-
-/*
- * Get a copy of a portion of the remote partition's rsvd page.
- *
- * remote_rp points to a buffer that is cacheline aligned for BTE copies and
- * is large enough to contain a copy of their reserved page header and
- * part_nasids mask.
- */
-static enum xpc_retval
-xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
- struct xpc_rsvd_page *remote_rp, u64 *remote_rp_pa)
-{
- int bres, i;
-
-
- /* get the reserved page's physical address */
-
- *remote_rp_pa = xpc_get_rsvd_page_pa(nasid);
- if (*remote_rp_pa == 0) {
- return xpcNoRsvdPageAddr;
- }
-
-
- /* pull over the reserved page header and part_nasids mask */
-
- bres = xp_bte_copy(*remote_rp_pa, ia64_tpa((u64) remote_rp),
- XPC_RP_HEADER_SIZE + xp_nasid_mask_bytes,
- (BTE_NOTIFY | BTE_WACQUIRE), NULL);
- if (bres != BTE_SUCCESS) {
- return xpc_map_bte_errors(bres);
- }
-
-
- if (discovered_nasids != NULL) {
- u64 *remote_part_nasids = XPC_RP_PART_NASIDS(remote_rp);
-
-
- for (i = 0; i < xp_nasid_mask_words; i++) {
- discovered_nasids[i] |= remote_part_nasids[i];
- }
- }
-
-
- /* check that the partid is for another partition */
-
- if (remote_rp->partid < 1 ||
- remote_rp->partid > (XP_MAX_PARTITIONS - 1)) {
- return xpcInvalidPartid;
- }
-
- if (remote_rp->partid == sn_partition_id) {
- return xpcLocalPartid;
- }
-
-
- if (XPC_VERSION_MAJOR(remote_rp->version) !=
- XPC_VERSION_MAJOR(XPC_RP_VERSION)) {
- return xpcBadVersion;
- }
-
- return xpcSuccess;
-}
-
-
-/*
- * Get a copy of the remote partition's XPC variables from the reserved page.
- *
- * remote_vars points to a buffer that is cacheline aligned for BTE copies and
- * assumed to be of size XPC_RP_VARS_SIZE.
- */
-static enum xpc_retval
-xpc_get_remote_vars(u64 remote_vars_pa, struct xpc_vars *remote_vars)
-{
- int bres;
-
-
- if (remote_vars_pa == 0) {
- return xpcVarsNotSet;
- }
-
-
- /* pull over the cross partition variables */
-
- bres = xp_bte_copy(remote_vars_pa, ia64_tpa((u64) remote_vars),
- XPC_RP_VARS_SIZE,
- (BTE_NOTIFY | BTE_WACQUIRE), NULL);
- if (bres != BTE_SUCCESS) {
- return xpc_map_bte_errors(bres);
- }
-
- if (XPC_VERSION_MAJOR(remote_vars->version) !=
- XPC_VERSION_MAJOR(XPC_V_VERSION)) {
- return xpcBadVersion;
- }
-
- return xpcSuccess;
-}
-
-
-/*
- * Update the remote partition's info.
- */
-static void
-xpc_update_partition_info(struct xpc_partition *part, u8 remote_rp_version,
- struct timespec *remote_rp_stamp, u64 remote_rp_pa,
- u64 remote_vars_pa, struct xpc_vars *remote_vars)
-{
- part->remote_rp_version = remote_rp_version;
- dev_dbg(xpc_part, " remote_rp_version = 0x%016lx\n",
- part->remote_rp_version);
-
- part->remote_rp_stamp = *remote_rp_stamp;
- dev_dbg(xpc_part, " remote_rp_stamp (tv_sec = 0x%lx tv_nsec = 0x%lx\n",
- part->remote_rp_stamp.tv_sec, part->remote_rp_stamp.tv_nsec);
-
- part->remote_rp_pa = remote_rp_pa;
- dev_dbg(xpc_part, " remote_rp_pa = 0x%016lx\n", part->remote_rp_pa);
-
- part->remote_vars_pa = remote_vars_pa;
- dev_dbg(xpc_part, " remote_vars_pa = 0x%016lx\n",
- part->remote_vars_pa);
-
- part->last_heartbeat = remote_vars->heartbeat;
- dev_dbg(xpc_part, " last_heartbeat = 0x%016lx\n",
- part->last_heartbeat);
-
- part->remote_vars_part_pa = remote_vars->vars_part_pa;
- dev_dbg(xpc_part, " remote_vars_part_pa = 0x%016lx\n",
- part->remote_vars_part_pa);
-
- part->remote_act_nasid = remote_vars->act_nasid;
- dev_dbg(xpc_part, " remote_act_nasid = 0x%x\n",
- part->remote_act_nasid);
-
- part->remote_act_phys_cpuid = remote_vars->act_phys_cpuid;
- dev_dbg(xpc_part, " remote_act_phys_cpuid = 0x%x\n",
- part->remote_act_phys_cpuid);
-
- part->remote_amos_page_pa = remote_vars->amos_page_pa;
- dev_dbg(xpc_part, " remote_amos_page_pa = 0x%lx\n",
- part->remote_amos_page_pa);
-
- part->remote_vars_version = remote_vars->version;
- dev_dbg(xpc_part, " remote_vars_version = 0x%x\n",
- part->remote_vars_version);
-}
-
-
-/*
- * Prior code has determined the nasid which generated an IPI. Inspect
- * that nasid to determine if its partition needs to be activated or
- * deactivated.
- *
- * A partition is considered "awaiting activation" if our partition
- * flags indicate it is not active and it has a heartbeat. A
- * partition is considered "awaiting deactivation" if our partition
- * flags indicate it is active but it has no heartbeat or it is not
- * sending its heartbeat to us.
- *
- * To determine the heartbeat, the remote nasid must have a properly
- * initialized reserved page.
- */
-static void
-xpc_identify_act_IRQ_req(int nasid)
-{
- struct xpc_rsvd_page *remote_rp;
- struct xpc_vars *remote_vars;
- u64 remote_rp_pa;
- u64 remote_vars_pa;
- int remote_rp_version;
- int reactivate = 0;
- int stamp_diff;
- struct timespec remote_rp_stamp = { 0, 0 };
- partid_t partid;
- struct xpc_partition *part;
- enum xpc_retval ret;
-
-
- /* pull over the reserved page structure */
-
- remote_rp = (struct xpc_rsvd_page *) xpc_remote_copy_buffer;
-
- ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rp_pa);
- if (ret != xpcSuccess) {
- dev_warn(xpc_part, "unable to get reserved page from nasid %d, "
- "which sent interrupt, reason=%d\n", nasid, ret);
- return;
- }
-
- remote_vars_pa = remote_rp->vars_pa;
- remote_rp_version = remote_rp->version;
- if (XPC_SUPPORTS_RP_STAMP(remote_rp_version)) {
- remote_rp_stamp = remote_rp->stamp;
- }
- partid = remote_rp->partid;
- part = &xpc_partitions[partid];
-
-
- /* pull over the cross partition variables */
-
- remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer;
-
- ret = xpc_get_remote_vars(remote_vars_pa, remote_vars);
- if (ret != xpcSuccess) {
-
- dev_warn(xpc_part, "unable to get XPC variables from nasid %d, "
- "which sent interrupt, reason=%d\n", nasid, ret);
-
- XPC_DEACTIVATE_PARTITION(part, ret);
- return;
- }
-
-
- part->act_IRQ_rcvd++;
-
- dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = "
- "%ld:0x%lx\n", (int) nasid, (int) partid, part->act_IRQ_rcvd,
- remote_vars->heartbeat, remote_vars->heartbeating_to_mask);
-
- if (xpc_partition_disengaged(part) &&
- part->act_state == XPC_P_INACTIVE) {
-
- xpc_update_partition_info(part, remote_rp_version,
- &remote_rp_stamp, remote_rp_pa,
- remote_vars_pa, remote_vars);
-
- if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) {
- if (xpc_partition_disengage_requested(1UL << partid)) {
- /*
- * Other side is waiting on us to disengage,
- * even though we already have.
- */
- return;
- }
- } else {
- /* other side doesn't support disengage requests */
- xpc_clear_partition_disengage_request(1UL << partid);
- }
-
- xpc_activate_partition(part);
- return;
- }
-
- DBUG_ON(part->remote_rp_version == 0);
- DBUG_ON(part->remote_vars_version == 0);
-
- if (!XPC_SUPPORTS_RP_STAMP(part->remote_rp_version)) {
- DBUG_ON(XPC_SUPPORTS_DISENGAGE_REQUEST(part->
- remote_vars_version));
-
- if (!XPC_SUPPORTS_RP_STAMP(remote_rp_version)) {
- DBUG_ON(XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->
- version));
- /* see if the other side rebooted */
- if (part->remote_amos_page_pa ==
- remote_vars->amos_page_pa &&
- xpc_hb_allowed(sn_partition_id,
- remote_vars)) {
- /* doesn't look that way, so ignore the IPI */
- return;
- }
- }
-
- /*
- * Other side rebooted and previous XPC didn't support the
- * disengage request, so we don't need to do anything special.
- */
-
- xpc_update_partition_info(part, remote_rp_version,
- &remote_rp_stamp, remote_rp_pa,
- remote_vars_pa, remote_vars);
- part->reactivate_nasid = nasid;
- XPC_DEACTIVATE_PARTITION(part, xpcReactivating);
- return;
- }
-
- DBUG_ON(!XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version));
-
- if (!XPC_SUPPORTS_RP_STAMP(remote_rp_version)) {
- DBUG_ON(!XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->version));
-
- /*
- * Other side rebooted and previous XPC did support the
- * disengage request, but the new one doesn't.
- */
-
- xpc_clear_partition_engaged(1UL << partid);
- xpc_clear_partition_disengage_request(1UL << partid);
-
- xpc_update_partition_info(part, remote_rp_version,
- &remote_rp_stamp, remote_rp_pa,
- remote_vars_pa, remote_vars);
- reactivate = 1;
-
- } else {
- DBUG_ON(!XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->version));
-
- stamp_diff = xpc_compare_stamps(&part->remote_rp_stamp,
- &remote_rp_stamp);
- if (stamp_diff != 0) {
- DBUG_ON(stamp_diff >= 0);
-
- /*
- * Other side rebooted and the previous XPC did support
- * the disengage request, as does the new one.
- */
-
- DBUG_ON(xpc_partition_engaged(1UL << partid));
- DBUG_ON(xpc_partition_disengage_requested(1UL <<
- partid));
-
- xpc_update_partition_info(part, remote_rp_version,
- &remote_rp_stamp, remote_rp_pa,
- remote_vars_pa, remote_vars);
- reactivate = 1;
- }
- }
-
- if (part->disengage_request_timeout > 0 &&
- !xpc_partition_disengaged(part)) {
- /* still waiting on other side to disengage from us */
- return;
- }
-
- if (reactivate) {
- part->reactivate_nasid = nasid;
- XPC_DEACTIVATE_PARTITION(part, xpcReactivating);
-
- } else if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version) &&
- xpc_partition_disengage_requested(1UL << partid)) {
- XPC_DEACTIVATE_PARTITION(part, xpcOtherGoingDown);
- }
-}
-
-
-/*
- * Loop through the activation AMO variables and process any bits
- * which are set. Each bit indicates a nasid sending a partition
- * activation or deactivation request.
- *
- * Return #of IRQs detected.
- */
-int
-xpc_identify_act_IRQ_sender(void)
-{
- int word, bit;
- u64 nasid_mask;
- u64 nasid; /* remote nasid */
- int n_IRQs_detected = 0;
- AMO_t *act_amos;
-
-
- act_amos = xpc_vars->amos_page + XPC_ACTIVATE_IRQ_AMOS;
-
-
- /* scan through act AMO variable looking for non-zero entries */
- for (word = 0; word < xp_nasid_mask_words; word++) {
-
- if (xpc_exiting) {
- break;
- }
-
- nasid_mask = xpc_IPI_receive(&act_amos[word]);
- if (nasid_mask == 0) {
- /* no IRQs from nasids in this variable */
- continue;
- }
-
- dev_dbg(xpc_part, "AMO[%d] gave back 0x%lx\n", word,
- nasid_mask);
-
-
- /*
- * If this nasid has been added to the machine since
- * our partition was reset, this will retain the
- * remote nasid in our reserved pages machine mask.
- * This is used in the event of module reload.
- */
- xpc_mach_nasids[word] |= nasid_mask;
-
-
- /* locate the nasid(s) which sent interrupts */
-
- for (bit = 0; bit < (8 * sizeof(u64)); bit++) {
- if (nasid_mask & (1UL << bit)) {
- n_IRQs_detected++;
- nasid = XPC_NASID_FROM_W_B(word, bit);
- dev_dbg(xpc_part, "interrupt from nasid %ld\n",
- nasid);
- xpc_identify_act_IRQ_req(nasid);
- }
- }
- }
- return n_IRQs_detected;
-}
-
-
-/*
- * See if the other side has responded to a partition disengage request
- * from us.
- */
-int
-xpc_partition_disengaged(struct xpc_partition *part)
-{
- partid_t partid = XPC_PARTID(part);
- int disengaged;
-
-
- disengaged = (xpc_partition_engaged(1UL << partid) == 0);
- if (part->disengage_request_timeout) {
- if (!disengaged) {
- if (jiffies < part->disengage_request_timeout) {
- /* timelimit hasn't been reached yet */
- return 0;
- }
-
- /*
- * Other side hasn't responded to our disengage
- * request in a timely fashion, so assume it's dead.
- */
-
- dev_info(xpc_part, "disengage from remote partition %d "
- "timed out\n", partid);
- xpc_disengage_request_timedout = 1;
- xpc_clear_partition_engaged(1UL << partid);
- disengaged = 1;
- }
- part->disengage_request_timeout = 0;
-
- /* cancel the timer function, provided it's not us */
- if (!in_interrupt()) {
- del_singleshot_timer_sync(&part->
- disengage_request_timer);
- }
-
- DBUG_ON(part->act_state != XPC_P_DEACTIVATING &&
- part->act_state != XPC_P_INACTIVE);
- if (part->act_state != XPC_P_INACTIVE) {
- xpc_wakeup_channel_mgr(part);
- }
-
- if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) {
- xpc_cancel_partition_disengage_request(part);
- }
- }
- return disengaged;
-}
-
-
-/*
- * Mark specified partition as active.
- */
-enum xpc_retval
-xpc_mark_partition_active(struct xpc_partition *part)
-{
- unsigned long irq_flags;
- enum xpc_retval ret;
-
-
- dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part));
-
- spin_lock_irqsave(&part->act_lock, irq_flags);
- if (part->act_state == XPC_P_ACTIVATING) {
- part->act_state = XPC_P_ACTIVE;
- ret = xpcSuccess;
- } else {
- DBUG_ON(part->reason == xpcSuccess);
- ret = part->reason;
- }
- spin_unlock_irqrestore(&part->act_lock, irq_flags);
-
- return ret;
-}
-
-
-/*
- * Notify XPC that the partition is down.
- */
-void
-xpc_deactivate_partition(const int line, struct xpc_partition *part,
- enum xpc_retval reason)
-{
- unsigned long irq_flags;
-
-
- spin_lock_irqsave(&part->act_lock, irq_flags);
-
- if (part->act_state == XPC_P_INACTIVE) {
- XPC_SET_REASON(part, reason, line);
- spin_unlock_irqrestore(&part->act_lock, irq_flags);
- if (reason == xpcReactivating) {
- /* we interrupt ourselves to reactivate partition */
- xpc_IPI_send_reactivate(part);
- }
- return;
- }
- if (part->act_state == XPC_P_DEACTIVATING) {
- if ((part->reason == xpcUnloading && reason != xpcUnloading) ||
- reason == xpcReactivating) {
- XPC_SET_REASON(part, reason, line);
- }
- spin_unlock_irqrestore(&part->act_lock, irq_flags);
- return;
- }
-
- part->act_state = XPC_P_DEACTIVATING;
- XPC_SET_REASON(part, reason, line);
-
- spin_unlock_irqrestore(&part->act_lock, irq_flags);
-
- if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) {
- xpc_request_partition_disengage(part);
- xpc_IPI_send_disengage(part);
-
- /* set a timelimit on the disengage request */
- part->disengage_request_timeout = jiffies +
- (xpc_disengage_request_timelimit * HZ);
- part->disengage_request_timer.expires =
- part->disengage_request_timeout;
- add_timer(&part->disengage_request_timer);
- }
-
- dev_dbg(xpc_part, "bringing partition %d down, reason = %d\n",
- XPC_PARTID(part), reason);
-
- xpc_partition_going_down(part, reason);
-}
-
-
-/*
- * Mark specified partition as inactive.
- */
-void
-xpc_mark_partition_inactive(struct xpc_partition *part)
-{
- unsigned long irq_flags;
-
-
- dev_dbg(xpc_part, "setting partition %d to INACTIVE\n",
- XPC_PARTID(part));
-
- spin_lock_irqsave(&part->act_lock, irq_flags);
- part->act_state = XPC_P_INACTIVE;
- spin_unlock_irqrestore(&part->act_lock, irq_flags);
- part->remote_rp_pa = 0;
-}
-
-
-/*
- * SAL has provided a partition and machine mask. The partition mask
- * contains a bit for each even nasid in our partition. The machine
- * mask contains a bit for each even nasid in the entire machine.
- *
- * Using those two bit arrays, we can determine which nasids are
- * known in the machine. Each should also have a reserved page
- * initialized if they are available for partitioning.
- */
-void
-xpc_discovery(void)
-{
- void *remote_rp_base;
- struct xpc_rsvd_page *remote_rp;
- struct xpc_vars *remote_vars;
- u64 remote_rp_pa;
- u64 remote_vars_pa;
- int region;
- int region_size;
- int max_regions;
- int nasid;
- struct xpc_rsvd_page *rp;
- partid_t partid;
- struct xpc_partition *part;
- u64 *discovered_nasids;
- enum xpc_retval ret;
-
-
- remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE +
- xp_nasid_mask_bytes,
- GFP_KERNEL, &remote_rp_base);
- if (remote_rp == NULL) {
- return;
- }
- remote_vars = (struct xpc_vars *) remote_rp;
-
-
- discovered_nasids = kmalloc(sizeof(u64) * xp_nasid_mask_words,
- GFP_KERNEL);
- if (discovered_nasids == NULL) {
- kfree(remote_rp_base);
- return;
- }
- memset(discovered_nasids, 0, sizeof(u64) * xp_nasid_mask_words);
-
- rp = (struct xpc_rsvd_page *) xpc_rsvd_page;
-
- /*
- * The term 'region' in this context refers to the minimum number of
- * nodes that can comprise an access protection grouping. The access
- * protection is in regards to memory, IOI and IPI.
- */
- max_regions = 64;
- region_size = sn_region_size;
-
- switch (region_size) {
- case 128:
- max_regions *= 2;
- case 64:
- max_regions *= 2;
- case 32:
- max_regions *= 2;
- region_size = 16;
- DBUG_ON(!is_shub2());
- }
-
- for (region = 0; region < max_regions; region++) {
-
- if ((volatile int) xpc_exiting) {
- break;
- }
-
- dev_dbg(xpc_part, "searching region %d\n", region);
-
- for (nasid = (region * region_size * 2);
- nasid < ((region + 1) * region_size * 2);
- nasid += 2) {
-
- if ((volatile int) xpc_exiting) {
- break;
- }
-
- dev_dbg(xpc_part, "checking nasid %d\n", nasid);
-
-
- if (XPC_NASID_IN_ARRAY(nasid, xpc_part_nasids)) {
- dev_dbg(xpc_part, "PROM indicates Nasid %d is "
- "part of the local partition; skipping "
- "region\n", nasid);
- break;
- }
-
- if (!(XPC_NASID_IN_ARRAY(nasid, xpc_mach_nasids))) {
- dev_dbg(xpc_part, "PROM indicates Nasid %d was "
- "not on Numa-Link network at reset\n",
- nasid);
- continue;
- }
-
- if (XPC_NASID_IN_ARRAY(nasid, discovered_nasids)) {
- dev_dbg(xpc_part, "Nasid %d is part of a "
- "partition which was previously "
- "discovered\n", nasid);
- continue;
- }
-
-
- /* pull over the reserved page structure */
-
- ret = xpc_get_remote_rp(nasid, discovered_nasids,
- remote_rp, &remote_rp_pa);
- if (ret != xpcSuccess) {
- dev_dbg(xpc_part, "unable to get reserved page "
- "from nasid %d, reason=%d\n", nasid,
- ret);
-
- if (ret == xpcLocalPartid) {
- break;
- }
- continue;
- }
-
- remote_vars_pa = remote_rp->vars_pa;
-
- partid = remote_rp->partid;
- part = &xpc_partitions[partid];
-
-
- /* pull over the cross partition variables */
-
- ret = xpc_get_remote_vars(remote_vars_pa, remote_vars);
- if (ret != xpcSuccess) {
- dev_dbg(xpc_part, "unable to get XPC variables "
- "from nasid %d, reason=%d\n", nasid,
- ret);
-
- XPC_DEACTIVATE_PARTITION(part, ret);
- continue;
- }
-
- if (part->act_state != XPC_P_INACTIVE) {
- dev_dbg(xpc_part, "partition %d on nasid %d is "
- "already activating\n", partid, nasid);
- break;
- }
-
- /*
- * Register the remote partition's AMOs with SAL so it
- * can handle and cleanup errors within that address
- * range should the remote partition go down. We don't
- * unregister this range because it is difficult to
- * tell when outstanding writes to the remote partition
-			 * are finished and thus when it is safe to
- * unregister. This should not result in wasted space
- * in the SAL xp_addr_region table because we should
- * get the same page for remote_act_amos_pa after
- * module reloads and system reboots.
- */
- if (sn_register_xp_addr_region(
- remote_vars->amos_page_pa,
- PAGE_SIZE, 1) < 0) {
- dev_dbg(xpc_part, "partition %d failed to "
- "register xp_addr region 0x%016lx\n",
- partid, remote_vars->amos_page_pa);
-
- XPC_SET_REASON(part, xpcPhysAddrRegFailed,
- __LINE__);
- break;
- }
-
- /*
- * The remote nasid is valid and available.
- * Send an interrupt to that nasid to notify
- * it that we are ready to begin activation.
- */
- dev_dbg(xpc_part, "sending an interrupt to AMO 0x%lx, "
- "nasid %d, phys_cpuid 0x%x\n",
- remote_vars->amos_page_pa,
- remote_vars->act_nasid,
- remote_vars->act_phys_cpuid);
-
- if (XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->
- version)) {
- part->remote_amos_page_pa =
- remote_vars->amos_page_pa;
- xpc_mark_partition_disengaged(part);
- xpc_cancel_partition_disengage_request(part);
- }
- xpc_IPI_send_activate(remote_vars);
- }
- }
-
- kfree(discovered_nasids);
- kfree(remote_rp_base);
-}
-
-
-/*
- * Given a partid, get the nasids owned by that partition from the
- * remote partition's reserved page.
- */
-enum xpc_retval
-xpc_initiate_partid_to_nasids(partid_t partid, void *nasid_mask)
-{
- struct xpc_partition *part;
- u64 part_nasid_pa;
- int bte_res;
-
-
- part = &xpc_partitions[partid];
- if (part->remote_rp_pa == 0) {
- return xpcPartitionDown;
- }
-
- memset(nasid_mask, 0, XP_NASID_MASK_BYTES);
-
- part_nasid_pa = (u64) XPC_RP_PART_NASIDS(part->remote_rp_pa);
-
- bte_res = xp_bte_copy(part_nasid_pa, ia64_tpa((u64) nasid_mask),
- xp_nasid_mask_bytes, (BTE_NOTIFY | BTE_WACQUIRE), NULL);
-
- return xpc_map_bte_errors(bte_res);
-}
-
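
The max_regions computation inside the deleted xpc_discovery() relies on intentional switch fall-through, which is easy to misread in diff form; restated as a standalone sketch (illustrative only, not part of the kernel tree), the same calculation is:

/*
 * Illustrative restatement of the region-count arithmetic from
 * xpc_discovery(): the switch falls through on purpose, so a 128-node
 * region size yields 64 * 2 * 2 * 2 = 512 regions, each then treated
 * as a 16-node grouping on shub2.  Not kernel code.
 */
static int xpc_max_regions_sketch(int region_size_in, int *region_size_out)
{
	int max_regions = 64;
	int region_size = region_size_in;

	switch (region_size) {
	case 128:
		max_regions *= 2;	/* fall through */
	case 64:
		max_regions *= 2;	/* fall through */
	case 32:
		max_regions *= 2;
		region_size = 16;	/* shub2 only */
	}

	*region_size_out = region_size;
	return max_regions;
}
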
diff --git a/arch/ia64/sn/kernel/xpnet.c b/arch/ia64/sn/kernel/xpnet.c
deleted file mode 100644
index e5c6d3c0a8e..00000000000
--- a/arch/ia64/sn/kernel/xpnet.c
+++ /dev/null
@@ -1,719 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1999,2001-2005 Silicon Graphics, Inc. All rights reserved.
- */
-
-
-/*
- * Cross Partition Network Interface (XPNET) support
- *
- * XPNET provides a virtual network layered on top of the Cross
- * Partition communication layer.
- *
- * XPNET provides direct point-to-point and broadcast-like support
- * for an ethernet-like device. The ethernet broadcast medium is
- * replaced with a point-to-point message structure which passes
- * pointers to a DMA-capable block that a remote partition should
- * retrieve and pass to the upper level networking layer.
- *
- */
-
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/delay.h>
-#include <linux/ethtool.h>
-#include <linux/mii.h>
-#include <linux/smp.h>
-#include <linux/string.h>
-#include <asm/sn/bte.h>
-#include <asm/sn/io.h>
-#include <asm/sn/sn_sal.h>
-#include <asm/types.h>
-#include <asm/atomic.h>
-#include <asm/sn/xp.h>
-
-
-/*
- * The message payload transferred by XPC.
- *
- * buf_pa is the physical address where the DMA should pull from.
- *
- * NOTE: for performance reasons, buf_pa should _ALWAYS_ begin on a
- * cacheline boundary. To accomplish this, we record the number of
- * bytes from the beginning of the first cacheline to the first useful
- * byte of the skb (leadin_ignore) and the number of bytes from the
- * last useful byte of the skb to the end of the last cacheline
- * (tailout_ignore).
- *
- * size is the number of bytes to transfer which includes the skb->len
- * (useful bytes of the sender's skb) plus the leadin and tailout
- */
-struct xpnet_message {
- u16 version; /* Version for this message */
- u16 embedded_bytes; /* #of bytes embedded in XPC message */
- u32 magic; /* Special number indicating this is xpnet */
- u64 buf_pa; /* phys address of buffer to retrieve */
- u32 size; /* #of bytes in buffer */
- u8 leadin_ignore; /* #of bytes to ignore at the beginning */
- u8 tailout_ignore; /* #of bytes to ignore at the end */
- unsigned char data; /* body of small packets */
-};
-
-/*
- * Determine the size of our message, the cacheline aligned size,
- * and then the number of messages we will request from XPC.
- *
- * XPC expects each message to exist in an individual cacheline.
- */
-#define XPNET_MSG_SIZE (L1_CACHE_BYTES - XPC_MSG_PAYLOAD_OFFSET)
-#define XPNET_MSG_DATA_MAX \
- (XPNET_MSG_SIZE - (u64)(&((struct xpnet_message *)0)->data))
-#define XPNET_MSG_ALIGNED_SIZE (L1_CACHE_ALIGN(XPNET_MSG_SIZE))
-#define XPNET_MSG_NENTRIES (PAGE_SIZE / XPNET_MSG_ALIGNED_SIZE)
-
-
-#define XPNET_MAX_KTHREADS (XPNET_MSG_NENTRIES + 1)
-#define XPNET_MAX_IDLE_KTHREADS (XPNET_MSG_NENTRIES + 1)
-
-/*
- * Version number of XPNET implementation. XPNET can always talk to versions
- * with the same major number, and never to versions with a different major number.
- */
-#define _XPNET_VERSION(_major, _minor) (((_major) << 4) | (_minor))
-#define XPNET_VERSION_MAJOR(_v) ((_v) >> 4)
-#define XPNET_VERSION_MINOR(_v) ((_v) & 0xf)
-
-#define XPNET_VERSION _XPNET_VERSION(1,0) /* version 1.0 */
-#define XPNET_VERSION_EMBED _XPNET_VERSION(1,1) /* version 1.1 */
-#define XPNET_MAGIC 0x88786984 /* "XNET" */
-
-#define XPNET_VALID_MSG(_m) \
- ((XPNET_VERSION_MAJOR(_m->version) == XPNET_VERSION_MAJOR(XPNET_VERSION)) \
-    && (_m->magic == XPNET_MAGIC))
-
-#define XPNET_DEVICE_NAME "xp0"
-
-
-/*
- * When messages are queued with xpc_send_notify, a kmalloc'd buffer
- * of the following type is passed as a notification cookie. When the
- * notification function is called, we use the cookie to decide
- * whether all outstanding message sends have completed. The skb can
- * then be released.
- */
-struct xpnet_pending_msg {
- struct list_head free_list;
- struct sk_buff *skb;
- atomic_t use_count;
-};
-
-/* driver specific structure pointed to by the device structure */
-struct xpnet_dev_private {
- struct net_device_stats stats;
-};
-
-struct net_device *xpnet_device;
-
-/*
- * When we are notified of other partitions activating, we add them to
- * our bitmask of partitions to which we broadcast.
- */
-static u64 xpnet_broadcast_partitions;
-/* protect above */
-static DEFINE_SPINLOCK(xpnet_broadcast_lock);
-
-/*
- * Since the Block Transfer Engine (BTE) is being used for the transfer
- * and it relies upon cache-line size transfers, we need to reserve at
- * least one cache-line for head and tail alignment. The BTE is
- * limited to 8MB transfers.
- *
- * Testing has shown that changing MTU to greater than 64KB has no effect
- * on TCP as the two sides negotiate a Max Segment Size that is limited
- * to 64K. Other protocols may use packets greater than this, but for
- * now, the default is 64KB.
- */
-#define XPNET_MAX_MTU (0x800000UL - L1_CACHE_BYTES)
-/* 32KB has been determined to be the ideal */
-#define XPNET_DEF_MTU (0x8000UL)
-
-
-/*
- * The partition id is encapsulated in the MAC address. The following
- * define locates the octet the partid is in.
- */
-#define XPNET_PARTID_OCTET 1
-#define XPNET_LICENSE_OCTET 2
-
-
-/*
- * Define the XPNET debug device structure that is to be used with dev_dbg(),
- * dev_err(), dev_warn(), and dev_info().
- */
-struct device_driver xpnet_dbg_name = {
- .name = "xpnet"
-};
-
-struct device xpnet_dbg_subname = {
- .bus_id = {0}, /* set to "" */
- .driver = &xpnet_dbg_name
-};
-
-struct device *xpnet = &xpnet_dbg_subname;
-
-/*
- * Packet was received by XPC and forwarded to us.
- */
-static void
-xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
-{
- struct sk_buff *skb;
- bte_result_t bret;
- struct xpnet_dev_private *priv =
- (struct xpnet_dev_private *) xpnet_device->priv;
-
-
- if (!XPNET_VALID_MSG(msg)) {
- /*
- * Packet with a different XPC version. Ignore.
- */
- xpc_received(partid, channel, (void *) msg);
-
- priv->stats.rx_errors++;
-
- return;
- }
- dev_dbg(xpnet, "received 0x%lx, %d, %d, %d\n", msg->buf_pa, msg->size,
- msg->leadin_ignore, msg->tailout_ignore);
-
-
- /* reserve an extra cache line */
- skb = dev_alloc_skb(msg->size + L1_CACHE_BYTES);
- if (!skb) {
- dev_err(xpnet, "failed on dev_alloc_skb(%d)\n",
- msg->size + L1_CACHE_BYTES);
-
- xpc_received(partid, channel, (void *) msg);
-
- priv->stats.rx_errors++;
-
- return;
- }
-
- /*
- * The allocated skb has some reserved space.
- * In order to use bte_copy, we need to get the
- * skb->data pointer moved forward.
- */
- skb_reserve(skb, (L1_CACHE_BYTES - ((u64)skb->data &
- (L1_CACHE_BYTES - 1)) +
- msg->leadin_ignore));
-
- /*
- * Update the tail pointer to indicate data actually
- * transferred.
- */
- skb_put(skb, (msg->size - msg->leadin_ignore - msg->tailout_ignore));
-
- /*
-	 * Move the data over from the other side.
- */
- if ((XPNET_VERSION_MINOR(msg->version) == 1) &&
- (msg->embedded_bytes != 0)) {
- dev_dbg(xpnet, "copying embedded message. memcpy(0x%p, 0x%p, "
- "%lu)\n", skb->data, &msg->data,
- (size_t) msg->embedded_bytes);
-
- memcpy(skb->data, &msg->data, (size_t) msg->embedded_bytes);
- } else {
- dev_dbg(xpnet, "transferring buffer to the skb->data area;\n\t"
- "bte_copy(0x%p, 0x%p, %hu)\n", (void *)msg->buf_pa,
- (void *)__pa((u64)skb->data & ~(L1_CACHE_BYTES - 1)),
- msg->size);
-
- bret = bte_copy(msg->buf_pa,
- __pa((u64)skb->data & ~(L1_CACHE_BYTES - 1)),
- msg->size, (BTE_NOTIFY | BTE_WACQUIRE), NULL);
-
- if (bret != BTE_SUCCESS) {
- // >>> Need better way of cleaning skb. Currently skb
- // >>> appears in_use and we can't just call
- // >>> dev_kfree_skb.
- dev_err(xpnet, "bte_copy(0x%p, 0x%p, 0x%hx) returned "
- "error=0x%x\n", (void *)msg->buf_pa,
- (void *)__pa((u64)skb->data &
- ~(L1_CACHE_BYTES - 1)),
- msg->size, bret);
-
- xpc_received(partid, channel, (void *) msg);
-
- priv->stats.rx_errors++;
-
- return;
- }
- }
-
- dev_dbg(xpnet, "<skb->head=0x%p skb->data=0x%p skb->tail=0x%p "
- "skb->end=0x%p skb->len=%d\n", (void *) skb->head,
- (void *) skb->data, (void *) skb->tail, (void *) skb->end,
- skb->len);
-
- skb->dev = xpnet_device;
- skb->protocol = eth_type_trans(skb, xpnet_device);
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- dev_dbg(xpnet, "passing skb to network layer; \n\tskb->head=0x%p "
- "skb->data=0x%p skb->tail=0x%p skb->end=0x%p skb->len=%d\n",
- (void *) skb->head, (void *) skb->data, (void *) skb->tail,
- (void *) skb->end, skb->len);
-
-
- xpnet_device->last_rx = jiffies;
- priv->stats.rx_packets++;
- priv->stats.rx_bytes += skb->len + ETH_HLEN;
-
- netif_rx_ni(skb);
- xpc_received(partid, channel, (void *) msg);
-}
-
-
-/*
- * This is the handler which XPC calls during any sort of change in
- * state or message reception on a connection.
- */
-static void
-xpnet_connection_activity(enum xpc_retval reason, partid_t partid, int channel,
- void *data, void *key)
-{
- long bp;
-
-
- DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
- DBUG_ON(channel != XPC_NET_CHANNEL);
-
- switch(reason) {
- case xpcMsgReceived: /* message received */
- DBUG_ON(data == NULL);
-
- xpnet_receive(partid, channel, (struct xpnet_message *) data);
- break;
-
- case xpcConnected: /* connection completed to a partition */
- spin_lock_bh(&xpnet_broadcast_lock);
- xpnet_broadcast_partitions |= 1UL << (partid -1 );
- bp = xpnet_broadcast_partitions;
- spin_unlock_bh(&xpnet_broadcast_lock);
-
- netif_carrier_on(xpnet_device);
-
- dev_dbg(xpnet, "%s connection created to partition %d; "
- "xpnet_broadcast_partitions=0x%lx\n",
- xpnet_device->name, partid, bp);
- break;
-
- default:
- spin_lock_bh(&xpnet_broadcast_lock);
- xpnet_broadcast_partitions &= ~(1UL << (partid -1 ));
- bp = xpnet_broadcast_partitions;
- spin_unlock_bh(&xpnet_broadcast_lock);
-
- if (bp == 0) {
- netif_carrier_off(xpnet_device);
- }
-
- dev_dbg(xpnet, "%s disconnected from partition %d; "
- "xpnet_broadcast_partitions=0x%lx\n",
- xpnet_device->name, partid, bp);
- break;
-
- }
-}
-
-
-static int
-xpnet_dev_open(struct net_device *dev)
-{
- enum xpc_retval ret;
-
-
- dev_dbg(xpnet, "calling xpc_connect(%d, 0x%p, NULL, %ld, %ld, %d, "
- "%d)\n", XPC_NET_CHANNEL, xpnet_connection_activity,
- XPNET_MSG_SIZE, XPNET_MSG_NENTRIES, XPNET_MAX_KTHREADS,
- XPNET_MAX_IDLE_KTHREADS);
-
- ret = xpc_connect(XPC_NET_CHANNEL, xpnet_connection_activity, NULL,
- XPNET_MSG_SIZE, XPNET_MSG_NENTRIES,
- XPNET_MAX_KTHREADS, XPNET_MAX_IDLE_KTHREADS);
- if (ret != xpcSuccess) {
- dev_err(xpnet, "ifconfig up of %s failed on XPC connect, "
- "ret=%d\n", dev->name, ret);
-
- return -ENOMEM;
- }
-
- dev_dbg(xpnet, "ifconfig up of %s; XPC connected\n", dev->name);
-
- return 0;
-}
-
-
-static int
-xpnet_dev_stop(struct net_device *dev)
-{
- xpc_disconnect(XPC_NET_CHANNEL);
-
- dev_dbg(xpnet, "ifconfig down of %s; XPC disconnected\n", dev->name);
-
- return 0;
-}
-
-
-static int
-xpnet_dev_change_mtu(struct net_device *dev, int new_mtu)
-{
- /* 68 comes from min TCP+IP+MAC header */
- if ((new_mtu < 68) || (new_mtu > XPNET_MAX_MTU)) {
- dev_err(xpnet, "ifconfig %s mtu %d failed; value must be "
- "between 68 and %ld\n", dev->name, new_mtu,
- XPNET_MAX_MTU);
- return -EINVAL;
- }
-
- dev->mtu = new_mtu;
- dev_dbg(xpnet, "ifconfig %s mtu set to %d\n", dev->name, new_mtu);
- return 0;
-}
-
-
-/*
- * Required for the net_device structure.
- */
-static int
-xpnet_dev_set_config(struct net_device *dev, struct ifmap *new_map)
-{
- return 0;
-}
-
-
-/*
- * Return statistics to the caller.
- */
-static struct net_device_stats *
-xpnet_dev_get_stats(struct net_device *dev)
-{
- struct xpnet_dev_private *priv;
-
-
- priv = (struct xpnet_dev_private *) dev->priv;
-
- return &priv->stats;
-}
-
-
-/*
- * Notification that the other end has received the message and
- * DMA'd the skb information. At this point, they are done with
- * our side. When all recipients are done processing, we
- * release the skb and then release our pending message structure.
- */
-static void
-xpnet_send_completed(enum xpc_retval reason, partid_t partid, int channel,
- void *__qm)
-{
- struct xpnet_pending_msg *queued_msg =
- (struct xpnet_pending_msg *) __qm;
-
-
- DBUG_ON(queued_msg == NULL);
-
- dev_dbg(xpnet, "message to %d notified with reason %d\n",
- partid, reason);
-
- if (atomic_dec_return(&queued_msg->use_count) == 0) {
-		dev_dbg(xpnet, "all acks for skb->head=0x%p\n",
- (void *) queued_msg->skb->head);
-
- dev_kfree_skb_any(queued_msg->skb);
- kfree(queued_msg);
- }
-}
-
-
-/*
- * Network layer has formatted a packet (skb) and is ready to place it
- * "on the wire". Prepare and send an xpnet_message to all partitions
- * which have connected with us and are targets of this packet.
- *
- * MAC-NOTE: For the XPNET driver, the MAC address contains the
- * destination partition_id. If the destination partition id word
- * is 0xff, this packet is to broadcast to all partitions.
- */
-static int
-xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct xpnet_pending_msg *queued_msg;
- enum xpc_retval ret;
- struct xpnet_message *msg;
- u64 start_addr, end_addr;
- long dp;
- u8 second_mac_octet;
- partid_t dest_partid;
- struct xpnet_dev_private *priv;
- u16 embedded_bytes;
-
-
- priv = (struct xpnet_dev_private *) dev->priv;
-
-
- dev_dbg(xpnet, ">skb->head=0x%p skb->data=0x%p skb->tail=0x%p "
- "skb->end=0x%p skb->len=%d\n", (void *) skb->head,
- (void *) skb->data, (void *) skb->tail, (void *) skb->end,
- skb->len);
-
-
- /*
- * The xpnet_pending_msg tracks how many outstanding
- * xpc_send_notifies are relying on this skb. When none
- * remain, release the skb.
- */
- queued_msg = kmalloc(sizeof(struct xpnet_pending_msg), GFP_ATOMIC);
- if (queued_msg == NULL) {
- dev_warn(xpnet, "failed to kmalloc %ld bytes; dropping "
- "packet\n", sizeof(struct xpnet_pending_msg));
-
- priv->stats.tx_errors++;
-
- return -ENOMEM;
- }
-
-
- /* get the beginning of the first cacheline and end of last */
- start_addr = ((u64) skb->data & ~(L1_CACHE_BYTES - 1));
- end_addr = L1_CACHE_ALIGN((u64) skb->tail);
-
- /* calculate how many bytes to embed in the XPC message */
- embedded_bytes = 0;
- if (unlikely(skb->len <= XPNET_MSG_DATA_MAX)) {
- /* skb->data does fit so embed */
- embedded_bytes = skb->len;
- }
-
-
- /*
- * Since the send occurs asynchronously, we set the count to one
- * and begin sending. Any sends that happen to complete before
- * we are done sending will not free the skb. We will be left
- * with that task during exit. This also handles the case of
- * a packet destined for a partition which is no longer up.
- */
- atomic_set(&queued_msg->use_count, 1);
- queued_msg->skb = skb;
-
-
- second_mac_octet = skb->data[XPNET_PARTID_OCTET];
- if (second_mac_octet == 0xff) {
- /* we are being asked to broadcast to all partitions */
- dp = xpnet_broadcast_partitions;
- } else if (second_mac_octet != 0) {
- dp = xpnet_broadcast_partitions &
- (1UL << (second_mac_octet - 1));
- } else {
- /* 0 is an invalid partid. Ignore */
- dp = 0;
- }
- dev_dbg(xpnet, "destination Partitions mask (dp) = 0x%lx\n", dp);
-
- /*
-	 * If we wanted to allow promiscuous mode to work like an
- * unswitched network, this would be a good point to OR in a
- * mask of partitions which should be receiving all packets.
- */
-
- /*
- * Main send loop.
- */
- for (dest_partid = 1; dp && dest_partid < XP_MAX_PARTITIONS;
- dest_partid++) {
-
-
- if (!(dp & (1UL << (dest_partid - 1)))) {
- /* not destined for this partition */
- continue;
- }
-
- /* remove this partition from the destinations mask */
- dp &= ~(1UL << (dest_partid - 1));
-
-
- /* found a partition to send to */
-
- ret = xpc_allocate(dest_partid, XPC_NET_CHANNEL,
- XPC_NOWAIT, (void **)&msg);
- if (unlikely(ret != xpcSuccess)) {
- continue;
- }
-
- msg->embedded_bytes = embedded_bytes;
- if (unlikely(embedded_bytes != 0)) {
- msg->version = XPNET_VERSION_EMBED;
- dev_dbg(xpnet, "calling memcpy(0x%p, 0x%p, 0x%lx)\n",
- &msg->data, skb->data, (size_t) embedded_bytes);
- memcpy(&msg->data, skb->data, (size_t) embedded_bytes);
- } else {
- msg->version = XPNET_VERSION;
- }
- msg->magic = XPNET_MAGIC;
- msg->size = end_addr - start_addr;
- msg->leadin_ignore = (u64) skb->data - start_addr;
- msg->tailout_ignore = end_addr - (u64) skb->tail;
- msg->buf_pa = __pa(start_addr);
-
- dev_dbg(xpnet, "sending XPC message to %d:%d\nmsg->buf_pa="
- "0x%lx, msg->size=%u, msg->leadin_ignore=%u, "
- "msg->tailout_ignore=%u\n", dest_partid,
- XPC_NET_CHANNEL, msg->buf_pa, msg->size,
- msg->leadin_ignore, msg->tailout_ignore);
-
-
- atomic_inc(&queued_msg->use_count);
-
- ret = xpc_send_notify(dest_partid, XPC_NET_CHANNEL, msg,
- xpnet_send_completed, queued_msg);
- if (unlikely(ret != xpcSuccess)) {
- atomic_dec(&queued_msg->use_count);
- continue;
- }
-
- }
-
- if (atomic_dec_return(&queued_msg->use_count) == 0) {
- dev_dbg(xpnet, "no partitions to receive packet destined for "
- "%d\n", dest_partid);
-
-
- dev_kfree_skb(skb);
- kfree(queued_msg);
- }
-
- priv->stats.tx_packets++;
- priv->stats.tx_bytes += skb->len;
-
- return 0;
-}
-
-
-/*
- * Deal with transmit timeouts coming from the network layer.
- */
-static void
-xpnet_dev_tx_timeout (struct net_device *dev)
-{
- struct xpnet_dev_private *priv;
-
-
- priv = (struct xpnet_dev_private *) dev->priv;
-
- priv->stats.tx_errors++;
- return;
-}
-
-
-static int __init
-xpnet_init(void)
-{
- int i;
- u32 license_num;
- int result = -ENOMEM;
-
-
- if (!ia64_platform_is("sn2")) {
- return -ENODEV;
- }
-
- dev_info(xpnet, "registering network device %s\n", XPNET_DEVICE_NAME);
-
- /*
- * use ether_setup() to init the majority of our device
- * structure and then override the necessary pieces.
- */
- xpnet_device = alloc_netdev(sizeof(struct xpnet_dev_private),
- XPNET_DEVICE_NAME, ether_setup);
- if (xpnet_device == NULL) {
- return -ENOMEM;
- }
-
- netif_carrier_off(xpnet_device);
-
- xpnet_device->mtu = XPNET_DEF_MTU;
- xpnet_device->change_mtu = xpnet_dev_change_mtu;
- xpnet_device->open = xpnet_dev_open;
- xpnet_device->get_stats = xpnet_dev_get_stats;
- xpnet_device->stop = xpnet_dev_stop;
- xpnet_device->hard_start_xmit = xpnet_dev_hard_start_xmit;
- xpnet_device->tx_timeout = xpnet_dev_tx_timeout;
- xpnet_device->set_config = xpnet_dev_set_config;
-
- /*
- * Multicast assumes the LSB of the first octet is set for multicast
- * MAC addresses. We chose the first octet of the MAC to be unlikely
- * to collide with any vendor's officially issued MAC.
- */
- xpnet_device->dev_addr[0] = 0xfe;
- xpnet_device->dev_addr[XPNET_PARTID_OCTET] = sn_partition_id;
- license_num = sn_partition_serial_number_val();
- for (i = 3; i >= 0; i--) {
- xpnet_device->dev_addr[XPNET_LICENSE_OCTET + i] =
- license_num & 0xff;
- license_num = license_num >> 8;
- }
-
- /*
- * ether_setup() sets this to a multicast device. We are
- * really not supporting multicast at this time.
- */
- xpnet_device->flags &= ~IFF_MULTICAST;
-
- /*
- * No need to checksum as it is a DMA transfer. The BTE will
- * report an error if the data is not retrievable and the
- * packet will be dropped.
- */
- xpnet_device->features = NETIF_F_NO_CSUM;
-
- result = register_netdev(xpnet_device);
- if (result != 0) {
- free_netdev(xpnet_device);
- }
-
- return result;
-}
-module_init(xpnet_init);
-
-
-static void __exit
-xpnet_exit(void)
-{
- dev_info(xpnet, "unregistering network device %s\n",
- xpnet_device[0].name);
-
- unregister_netdev(xpnet_device);
-
- free_netdev(xpnet_device);
-}
-module_exit(xpnet_exit);
-
-
-MODULE_AUTHOR("Silicon Graphics, Inc.");
-MODULE_DESCRIPTION("Cross Partition Network adapter (XPNET)");
-MODULE_LICENSE("GPL");
-
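
The struct xpnet_message comment above describes how buf_pa, size, leadin_ignore and tailout_ignore frame an skb on cacheline boundaries so the BTE can copy whole cachelines; a minimal standalone sketch of that geometry (plain C, names and cacheline size assumed, not kernel code):

#include <stdint.h>

#define L1_CACHE_BYTES_SKETCH	128UL	/* assumed sn2 cacheline size */
#define CACHE_ALIGN(a) \
	(((a) + L1_CACHE_BYTES_SKETCH - 1) & ~(L1_CACHE_BYTES_SKETCH - 1))

struct xpnet_geom {
	uint64_t buf_pa;		/* cacheline-aligned start address */
	uint32_t size;			/* whole cachelines to transfer */
	uint8_t leadin_ignore;		/* bytes before skb->data */
	uint8_t tailout_ignore;		/* bytes after skb->tail */
};

/* data/tail stand in for the addresses of skb->data and skb->tail. */
static void xpnet_describe(uint64_t data, uint64_t tail, struct xpnet_geom *g)
{
	uint64_t start = data & ~(L1_CACHE_BYTES_SKETCH - 1);
	uint64_t end = CACHE_ALIGN(tail);

	g->size = end - start;
	g->leadin_ignore = data - start;
	g->tailout_ignore = end - tail;
	g->buf_pa = start;		/* the kernel stores __pa(start) here */
}
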
diff --git a/arch/ia64/sn/pci/Makefile b/arch/ia64/sn/pci/Makefile
index 321576b1b42..df2a9014542 100644
--- a/arch/ia64/sn/pci/Makefile
+++ b/arch/ia64/sn/pci/Makefile
@@ -7,4 +7,6 @@
#
# Makefile for the sn pci general routines.
+ccflags-y := -Iarch/ia64/sn/include
+
obj-y := pci_dma.o tioca_provider.o tioce_provider.o pcibr/
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index 5a36292388e..d0853e8e862 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -9,14 +9,16 @@
* a description of how these routines should be used.
*/
+#include <linux/gfp.h>
#include <linux/module.h>
+#include <linux/dma-mapping.h>
#include <asm/dma.h>
-#include <asm/sn/pcibr_provider.h>
+#include <asm/sn/intr.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/sn_sal.h>
-#define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset)
+#define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg)))
#define SG_ENT_PHYS_ADDRESS(SG) virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))
/**
@@ -30,15 +32,14 @@
* this function. Of course, SN only supports devices that have 32 or more
* address bits when using the PMU.
*/
-int sn_dma_supported(struct device *dev, u64 mask)
+static int sn_dma_supported(struct device *dev, u64 mask)
{
- BUG_ON(dev->bus != &pci_bus_type);
+ BUG_ON(!dev_is_pci(dev));
if (mask < 0x7fffffff)
return 0;
return 1;
}
-EXPORT_SYMBOL(sn_dma_supported);
/**
* sn_dma_set_mask - set the DMA mask
@@ -49,7 +50,7 @@ EXPORT_SYMBOL(sn_dma_supported);
*/
int sn_dma_set_mask(struct device *dev, u64 dma_mask)
{
- BUG_ON(dev->bus != &pci_bus_type);
+ BUG_ON(!dev_is_pci(dev));
if (!sn_dma_supported(dev, dma_mask))
return 0;
@@ -74,8 +75,9 @@ EXPORT_SYMBOL(sn_dma_set_mask);
* queue for a SCSI controller). See Documentation/DMA-API.txt for
* more information.
*/
-void *sn_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t * dma_handle, gfp_t flags)
+static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t * dma_handle, gfp_t flags,
+ struct dma_attrs *attrs)
{
void *cpuaddr;
unsigned long phys_addr;
@@ -83,14 +85,15 @@ void *sn_dma_alloc_coherent(struct device *dev, size_t size,
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
- BUG_ON(dev->bus != &pci_bus_type);
+ BUG_ON(!dev_is_pci(dev));
/*
* Allocate the memory.
*/
node = pcibus_to_node(pdev->bus);
if (likely(node >=0)) {
- struct page *p = alloc_pages_node(node, flags, get_order(size));
+ struct page *p = alloc_pages_exact_node(node,
+ flags, get_order(size));
if (likely(p))
cpuaddr = page_address(p);
@@ -113,16 +116,16 @@ void *sn_dma_alloc_coherent(struct device *dev, size_t size,
* resources.
*/
- *dma_handle = provider->dma_map_consistent(pdev, phys_addr, size);
+ *dma_handle = provider->dma_map_consistent(pdev, phys_addr, size,
+ SN_DMA_ADDR_PHYS);
if (!*dma_handle) {
- printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
+ printk(KERN_ERR "%s: out of ATEs\n", __func__);
free_pages((unsigned long)cpuaddr, get_order(size));
return NULL;
}
return cpuaddr;
}
-EXPORT_SYMBOL(sn_dma_alloc_coherent);
/**
* sn_pci_free_coherent - free memory associated with coherent DMAable region
@@ -134,25 +137,25 @@ EXPORT_SYMBOL(sn_dma_alloc_coherent);
* Frees the memory allocated by dma_alloc_coherent(), potentially unmapping
* any associated IOMMU mappings.
*/
-void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t dma_handle)
+static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle, struct dma_attrs *attrs)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
- BUG_ON(dev->bus != &pci_bus_type);
+ BUG_ON(!dev_is_pci(dev));
provider->dma_unmap(pdev, dma_handle, 0);
free_pages((unsigned long)cpu_addr, get_order(size));
}
-EXPORT_SYMBOL(sn_dma_free_coherent);
/**
- * sn_dma_map_single - map a single page for DMA
+ * sn_dma_map_single_attrs - map a single page for DMA
* @dev: device to map for
* @cpu_addr: kernel virtual address of the region to map
* @size: size of the region
* @direction: DMA direction
+ * @attrs: optional dma attributes
*
* Map the region pointed to by @cpu_addr for DMA and return the
* DMA address.
@@ -162,51 +165,68 @@ EXPORT_SYMBOL(sn_dma_free_coherent);
* no way of saving the dmamap handle from the alloc to later free
* (which is pretty much unacceptable).
*
+ * mappings with the DMA_ATTR_WRITE_BARRIER get mapped with
+ * dma_map_consistent() so that writes force a flush of pending DMA.
+ * (See "SGI Altix Architecture Considerations for Linux Device Drivers",
+ * Document Number: 007-4763-001)
+ *
* TODO: simplify our interface;
* figure out how to save dmamap handle so can use two step.
*/
-dma_addr_t sn_dma_map_single(struct device *dev, void *cpu_addr, size_t size,
- int direction)
+static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
+ void *cpu_addr = page_address(page) + offset;
dma_addr_t dma_addr;
unsigned long phys_addr;
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
+ int dmabarr;
+
+ dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);
- BUG_ON(dev->bus != &pci_bus_type);
+ BUG_ON(!dev_is_pci(dev));
phys_addr = __pa(cpu_addr);
- dma_addr = provider->dma_map(pdev, phys_addr, size);
+ if (dmabarr)
+ dma_addr = provider->dma_map_consistent(pdev, phys_addr,
+ size, SN_DMA_ADDR_PHYS);
+ else
+ dma_addr = provider->dma_map(pdev, phys_addr, size,
+ SN_DMA_ADDR_PHYS);
+
if (!dma_addr) {
- printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
+ printk(KERN_ERR "%s: out of ATEs\n", __func__);
return 0;
}
return dma_addr;
}
-EXPORT_SYMBOL(sn_dma_map_single);
/**
- * sn_dma_unmap_single - unamp a DMA mapped page
+ * sn_dma_unmap_single_attrs - unmap a DMA mapped page
* @dev: device to sync
* @dma_addr: DMA address to sync
* @size: size of region
* @direction: DMA direction
+ * @attrs: optional dma attributes
*
* This routine is supposed to sync the DMA region specified
* by @dma_handle into the coherence domain. On SN, we're always cache
* coherent, so we just need to free any ATEs associated with this mapping.
*/
-void sn_dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
- int direction)
+static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
- BUG_ON(dev->bus != &pci_bus_type);
+ BUG_ON(!dev_is_pci(dev));
- provider->dma_unmap(pdev, dma_addr, direction);
+ provider->dma_unmap(pdev, dma_addr, dir);
}
-EXPORT_SYMBOL(sn_dma_unmap_single);
/**
* sn_dma_unmap_sg - unmap a DMA scatterlist
@@ -214,25 +234,27 @@ EXPORT_SYMBOL(sn_dma_unmap_single);
* @sg: scatterlist to unmap
* @nhwentries: number of scatterlist entries
* @direction: DMA direction
+ * @attrs: optional dma attributes
*
* Unmap a set of streaming mode DMA translations.
*/
-void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nhwentries, int direction)
+static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
+ int nhwentries, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
int i;
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
+ struct scatterlist *sg;
- BUG_ON(dev->bus != &pci_bus_type);
+ BUG_ON(!dev_is_pci(dev));
- for (i = 0; i < nhwentries; i++, sg++) {
- provider->dma_unmap(pdev, sg->dma_address, direction);
+ for_each_sg(sgl, sg, nhwentries, i) {
+ provider->dma_unmap(pdev, sg->dma_address, dir);
sg->dma_address = (dma_addr_t) NULL;
sg->dma_length = 0;
}
}
-EXPORT_SYMBOL(sn_dma_unmap_sg);
/**
* sn_dma_map_sg - map a scatterlist for DMA
@@ -240,36 +262,55 @@ EXPORT_SYMBOL(sn_dma_unmap_sg);
* @sg: scatterlist to map
* @nhwentries: number of entries
* @direction: direction of the DMA transaction
+ * @attrs: optional dma attributes
+ *
+ * mappings with the DMA_ATTR_WRITE_BARRIER get mapped with
+ * dma_map_consistent() so that writes force a flush of pending DMA.
+ * (See "SGI Altix Architecture Considerations for Linux Device Drivers",
+ * Document Number: 007-4763-001)
*
* Maps each entry of @sg for DMA.
*/
-int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
- int direction)
+static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl,
+ int nhwentries, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
unsigned long phys_addr;
- struct scatterlist *saved_sg = sg;
+ struct scatterlist *saved_sg = sgl, *sg;
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
int i;
+ int dmabarr;
+
+ dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);
- BUG_ON(dev->bus != &pci_bus_type);
+ BUG_ON(!dev_is_pci(dev));
/*
* Setup a DMA address for each entry in the scatterlist.
*/
- for (i = 0; i < nhwentries; i++, sg++) {
+ for_each_sg(sgl, sg, nhwentries, i) {
+ dma_addr_t dma_addr;
phys_addr = SG_ENT_PHYS_ADDRESS(sg);
- sg->dma_address = provider->dma_map(pdev,
- phys_addr, sg->length);
+ if (dmabarr)
+ dma_addr = provider->dma_map_consistent(pdev,
+ phys_addr,
+ sg->length,
+ SN_DMA_ADDR_PHYS);
+ else
+ dma_addr = provider->dma_map(pdev, phys_addr,
+ sg->length,
+ SN_DMA_ADDR_PHYS);
+ sg->dma_address = dma_addr;
if (!sg->dma_address) {
- printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
+ printk(KERN_ERR "%s: out of ATEs\n", __func__);
/*
* Free any successfully allocated entries.
*/
if (i > 0)
- sn_dma_unmap_sg(dev, saved_sg, i, direction);
+ sn_dma_unmap_sg(dev, saved_sg, i, dir, attrs);
return 0;
}
@@ -278,41 +319,42 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
return nhwentries;
}
-EXPORT_SYMBOL(sn_dma_map_sg);
-void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
- size_t size, int direction)
+static void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir)
{
- BUG_ON(dev->bus != &pci_bus_type);
+ BUG_ON(!dev_is_pci(dev));
}
-EXPORT_SYMBOL(sn_dma_sync_single_for_cpu);
-void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
- size_t size, int direction)
+static void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+ size_t size,
+ enum dma_data_direction dir)
{
- BUG_ON(dev->bus != &pci_bus_type);
+ BUG_ON(!dev_is_pci(dev));
}
-EXPORT_SYMBOL(sn_dma_sync_single_for_device);
-void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
- int nelems, int direction)
+static void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir)
{
- BUG_ON(dev->bus != &pci_bus_type);
+ BUG_ON(!dev_is_pci(dev));
}
-EXPORT_SYMBOL(sn_dma_sync_sg_for_cpu);
-void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
- int nelems, int direction)
+static void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir)
{
- BUG_ON(dev->bus != &pci_bus_type);
+ BUG_ON(!dev_is_pci(dev));
}
-EXPORT_SYMBOL(sn_dma_sync_sg_for_device);
-int sn_dma_mapping_error(dma_addr_t dma_addr)
+static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return 0;
}
-EXPORT_SYMBOL(sn_dma_mapping_error);
+
+u64 sn_dma_get_required_mask(struct device *dev)
+{
+ return DMA_BIT_MASK(64);
+}
+EXPORT_SYMBOL_GPL(sn_dma_get_required_mask);
char *sn_pci_get_legacy_mem(struct pci_bus *bus)
{
@@ -331,14 +373,14 @@ int sn_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
/*
* First, try the SN_SAL_IOIF_PCI_SAFE SAL call which can work
* around hw issues at the pci bus level. SGI proms older than
- * 4.10 don't implment this.
+ * 4.10 don't implement this.
*/
SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
- pci_domain_nr(bus), bus->number,
- 0, /* io */
- 0, /* read */
- port, size, __pa(val));
+ pci_domain_nr(bus), bus->number,
+ 0, /* io */
+ 0, /* read */
+ port, size, __pa(val));
if (isrv.status == 0)
return size;
@@ -346,7 +388,7 @@ int sn_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
/*
* If the above failed, retry using the SAL_PROBE call which should
* be present in all proms (but which cannot work round PCI chipset
- * bugs). This code is retained for compatability with old
+ * bugs). This code is retained for compatibility with old
* pre-4.10 proms, and should be removed at some point in the future.
*/
@@ -377,14 +419,14 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
/*
* First, try the SN_SAL_IOIF_PCI_SAFE SAL call which can work
* around hw issues at the pci bus level. SGI proms older than
- * 4.10 don't implment this.
+ * 4.10 don't implement this.
*/
SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
- pci_domain_nr(bus), bus->number,
- 0, /* io */
- 1, /* write */
- port, size, __pa(&val));
+ pci_domain_nr(bus), bus->number,
+ 0, /* io */
+ 1, /* write */
+ port, size, __pa(&val));
if (isrv.status == 0)
return size;
@@ -392,7 +434,7 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
/*
* If the above failed, retry using the SAL_PROBE call which should
* be present in all proms (but which cannot work round PCI chipset
- * bugs). This code is retained for compatability with old
+ * bugs). This code is retained for compatibility with old
* pre-4.10 proms, and should be removed at some point in the future.
*/
@@ -423,3 +465,23 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
out:
return ret;
}
+
+static struct dma_map_ops sn_dma_ops = {
+ .alloc = sn_dma_alloc_coherent,
+ .free = sn_dma_free_coherent,
+ .map_page = sn_dma_map_page,
+ .unmap_page = sn_dma_unmap_page,
+ .map_sg = sn_dma_map_sg,
+ .unmap_sg = sn_dma_unmap_sg,
+ .sync_single_for_cpu = sn_dma_sync_single_for_cpu,
+ .sync_sg_for_cpu = sn_dma_sync_sg_for_cpu,
+ .sync_single_for_device = sn_dma_sync_single_for_device,
+ .sync_sg_for_device = sn_dma_sync_sg_for_device,
+ .mapping_error = sn_dma_mapping_error,
+ .dma_supported = sn_dma_supported,
+};
+
+void sn_dma_init(void)
+{
+ dma_ops = &sn_dma_ops;
+}
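
With dma_ops pointed at sn_dma_ops, drivers reach these routines only through the generic DMA API; a minimal sketch of a mapping that requests the write-barrier behaviour documented above, assuming the struct dma_attrs interface (DEFINE_DMA_ATTRS, dma_set_attr, dma_map_single_attrs) of the kernels this patch targets:

#include <linux/dma-mapping.h>

/*
 * The generic API dispatches through the dma_ops pointer installed by
 * sn_dma_init(); DMA_ATTR_WRITE_BARRIER steers the mapping onto the
 * provider's dma_map_consistent() path so a write flushes pending DMA.
 * Sketch only.
 */
static dma_addr_t example_map_with_barrier(struct device *dev,
					   void *buf, size_t len)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
	return dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE, &attrs);
}
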
diff --git a/arch/ia64/sn/pci/pcibr/Makefile b/arch/ia64/sn/pci/pcibr/Makefile
index 1850c4a94c4..396bcae3630 100644
--- a/arch/ia64/sn/pci/pcibr/Makefile
+++ b/arch/ia64/sn/pci/pcibr/Makefile
@@ -7,5 +7,7 @@
#
# Makefile for the sn2 io routines.
+ccflags-y := -Iarch/ia64/sn/include
+
obj-y += pcibr_dma.o pcibr_reg.o \
pcibr_ate.o pcibr_provider.o
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_ate.c b/arch/ia64/sn/pci/pcibr/pcibr_ate.c
index aa3fa5152a3..5bc34eac9e0 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_ate.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_ate.c
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001-2004 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2001-2006 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
@@ -12,7 +12,7 @@
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
-int pcibr_invalidate_ate = 0; /* by default don't invalidate ATE on free */
+int pcibr_invalidate_ate; /* by default don't invalidate ATE on free */
/*
* mark_ate: Mark the ate as either free or inuse.
@@ -20,24 +20,21 @@ int pcibr_invalidate_ate = 0; /* by default don't invalidate ATE on free */
static void mark_ate(struct ate_resource *ate_resource, int start, int number,
u64 value)
{
-
u64 *ate = ate_resource->ate;
int index;
int length = 0;
for (index = start; length < number; index++, length++)
ate[index] = value;
-
}
/*
* find_free_ate: Find the first free ate index starting from the given
- * index for the desired consequtive count.
+ * index for the desired consecutive count.
*/
static int find_free_ate(struct ate_resource *ate_resource, int start,
int count)
{
-
u64 *ate = ate_resource->ate;
int index;
int start_free;
@@ -57,6 +54,8 @@ static int find_free_ate(struct ate_resource *ate_resource, int start,
break;
}
}
+ if (i >= ate_resource->num_ate)
+ return -1;
} else
index++; /* Try next ate */
}
@@ -70,12 +69,10 @@ static int find_free_ate(struct ate_resource *ate_resource, int start,
static inline void free_ate_resource(struct ate_resource *ate_resource,
int start)
{
-
mark_ate(ate_resource, start, ate_resource->ate[start], 0);
if ((ate_resource->lowest_free_index > start) ||
(ate_resource->lowest_free_index < 0))
ate_resource->lowest_free_index = start;
-
}
/*
@@ -84,7 +81,6 @@ static inline void free_ate_resource(struct ate_resource *ate_resource,
static inline int alloc_ate_resource(struct ate_resource *ate_resource,
int ate_needed)
{
-
int start_index;
/*
@@ -94,7 +90,7 @@ static inline int alloc_ate_resource(struct ate_resource *ate_resource,
return -1;
/*
- * Find the required number of free consequtive ates.
+ * Find the required number of free consecutive ates.
*/
start_index =
find_free_ate(ate_resource, ate_resource->lowest_free_index,
@@ -111,26 +107,19 @@ static inline int alloc_ate_resource(struct ate_resource *ate_resource,
/*
* Allocate "count" contiguous Bridge Address Translation Entries
* on the specified bridge to be used for PCI to XTALK mappings.
- * Indices in rm map range from 1..num_entries. Indicies returned
+ * Indices in rm map range from 1..num_entries. Indices returned
* to caller range from 0..num_entries-1.
*
* Return the start index on success, -1 on failure.
*/
int pcibr_ate_alloc(struct pcibus_info *pcibus_info, int count)
{
- int status = 0;
- u64 flag;
+ int status;
+ unsigned long flags;
- flag = pcibr_lock(pcibus_info);
+ spin_lock_irqsave(&pcibus_info->pbi_lock, flags);
status = alloc_ate_resource(&pcibus_info->pbi_int_ate_resource, count);
-
- if (status < 0) {
- /* Failed to allocate */
- pcibr_unlock(pcibus_info, flag);
- return -1;
- }
-
- pcibr_unlock(pcibus_info, flag);
+ spin_unlock_irqrestore(&pcibus_info->pbi_lock, flags);
return status;
}
@@ -139,7 +128,7 @@ int pcibr_ate_alloc(struct pcibus_info *pcibus_info, int count)
* Setup an Address Translation Entry as specified. Use either the Bridge
* internal maps or the external map RAM, as appropriate.
*/
-static inline u64 *pcibr_ate_addr(struct pcibus_info *pcibus_info,
+static inline u64 __iomem *pcibr_ate_addr(struct pcibus_info *pcibus_info,
int ate_index)
{
if (ate_index < pcibus_info->pbi_int_ate_size) {
@@ -173,7 +162,7 @@ void pcibr_ate_free(struct pcibus_info *pcibus_info, int index)
volatile u64 ate;
int count;
- u64 flags;
+ unsigned long flags;
if (pcibr_invalidate_ate) {
/* For debugging purposes, clear the valid bit in the ATE */
@@ -182,7 +171,7 @@ void pcibr_ate_free(struct pcibus_info *pcibus_info, int index)
ate_write(pcibus_info, index, count, (ate & ~PCI32_ATE_V));
}
- flags = pcibr_lock(pcibus_info);
+ spin_lock_irqsave(&pcibus_info->pbi_lock, flags);
free_ate_resource(&pcibus_info->pbi_int_ate_resource, index);
- pcibr_unlock(pcibus_info, flags);
+ spin_unlock_irqrestore(&pcibus_info->pbi_lock, flags);
}
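
After this conversion, pcibr_ate_alloc() and pcibr_ate_free() take pbi_lock themselves with spin_lock_irqsave(), and a failed allocation is reported as -1; a hypothetical caller (sketch only, wrapper name invented) looks like:

/*
 * Caller-side view of the converted locking: no lock cookie is passed
 * around any more, and the only failure signal is the -1 return when
 * no run of 'count' consecutive free ATEs exists.
 */
static int example_ate_cycle(struct pcibus_info *pcibus_info, int count)
{
	int index = pcibr_ate_alloc(pcibus_info, count);

	if (index < 0)
		return -1;	/* no run of 'count' consecutive free ATEs */

	/* ... program the ATEs via ate_write(), perform the DMA ... */

	pcibr_ate_free(pcibus_info, index);
	return 0;
}
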
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_dma.c b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
index 54ce5b7ceed..1e863b277ac 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_dma.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
@@ -8,6 +8,7 @@
#include <linux/types.h>
#include <linux/pci.h>
+#include <linux/export.h>
#include <asm/sn/addrs.h>
#include <asm/sn/geo.h>
#include <asm/sn/pcibr_provider.h>
@@ -41,7 +42,7 @@ extern int sn_ioif_inited;
static dma_addr_t
pcibr_dmamap_ate32(struct pcidev_info *info,
- u64 paddr, size_t req_size, u64 flags)
+ u64 paddr, size_t req_size, u64 flags, int dma_flags)
{
struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
@@ -81,9 +82,12 @@ pcibr_dmamap_ate32(struct pcidev_info *info,
if (IS_PCIX(pcibus_info))
ate_flags &= ~(PCI32_ATE_PREF);
- xio_addr =
- IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
- PHYS_TO_TIODMA(paddr);
+	if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
+ xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
+ PHYS_TO_TIODMA(paddr);
+ else
+ xio_addr = paddr;
+
offset = IOPGOFF(xio_addr);
ate = ate_flags | (xio_addr - offset);
@@ -91,6 +95,17 @@ pcibr_dmamap_ate32(struct pcidev_info *info,
if (IS_PIC_SOFT(pcibus_info)) {
ate |= (pcibus_info->pbi_hub_xid << PIC_ATE_TARGETID_SHFT);
}
+
+ /*
+ * If we're mapping for MSI, set the MSI bit in the ATE. If it's a
+ * TIOCP based pci bus, we also need to set the PIO bit in the ATE.
+ */
+ if (dma_flags & SN_DMA_MSI) {
+ ate |= PCI32_ATE_MSI;
+ if (IS_TIOCP_SOFT(pcibus_info))
+ ate |= PCI32_ATE_PIO;
+ }
+
ate_write(pcibus_info, ate_index, ate_count, ate);
/*
@@ -105,20 +120,26 @@ pcibr_dmamap_ate32(struct pcidev_info *info,
if (pcibus_info->pbi_devreg[internal_device] & PCIBR_DEV_SWAP_DIR)
ATE_SWAP_ON(pci_addr);
+
return pci_addr;
}
static dma_addr_t
pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr,
- u64 dma_attributes)
+ u64 dma_attributes, int dma_flags)
{
struct pcibus_info *pcibus_info = (struct pcibus_info *)
((info->pdi_host_pcidev_info)->pdi_pcibus_info);
u64 pci_addr;
/* Translate to Crosstalk View of Physical Address */
- pci_addr = (IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
- PHYS_TO_TIODMA(paddr)) | dma_attributes;
+ if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
+ pci_addr = IS_PIC_SOFT(pcibus_info) ?
+ PHYS_TO_DMA(paddr) :
+ PHYS_TO_TIODMA(paddr);
+ else
+ pci_addr = paddr;
+ pci_addr |= dma_attributes;
/* Handle Bus mode */
if (IS_PCIX(pcibus_info))
@@ -130,21 +151,21 @@ pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr,
((u64) pcibus_info->
pbi_hub_xid << PIC_PCI64_ATTR_TARG_SHFT);
} else
- pci_addr |= TIOCP_PCI64_CMDTYPE_MEM;
+ pci_addr |= (dma_flags & SN_DMA_MSI) ?
+ TIOCP_PCI64_CMDTYPE_MSI :
+ TIOCP_PCI64_CMDTYPE_MEM;
/* If PCI mode, func zero uses VCHAN0, every other func uses VCHAN1 */
if (!IS_PCIX(pcibus_info) && PCI_FUNC(info->pdi_linux_pcidev->devfn))
pci_addr |= PCI64_ATTR_VIRTUAL;
return pci_addr;
-
}
static dma_addr_t
pcibr_dmatrans_direct32(struct pcidev_info * info,
- u64 paddr, size_t req_size, u64 flags)
+ u64 paddr, size_t req_size, u64 flags, int dma_flags)
{
-
struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
pdi_pcibus_info;
@@ -158,8 +179,14 @@ pcibr_dmatrans_direct32(struct pcidev_info * info,
return 0;
}
- xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
- PHYS_TO_TIODMA(paddr);
+ if (dma_flags & SN_DMA_MSI)
+ return 0;
+
+ if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
+ xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
+ PHYS_TO_TIODMA(paddr);
+ else
+ xio_addr = paddr;
xio_base = pcibus_info->pbi_dir_xbase;
offset = xio_addr - xio_base;
@@ -171,11 +198,10 @@ pcibr_dmatrans_direct32(struct pcidev_info * info,
}
return PCI32_DIRECT_BASE | offset;
-
}
/*
- * Wrapper routine for free'ing DMA maps
+ * Wrapper routine for freeing DMA maps
* DMA mappings for Direct 64 and 32 do not have any DMA maps.
*/
void
@@ -202,7 +228,7 @@ pcibr_dma_unmap(struct pci_dev *hwdev, dma_addr_t dma_handle, int direction)
* after doing the read. For PIC this routine then forces a fake interrupt
* on another line, which is logically associated with the slot that the PIO
* is addressed to. It then spins while watching the memory location that
- * the interrupt is targetted to. When the interrupt response arrives, we
+ * the interrupt is targeted to. When the interrupt response arrives, we
* are sure that the DMA has landed in memory and it is safe for the driver
* to proceed. For TIOCP use the Device(x) Write Request Buffer Flush
* Bridge register since it ensures the data has entered the coherence domain,
@@ -215,12 +241,11 @@ void sn_dma_flush(u64 addr)
int is_tio;
int wid_num;
int i, j;
- u64 flags;
+ unsigned long flags;
u64 itte;
struct hubdev_info *hubinfo;
- volatile struct sn_flush_device_kernel *p;
- volatile struct sn_flush_device_common *common;
-
+ struct sn_flush_device_kernel *p;
+ struct sn_flush_device_common *common;
struct sn_flush_nasid_entry *flush_nasid_list;
if (!sn_ioif_inited)
@@ -232,9 +257,7 @@ void sn_dma_flush(u64 addr)
hubinfo = (NODEPDA(nasid_to_cnodeid(nasid)))->pdinfo;
- if (!hubinfo) {
- BUG();
- }
+ BUG_ON(!hubinfo);
flush_nasid_list = &hubinfo->hdi_flush_nasid_list;
if (flush_nasid_list->widget_p == NULL)
@@ -310,8 +333,7 @@ void sn_dma_flush(u64 addr)
(common->sfdl_slot - 1));
}
} else {
- spin_lock_irqsave((spinlock_t *)&p->sfdl_flush_lock,
- flags);
+ spin_lock_irqsave(&p->sfdl_flush_lock, flags);
*common->sfdl_flush_addr = 0;
/* force an interrupt. */
@@ -322,8 +344,7 @@ void sn_dma_flush(u64 addr)
cpu_relax();
/* okay, everything is synched up. */
- spin_unlock_irqrestore((spinlock_t *)&p->sfdl_flush_lock,
- flags);
+ spin_unlock_irqrestore(&p->sfdl_flush_lock, flags);
}
return;
}
@@ -333,7 +354,7 @@ void sn_dma_flush(u64 addr)
*/
dma_addr_t
-pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size)
+pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size, int dma_flags)
{
dma_addr_t dma_handle;
struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
@@ -350,11 +371,11 @@ pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size)
*/
dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr,
- PCI64_ATTR_PREF);
+ PCI64_ATTR_PREF, dma_flags);
} else {
/* Handle 32-63 bit cards via direct mapping */
dma_handle = pcibr_dmatrans_direct32(pcidev_info, phys_addr,
- size, 0);
+ size, 0, dma_flags);
if (!dma_handle) {
/*
* It is a 32 bit card and we cannot do direct mapping,
@@ -362,7 +383,8 @@ pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size)
*/
dma_handle = pcibr_dmamap_ate32(pcidev_info, phys_addr,
- size, PCI32_ATE_PREF);
+ size, PCI32_ATE_PREF,
+ dma_flags);
}
}
@@ -371,18 +393,18 @@ pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size)
dma_addr_t
pcibr_dma_map_consistent(struct pci_dev * hwdev, unsigned long phys_addr,
- size_t size)
+ size_t size, int dma_flags)
{
dma_addr_t dma_handle;
struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
if (hwdev->dev.coherent_dma_mask == ~0UL) {
dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr,
- PCI64_ATTR_BAR);
+ PCI64_ATTR_BAR, dma_flags);
} else {
dma_handle = (dma_addr_t) pcibr_dmamap_ate32(pcidev_info,
phys_addr, size,
- PCI32_ATE_BAR);
+ PCI32_ATE_BAR, dma_flags);
}
return dma_handle;
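
The pcibr_dma.c changes above all thread a new dma_flags argument through the mapping paths: it tells the provider whether paddr is a host physical address that still needs the PHYS_TO_DMA()/PHYS_TO_TIODMA() translation or an already-formed XIO address, and whether the mapping is for MSI. A minimal standalone sketch of that decode is shown below; the SN_DMA_* values and the translation stand-ins are illustrative assumptions, not the real asm/sn definitions.

/* Illustrative sketch of the dma_flags decode used above; constants and
 * translation helpers are assumptions, not the kernel's real definitions. */
#include <stdio.h>
#include <stdint.h>

#define SN_DMA_ADDR_PHYS   1			/* assumed encoding */
#define SN_DMA_ADDR_XIO    2			/* assumed encoding */
#define SN_DMA_ADDRTYPE(f) ((f) & 0xf)
#define SN_DMA_MSI         0x10		/* assumed flag bit */

/* Stand-ins for PHYS_TO_DMA()/PHYS_TO_TIODMA(); the real macros encode
 * node/widget bits, here we just tag the address so the flow is visible. */
static uint64_t phys_to_dma(uint64_t paddr)    { return paddr | (1ULL << 60); }
static uint64_t phys_to_tiodma(uint64_t paddr) { return paddr | (1ULL << 59); }

static uint64_t xio_addr_for(uint64_t paddr, int dma_flags, int is_pic)
{
	/* Same shape as pcibr_dmatrans_direct32()/_direct64() above:
	 * only translate when the caller handed us a host physical address. */
	if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
		return is_pic ? phys_to_dma(paddr) : phys_to_tiodma(paddr);
	return paddr;	/* already an XIO/coretalk address */
}

int main(void)
{
	printf("phys on PIC : %#llx\n",
	       (unsigned long long)xio_addr_for(0x1000, SN_DMA_ADDR_PHYS, 1));
	printf("xio, MSI set: %#llx\n",
	       (unsigned long long)xio_addr_for(0x2000,
					SN_DMA_ADDR_XIO | SN_DMA_MSI, 0));
	return 0;
}
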
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
index 2fac27049bf..8dbbef4a4f4 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
@@ -3,24 +3,28 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001-2004 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2001-2004, 2006 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/interrupt.h>
#include <linux/types.h>
+#include <linux/slab.h>
#include <linux/pci.h>
+#include <linux/export.h>
#include <asm/sn/addrs.h>
#include <asm/sn/geo.h>
#include <asm/sn/pcibr_provider.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/sn_sal.h>
+#include <asm/sn/pic.h>
#include <asm/sn/sn2/sn_hwperf.h>
#include "xtalk/xwidgetdev.h"
#include "xtalk/hubdev.h"
int
-sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp)
+sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp,
+ char **ssdt)
{
struct ia64_sal_retval ret_stuff;
u64 busnum;
@@ -32,7 +36,8 @@ sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp)
segment = soft->pbi_buscommon.bs_persist_segment;
busnum = soft->pbi_buscommon.bs_persist_busnum;
SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_ENABLE, segment,
- busnum, (u64) device, (u64) resp, 0, 0, 0);
+ busnum, (u64) device, (u64) resp, (u64)ia64_tpa(ssdt),
+ 0, 0);
return (int)ret_stuff.v0;
}
@@ -74,18 +79,34 @@ static int sal_pcibr_error_interrupt(struct pcibus_info *soft)
return (int)ret_stuff.v0;
}
+u16 sn_ioboard_to_pci_bus(struct pci_bus *pci_bus)
+{
+ long rc;
+ u16 uninitialized_var(ioboard); /* GCC be quiet */
+ nasid_t nasid = NASID_GET(SN_PCIBUS_BUSSOFT(pci_bus)->bs_base);
+
+ rc = ia64_sn_sysctl_ioboard_get(nasid, &ioboard);
+ if (rc) {
+ printk(KERN_WARNING "ia64_sn_sysctl_ioboard_get failed: %ld\n",
+ rc);
+ return 0;
+ }
+
+ return ioboard;
+}
+
/*
* PCI Bridge Error interrupt handler. Gets invoked whenever a PCI
* bridge sends an error interrupt.
*/
static irqreturn_t
-pcibr_error_intr_handler(int irq, void *arg, struct pt_regs *regs)
+pcibr_error_intr_handler(int irq, void *arg)
{
- struct pcibus_info *soft = (struct pcibus_info *)arg;
+ struct pcibus_info *soft = arg;
- if (sal_pcibr_error_interrupt(soft) < 0) {
+ if (sal_pcibr_error_interrupt(soft) < 0)
panic("pcibr_error_intr_handler(): Fatal Bridge Error");
- }
+
return IRQ_HANDLED;
}
@@ -93,7 +114,6 @@ void *
pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller)
{
int nasid, cnode, j;
- cnodeid_t near_cnode;
struct hubdev_info *hubdev_info;
struct pcibus_info *soft;
struct sn_flush_device_kernel *sn_flush_device_kernel;
@@ -107,26 +127,27 @@ pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
* Allocate kernel bus soft and copy from prom.
*/
- soft = kmalloc(sizeof(struct pcibus_info), GFP_KERNEL);
+ soft = kmemdup(prom_bussoft, sizeof(struct pcibus_info), GFP_KERNEL);
if (!soft) {
return NULL;
}
- memcpy(soft, prom_bussoft, sizeof(struct pcibus_info));
- soft->pbi_buscommon.bs_base =
- (((u64) soft->pbi_buscommon.
- bs_base << 4) >> 4) | __IA64_UNCACHED_OFFSET;
+ soft->pbi_buscommon.bs_base = (unsigned long)
+ ioremap(REGION_OFFSET(soft->pbi_buscommon.bs_base),
+ sizeof(struct pic));
spin_lock_init(&soft->pbi_lock);
/*
* register the bridge's error interrupt handler
*/
- if (request_irq(SGI_PCIASIC_ERROR, (void *)pcibr_error_intr_handler,
- SA_SHIRQ, "PCIBR error", (void *)(soft))) {
+ if (request_irq(SGI_PCIASIC_ERROR, pcibr_error_intr_handler,
+ IRQF_SHARED, "PCIBR error", (void *)(soft))) {
printk(KERN_WARNING
"pcibr cannot allocate interrupt for error handler\n");
}
+ irq_set_handler(SGI_PCIASIC_ERROR, handle_level_irq);
+ sn_set_err_irq_affinity(SGI_PCIASIC_ERROR);
/*
* Update the Bridge with the "kernel" pagesize
@@ -163,24 +184,13 @@ pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
/* Setup the PMU ATE map */
soft->pbi_int_ate_resource.lowest_free_index = 0;
soft->pbi_int_ate_resource.ate =
- kmalloc(soft->pbi_int_ate_size * sizeof(u64), GFP_KERNEL);
- memset(soft->pbi_int_ate_resource.ate, 0,
- (soft->pbi_int_ate_size * sizeof(u64)));
-
- if (prom_bussoft->bs_asic_type == PCIIO_ASIC_TYPE_TIOCP) {
- /* TIO PCI Bridge: find nearest node with CPUs */
- int e = sn_hwperf_get_nearest_node(cnode, NULL, &near_cnode);
-
- if (e < 0) {
- near_cnode = (cnodeid_t)-1; /* use any node */
- printk(KERN_WARNING "pcibr_bus_fixup: failed to find "
- "near node with CPUs to TIO node %d, err=%d\n",
- cnode, e);
- }
- controller->node = near_cnode;
+ kzalloc(soft->pbi_int_ate_size * sizeof(u64), GFP_KERNEL);
+
+ if (!soft->pbi_int_ate_resource.ate) {
+ kfree(soft);
+ return NULL;
}
- else
- controller->node = cnode;
+
return soft;
}
@@ -252,3 +262,4 @@ pcibr_init_provider(void)
EXPORT_SYMBOL_GPL(sal_pcibr_slot_enable);
EXPORT_SYMBOL_GPL(sal_pcibr_slot_disable);
+EXPORT_SYMBOL_GPL(sn_ioboard_to_pci_bus);
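
Besides adding sn_ioboard_to_pci_bus(), the pcibr_provider.c hunks convert pcibr_bus_fixup() from kmalloc()+memcpy() to kmemdup() and from kmalloc()+memset() to kzalloc(), and finally check the ATE allocation, freeing the bus soft on failure. A rough userland analogue of that allocate-duplicate-unwind pattern, with invented names, looks like this:

/* Userland analogue of the kmemdup()/kzalloc() cleanup pattern that
 * pcibr_bus_fixup() now follows; names here are illustrative only. */
#include <stdlib.h>
#include <string.h>

struct bus_soft {
	int ate_count;
	unsigned long *ate;	/* plays the role of pbi_int_ate_resource.ate */
};

static struct bus_soft *bus_fixup(const struct bus_soft *prom_copy)
{
	struct bus_soft *soft;

	/* kmemdup(): allocate and copy in one step instead of
	 * kmalloc() + memcpy(). */
	soft = malloc(sizeof(*soft));
	if (!soft)
		return NULL;
	memcpy(soft, prom_copy, sizeof(*soft));

	/* kzalloc(): zeroed allocation instead of kmalloc() + memset(),
	 * and unwind the first allocation if it fails. */
	soft->ate = calloc(soft->ate_count, sizeof(*soft->ate));
	if (!soft->ate) {
		free(soft);
		return NULL;
	}
	return soft;
}

int main(void)
{
	struct bus_soft prom = { .ate_count = 8, .ate = NULL };
	struct bus_soft *soft = bus_fixup(&prom);

	if (soft) {
		free(soft->ate);
		free(soft);
	}
	return 0;
}
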
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c
index 7571a402552..a70b11fd57d 100644
--- a/arch/ia64/sn/pci/tioca_provider.c
+++ b/arch/ia64/sn/pci/tioca_provider.c
@@ -9,6 +9,9 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
+#include <linux/bitmap.h>
+#include <linux/slab.h>
+#include <linux/export.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/addrs.h>
#include <asm/sn/io.h>
@@ -88,7 +91,7 @@ tioca_gart_init(struct tioca_kernel *tioca_kern)
break;
default:
printk(KERN_ERR "%s: Invalid CA_APERATURE_SIZE "
- "0x%lx\n", __FUNCTION__, (ulong) CA_APERATURE_SIZE);
+ "0x%lx\n", __func__, (ulong) CA_APERATURE_SIZE);
return -1;
}
@@ -123,8 +126,8 @@ tioca_gart_init(struct tioca_kernel *tioca_kern)
if (!tmp) {
printk(KERN_ERR "%s: Could not allocate "
- "%lu bytes (order %d) for GART\n",
- __FUNCTION__,
+ "%llu bytes (order %d) for GART\n",
+ __func__,
tioca_kern->ca_gart_size,
get_order(tioca_kern->ca_gart_size));
return -ENOMEM;
@@ -223,7 +226,7 @@ tioca_fastwrite_enable(struct tioca_kernel *tioca_kern)
/*
* Scan all vga controllers on this bus making sure they all
- * suport FW. If not, return.
+ * support FW. If not, return.
*/
list_for_each_entry(pdev, tioca_kern->ca_devices, bus_list) {
@@ -341,15 +344,15 @@ tioca_dma_d48(struct pci_dev *pdev, u64 paddr)
if (node_upper > 64) {
printk(KERN_ERR "%s: coretalk addr 0x%p node id out "
- "of range\n", __FUNCTION__, (void *)ct_addr);
+ "of range\n", __func__, (void *)ct_addr);
return 0;
}
agp_dma_extn = __sn_readq_relaxed(&ca_base->ca_agp_dma_addr_extn);
if (node_upper != (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)) {
printk(KERN_ERR "%s: coretalk upper node (%u) "
- "mismatch with ca_agp_dma_addr_extn (%lu)\n",
- __FUNCTION__,
+ "mismatch with ca_agp_dma_addr_extn (%llu)\n",
+ __func__,
node_upper, (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT));
return 0;
}
@@ -364,12 +367,12 @@ tioca_dma_d48(struct pci_dev *pdev, u64 paddr)
* @req_size: len (bytes) to map
*
* Map @paddr into CA address space using the GART mechanism. The mapped
- * dma_addr_t is guarenteed to be contiguous in CA bus space.
+ * dma_addr_t is guaranteed to be contiguous in CA bus space.
*/
static dma_addr_t
-tioca_dma_mapped(struct pci_dev *pdev, u64 paddr, size_t req_size)
+tioca_dma_mapped(struct pci_dev *pdev, unsigned long paddr, size_t req_size)
{
- int i, ps, ps_shift, entry, entries, mapsize, last_entry;
+ int ps, ps_shift, entry, entries, mapsize;
u64 xio_addr, end_xio_addr;
struct tioca_common *tioca_common;
struct tioca_kernel *tioca_kern;
@@ -377,7 +380,7 @@ tioca_dma_mapped(struct pci_dev *pdev, u64 paddr, size_t req_size)
struct tioca_dmamap *ca_dmamap;
void *map;
unsigned long flags;
- struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);;
+ struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);
tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info;
tioca_kern = (struct tioca_kernel *)tioca_common->ca_kernel_private;
@@ -410,21 +413,13 @@ tioca_dma_mapped(struct pci_dev *pdev, u64 paddr, size_t req_size)
map = tioca_kern->ca_pcigart_pagemap;
mapsize = tioca_kern->ca_pcigart_entries;
- entry = find_first_zero_bit(map, mapsize);
- while (entry < mapsize) {
- last_entry = find_next_bit(map, mapsize, entry);
-
- if (last_entry - entry >= entries)
- break;
-
- entry = find_next_zero_bit(map, mapsize, last_entry);
- }
-
- if (entry > mapsize)
+ entry = bitmap_find_next_zero_area(map, mapsize, 0, entries, 0);
+ if (entry >= mapsize) {
+ kfree(ca_dmamap);
goto map_return;
+ }
- for (i = 0; i < entries; i++)
- set_bit(entry + i, map);
+ bitmap_set(map, entry, entries);
bus_addr = tioca_kern->ca_pciap_base + (entry * ps);
@@ -515,12 +510,18 @@ tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
* use the GART mapped mode.
*/
static u64
-tioca_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count)
+tioca_dma_map(struct pci_dev *pdev, unsigned long paddr, size_t byte_count, int dma_flags)
{
u64 mapaddr;
/*
+ * If card is 64 or 48 bit addresable, use a direct mapping. 32
+ * Not supported for now ...
+ */
+ if (dma_flags & SN_DMA_MSI)
+ return 0;
+
+ /*
+ * If card is 64 or 48 bit addressable, use a direct mapping. 32
* bit direct is so restrictive w.r.t. where the memory resides that
* we don't use it even though CA has some support.
*/
@@ -544,13 +545,12 @@ tioca_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count)
* tioca_error_intr_handler - SGI TIO CA error interrupt handler
* @irq: unused
* @arg: pointer to tioca_common struct for the given CA
- * @pt: unused
*
* Handle a CA error interrupt. Simply a wrapper around a SAL call which
* defers processing to the SGI prom.
*/
static irqreturn_t
-tioca_error_intr_handler(int irq, void *arg, struct pt_regs *pt)
+tioca_error_intr_handler(int irq, void *arg)
{
struct tioca_common *soft = arg;
struct ia64_sal_retval ret_stuff;
@@ -589,10 +589,10 @@ tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
/* sanity check prom rev */
- if (sn_sal_rev() < 0x0406) {
+ if (is_shub1() && sn_sal_rev() < 0x0406) {
printk
(KERN_ERR "%s: SGI prom rev 4.06 or greater required "
- "for tioca support\n", __FUNCTION__);
+ "for tioca support\n", __func__);
return NULL;
}
@@ -600,12 +600,14 @@ tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
* Allocate kernel bus soft and copy from prom.
*/
- tioca_common = kzalloc(sizeof(struct tioca_common), GFP_KERNEL);
+ tioca_common = kmemdup(prom_bussoft, sizeof(struct tioca_common),
+ GFP_KERNEL);
if (!tioca_common)
return NULL;
- memcpy(tioca_common, prom_bussoft, sizeof(struct tioca_common));
- tioca_common->ca_common.bs_base |= __IA64_UNCACHED_OFFSET;
+ tioca_common->ca_common.bs_base = (unsigned long)
+ ioremap(REGION_OFFSET(tioca_common->ca_common.bs_base),
+ sizeof(struct tioca_common));
/* init kernel-private area */
@@ -640,13 +642,16 @@ tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
if (request_irq(SGI_TIOCA_ERROR,
tioca_error_intr_handler,
- SA_SHIRQ, "TIOCA error", (void *)tioca_common))
+ IRQF_SHARED, "TIOCA error", (void *)tioca_common))
printk(KERN_WARNING
"%s: Unable to get irq %d. "
"Error interrupts won't be routed for TIOCA bus %d\n",
- __FUNCTION__, SGI_TIOCA_ERROR,
+ __func__, SGI_TIOCA_ERROR,
(int)tioca_common->ca_common.bs_persist_busnum);
+ irq_set_handler(SGI_TIOCA_ERROR, handle_level_irq);
+ sn_set_err_irq_affinity(SGI_TIOCA_ERROR);
+
/* Setup locality information */
controller->node = tioca_kern->ca_closest_node;
return tioca_common;
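
The most substantial tioca_provider.c change replaces the hand-rolled first-fit scan over the GART page bitmap with bitmap_find_next_zero_area() plus bitmap_set(), and frees the just-allocated ca_dmamap when no hole is found. The sketch below re-implements the same first-fit search outside the kernel to show what the helper is doing; the one-byte-per-entry map is a readability assumption, not the kernel's packed bitmap.

/* First-fit search equivalent to what bitmap_find_next_zero_area() +
 * bitmap_set() do for the TIOCA GART above.  The map is modelled as one
 * byte per GART entry purely for readability. */
#include <stdio.h>

/* Return first index with 'entries' consecutive free slots, or mapsize. */
static int find_zero_area(const unsigned char *map, int mapsize, int entries)
{
	int start, run;

	for (start = 0, run = 0; start + run < mapsize; ) {
		if (map[start + run]) {		/* slot busy: restart after it */
			start += run + 1;
			run = 0;
		} else if (++run == entries) {	/* found a big enough hole */
			return start;
		}
	}
	return mapsize;				/* no hole: mirrors 'entry >= mapsize' */
}

int main(void)
{
	unsigned char gart[8] = { 1, 0, 0, 1, 0, 0, 0, 1 };
	int entry = find_zero_area(gart, 8, 3);

	if (entry >= 8)
		printf("no space in GART\n");
	else
		printf("mapping starts at entry %d\n", entry);	/* prints 4 */
	return 0;
}
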
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c
index e52831ed93e..46d3df4b03a 100644
--- a/arch/ia64/sn/pci/tioce_provider.c
+++ b/arch/ia64/sn/pci/tioce_provider.c
@@ -3,11 +3,12 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2003-2005 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (C) 2003-2006 Silicon Graphics, Inc. All Rights Reserved.
*/
#include <linux/types.h>
#include <linux/interrupt.h>
+#include <linux/slab.h>
#include <linux/pci.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/addrs.h>
@@ -16,6 +17,123 @@
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/tioce_provider.h>
+/*
+ * 1/26/2006
+ *
+ * WAR for SGI PV 944642. For revA TIOCE, need to use the following recipe
+ * (taken from the above PV) before and after accessing tioce internal MMR's
+ * to avoid tioce lockups.
+ *
+ * The recipe as taken from the PV:
+ *
+ * if(mmr address < 0x45000) {
+ * if(mmr address == 0 or 0x80)
+ * mmr wrt or read address 0xc0
+ * else if(mmr address == 0x148 or 0x200)
+ * mmr wrt or read address 0x28
+ * else
+ * mmr wrt or read address 0x158
+ *
+ * do desired mmr access (rd or wrt)
+ *
+ * if(mmr address == 0x100)
+ * mmr wrt or read address 0x38
+ * mmr wrt or read address 0xb050
+ * } else
+ * do desired mmr access
+ *
+ * According to hw, we can use reads instead of writes to the above address
+ *
+ * Note this WAR can only be used for accessing internal MMR's in the
+ * TIOCE Coretalk Address Range 0x0 - 0x07ff_ffff. This includes the
+ * "Local CE Registers and Memories" and "PCI Compatible Config Space" address
+ * spaces from table 2-1 of the "CE Programmer's Reference Overview" document.
+ *
+ * All registers defined in struct tioce will meet that criterion.
+ */
+
+static inline void
+tioce_mmr_war_pre(struct tioce_kernel *kern, void __iomem *mmr_addr)
+{
+ u64 mmr_base;
+ u64 mmr_offset;
+
+ if (kern->ce_common->ce_rev != TIOCE_REV_A)
+ return;
+
+ mmr_base = kern->ce_common->ce_pcibus.bs_base;
+ mmr_offset = (unsigned long)mmr_addr - mmr_base;
+
+ if (mmr_offset < 0x45000) {
+ u64 mmr_war_offset;
+
+ if (mmr_offset == 0 || mmr_offset == 0x80)
+ mmr_war_offset = 0xc0;
+ else if (mmr_offset == 0x148 || mmr_offset == 0x200)
+ mmr_war_offset = 0x28;
+ else
+ mmr_war_offset = 0x158;
+
+ readq_relaxed((void __iomem *)(mmr_base + mmr_war_offset));
+ }
+}
+
+static inline void
+tioce_mmr_war_post(struct tioce_kernel *kern, void __iomem *mmr_addr)
+{
+ u64 mmr_base;
+ u64 mmr_offset;
+
+ if (kern->ce_common->ce_rev != TIOCE_REV_A)
+ return;
+
+ mmr_base = kern->ce_common->ce_pcibus.bs_base;
+ mmr_offset = (unsigned long)mmr_addr - mmr_base;
+
+ if (mmr_offset < 0x45000) {
+ if (mmr_offset == 0x100)
+ readq_relaxed((void __iomem *)(mmr_base + 0x38));
+ readq_relaxed((void __iomem *)(mmr_base + 0xb050));
+ }
+}
+
+/* load mmr contents into a variable */
+#define tioce_mmr_load(kern, mmrp, varp) do {\
+ tioce_mmr_war_pre(kern, mmrp); \
+ *(varp) = readq_relaxed(mmrp); \
+ tioce_mmr_war_post(kern, mmrp); \
+} while (0)
+
+/* store variable contents into mmr */
+#define tioce_mmr_store(kern, mmrp, varp) do {\
+ tioce_mmr_war_pre(kern, mmrp); \
+ writeq(*varp, mmrp); \
+ tioce_mmr_war_post(kern, mmrp); \
+} while (0)
+
+/* store immediate value into mmr */
+#define tioce_mmr_storei(kern, mmrp, val) do {\
+ tioce_mmr_war_pre(kern, mmrp); \
+ writeq(val, mmrp); \
+ tioce_mmr_war_post(kern, mmrp); \
+} while (0)
+
+/* set bits (immediate value) into mmr */
+#define tioce_mmr_seti(kern, mmrp, bits) do {\
+ u64 tmp; \
+ tioce_mmr_load(kern, mmrp, &tmp); \
+ tmp |= (bits); \
+ tioce_mmr_store(kern, mmrp, &tmp); \
+} while (0)
+
+/* clear bits (immediate value) in mmr */
+#define tioce_mmr_clri(kern, mmrp, bits) do { \
+ u64 tmp; \
+ tioce_mmr_load(kern, mmrp, &tmp); \
+ tmp &= ~(bits); \
+ tioce_mmr_store(kern, mmrp, &tmp); \
+} while (0)
+
/**
* Bus address ranges for the 5 flavors of TIOCE DMA
*/
@@ -52,7 +170,8 @@
(ATE_PAGE((start)+(len)-1, pagesize) - ATE_PAGE(start, pagesize) + 1)
#define ATE_VALID(ate) ((ate) & (1UL << 63))
-#define ATE_MAKE(addr, ps) (((addr) & ~ATE_PAGEMASK(ps)) | (1UL << 63))
+#define ATE_MAKE(addr, ps, msi) \
+ (((addr) & ~ATE_PAGEMASK(ps)) | (1UL << 63) | ((msi) ? (1UL << 62) : 0))
/*
* Flavors of ate-based mapping supported by tioce_alloc_map()
@@ -62,9 +181,9 @@
#define TIOCE_ATE_M40 2
#define TIOCE_ATE_M40S 3
-#define KB(x) ((x) << 10)
-#define MB(x) ((x) << 20)
-#define GB(x) ((x) << 30)
+#define KB(x) ((u64)(x) << 10)
+#define MB(x) ((u64)(x) << 20)
+#define GB(x) ((u64)(x) << 30)
/**
* tioce_dma_d64 - create a DMA mapping using 64-bit direct mode
@@ -78,15 +197,17 @@
*
* 63 - must be 1 to indicate d64 mode to CE hardware
* 62 - barrier bit ... controlled with tioce_dma_barrier()
- * 61 - 0 since this is not an MSI transaction
+ * 61 - msi bit ... specified through dma_flags
* 60:54 - reserved, MBZ
*/
static u64
-tioce_dma_d64(unsigned long ct_addr)
+tioce_dma_d64(unsigned long ct_addr, int dma_flags)
{
u64 bus_addr;
bus_addr = ct_addr | (1UL << 63);
+ if (dma_flags & SN_DMA_MSI)
+ bus_addr |= (1UL << 61);
return bus_addr;
}
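
As the comment block above spells out, a D64 mapping is simply the coretalk address with bit 63 set, bit 62 reserved for the barrier attribute and, with this patch, bit 61 carrying the MSI flag. A small sketch of that layout, with SN_DMA_MSI's value assumed:

/* Sketch of the D64 bus-address layout described above (bit 63 = d64 mode,
 * bit 62 = barrier, bit 61 = MSI).  SN_DMA_MSI's value is an assumption. */
#include <stdio.h>
#include <stdint.h>

#define SN_DMA_MSI 0x10	/* assumed flag bit */

static uint64_t dma_d64(uint64_t ct_addr, int dma_flags, int barrier)
{
	uint64_t bus_addr = ct_addr | (1ULL << 63);	/* d64 mode */

	if (dma_flags & SN_DMA_MSI)
		bus_addr |= (1ULL << 61);		/* MSI transaction */
	if (barrier)
		bus_addr |= (1ULL << 62);		/* tioce_dma_barrier() bit */
	return bus_addr;
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)dma_d64(0x123456789ULL,
						      SN_DMA_MSI, 0));
	return 0;
}
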
@@ -102,7 +223,7 @@ tioce_dma_d64(unsigned long ct_addr)
* @pci_dev.
*/
static inline void
-pcidev_to_tioce(struct pci_dev *pdev, struct tioce **base,
+pcidev_to_tioce(struct pci_dev *pdev, struct tioce __iomem **base,
struct tioce_kernel **kernel, int *port)
{
struct pcidev_info *pcidev_info;
@@ -114,7 +235,7 @@ pcidev_to_tioce(struct pci_dev *pdev, struct tioce **base,
ce_kernel = (struct tioce_kernel *)ce_common->ce_kernel_private;
if (base)
- *base = (struct tioce *)ce_common->ce_pcibus.bs_base;
+ *base = (struct tioce __iomem *)ce_common->ce_pcibus.bs_base;
if (kernel)
*kernel = ce_kernel;
@@ -136,14 +257,14 @@ pcidev_to_tioce(struct pci_dev *pdev, struct tioce **base,
* @ct_addr: the coretalk address to map
* @len: number of bytes to map
*
- * Given the addressing type, set up various paramaters that define the
+ * Given the addressing type, set up various parameters that define the
* ATE pool to use. Search for a contiguous block of entries to cover the
- * length, and if enough resources exist, fill in the ATE's and construct a
+ * length, and if enough resources exist, fill in the ATEs and construct a
* tioce_dmamap struct to track the mapping.
*/
static u64
tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
- u64 ct_addr, int len)
+ u64 ct_addr, int len, int dma_flags)
{
int i;
int j;
@@ -151,15 +272,16 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
int last;
int entries;
int nates;
- int pagesize;
+ u64 pagesize;
+ int msi_capable, msi_wanted;
u64 *ate_shadow;
- u64 *ate_reg;
+ u64 __iomem *ate_reg;
u64 addr;
- struct tioce *ce_mmr;
+ struct tioce __iomem *ce_mmr;
u64 bus_base;
struct tioce_dmamap *map;
- ce_mmr = (struct tioce *)ce_kern->ce_common->ce_pcibus.bs_base;
+ ce_mmr = (struct tioce __iomem *)ce_kern->ce_common->ce_pcibus.bs_base;
switch (type) {
case TIOCE_ATE_M32:
@@ -173,6 +295,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
ate_reg = ce_mmr->ce_ure_ate3240;
pagesize = ce_kern->ce_ate3240_pagesize;
bus_base = TIOCE_M32_MIN;
+ msi_capable = 1;
break;
case TIOCE_ATE_M40:
first = 0;
@@ -181,6 +304,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
ate_reg = ce_mmr->ce_ure_ate40;
pagesize = MB(64);
bus_base = TIOCE_M40_MIN;
+ msi_capable = 0;
break;
case TIOCE_ATE_M40S:
/*
@@ -193,11 +317,16 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
ate_reg = ce_mmr->ce_ure_ate3240;
pagesize = GB(16);
bus_base = TIOCE_M40S_MIN;
+ msi_capable = 0;
break;
default:
return 0;
}
+ msi_wanted = dma_flags & SN_DMA_MSI;
+ if (msi_wanted && !msi_capable)
+ return 0;
+
nates = ATE_NPAGES(ct_addr, len, pagesize);
if (nates > entries)
return 0;
@@ -226,9 +355,9 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
for (j = 0; j < nates; j++) {
u64 ate;
- ate = ATE_MAKE(addr, pagesize);
+ ate = ATE_MAKE(addr, pagesize, msi_wanted);
ate_shadow[i + j] = ate;
- writeq(ate, &ate_reg[i + j]);
+ tioce_mmr_storei(ce_kern, &ate_reg[i + j], ate);
addr += pagesize;
}
@@ -253,16 +382,19 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
* Map @paddr into 32-bit bus space of the CE associated with @pcidev_info.
*/
static u64
-tioce_dma_d32(struct pci_dev *pdev, u64 ct_addr)
+tioce_dma_d32(struct pci_dev *pdev, u64 ct_addr, int dma_flags)
{
int dma_ok;
int port;
- struct tioce *ce_mmr;
+ struct tioce __iomem *ce_mmr;
struct tioce_kernel *ce_kern;
u64 ct_upper;
u64 ct_lower;
dma_addr_t bus_addr;
+ if (dma_flags & SN_DMA_MSI)
+ return 0;
+
ct_upper = ct_addr & ~0x3fffffffUL;
ct_lower = ct_addr & 0x3fffffffUL;
@@ -272,7 +404,8 @@ tioce_dma_d32(struct pci_dev *pdev, u64 ct_addr)
u64 tmp;
ce_kern->ce_port[port].dirmap_shadow = ct_upper;
- writeq(ct_upper, &ce_mmr->ce_ure_dir_map[port]);
+ tioce_mmr_storei(ce_kern, &ce_mmr->ce_ure_dir_map[port],
+ ct_upper);
tmp = ce_mmr->ce_ure_dir_map[port];
dma_ok = 1;
} else
@@ -328,7 +461,7 @@ tioce_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
int i;
int port;
struct tioce_kernel *ce_kern;
- struct tioce *ce_mmr;
+ struct tioce __iomem *ce_mmr;
unsigned long flags;
bus_addr = tioce_dma_barrier(bus_addr, 0);
@@ -344,7 +477,8 @@ tioce_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
if (TIOCE_D32_ADDR(bus_addr)) {
if (--ce_kern->ce_port[port].dirmap_refcnt == 0) {
ce_kern->ce_port[port].dirmap_shadow = 0;
- writeq(0, &ce_mmr->ce_ure_dir_map[port]);
+ tioce_mmr_storei(ce_kern, &ce_mmr->ce_ure_dir_map[port],
+ 0);
}
} else {
struct tioce_dmamap *map;
@@ -360,12 +494,12 @@ tioce_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
if (&map->ce_dmamap_list == &ce_kern->ce_dmamap_list) {
printk(KERN_WARNING
- "%s: %s - no map found for bus_addr 0x%lx\n",
- __FUNCTION__, pci_name(pdev), bus_addr);
+ "%s: %s - no map found for bus_addr 0x%llx\n",
+ __func__, pci_name(pdev), bus_addr);
} else if (--map->refcnt == 0) {
for (i = 0; i < map->ate_count; i++) {
map->ate_shadow[i] = 0;
- map->ate_hw[i] = 0;
+ tioce_mmr_storei(ce_kern, &map->ate_hw[i], 0);
}
list_del(&map->ce_dmamap_list);
@@ -387,7 +521,7 @@ tioce_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
*/
static u64
tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
- int barrier)
+ int barrier, int dma_flags)
{
unsigned long flags;
u64 ct_addr;
@@ -403,15 +537,18 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
if (dma_mask < 0x7fffffffUL)
return 0;
- ct_addr = PHYS_TO_TIODMA(paddr);
+ if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
+ ct_addr = PHYS_TO_TIODMA(paddr);
+ else
+ ct_addr = paddr;
/*
* If the device can generate 64 bit addresses, create a D64 map.
- * Since this should never fail, bypass the rest of the checks.
*/
if (dma_mask == ~0UL) {
- mapaddr = tioce_dma_d64(ct_addr);
- goto dma_map_done;
+ mapaddr = tioce_dma_d64(ct_addr, dma_flags);
+ if (mapaddr)
+ goto dma_map_done;
}
pcidev_to_tioce(pdev, NULL, &ce_kern, &port);
@@ -445,8 +582,8 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
*/
if (!mapaddr && !barrier && dma_mask >= 0xffffffffffUL) {
/*
- * We have two options for 40-bit mappings: 16GB "super" ATE's
- * and 64MB "regular" ATE's. We'll try both if needed for a
+ * We have two options for 40-bit mappings: 16GB "super" ATEs
+ * and 64MB "regular" ATEs. We'll try both if needed for a
* given mapping but which one we try first depends on the
* size. For requests >64MB, prefer to use a super page with
* regular as the fallback. Otherwise, try in the reverse order.
@@ -454,18 +591,22 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
if (byte_count > MB(64)) {
mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40S,
- port, ct_addr, byte_count);
+ port, ct_addr, byte_count,
+ dma_flags);
if (!mapaddr)
mapaddr =
tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1,
- ct_addr, byte_count);
+ ct_addr, byte_count,
+ dma_flags);
} else {
mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1,
- ct_addr, byte_count);
+ ct_addr, byte_count,
+ dma_flags);
if (!mapaddr)
mapaddr =
tioce_alloc_map(ce_kern, TIOCE_ATE_M40S,
- port, ct_addr, byte_count);
+ port, ct_addr, byte_count,
+ dma_flags);
}
}
@@ -473,7 +614,7 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
* 32-bit direct is the next mode to try
*/
if (!mapaddr && dma_mask >= 0xffffffffUL)
- mapaddr = tioce_dma_d32(pdev, ct_addr);
+ mapaddr = tioce_dma_d32(pdev, ct_addr, dma_flags);
/*
* Last resort, try 32-bit ATE-based map.
@@ -481,12 +622,12 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
if (!mapaddr)
mapaddr =
tioce_alloc_map(ce_kern, TIOCE_ATE_M32, -1, ct_addr,
- byte_count);
+ byte_count, dma_flags);
spin_unlock_irqrestore(&ce_kern->ce_lock, flags);
dma_map_done:
- if (mapaddr & barrier)
+ if (mapaddr && barrier)
mapaddr = tioce_dma_barrier(mapaddr, 1);
return mapaddr;
@@ -502,9 +643,9 @@ dma_map_done:
* in the address.
*/
static u64
-tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count)
+tioce_dma(struct pci_dev *pdev, unsigned long paddr, size_t byte_count, int dma_flags)
{
- return tioce_do_dma_map(pdev, paddr, byte_count, 0);
+ return tioce_do_dma_map(pdev, paddr, byte_count, 0, dma_flags);
}
/**
@@ -515,22 +656,23 @@ tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count)
*
* Simply call tioce_do_dma_map() to create a map with the barrier bit set
* in the address.
- */ static u64
-tioce_dma_consistent(struct pci_dev *pdev, u64 paddr, size_t byte_count)
+ */
+static u64
+tioce_dma_consistent(struct pci_dev *pdev, unsigned long paddr, size_t byte_count, int dma_flags)
{
- return tioce_do_dma_map(pdev, paddr, byte_count, 1);
+ return tioce_do_dma_map(pdev, paddr, byte_count, 1, dma_flags);
}
/**
* tioce_error_intr_handler - SGI TIO CE error interrupt handler
* @irq: unused
* @arg: pointer to tioce_common struct for the given CE
- * @pt: unused
*
* Handle a CE error interrupt. Simply a wrapper around a SAL call which
* defers processing to the SGI prom.
- */ static irqreturn_t
-tioce_error_intr_handler(int irq, void *arg, struct pt_regs *pt)
+ */
+static irqreturn_t
+tioce_error_intr_handler(int irq, void *arg)
{
struct tioce_common *soft = arg;
struct ia64_sal_retval ret_stuff;
@@ -541,18 +683,62 @@ tioce_error_intr_handler(int irq, void *arg, struct pt_regs *pt)
soft->ce_pcibus.bs_persist_segment,
soft->ce_pcibus.bs_persist_busnum, 0, 0, 0, 0, 0);
+ if (ret_stuff.v0)
+ panic("tioce_error_intr_handler: Fatal TIOCE error");
+
return IRQ_HANDLED;
}
/**
+ * tioce_reserve_m32 - reserve M32 ATEs for the indicated address range
+ * @tioce_kernel: TIOCE context to reserve ATEs for
+ * @base: starting bus address to reserve
+ * @limit: last bus address to reserve
+ *
+ * If base/limit falls within the range of bus space mapped through the
+ * M32 space, reserve the resources corresponding to the range.
+ */
+static void
+tioce_reserve_m32(struct tioce_kernel *ce_kern, u64 base, u64 limit)
+{
+ int ate_index, last_ate, ps;
+ struct tioce __iomem *ce_mmr;
+
+ ce_mmr = (struct tioce __iomem *)ce_kern->ce_common->ce_pcibus.bs_base;
+ ps = ce_kern->ce_ate3240_pagesize;
+ ate_index = ATE_PAGE(base, ps);
+ last_ate = ate_index + ATE_NPAGES(base, limit-base+1, ps) - 1;
+
+ if (ate_index < 64)
+ ate_index = 64;
+
+ if (last_ate >= TIOCE_NUM_M3240_ATES)
+ last_ate = TIOCE_NUM_M3240_ATES - 1;
+
+ while (ate_index <= last_ate) {
+ u64 ate;
+
+ ate = ATE_MAKE(0xdeadbeef, ps, 0);
+ ce_kern->ce_ate3240_shadow[ate_index] = ate;
+ tioce_mmr_storei(ce_kern, &ce_mmr->ce_ure_ate3240[ate_index],
+ ate);
+ ate_index++;
+ }
+}
+
+/**
* tioce_kern_init - init kernel structures related to a given TIOCE
* @tioce_common: ptr to a cached tioce_common struct that originated in prom
- */ static struct tioce_kernel *
+ */
+static struct tioce_kernel *
tioce_kern_init(struct tioce_common *tioce_common)
{
int i;
+ int ps;
+ int dev;
u32 tmp;
- struct tioce *tioce_mmr;
+ unsigned int seg, bus;
+ struct tioce __iomem *tioce_mmr;
struct tioce_kernel *tioce_kern;
tioce_kern = kzalloc(sizeof(struct tioce_kernel), GFP_KERNEL);
@@ -569,32 +755,91 @@ tioce_kern_init(struct tioce_common *tioce_common)
* Determine the secondary bus number of the port2 logical PPB.
* This is used to decide whether a given pci device resides on
* port1 or port2. Note: We don't have enough plumbing set up
- * here to use pci_read_config_xxx() so use the raw_pci_ops vector.
+ * here to use pci_read_config_xxx() so use raw_pci_read().
*/
- raw_pci_ops->read(tioce_common->ce_pcibus.bs_persist_segment,
- tioce_common->ce_pcibus.bs_persist_busnum,
- PCI_DEVFN(2, 0), PCI_SECONDARY_BUS, 1, &tmp);
+ seg = tioce_common->ce_pcibus.bs_persist_segment;
+ bus = tioce_common->ce_pcibus.bs_persist_busnum;
+
+ raw_pci_read(seg, bus, PCI_DEVFN(2, 0), PCI_SECONDARY_BUS, 1, &tmp);
tioce_kern->ce_port1_secondary = (u8) tmp;
/*
* Set PMU pagesize to the largest size available, and zero out
- * the ate's.
+ * the ATEs.
*/
- tioce_mmr = (struct tioce *)tioce_common->ce_pcibus.bs_base;
- __sn_clrq_relaxed(&tioce_mmr->ce_ure_page_map, CE_URE_PAGESIZE_MASK);
- __sn_setq_relaxed(&tioce_mmr->ce_ure_page_map, CE_URE_256K_PAGESIZE);
- tioce_kern->ce_ate3240_pagesize = KB(256);
+ tioce_mmr = (struct tioce __iomem *)tioce_common->ce_pcibus.bs_base;
+ tioce_mmr_clri(tioce_kern, &tioce_mmr->ce_ure_page_map,
+ CE_URE_PAGESIZE_MASK);
+ tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_ure_page_map,
+ CE_URE_256K_PAGESIZE);
+ ps = tioce_kern->ce_ate3240_pagesize = KB(256);
for (i = 0; i < TIOCE_NUM_M40_ATES; i++) {
tioce_kern->ce_ate40_shadow[i] = 0;
- writeq(0, &tioce_mmr->ce_ure_ate40[i]);
+ tioce_mmr_storei(tioce_kern, &tioce_mmr->ce_ure_ate40[i], 0);
}
for (i = 0; i < TIOCE_NUM_M3240_ATES; i++) {
tioce_kern->ce_ate3240_shadow[i] = 0;
- writeq(0, &tioce_mmr->ce_ure_ate3240[i]);
+ tioce_mmr_storei(tioce_kern, &tioce_mmr->ce_ure_ate3240[i], 0);
+ }
+
+ /*
+ * Reserve ATEs corresponding to reserved address ranges. These
+ * include:
+ *
+ * Memory space covered by each PPB mem base/limit register
+ * Memory space covered by each PPB prefetch base/limit register
+ *
+ * These bus ranges are for pio (downstream) traffic only, and so
+ * cannot be used for DMA.
+ */
+
+ for (dev = 1; dev <= 2; dev++) {
+ u64 base, limit;
+
+ /* mem base/limit */
+
+ raw_pci_read(seg, bus, PCI_DEVFN(dev, 0),
+ PCI_MEMORY_BASE, 2, &tmp);
+ base = (u64)tmp << 16;
+
+ raw_pci_read(seg, bus, PCI_DEVFN(dev, 0),
+ PCI_MEMORY_LIMIT, 2, &tmp);
+ limit = (u64)tmp << 16;
+ limit |= 0xfffffUL;
+
+ if (base < limit)
+ tioce_reserve_m32(tioce_kern, base, limit);
+
+ /*
+ * prefetch mem base/limit. The tioce PPBs have 64-bit
+ * decoders, so read the upper portions w/o checking the
+ * attributes.
+ */
+
+ raw_pci_read(seg, bus, PCI_DEVFN(dev, 0),
+ PCI_PREF_MEMORY_BASE, 2, &tmp);
+ base = ((u64)tmp & PCI_PREF_RANGE_MASK) << 16;
+
+ raw_pci_read(seg, bus, PCI_DEVFN(dev, 0),
+ PCI_PREF_BASE_UPPER32, 4, &tmp);
+ base |= (u64)tmp << 32;
+
+ raw_pci_read(seg, bus, PCI_DEVFN(dev, 0),
+ PCI_PREF_MEMORY_LIMIT, 2, &tmp);
+
+ limit = ((u64)tmp & PCI_PREF_RANGE_MASK) << 16;
+ limit |= 0xfffffUL;
+
+ raw_pci_read(seg, bus, PCI_DEVFN(dev, 0),
+ PCI_PREF_LIMIT_UPPER32, 4, &tmp);
+ limit |= (u64)tmp << 32;
+
+ if ((base < limit) && TIOCE_M32_ADDR(base))
+ tioce_reserve_m32(tioce_kern, base, limit);
}
return tioce_kern;
@@ -614,7 +859,8 @@ tioce_force_interrupt(struct sn_irq_info *sn_irq_info)
{
struct pcidev_info *pcidev_info;
struct tioce_common *ce_common;
- struct tioce *ce_mmr;
+ struct tioce_kernel *ce_kern;
+ struct tioce __iomem *ce_mmr;
u64 force_int_val;
if (!sn_irq_info->irq_bridge)
@@ -628,7 +874,30 @@ tioce_force_interrupt(struct sn_irq_info *sn_irq_info)
return;
ce_common = (struct tioce_common *)pcidev_info->pdi_pcibus_info;
- ce_mmr = (struct tioce *)ce_common->ce_pcibus.bs_base;
+ ce_mmr = (struct tioce __iomem *)ce_common->ce_pcibus.bs_base;
+ ce_kern = (struct tioce_kernel *)ce_common->ce_kernel_private;
+
+ /*
+ * TIOCE Rev A workaround (PV 945826), force an interrupt by writing
+ * the TIO_INTx register directly (1/26/2006)
+ */
+ if (ce_common->ce_rev == TIOCE_REV_A) {
+ u64 int_bit_mask = (1ULL << sn_irq_info->irq_int_bit);
+ u64 status;
+
+ tioce_mmr_load(ce_kern, &ce_mmr->ce_adm_int_status, &status);
+ if (status & int_bit_mask) {
+ u64 force_irq = (1 << 8) | sn_irq_info->irq_irq;
+ u64 ctalk = sn_irq_info->irq_xtalkaddr;
+ u64 nasid, offset;
+
+ nasid = (ctalk & CTALK_NASID_MASK) >> CTALK_NASID_SHFT;
+ offset = (ctalk & CTALK_NODE_OFFSET);
+ HUB_S(TIO_IOSPACE_ADDR(nasid, offset), force_irq);
+ }
+
+ return;
+ }
/*
* irq_int_bit is originally set up by prom, and holds the interrupt
@@ -666,7 +935,7 @@ tioce_force_interrupt(struct sn_irq_info *sn_irq_info)
default:
return;
}
- writeq(force_int_val, &ce_mmr->ce_adm_force_int);
+ tioce_mmr_storei(ce_kern, &ce_mmr->ce_adm_force_int, force_int_val);
}
/**
@@ -685,7 +954,8 @@ tioce_target_interrupt(struct sn_irq_info *sn_irq_info)
{
struct pcidev_info *pcidev_info;
struct tioce_common *ce_common;
- struct tioce *ce_mmr;
+ struct tioce_kernel *ce_kern;
+ struct tioce __iomem *ce_mmr;
int bit;
u64 vector;
@@ -694,15 +964,16 @@ tioce_target_interrupt(struct sn_irq_info *sn_irq_info)
return;
ce_common = (struct tioce_common *)pcidev_info->pdi_pcibus_info;
- ce_mmr = (struct tioce *)ce_common->ce_pcibus.bs_base;
+ ce_mmr = (struct tioce __iomem *)ce_common->ce_pcibus.bs_base;
+ ce_kern = (struct tioce_kernel *)ce_common->ce_kernel_private;
bit = sn_irq_info->irq_int_bit;
- __sn_setq_relaxed(&ce_mmr->ce_adm_int_mask, (1UL << bit));
+ tioce_mmr_seti(ce_kern, &ce_mmr->ce_adm_int_mask, (1UL << bit));
vector = (u64)sn_irq_info->irq_irq << INTR_VECTOR_SHFT;
vector |= sn_irq_info->irq_xtalkaddr;
- writeq(vector, &ce_mmr->ce_adm_int_dest[bit]);
- __sn_clrq_relaxed(&ce_mmr->ce_adm_int_mask, (1UL << bit));
+ tioce_mmr_storei(ce_kern, &ce_mmr->ce_adm_int_dest[bit], vector);
+ tioce_mmr_clri(ce_kern, &ce_mmr->ce_adm_int_mask, (1UL << bit));
tioce_force_interrupt(sn_irq_info);
}
@@ -722,6 +993,8 @@ static void *
tioce_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller)
{
struct tioce_common *tioce_common;
+ struct tioce_kernel *tioce_kern;
+ struct tioce __iomem *tioce_mmr;
/*
* Allocate kernel bus soft and copy from prom.
@@ -732,24 +1005,40 @@ tioce_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
return NULL;
memcpy(tioce_common, prom_bussoft, sizeof(struct tioce_common));
- tioce_common->ce_pcibus.bs_base |= __IA64_UNCACHED_OFFSET;
+ tioce_common->ce_pcibus.bs_base = (unsigned long)
+ ioremap(REGION_OFFSET(tioce_common->ce_pcibus.bs_base),
+ sizeof(struct tioce_common));
- if (tioce_kern_init(tioce_common) == NULL) {
+ tioce_kern = tioce_kern_init(tioce_common);
+ if (tioce_kern == NULL) {
kfree(tioce_common);
return NULL;
}
+ /*
+ * Clear out any transient errors before registering the error
+ * interrupt handler.
+ */
+
+ tioce_mmr = (struct tioce __iomem *)tioce_common->ce_pcibus.bs_base;
+ tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_adm_int_status_alias, ~0ULL);
+ tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_adm_error_summary_alias,
+ ~0ULL);
+ tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_dre_comp_err_addr, 0ULL);
+
if (request_irq(SGI_PCIASIC_ERROR,
tioce_error_intr_handler,
- SA_SHIRQ, "TIOCE error", (void *)tioce_common))
+ IRQF_SHARED, "TIOCE error", (void *)tioce_common))
printk(KERN_WARNING
"%s: Unable to get irq %d. "
"Error interrupts won't be routed for "
"TIOCE bus %04x:%02x\n",
- __FUNCTION__, SGI_PCIASIC_ERROR,
+ __func__, SGI_PCIASIC_ERROR,
tioce_common->ce_pcibus.bs_persist_segment,
tioce_common->ce_pcibus.bs_persist_busnum);
+ irq_set_handler(SGI_PCIASIC_ERROR, handle_level_irq);
+ sn_set_err_irq_affinity(SGI_PCIASIC_ERROR);
return tioce_common;
}
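
For reference, the tioce_kern_init() hunk earlier in this file reserves M32 ATEs behind each PPB's memory and prefetchable windows so they can never be handed out for DMA. The standalone sketch below rebuilds a 64-bit prefetchable window from the 16-bit base/limit registers and the upper-32 registers in the same way the hunk does; the register values are made-up samples and PCI_PREF_RANGE_MASK is taken to clear the low four flag bits.

/* Standalone sketch of the prefetchable-window reconstruction done in the
 * tioce_kern_init() hunk above.  Register values are hard-coded samples;
 * PCI_PREF_RANGE_MASK is taken to clear the low four flag bits. */
#include <stdio.h>
#include <stdint.h>

static uint64_t pref_window(uint16_t base16, uint32_t base_upper32,
			    uint16_t limit16, uint32_t limit_upper32,
			    uint64_t *limit_out)
{
	uint64_t base, limit;

	base  = ((uint64_t)(base16 & ~0xfULL)) << 16;	/* PCI_PREF_MEMORY_BASE */
	base |= (uint64_t)base_upper32 << 32;		/* PCI_PREF_BASE_UPPER32 */

	limit  = ((uint64_t)(limit16 & ~0xfULL)) << 16;	/* PCI_PREF_MEMORY_LIMIT */
	limit |= 0xfffffULL;				/* window is 1MB granular */
	limit |= (uint64_t)limit_upper32 << 32;		/* PCI_PREF_LIMIT_UPPER32 */

	*limit_out = limit;
	return base;
}

int main(void)
{
	uint64_t limit;
	uint64_t base = pref_window(0xc001, 0x1, 0xc011, 0x1, &limit);

	/* Only a window with base < limit (and, for TIOCE, inside the M32
	 * range) would have its ATEs reserved. */
	printf("base  %#llx\nlimit %#llx\n",
	       (unsigned long long)base, (unsigned long long)limit);
	return 0;
}
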