Diffstat (limited to 'drivers/misc/sgi-xp')
-rw-r--r--  drivers/misc/sgi-xp/xp.h             |   2
-rw-r--r--  drivers/misc/sgi-xp/xp_main.c        |   3
-rw-r--r--  drivers/misc/sgi-xp/xp_sn2.c         |  10
-rw-r--r--  drivers/misc/sgi-xp/xp_uv.c          |  33
-rw-r--r--  drivers/misc/sgi-xp/xpc_channel.c    |   5
-rw-r--r--  drivers/misc/sgi-xp/xpc_main.c       |  55
-rw-r--r--  drivers/misc/sgi-xp/xpc_partition.c  |  39
-rw-r--r--  drivers/misc/sgi-xp/xpc_sn2.c        |   1
-rw-r--r--  drivers/misc/sgi-xp/xpc_uv.c         | 157
-rw-r--r--  drivers/misc/sgi-xp/xpnet.c          |  11
10 files changed, 236 insertions(+), 80 deletions(-)
diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
index 2275126cb33..c862cd4583c 100644
--- a/drivers/misc/sgi-xp/xp.h
+++ b/drivers/misc/sgi-xp/xp.h
@@ -25,7 +25,6 @@
#endif
#if defined CONFIG_IA64
-#include <asm/system.h>
#include <asm/sn/arch.h> /* defines is_shub1() and is_shub2() */
#define is_shub() ia64_platform_is("sn2")
#endif
@@ -339,6 +338,7 @@ extern short xp_partition_id;
extern u8 xp_region_size;
extern unsigned long (*xp_pa) (void *);
+extern unsigned long (*xp_socket_pa) (unsigned long);
extern enum xp_retval (*xp_remote_memcpy) (unsigned long, const unsigned long,
size_t);
extern int (*xp_cpu_to_nasid) (int);
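A note on the new hook: a "global" physical address (GPA) as exchanged between partitions is not directly usable with __va() on UV, because it carries node-id bits above the socket's physical address range. xp_socket_pa() strips those bits (it is the identity on sn2 and uv_gpa_to_soc_phys_ram() on UV). A minimal sketch of the intended call pattern, mirroring the xpc_partition.c fix further down:

	/* Map a remote-supplied GPA into the local direct map: convert
	 * the GPA to a socket PA first, since __va() expects a PA that
	 * is valid on this socket. */
	struct xpc_rsvd_page *rp = __va(xp_socket_pa(rp_pa));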
diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
index 7896849b16d..01be66d02ca 100644
--- a/drivers/misc/sgi-xp/xp_main.c
+++ b/drivers/misc/sgi-xp/xp_main.c
@@ -44,6 +44,9 @@ EXPORT_SYMBOL_GPL(xp_region_size);
unsigned long (*xp_pa) (void *addr);
EXPORT_SYMBOL_GPL(xp_pa);
+unsigned long (*xp_socket_pa) (unsigned long gpa);
+EXPORT_SYMBOL_GPL(xp_socket_pa);
+
enum xp_retval (*xp_remote_memcpy) (unsigned long dst_gpa,
const unsigned long src_gpa, size_t len);
EXPORT_SYMBOL_GPL(xp_remote_memcpy);
diff --git a/drivers/misc/sgi-xp/xp_sn2.c b/drivers/misc/sgi-xp/xp_sn2.c
index fb3ec9d735a..d8e463f8724 100644
--- a/drivers/misc/sgi-xp/xp_sn2.c
+++ b/drivers/misc/sgi-xp/xp_sn2.c
@@ -84,6 +84,15 @@ xp_pa_sn2(void *addr)
}
/*
+ * Convert a global physical to a socket physical address.
+ */
+static unsigned long
+xp_socket_pa_sn2(unsigned long gpa)
+{
+ return gpa;
+}
+
+/*
* Wrapper for bte_copy().
*
* dst_pa - physical address of the destination of the transfer.
@@ -162,6 +171,7 @@ xp_init_sn2(void)
xp_region_size = sn_region_size;
xp_pa = xp_pa_sn2;
+ xp_socket_pa = xp_socket_pa_sn2;
xp_remote_memcpy = xp_remote_memcpy_sn2;
xp_cpu_to_nasid = xp_cpu_to_nasid_sn2;
xp_expand_memprotect = xp_expand_memprotect_sn2;
diff --git a/drivers/misc/sgi-xp/xp_uv.c b/drivers/misc/sgi-xp/xp_uv.c
index d238576b26f..a0d093274dc 100644
--- a/drivers/misc/sgi-xp/xp_uv.c
+++ b/drivers/misc/sgi-xp/xp_uv.c
@@ -32,12 +32,44 @@ xp_pa_uv(void *addr)
return uv_gpa(addr);
}
+/*
+ * Convert a global physical to socket physical address.
+ */
+static unsigned long
+xp_socket_pa_uv(unsigned long gpa)
+{
+ return uv_gpa_to_soc_phys_ram(gpa);
+}
+
+static enum xp_retval
+xp_remote_mmr_read(unsigned long dst_gpa, const unsigned long src_gpa,
+ size_t len)
+{
+ int ret;
+ unsigned long *dst_va = __va(uv_gpa_to_soc_phys_ram(dst_gpa));
+
+ BUG_ON(!uv_gpa_in_mmr_space(src_gpa));
+ BUG_ON(len != 8);
+
+ ret = gru_read_gpa(dst_va, src_gpa);
+ if (ret == 0)
+ return xpSuccess;
+
+ dev_err(xp, "gru_read_gpa() failed, dst_gpa=0x%016lx src_gpa=0x%016lx "
+ "len=%ld\n", dst_gpa, src_gpa, len);
+ return xpGruCopyError;
+}
+
+
static enum xp_retval
xp_remote_memcpy_uv(unsigned long dst_gpa, const unsigned long src_gpa,
size_t len)
{
int ret;
+ if (uv_gpa_in_mmr_space(src_gpa))
+ return xp_remote_mmr_read(dst_gpa, src_gpa, len);
+
ret = gru_copy_gpa(dst_gpa, src_gpa, len);
if (ret == 0)
return xpSuccess;
@@ -123,6 +155,7 @@ xp_init_uv(void)
xp_region_size = sn_region_size;
xp_pa = xp_pa_uv;
+ xp_socket_pa = xp_socket_pa_uv;
xp_remote_memcpy = xp_remote_memcpy_uv;
xp_cpu_to_nasid = xp_cpu_to_nasid_uv;
xp_expand_memprotect = xp_expand_memprotect_uv;
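The split above matters because gru_copy_gpa() cannot read MMR space; gru_read_gpa() can, but only one 64-bit word at a time, hence the BUG_ON(len != 8). A sketch of a caller exercising the new path (mmr_gpa is a hypothetical GPA known to lie in MMR space):

	u64 val;	/* destination of one 64-bit MMR read */
	enum xp_retval ret;

	/* With the source in MMR space, xp_remote_memcpy() (bound to
	 * xp_remote_memcpy_uv() on UV) takes the gru_read_gpa() path;
	 * len must be exactly 8. */
	ret = xp_remote_memcpy(xp_pa(&val), mmr_gpa, sizeof(val));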
diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
index 652593fc486..128d5615c80 100644
--- a/drivers/misc/sgi-xp/xpc_channel.c
+++ b/drivers/misc/sgi-xp/xpc_channel.c
@@ -828,6 +828,7 @@ enum xp_retval
xpc_allocate_msg_wait(struct xpc_channel *ch)
{
enum xp_retval ret;
+ DEFINE_WAIT(wait);
if (ch->flags & XPC_C_DISCONNECTING) {
DBUG_ON(ch->reason == xpInterrupted);
@@ -835,7 +836,9 @@ xpc_allocate_msg_wait(struct xpc_channel *ch)
}
atomic_inc(&ch->n_on_msg_allocate_wq);
- ret = interruptible_sleep_on_timeout(&ch->msg_allocate_wq, 1);
+ prepare_to_wait(&ch->msg_allocate_wq, &wait, TASK_INTERRUPTIBLE);
+ ret = schedule_timeout(1);
+ finish_wait(&ch->msg_allocate_wq, &wait);
atomic_dec(&ch->n_on_msg_allocate_wq);
if (ch->flags & XPC_C_DISCONNECTING) {
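interruptible_sleep_on_timeout() was inherently racy (a wakeup arriving between the caller's flag test and the sleep is lost) and was on its way out of the kernel; the open-coded prepare_to_wait()/finish_wait() sequence is the standard replacement. The general shape of the idiom, for some wait queue wq and condition cond (a generic sketch, not driver code):

	DEFINE_WAIT(wait);

	prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
	if (!cond)			/* re-check after queueing so a */
		schedule_timeout(1);	/* concurrent wakeup is not lost */
	finish_wait(&wq, &wait);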
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index fd3688a3e23..82dc5748f87 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -44,6 +44,7 @@
*/
#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/device.h>
#include <linux/delay.h>
@@ -52,6 +53,10 @@
#include <linux/kthread.h>
#include "xpc.h"
+#ifdef CONFIG_X86_64
+#include <asm/traps.h>
+#endif
+
/* define two XPC debug device structures to be used with dev_dbg() et al */
struct device_driver xpc_dbg_name = {
@@ -87,50 +92,42 @@ int xpc_disengage_timelimit = XPC_DISENGAGE_DEFAULT_TIMELIMIT;
static int xpc_disengage_min_timelimit; /* = 0 */
static int xpc_disengage_max_timelimit = 120;
-static ctl_table xpc_sys_xpc_hb_dir[] = {
+static struct ctl_table xpc_sys_xpc_hb_dir[] = {
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "hb_interval",
.data = &xpc_hb_interval,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &xpc_hb_min_interval,
.extra2 = &xpc_hb_max_interval},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "hb_check_interval",
.data = &xpc_hb_check_interval,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &xpc_hb_check_min_interval,
.extra2 = &xpc_hb_check_max_interval},
{}
};
-static ctl_table xpc_sys_xpc_dir[] = {
+static struct ctl_table xpc_sys_xpc_dir[] = {
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "hb",
.mode = 0555,
.child = xpc_sys_xpc_hb_dir},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "disengage_timelimit",
.data = &xpc_disengage_timelimit,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &xpc_disengage_min_timelimit,
.extra2 = &xpc_disengage_max_timelimit},
{}
};
-static ctl_table xpc_sys_dir[] = {
+static struct ctl_table xpc_sys_dir[] = {
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "xpc",
.mode = 0555,
.child = xpc_sys_xpc_dir},
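This tracks the removal of the binary sysctl plumbing: .ctl_name (CTL_UNNUMBERED) and .strategy are gone, the tables must be spelled struct ctl_table, and .proc_handler is a plain function pointer. A self-contained sketch of a post-conversion entry (my_int, my_min and my_max are placeholders):

	static int my_int;
	static int my_min;	/* = 0 */
	static int my_max = 10;

	static struct ctl_table my_table[] = {
		{
		 .procname = "my_int",
		 .data = &my_int,
		 .maxlen = sizeof(int),
		 .mode = 0644,
		 .proc_handler = proc_dointvec_minmax,
		 .extra1 = &my_min,
		 .extra2 = &my_max},
		{}
	};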
@@ -1086,6 +1083,9 @@ xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
return NOTIFY_DONE;
}
+/* Used to only allow one cpu to complete disconnect */
+static unsigned int xpc_die_disconnecting;
+
/*
* Notify other partitions to deactivate from us by first disengaging from all
* references to our memory.
@@ -1099,6 +1099,9 @@ xpc_die_deactivate(void)
long keep_waiting;
long wait_to_print;
+ if (cmpxchg(&xpc_die_disconnecting, 0, 1))
+ return;
+
/* keep xpc_hb_checker thread from doing anything (just in case) */
xpc_exiting = 1;
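The cmpxchg() is a one-shot gate: on a multi-CPU die, every CPU may run the die notifier, but only the CPU that transitions xpc_die_disconnecting from 0 to 1 proceeds to deactivate; every other caller sees the updated value and bails. The idiom in isolation (a sketch):

	static unsigned int once;

	/* cmpxchg() returns the prior value: 0 for exactly one caller */
	if (cmpxchg(&once, 0, 1))
		return;		/* another CPU got here first */
	/* ... one-time teardown ... */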
@@ -1166,7 +1169,7 @@ xpc_die_deactivate(void)
* about the lack of a heartbeat.
*/
static int
-xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
+xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
{
#ifdef CONFIG_IA64 /* !!! temporary kludge */
switch (event) {
@@ -1198,7 +1201,27 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
break;
}
#else
- xpc_die_deactivate();
+ struct die_args *die_args = _die_args;
+
+ switch (event) {
+ case DIE_TRAP:
+ if (die_args->trapnr == X86_TRAP_DF)
+ xpc_die_deactivate();
+
+ if (((die_args->trapnr == X86_TRAP_MF) ||
+ (die_args->trapnr == X86_TRAP_XF)) &&
+ !user_mode_vm(die_args->regs))
+ xpc_die_deactivate();
+
+ break;
+ case DIE_INT3:
+ case DIE_DEBUG:
+ break;
+ case DIE_OOPS:
+ case DIE_GPF:
+ default:
+ xpc_die_deactivate();
+ }
#endif
return NOTIFY_DONE;
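On x86 the notifier now discriminates by trap: a double fault (X86_TRAP_DF) always deactivates; x87/SIMD FP exceptions (X86_TRAP_MF, X86_TRAP_XF) deactivate only when raised from kernel mode, since user-mode FP traps are survivable; DIE_INT3 and DIE_DEBUG are ignored so breakpoints and kprobes don't tear XPC down; everything else (oops, GPF) is treated as fatal. The callback is wired up with the usual die-notifier boilerplate (shape for reference; xpc_main.c registers one like it):

	static struct notifier_block xpc_die_notifier = {
		.notifier_call = xpc_system_die,
	};

	/* at init time: */
	register_die_notifier(&xpc_die_notifier);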
diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c
index 65877bc5eda..6956f7e7d43 100644
--- a/drivers/misc/sgi-xp/xpc_partition.c
+++ b/drivers/misc/sgi-xp/xpc_partition.c
@@ -17,7 +17,9 @@
#include <linux/device.h>
#include <linux/hardirq.h>
+#include <linux/slab.h>
#include "xpc.h"
+#include <asm/uv/uv_hub.h>
/* XPC is exiting flag */
int xpc_exiting;
@@ -92,8 +94,12 @@ xpc_get_rsvd_page_pa(int nasid)
break;
/* !!! L1_CACHE_ALIGN() is only a sn2-bte_copy requirement */
- if (L1_CACHE_ALIGN(len) > buf_len) {
- kfree(buf_base);
+ if (is_shub())
+ len = L1_CACHE_ALIGN(len);
+
+ if (len > buf_len) {
+ if (buf_base != NULL)
+ kfree(buf_base);
buf_len = L1_CACHE_ALIGN(len);
buf = xpc_kmalloc_cacheline_aligned(buf_len, GFP_KERNEL,
&buf_base);
@@ -105,7 +111,7 @@ xpc_get_rsvd_page_pa(int nasid)
}
}
- ret = xp_remote_memcpy(xp_pa(buf), rp_pa, buf_len);
+ ret = xp_remote_memcpy(xp_pa(buf), rp_pa, len);
if (ret != xpSuccess) {
dev_dbg(xpc_part, "xp_remote_memcpy failed %d\n", ret);
break;
@@ -143,7 +149,7 @@ xpc_setup_rsvd_page(void)
dev_err(xpc_part, "SAL failed to locate the reserved page\n");
return -ESRCH;
}
- rp = (struct xpc_rsvd_page *)__va(rp_pa);
+ rp = (struct xpc_rsvd_page *)__va(xp_socket_pa(rp_pa));
if (rp->SAL_version < 3) {
/* SAL_versions < 3 had a SAL_partid defined as a u8 */
@@ -433,18 +439,23 @@ xpc_discovery(void)
* nodes that can comprise an access protection grouping. The access
* protection is in regards to memory, IOI and IPI.
*/
- max_regions = 64;
region_size = xp_region_size;
- switch (region_size) {
- case 128:
- max_regions *= 2;
- case 64:
- max_regions *= 2;
- case 32:
- max_regions *= 2;
- region_size = 16;
- DBUG_ON(!is_shub2());
+ if (is_uv())
+ max_regions = 256;
+ else {
+ max_regions = 64;
+
+ switch (region_size) {
+ case 128:
+ max_regions *= 2;
+ case 64:
+ max_regions *= 2;
+ case 32:
+ max_regions *= 2;
+ region_size = 16;
+ DBUG_ON(!is_shub2());
+ }
}
for (region = 0; region < max_regions; region++) {
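The bare case labels fall through deliberately, so each step doubles max_regions: region_size 128 gives 64 * 8 = 512 regions, 64 gives 256, 32 gives 128, and 16 (no case taken) leaves the default 64. UV hardware does not follow this scheme, hence the flat max_regions = 256 short-circuit.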
diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
index 8b70e03f939..7d71c04fc93 100644
--- a/drivers/misc/sgi-xp/xpc_sn2.c
+++ b/drivers/misc/sgi-xp/xpc_sn2.c
@@ -14,6 +14,7 @@
*/
#include <linux/delay.h>
+#include <linux/slab.h>
#include <asm/uncached.h>
#include <asm/sn/mspec.h>
#include <asm/sn/sn_sal.h>
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index c76677afda1..95c894482fd 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -18,7 +18,10 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/cpu.h>
+#include <linux/module.h>
#include <linux/err.h>
+#include <linux/slab.h>
#include <asm/uv/uv_hub.h>
#if defined CONFIG_X86_64
#include <asm/uv/bios.h>
@@ -58,6 +61,8 @@ static struct xpc_heartbeat_uv *xpc_heartbeat_uv;
XPC_NOTIFY_MSG_SIZE_UV)
#define XPC_NOTIFY_IRQ_NAME "xpc_notify"
+static int xpc_mq_node = -1;
+
static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
static struct xpc_gru_mq_uv *xpc_notify_mq_uv;
@@ -106,12 +111,10 @@ xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
#if defined CONFIG_X86_64
- mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset);
- if (mq->irq < 0) {
- dev_err(xpc_part, "uv_setup_irq() returned error=%d\n",
- -mq->irq);
+ mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
+ UV_AFFINITY_CPU);
+ if (mq->irq < 0)
return mq->irq;
- }
mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);
@@ -136,7 +139,7 @@ static void
xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq)
{
#if defined CONFIG_X86_64
- uv_teardown_irq(mq->irq, mq->mmr_blade, mq->mmr_offset);
+ uv_teardown_irq(mq->irq);
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
int mmr_pnode;
@@ -156,22 +159,24 @@ xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
{
int ret;
-#if defined CONFIG_X86_64
- ret = uv_bios_mq_watchlist_alloc(mq->mmr_blade, uv_gpa(mq->address),
- mq->order, &mq->mmr_offset);
- if (ret < 0) {
- dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
- "ret=%d\n", ret);
- return ret;
- }
-#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
- ret = sn_mq_watchlist_alloc(mq->mmr_blade, (void *)uv_gpa(mq->address),
+#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+ int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
+
+ ret = sn_mq_watchlist_alloc(mmr_pnode, (void *)uv_gpa(mq->address),
mq->order, &mq->mmr_offset);
if (ret < 0) {
dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
ret);
return -EBUSY;
}
+#elif defined CONFIG_X86_64
+ ret = uv_bios_mq_watchlist_alloc(uv_gpa(mq->address),
+ mq->order, &mq->mmr_offset);
+ if (ret < 0) {
+ dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
+ "ret=%d\n", ret);
+ return ret;
+ }
#else
#error not a supported configuration
#endif
@@ -184,12 +189,13 @@ static void
xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
{
int ret;
+ int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
#if defined CONFIG_X86_64
- ret = uv_bios_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
+ ret = uv_bios_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
BUG_ON(ret != BIOS_STATUS_SUCCESS);
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
- ret = sn_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
+ ret = sn_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
BUG_ON(ret != SALRET_OK);
#else
#error not a supported configuration
@@ -203,6 +209,7 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
enum xp_retval xp_ret;
int ret;
int nid;
+ int nasid;
int pg_order;
struct page *page;
struct xpc_gru_mq_uv *mq;
@@ -232,8 +239,9 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
mq->mmr_blade = uv_cpu_to_blade_id(cpu);
nid = cpu_to_node(cpu);
- page = alloc_pages_exact_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
- pg_order);
+ page = alloc_pages_exact_node(nid,
+ GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
+ pg_order);
if (page == NULL) {
dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
"bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
@@ -258,9 +266,11 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
goto out_5;
}
+ nasid = UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpu));
+
mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value;
ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size,
- nid, mmr_value->vector, mmr_value->dest);
+ nasid, mmr_value->vector, mmr_value->dest);
if (ret != 0) {
dev_err(xpc_part, "gru_create_message_queue() returned "
"error=%d\n", ret);
@@ -409,6 +419,7 @@ xpc_process_activate_IRQ_rcvd_uv(void)
static void
xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
struct xpc_activate_mq_msghdr_uv *msg_hdr,
+ int part_setup,
int *wakeup_hb_checker)
{
unsigned long irq_flags;
@@ -443,9 +454,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
if (msg->activate_gru_mq_desc_gpa !=
part_uv->activate_gru_mq_desc_gpa) {
- spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
+ spin_lock(&part_uv->flags_lock);
part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
- spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
+ spin_unlock(&part_uv->flags_lock);
part_uv->activate_gru_mq_desc_gpa =
msg->activate_gru_mq_desc_gpa;
}
@@ -473,6 +484,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: {
struct xpc_activate_mq_msg_chctl_closerequest_uv *msg;
+ if (!part_setup)
+ break;
+
msg = container_of(msg_hdr, struct
xpc_activate_mq_msg_chctl_closerequest_uv,
hdr);
@@ -489,6 +503,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: {
struct xpc_activate_mq_msg_chctl_closereply_uv *msg;
+ if (!part_setup)
+ break;
+
msg = container_of(msg_hdr, struct
xpc_activate_mq_msg_chctl_closereply_uv,
hdr);
@@ -503,6 +520,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: {
struct xpc_activate_mq_msg_chctl_openrequest_uv *msg;
+ if (!part_setup)
+ break;
+
msg = container_of(msg_hdr, struct
xpc_activate_mq_msg_chctl_openrequest_uv,
hdr);
@@ -520,6 +540,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: {
struct xpc_activate_mq_msg_chctl_openreply_uv *msg;
+ if (!part_setup)
+ break;
+
msg = container_of(msg_hdr, struct
xpc_activate_mq_msg_chctl_openreply_uv, hdr);
args = &part->remote_openclose_args[msg->ch_number];
@@ -537,6 +560,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV: {
struct xpc_activate_mq_msg_chctl_opencomplete_uv *msg;
+ if (!part_setup)
+ break;
+
msg = container_of(msg_hdr, struct
xpc_activate_mq_msg_chctl_opencomplete_uv, hdr);
spin_lock_irqsave(&part->chctl_lock, irq_flags);
@@ -613,6 +639,7 @@ xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
part_referenced = xpc_part_ref(part);
xpc_handle_activate_mq_msg_uv(part, msg_hdr,
+ part_referenced,
&wakeup_hb_checker);
if (part_referenced)
xpc_part_deref(part);
@@ -945,11 +972,13 @@ xpc_get_fifo_entry_uv(struct xpc_fifo_head_uv *head)
head->first = first->next;
if (head->first == NULL)
head->last = NULL;
+
+ head->n_entries--;
+ BUG_ON(head->n_entries < 0);
+
+ first->next = NULL;
}
- head->n_entries--;
- BUG_ON(head->n_entries < 0);
spin_unlock_irqrestore(&head->lock, irq_flags);
- first->next = NULL;
return first;
}
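Two bugs are fixed at once here: when the list was empty (first == NULL), the old code still decremented n_entries, underflowing the count; and it cleared first->next only after dropping head->lock, racing with concurrent list users. The corrected dequeue, shown whole (a sketch assembled from the hunk above):

	spin_lock_irqsave(&head->lock, irq_flags);
	first = head->first;
	if (first != NULL) {
		head->first = first->next;
		if (head->first == NULL)
			head->last = NULL;
		head->n_entries--;
		BUG_ON(head->n_entries < 0);
		first->next = NULL;	/* detach while still locked */
	}
	spin_unlock_irqrestore(&head->lock, irq_flags);
	return first;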
@@ -1018,7 +1047,8 @@ xpc_make_first_contact_uv(struct xpc_partition *part)
xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV);
- while (part->sn.uv.remote_act_state != XPC_P_AS_ACTIVATING) {
+ while (!((part->sn.uv.remote_act_state == XPC_P_AS_ACTIVATING) ||
+ (part->sn.uv.remote_act_state == XPC_P_AS_ACTIVE))) {
dev_dbg(xpc_part, "waiting to make first contact with "
"partition %d\n", XPC_PARTID(part));
@@ -1421,7 +1451,6 @@ xpc_handle_notify_mq_msg_uv(struct xpc_partition *part,
msg_slot = ch_uv->recv_msg_slots +
(msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size;
- BUG_ON(msg->hdr.msg_slot_number != msg_slot->hdr.msg_slot_number);
BUG_ON(msg_slot->hdr.size != 0);
memcpy(msg_slot, msg, msg->hdr.size);
@@ -1645,8 +1674,6 @@ xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
sizeof(struct xpc_notify_mq_msghdr_uv));
if (ret != xpSuccess)
XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
-
- msg->hdr.msg_slot_number += ch->remote_nentries;
}
static struct xpc_arch_operations xpc_arch_ops_uv = {
@@ -1706,9 +1733,50 @@ static struct xpc_arch_operations xpc_arch_ops_uv = {
.notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv,
};
+static int
+xpc_init_mq_node(int nid)
+{
+ int cpu;
+
+ get_online_cpus();
+
+ for_each_cpu(cpu, cpumask_of_node(nid)) {
+ xpc_activate_mq_uv =
+ xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, nid,
+ XPC_ACTIVATE_IRQ_NAME,
+ xpc_handle_activate_IRQ_uv);
+ if (!IS_ERR(xpc_activate_mq_uv))
+ break;
+ }
+ if (IS_ERR(xpc_activate_mq_uv)) {
+ put_online_cpus();
+ return PTR_ERR(xpc_activate_mq_uv);
+ }
+
+ for_each_cpu(cpu, cpumask_of_node(nid)) {
+ xpc_notify_mq_uv =
+ xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, nid,
+ XPC_NOTIFY_IRQ_NAME,
+ xpc_handle_notify_IRQ_uv);
+ if (!IS_ERR(xpc_notify_mq_uv))
+ break;
+ }
+ if (IS_ERR(xpc_notify_mq_uv)) {
+ xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
+ put_online_cpus();
+ return PTR_ERR(xpc_notify_mq_uv);
+ }
+
+ put_online_cpus();
+ return 0;
+}
+
int
xpc_init_uv(void)
{
+ int nid;
+ int ret = 0;
+
xpc_arch_ops = xpc_arch_ops_uv;
if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
@@ -1717,21 +1785,21 @@ xpc_init_uv(void)
return -E2BIG;
}
- xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0,
- XPC_ACTIVATE_IRQ_NAME,
- xpc_handle_activate_IRQ_uv);
- if (IS_ERR(xpc_activate_mq_uv))
- return PTR_ERR(xpc_activate_mq_uv);
+ if (xpc_mq_node < 0)
+ for_each_online_node(nid) {
+ ret = xpc_init_mq_node(nid);
- xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0,
- XPC_NOTIFY_IRQ_NAME,
- xpc_handle_notify_IRQ_uv);
- if (IS_ERR(xpc_notify_mq_uv)) {
- xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
- return PTR_ERR(xpc_notify_mq_uv);
- }
+ if (!ret)
+ break;
+ }
+ else
+ ret = xpc_init_mq_node(xpc_mq_node);
- return 0;
+ if (ret < 0)
+ dev_err(xpc_part, "xpc_init_mq_node() returned error=%d\n",
+ -ret);
+
+ return ret;
}
void
@@ -1740,3 +1808,6 @@ xpc_exit_uv(void)
xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
}
+
+module_param(xpc_mq_node, int, 0);
+MODULE_PARM_DESC(xpc_mq_node, "Node number on which to allocate message queues.");
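xpc_init_mq_node() runs under get_online_cpus() so the node's CPU mask cannot change mid-walk, and it tries each CPU of the node in turn until both GRU message queues come up. By default every online node is tried; the new parameter pins the search to a single node at load time, e.g. (usage sketch; xpc is the module this directory builds):

	modprobe xpc xpc_mq_node=0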
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index 16f0abda142..3fac67a5204 100644
--- a/drivers/misc/sgi-xp/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -20,6 +20,7 @@
*
*/
+#include <linux/slab.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -475,7 +476,7 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (skb->data[0] == 0xff) {
/* we are being asked to broadcast to all partitions */
- for_each_bit(dest_partid, xpnet_broadcast_partitions,
+ for_each_set_bit(dest_partid, xpnet_broadcast_partitions,
xp_max_npartitions) {
xpnet_send(skb, queued_msg, start_addr, end_addr,
@@ -494,14 +495,14 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+
if (atomic_dec_return(&queued_msg->use_count) == 0) {
dev_kfree_skb(skb);
kfree(queued_msg);
}
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
-
return NETDEV_TX_OK;
}
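The reordering fixes a use-after-free: when atomic_dec_return() hit zero, the old code freed the skb via dev_kfree_skb() and then read skb->len for the tx_bytes accounting. Updating the stats before the potential free closes that window.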
@@ -575,7 +576,7 @@ xpnet_init(void)
* report an error if the data is not retrievable and the
* packet will be dropped.
*/
- xpnet_device->features = NETIF_F_NO_CSUM;
+ xpnet_device->features = NETIF_F_HW_CSUM;
result = register_netdev(xpnet_device);
if (result != 0) {
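On the features change: NETIF_F_NO_CSUM asserted that checksums are never needed anywhere a packet might travel, which is only safe for strictly local devices such as loopback; NETIF_F_HW_CSUM instead tells the stack that the device handles checksumming itself, which better describes xpnet's reliable XPC transport without over-promising if a frame is ever forwarded.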