Diffstat (limited to 'drivers/misc/sgi-xp/xpc_uv.c')
-rw-r--r--  drivers/misc/sgi-xp/xpc_uv.c  |  157
1 file changed, 114 insertions(+), 43 deletions(-)
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index c76677afda1..95c894482fd 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -18,7 +18,10 @@
 #include <linux/interrupt.h>
 #include <linux/delay.h>
 #include <linux/device.h>
+#include <linux/cpu.h>
+#include <linux/module.h>
 #include <linux/err.h>
+#include <linux/slab.h>
 #include <asm/uv/uv_hub.h>
 #if defined CONFIG_X86_64
 #include <asm/uv/bios.h>
@@ -58,6 +61,8 @@ static struct xpc_heartbeat_uv *xpc_heartbeat_uv;
                                 XPC_NOTIFY_MSG_SIZE_UV)
 #define XPC_NOTIFY_IRQ_NAME     "xpc_notify"
 
+static int xpc_mq_node = -1;
+
 static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
 static struct xpc_gru_mq_uv *xpc_notify_mq_uv;
 
@@ -106,12 +111,10 @@ xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
         int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
 
 #if defined CONFIG_X86_64
-        mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset);
-        if (mq->irq < 0) {
-                dev_err(xpc_part, "uv_setup_irq() returned error=%d\n",
-                        -mq->irq);
+        mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
+                               UV_AFFINITY_CPU);
+        if (mq->irq < 0)
                 return mq->irq;
-        }
 
         mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);
 
@@ -136,7 +139,7 @@ static void
 xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq)
 {
 #if defined CONFIG_X86_64
-        uv_teardown_irq(mq->irq, mq->mmr_blade, mq->mmr_offset);
+        uv_teardown_irq(mq->irq);
 #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
         int mmr_pnode;
 
@@ -156,22 +159,24 @@ xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
 {
         int ret;
 
-#if defined CONFIG_X86_64
-        ret = uv_bios_mq_watchlist_alloc(mq->mmr_blade, uv_gpa(mq->address),
-                                         mq->order, &mq->mmr_offset);
-        if (ret < 0) {
-                dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
-                        "ret=%d\n", ret);
-                return ret;
-        }
-#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
-        ret = sn_mq_watchlist_alloc(mq->mmr_blade, (void *)uv_gpa(mq->address),
+#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+        int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
+
+        ret = sn_mq_watchlist_alloc(mmr_pnode, (void *)uv_gpa(mq->address),
                                     mq->order, &mq->mmr_offset);
         if (ret < 0) {
                 dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
                         ret);
                 return -EBUSY;
         }
+#elif defined CONFIG_X86_64
+        ret = uv_bios_mq_watchlist_alloc(uv_gpa(mq->address),
+                                         mq->order, &mq->mmr_offset);
+        if (ret < 0) {
+                dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
+                        "ret=%d\n", ret);
+                return ret;
+        }
 #else
         #error not a supported configuration
 #endif
@@ -184,12 +189,13 @@ static void
 xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
 {
         int ret;
+        int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
 
 #if defined CONFIG_X86_64
-        ret = uv_bios_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
+        ret = uv_bios_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
         BUG_ON(ret != BIOS_STATUS_SUCCESS);
 #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
-        ret = sn_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
+        ret = sn_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
         BUG_ON(ret != SALRET_OK);
 #else
         #error not a supported configuration
@@ -203,6 +209,7 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
         enum xp_retval xp_ret;
         int ret;
         int nid;
+        int nasid;
         int pg_order;
         struct page *page;
         struct xpc_gru_mq_uv *mq;
@@ -232,8 +239,9 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
         mq->mmr_blade = uv_cpu_to_blade_id(cpu);
 
         nid = cpu_to_node(cpu);
-        page = alloc_pages_exact_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
-                                pg_order);
+        page = alloc_pages_exact_node(nid,
+                                      GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
+                                      pg_order);
         if (page == NULL) {
                 dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
                         "bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
@@ -258,9 +266,11 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
                 goto out_5;
         }
 
+        nasid = UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpu));
+
         mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value;
         ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size,
-                                       nid, mmr_value->vector, mmr_value->dest);
+                                       nasid, mmr_value->vector, mmr_value->dest);
         if (ret != 0) {
                 dev_err(xpc_part, "gru_create_message_queue() returned "
                         "error=%d\n", ret);
@@ -409,6 +419,7 @@ xpc_process_activate_IRQ_rcvd_uv(void)
 static void
 xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
                               struct xpc_activate_mq_msghdr_uv *msg_hdr,
+                              int part_setup,
                               int *wakeup_hb_checker)
 {
         unsigned long irq_flags;
@@ -443,9 +454,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
 
                 if (msg->activate_gru_mq_desc_gpa !=
                     part_uv->activate_gru_mq_desc_gpa) {
-                        spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
+                        spin_lock(&part_uv->flags_lock);
                         part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
-                        spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
+                        spin_unlock(&part_uv->flags_lock);
                         part_uv->activate_gru_mq_desc_gpa =
                             msg->activate_gru_mq_desc_gpa;
                 }
@@ -473,6 +484,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
         case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: {
                 struct xpc_activate_mq_msg_chctl_closerequest_uv *msg;
 
+                if (!part_setup)
+                        break;
+
                 msg = container_of(msg_hdr, struct
                                    xpc_activate_mq_msg_chctl_closerequest_uv,
                                    hdr);
@@ -489,6 +503,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
         case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: {
                 struct xpc_activate_mq_msg_chctl_closereply_uv *msg;
 
+                if (!part_setup)
+                        break;
+
                 msg = container_of(msg_hdr, struct
                                    xpc_activate_mq_msg_chctl_closereply_uv,
                                    hdr);
@@ -503,6 +520,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
         case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: {
                 struct xpc_activate_mq_msg_chctl_openrequest_uv *msg;
 
+                if (!part_setup)
+                        break;
+
                 msg = container_of(msg_hdr, struct
                                    xpc_activate_mq_msg_chctl_openrequest_uv,
                                    hdr);
@@ -520,6 +540,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
         case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: {
                 struct xpc_activate_mq_msg_chctl_openreply_uv *msg;
 
+                if (!part_setup)
+                        break;
+
                 msg = container_of(msg_hdr, struct
                                    xpc_activate_mq_msg_chctl_openreply_uv, hdr);
                 args = &part->remote_openclose_args[msg->ch_number];
@@ -537,6 +560,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
         case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV: {
                 struct xpc_activate_mq_msg_chctl_opencomplete_uv *msg;
 
+                if (!part_setup)
+                        break;
+
                 msg = container_of(msg_hdr, struct
                                 xpc_activate_mq_msg_chctl_opencomplete_uv, hdr);
                 spin_lock_irqsave(&part->chctl_lock, irq_flags);
@@ -613,6 +639,7 @@ xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
 
                         part_referenced = xpc_part_ref(part);
                         xpc_handle_activate_mq_msg_uv(part, msg_hdr,
+                                                      part_referenced,
                                                       &wakeup_hb_checker);
                         if (part_referenced)
                                 xpc_part_deref(part);
@@ -945,11 +972,13 @@ xpc_get_fifo_entry_uv(struct xpc_fifo_head_uv *head)
                 head->first = first->next;
                 if (head->first == NULL)
                         head->last = NULL;
+
+                head->n_entries--;
+                BUG_ON(head->n_entries < 0);
+
+                first->next = NULL;
         }
-        head->n_entries--;
-        BUG_ON(head->n_entries < 0);
         spin_unlock_irqrestore(&head->lock, irq_flags);
-        first->next = NULL;
         return first;
 }
 
@@ -1018,7 +1047,8 @@ xpc_make_first_contact_uv(struct xpc_partition *part)
         xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
                                       XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV);
 
-        while (part->sn.uv.remote_act_state != XPC_P_AS_ACTIVATING) {
+        while (!((part->sn.uv.remote_act_state == XPC_P_AS_ACTIVATING) ||
+                 (part->sn.uv.remote_act_state == XPC_P_AS_ACTIVE))) {
 
                 dev_dbg(xpc_part, "waiting to make first contact with "
                         "partition %d\n", XPC_PARTID(part));
@@ -1421,7 +1451,6 @@ xpc_handle_notify_mq_msg_uv(struct xpc_partition *part,
         msg_slot = ch_uv->recv_msg_slots +
             (msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size;
 
-        BUG_ON(msg->hdr.msg_slot_number != msg_slot->hdr.msg_slot_number);
         BUG_ON(msg_slot->hdr.size != 0);
 
         memcpy(msg_slot, msg, msg->hdr.size);
@@ -1645,8 +1674,6 @@ xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
                                sizeof(struct xpc_notify_mq_msghdr_uv));
         if (ret != xpSuccess)
                 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
-
-        msg->hdr.msg_slot_number += ch->remote_nentries;
 }
 
 static struct xpc_arch_operations xpc_arch_ops_uv = {
@@ -1706,9 +1733,50 @@ static struct xpc_arch_operations xpc_arch_ops_uv = {
         .notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv,
 };
 
+static int
+xpc_init_mq_node(int nid)
+{
+        int cpu;
+
+        get_online_cpus();
+
+        for_each_cpu(cpu, cpumask_of_node(nid)) {
+                xpc_activate_mq_uv =
+                        xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, nid,
+                                             XPC_ACTIVATE_IRQ_NAME,
+                                             xpc_handle_activate_IRQ_uv);
+                if (!IS_ERR(xpc_activate_mq_uv))
+                        break;
+        }
+        if (IS_ERR(xpc_activate_mq_uv)) {
+                put_online_cpus();
+                return PTR_ERR(xpc_activate_mq_uv);
+        }
+
+        for_each_cpu(cpu, cpumask_of_node(nid)) {
+                xpc_notify_mq_uv =
+                        xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, nid,
+                                             XPC_NOTIFY_IRQ_NAME,
+                                             xpc_handle_notify_IRQ_uv);
+                if (!IS_ERR(xpc_notify_mq_uv))
+                        break;
+        }
+        if (IS_ERR(xpc_notify_mq_uv)) {
+                xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
+                put_online_cpus();
+                return PTR_ERR(xpc_notify_mq_uv);
+        }
+
+        put_online_cpus();
+        return 0;
+}
+
 int
 xpc_init_uv(void)
 {
+        int nid;
+        int ret = 0;
+
         xpc_arch_ops = xpc_arch_ops_uv;
 
         if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
@@ -1717,21 +1785,21 @@ xpc_init_uv(void)
                 return -E2BIG;
         }
 
-        xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0,
-                                                  XPC_ACTIVATE_IRQ_NAME,
-                                                  xpc_handle_activate_IRQ_uv);
-        if (IS_ERR(xpc_activate_mq_uv))
-                return PTR_ERR(xpc_activate_mq_uv);
+        if (xpc_mq_node < 0)
+                for_each_online_node(nid) {
+                        ret = xpc_init_mq_node(nid);
 
-        xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0,
-                                                XPC_NOTIFY_IRQ_NAME,
-                                                xpc_handle_notify_IRQ_uv);
-        if (IS_ERR(xpc_notify_mq_uv)) {
-                xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
-                return PTR_ERR(xpc_notify_mq_uv);
-        }
+                        if (!ret)
+                                break;
+                }
+        else
+                ret = xpc_init_mq_node(xpc_mq_node);
 
-        return 0;
+        if (ret < 0)
+                dev_err(xpc_part, "xpc_init_mq_node() returned error=%d\n",
+                        -ret);
+
+        return ret;
 }
 
 void
@@ -1740,3 +1808,6 @@ xpc_exit_uv(void)
         xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
         xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
 }
+
+module_param(xpc_mq_node, int, 0);
+MODULE_PARM_DESC(xpc_mq_node, "Node number on which to allocate message queues.");
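
The last two hunks are the functional core of the change: instead of hard-coding CPU 0, xpc_init_uv() now either honors the new xpc_mq_node module parameter or walks every online node until xpc_init_mq_node() manages to create both GRU message queues on one of them. A minimal user-space sketch of that fallback pattern, under stated assumptions: init_mq_node(), init_uv(), and NR_NODES below are hypothetical stand-ins for the XPC helpers, not the driver's own code.

    #include <stdio.h>
    #include <errno.h>

    #define NR_NODES 4      /* hypothetical node count for this sketch */

    /*
     * Stand-in for xpc_init_mq_node(): pretend queue creation only
     * succeeds on nodes 2 and up, so the scan has something to skip.
     */
    static int init_mq_node(int nid)
    {
            return (nid >= 2) ? 0 : -ENOMEM;
    }

    /* Mirrors the new xpc_init_uv() flow: honor an explicit node, else scan. */
    static int init_uv(int mq_node)
    {
            int nid;
            int ret = 0;

            if (mq_node < 0) {
                    for (nid = 0; nid < NR_NODES; nid++) {
                            ret = init_mq_node(nid);
                            if (!ret)
                                    break;  /* first node that works wins */
                    }
            } else {
                    ret = init_mq_node(mq_node);
            }

            if (ret < 0)
                    fprintf(stderr, "init_mq_node() returned error=%d\n", -ret);
            else
                    printf("message queues allocated on node %d\n",
                           mq_node < 0 ? nid : mq_node);
            return ret;
    }

    int main(void)
    {
            return init_uv(-1) ? 1 : 0;     /* -1 == "any node", the parameter's default */
    }

Since drivers/misc/sgi-xp builds this file into the xpc module, the node can presumably be pinned at load time with something like "modprobe xpc xpc_mq_node=1"; with the default of -1, the driver falls back to scanning every online node as above.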
