author    | Dean Nelson <dcn@sgi.com>       | 2008-04-22 14:46:56 -0500
committer | Tony Luck <tony.luck@intel.com> | 2008-04-22 15:08:17 -0700
commit    | 45d9ca492e4bd1522d1b5bd125c2908f1cee3d4a (patch)
tree      | dfbe831a5f71159855c3a252856664411ca53f8a /arch
parent    | 9010eff0eadfe4eb60c3f0c71573f0fc505c31e3 (diff)
[IA64] move XP and XPC to drivers/misc/sgi-xp
Move XPC and XPNET from arch/ia64/sn/kernel to drivers/misc/sgi-xp.
Signed-off-by: Dean Nelson <dcn@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'arch')
-rw-r--r-- | arch/ia64/Kconfig                   |   11
-rw-r--r-- | arch/ia64/sn/kernel/Makefile        |    7
-rw-r--r-- | arch/ia64/sn/kernel/xp_main.c       |  290
-rw-r--r-- | arch/ia64/sn/kernel/xp_nofault.S    |   36
-rw-r--r-- | arch/ia64/sn/kernel/xpc_channel.c   | 2379
-rw-r--r-- | arch/ia64/sn/kernel/xpc_main.c      | 1431
-rw-r--r-- | arch/ia64/sn/kernel/xpc_partition.c | 1239
-rw-r--r-- | arch/ia64/sn/kernel/xpnet.c         |  718
8 files changed, 1 insertion, 6110 deletions
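
The xp_main.c removal below carries the comment block that documents the xpc_connect()/xpc_disconnect() registration interface. For context only, here is a minimal, hypothetical sketch of a kernel-level XPC user written against that interface as it existed before this move; the module name, channel number, payload struct, and the exact xpc_channel_func callback layout are assumptions for illustration, not part of this commit.

/*
 * Hypothetical example (not part of this commit): a kernel-level user of
 * the XP interface registering a channel via xpc_connect(), following the
 * comment block in the xp_main.c removed below.  The callback signature is
 * assumed from the era's <asm/sn/xp.h> and may differ.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <asm/sn/xp.h>		/* pre-move header location */

#define MY_CH_NUMBER	3	/* hypothetical channel #, must be < XPC_NCHANNELS */

struct my_payload {		/* hypothetical user-defined message payload */
	u64 seq;
	char data[64];
};

/* Assumed callback layout: reason, partition, channel, data, key. */
static void
my_channel_func(enum xpc_retval reason, partid_t partid, int ch_number,
		void *data, void *key)
{
	if (reason == xpcConnected)
		printk(KERN_INFO "xp example: channel %d to partition %d "
		       "connected\n", ch_number, partid);
	/* other reasons report incoming messages, disconnection, or errors */
}

static int __init my_xp_user_init(void)
{
	/* payload size, queue entries, and kthread limits as documented in
	 * the xpc_connect() comment block; idle_limit <= assigned_limit */
	enum xpc_retval ret = xpc_connect(MY_CH_NUMBER, my_channel_func, NULL,
					  sizeof(struct my_payload),
					  128 /* nentries */,
					  4 /* assigned_limit */,
					  2 /* idle_limit */);

	return (ret == xpcSuccess) ? 0 : -EBUSY;
}

static void __exit my_xp_user_exit(void)
{
	/* blocks until all connections on the channel have been torn down */
	xpc_disconnect(MY_CH_NUMBER);
}

module_init(my_xp_user_init);
module_exit(my_xp_user_exit);
MODULE_LICENSE("GPL");

After this commit, the same interface is provided from drivers/misc/sgi-xp rather than arch/ia64/sn/kernel.
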
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index ed21737a00c..cd13e138bd0 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -266,17 +266,6 @@ config IOSAPIC depends on !IA64_HP_SIM default y -config IA64_SGI_SN_XP - tristate "Support communication between SGI SSIs" - depends on IA64_GENERIC || IA64_SGI_SN2 - select IA64_UNCACHED_ALLOCATOR - help - An SGI machine can be divided into multiple Single System - Images which act independently of each other and have - hardware based memory protection from the others. Enabling - this feature will allow for direct communication between SSIs - based on a network adapter and DMA messaging. - config FORCE_MAX_ZONEORDER int "MAX_ORDER (11 - 17)" if !HUGETLB_PAGE range 11 17 if !HUGETLB_PAGE diff --git a/arch/ia64/sn/kernel/Makefile b/arch/ia64/sn/kernel/Makefile index 688a3c27e0f..0591038735a 100644 --- a/arch/ia64/sn/kernel/Makefile +++ b/arch/ia64/sn/kernel/Makefile @@ -4,7 +4,7 @@ # License. See the file "COPYING" in the main directory of this archive # for more details. # -# Copyright (C) 1999,2001-2006 Silicon Graphics, Inc. All Rights Reserved. +# Copyright (C) 1999,2001-2006,2008 Silicon Graphics, Inc. All Rights Reserved. # EXTRA_CFLAGS += -Iarch/ia64/sn/include @@ -15,9 +15,4 @@ obj-y += setup.o bte.o bte_error.o irq.o mca.o idle.o \ sn2/ obj-$(CONFIG_IA64_GENERIC) += machvec.o obj-$(CONFIG_SGI_TIOCX) += tiocx.o -obj-$(CONFIG_IA64_SGI_SN_XP) += xp.o -xp-y := xp_main.o xp_nofault.o -obj-$(CONFIG_IA64_SGI_SN_XP) += xpc.o -xpc-y := xpc_main.o xpc_channel.o xpc_partition.o -obj-$(CONFIG_IA64_SGI_SN_XP) += xpnet.o obj-$(CONFIG_PCI_MSI) += msi_sn.o diff --git a/arch/ia64/sn/kernel/xp_main.c b/arch/ia64/sn/kernel/xp_main.c deleted file mode 100644 index b7ea46645e1..00000000000 --- a/arch/ia64/sn/kernel/xp_main.c +++ /dev/null @@ -1,290 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved. - */ - - -/* - * Cross Partition (XP) base. - * - * XP provides a base from which its users can interact - * with XPC, yet not be dependent on XPC. - * - */ - - -#include <linux/kernel.h> -#include <linux/interrupt.h> -#include <linux/module.h> -#include <linux/mutex.h> -#include <asm/sn/intr.h> -#include <asm/sn/sn_sal.h> -#include <asm/sn/xp.h> - - -/* - * Target of nofault PIO read. - */ -u64 xp_nofault_PIOR_target; - - -/* - * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level - * users of XPC. - */ -struct xpc_registration xpc_registrations[XPC_NCHANNELS]; - - -/* - * Initialize the XPC interface to indicate that XPC isn't loaded. - */ -static enum xpc_retval xpc_notloaded(void) { return xpcNotLoaded; } - -struct xpc_interface xpc_interface = { - (void (*)(int)) xpc_notloaded, - (void (*)(int)) xpc_notloaded, - (enum xpc_retval (*)(partid_t, int, u32, void **)) xpc_notloaded, - (enum xpc_retval (*)(partid_t, int, void *)) xpc_notloaded, - (enum xpc_retval (*)(partid_t, int, void *, xpc_notify_func, void *)) - xpc_notloaded, - (void (*)(partid_t, int, void *)) xpc_notloaded, - (enum xpc_retval (*)(partid_t, void *)) xpc_notloaded -}; - - -/* - * XPC calls this when it (the XPC module) has been loaded. 
- */ -void -xpc_set_interface(void (*connect)(int), - void (*disconnect)(int), - enum xpc_retval (*allocate)(partid_t, int, u32, void **), - enum xpc_retval (*send)(partid_t, int, void *), - enum xpc_retval (*send_notify)(partid_t, int, void *, - xpc_notify_func, void *), - void (*received)(partid_t, int, void *), - enum xpc_retval (*partid_to_nasids)(partid_t, void *)) -{ - xpc_interface.connect = connect; - xpc_interface.disconnect = disconnect; - xpc_interface.allocate = allocate; - xpc_interface.send = send; - xpc_interface.send_notify = send_notify; - xpc_interface.received = received; - xpc_interface.partid_to_nasids = partid_to_nasids; -} - - -/* - * XPC calls this when it (the XPC module) is being unloaded. - */ -void -xpc_clear_interface(void) -{ - xpc_interface.connect = (void (*)(int)) xpc_notloaded; - xpc_interface.disconnect = (void (*)(int)) xpc_notloaded; - xpc_interface.allocate = (enum xpc_retval (*)(partid_t, int, u32, - void **)) xpc_notloaded; - xpc_interface.send = (enum xpc_retval (*)(partid_t, int, void *)) - xpc_notloaded; - xpc_interface.send_notify = (enum xpc_retval (*)(partid_t, int, void *, - xpc_notify_func, void *)) xpc_notloaded; - xpc_interface.received = (void (*)(partid_t, int, void *)) - xpc_notloaded; - xpc_interface.partid_to_nasids = (enum xpc_retval (*)(partid_t, void *)) - xpc_notloaded; -} - - -/* - * Register for automatic establishment of a channel connection whenever - * a partition comes up. - * - * Arguments: - * - * ch_number - channel # to register for connection. - * func - function to call for asynchronous notification of channel - * state changes (i.e., connection, disconnection, error) and - * the arrival of incoming messages. - * key - pointer to optional user-defined value that gets passed back - * to the user on any callouts made to func. - * payload_size - size in bytes of the XPC message's payload area which - * contains a user-defined message. The user should make - * this large enough to hold their largest message. - * nentries - max #of XPC message entries a message queue can contain. - * The actual number, which is determined when a connection - * is established and may be less then requested, will be - * passed to the user via the xpcConnected callout. - * assigned_limit - max number of kthreads allowed to be processing - * messages (per connection) at any given instant. - * idle_limit - max number of kthreads allowed to be idle at any given - * instant. 
- */ -enum xpc_retval -xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size, - u16 nentries, u32 assigned_limit, u32 idle_limit) -{ - struct xpc_registration *registration; - - - DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS); - DBUG_ON(payload_size == 0 || nentries == 0); - DBUG_ON(func == NULL); - DBUG_ON(assigned_limit == 0 || idle_limit > assigned_limit); - - registration = &xpc_registrations[ch_number]; - - if (mutex_lock_interruptible(®istration->mutex) != 0) { - return xpcInterrupted; - } - - /* if XPC_CHANNEL_REGISTERED(ch_number) */ - if (registration->func != NULL) { - mutex_unlock(®istration->mutex); - return xpcAlreadyRegistered; - } - - /* register the channel for connection */ - registration->msg_size = XPC_MSG_SIZE(payload_size); - registration->nentries = nentries; - registration->assigned_limit = assigned_limit; - registration->idle_limit = idle_limit; - registration->key = key; - registration->func = func; - - mutex_unlock(®istration->mutex); - - xpc_interface.connect(ch_number); - - return xpcSuccess; -} - - -/* - * Remove the registration for automatic connection of the specified channel - * when a partition comes up. - * - * Before returning this xpc_disconnect() will wait for all connections on the - * specified channel have been closed/torndown. So the caller can be assured - * that they will not be receiving any more callouts from XPC to their - * function registered via xpc_connect(). - * - * Arguments: - * - * ch_number - channel # to unregister. - */ -void -xpc_disconnect(int ch_number) -{ - struct xpc_registration *registration; - - - DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS); - - registration = &xpc_registrations[ch_number]; - - /* - * We've decided not to make this a down_interruptible(), since we - * figured XPC's users will just turn around and call xpc_disconnect() - * again anyways, so we might as well wait, if need be. - */ - mutex_lock(®istration->mutex); - - /* if !XPC_CHANNEL_REGISTERED(ch_number) */ - if (registration->func == NULL) { - mutex_unlock(®istration->mutex); - return; - } - - /* remove the connection registration for the specified channel */ - registration->func = NULL; - registration->key = NULL; - registration->nentries = 0; - registration->msg_size = 0; - registration->assigned_limit = 0; - registration->idle_limit = 0; - - xpc_interface.disconnect(ch_number); - - mutex_unlock(®istration->mutex); - - return; -} - - -int __init -xp_init(void) -{ - int ret, ch_number; - u64 func_addr = *(u64 *) xp_nofault_PIOR; - u64 err_func_addr = *(u64 *) xp_error_PIOR; - - - if (!ia64_platform_is("sn2")) { - return -ENODEV; - } - - /* - * Register a nofault code region which performs a cross-partition - * PIO read. If the PIO read times out, the MCA handler will consume - * the error and return to a kernel-provided instruction to indicate - * an error. This PIO read exists because it is guaranteed to timeout - * if the destination is down (AMO operations do not timeout on at - * least some CPUs on Shubs <= v1.2, which unfortunately we have to - * work around). - */ - if ((ret = sn_register_nofault_code(func_addr, err_func_addr, - err_func_addr, 1, 1)) != 0) { - printk(KERN_ERR "XP: can't register nofault code, error=%d\n", - ret); - } - /* - * Setup the nofault PIO read target. (There is no special reason why - * SH_IPI_ACCESS was selected.) 
- */ - if (is_shub2()) { - xp_nofault_PIOR_target = SH2_IPI_ACCESS0; - } else { - xp_nofault_PIOR_target = SH1_IPI_ACCESS; - } - - /* initialize the connection registration mutex */ - for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++) { - mutex_init(&xpc_registrations[ch_number].mutex); - } - - return 0; -} -module_init(xp_init); - - -void __exit -xp_exit(void) -{ - u64 func_addr = *(u64 *) xp_nofault_PIOR; - u64 err_func_addr = *(u64 *) xp_error_PIOR; - - - /* unregister the PIO read nofault code region */ - (void) sn_register_nofault_code(func_addr, err_func_addr, - err_func_addr, 1, 0); -} -module_exit(xp_exit); - - -MODULE_AUTHOR("Silicon Graphics, Inc."); -MODULE_DESCRIPTION("Cross Partition (XP) base"); -MODULE_LICENSE("GPL"); - -EXPORT_SYMBOL(xp_nofault_PIOR); -EXPORT_SYMBOL(xp_nofault_PIOR_target); -EXPORT_SYMBOL(xpc_registrations); -EXPORT_SYMBOL(xpc_interface); -EXPORT_SYMBOL(xpc_clear_interface); -EXPORT_SYMBOL(xpc_set_interface); -EXPORT_SYMBOL(xpc_connect); -EXPORT_SYMBOL(xpc_disconnect); - diff --git a/arch/ia64/sn/kernel/xp_nofault.S b/arch/ia64/sn/kernel/xp_nofault.S deleted file mode 100644 index 98e7c7dbfdd..00000000000 --- a/arch/ia64/sn/kernel/xp_nofault.S +++ /dev/null @@ -1,36 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (c) 2004-2007 Silicon Graphics, Inc. All Rights Reserved. - */ - - -/* - * The xp_nofault_PIOR function takes a pointer to a remote PIO register - * and attempts to load and consume a value from it. This function - * will be registered as a nofault code block. In the event that the - * PIO read fails, the MCA handler will force the error to look - * corrected and vector to the xp_error_PIOR which will return an error. - * - * The definition of "consumption" and the time it takes for an MCA - * to surface is processor implementation specific. This code - * is sufficient on Itanium through the Montvale processor family. - * It may need to be adjusted for future processor implementations. - * - * extern int xp_nofault_PIOR(void *remote_register); - */ - - .global xp_nofault_PIOR -xp_nofault_PIOR: - mov r8=r0 // Stage a success return value - ld8.acq r9=[r32];; // PIO Read the specified register - adds r9=1,r9;; // Add to force consumption - srlz.i;; // Allow time for MCA to surface - br.ret.sptk.many b0;; // Return success - - .global xp_error_PIOR -xp_error_PIOR: - mov r8=1 // Return value of 1 - br.ret.sptk.many b0;; // Return failure diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c deleted file mode 100644 index 44ccc0d789c..00000000000 --- a/arch/ia64/sn/kernel/xpc_channel.c +++ /dev/null @@ -1,2379 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (c) 2004-2006 Silicon Graphics, Inc. All Rights Reserved. - */ - - -/* - * Cross Partition Communication (XPC) channel support. - * - * This is the part of XPC that manages the channels and - * sends/receives messages across them to/from other partitions. 
- * - */ - - -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/sched.h> -#include <linux/cache.h> -#include <linux/interrupt.h> -#include <linux/mutex.h> -#include <linux/completion.h> -#include <asm/sn/bte.h> -#include <asm/sn/sn_sal.h> -#include <asm/sn/xpc.h> - - -/* - * Guarantee that the kzalloc'd memory is cacheline aligned. - */ -static void * -xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base) -{ - /* see if kzalloc will give us cachline aligned memory by default */ - *base = kzalloc(size, flags); - if (*base == NULL) { - return NULL; - } - if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) { - return *base; - } - kfree(*base); - - /* nope, we'll have to do it ourselves */ - *base = kzalloc(size + L1_CACHE_BYTES, flags); - if (*base == NULL) { - return NULL; - } - return (void *) L1_CACHE_ALIGN((u64) *base); -} - - -/* - * Set up the initial values for the XPartition Communication channels. - */ -static void -xpc_initialize_channels(struct xpc_partition *part, partid_t partid) -{ - int ch_number; - struct xpc_channel *ch; - - - for (ch_number = 0; ch_number < part->nchannels; ch_number++) { - ch = &part->channels[ch_number]; - - ch->partid = partid; - ch->number = ch_number; - ch->flags = XPC_C_DISCONNECTED; - - ch->local_GP = &part->local_GPs[ch_number]; - ch->local_openclose_args = - &part->local_openclose_args[ch_number]; - - atomic_set(&ch->kthreads_assigned, 0); - atomic_set(&ch->kthreads_idle, 0); - atomic_set(&ch->kthreads_active, 0); - - atomic_set(&ch->references, 0); - atomic_set(&ch->n_to_notify, 0); - - spin_lock_init(&ch->lock); - mutex_init(&ch->msg_to_pull_mutex); - init_completion(&ch->wdisconnect_wait); - - atomic_set(&ch->n_on_msg_allocate_wq, 0); - init_waitqueue_head(&ch->msg_allocate_wq); - init_waitqueue_head(&ch->idle_wq); - } -} - - -/* - * Setup the infrastructure necessary to support XPartition Communication - * between the specified remote partition and the local one. - */ -enum xpc_retval -xpc_setup_infrastructure(struct xpc_partition *part) -{ - int ret, cpuid; - struct timer_list *timer; - partid_t partid = XPC_PARTID(part); - - - /* - * Zero out MOST of the entry for this partition. Only the fields - * starting with `nchannels' will be zeroed. The preceding fields must - * remain `viable' across partition ups and downs, since they may be - * referenced during this memset() operation. - */ - memset(&part->nchannels, 0, sizeof(struct xpc_partition) - - offsetof(struct xpc_partition, nchannels)); - - /* - * Allocate all of the channel structures as a contiguous chunk of - * memory. 
- */ - part->channels = kzalloc(sizeof(struct xpc_channel) * XPC_NCHANNELS, - GFP_KERNEL); - if (part->channels == NULL) { - dev_err(xpc_chan, "can't get memory for channels\n"); - return xpcNoMemory; - } - - part->nchannels = XPC_NCHANNELS; - - - /* allocate all the required GET/PUT values */ - - part->local_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, - GFP_KERNEL, &part->local_GPs_base); - if (part->local_GPs == NULL) { - kfree(part->channels); - part->channels = NULL; - dev_err(xpc_chan, "can't get memory for local get/put " - "values\n"); - return xpcNoMemory; - } - - part->remote_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, - GFP_KERNEL, &part->remote_GPs_base); - if (part->remote_GPs == NULL) { - dev_err(xpc_chan, "can't get memory for remote get/put " - "values\n"); - kfree(part->local_GPs_base); - part->local_GPs = NULL; - kfree(part->channels); - part->channels = NULL; - return xpcNoMemory; - } - - - /* allocate all the required open and close args */ - - part->local_openclose_args = xpc_kzalloc_cacheline_aligned( - XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL, - &part->local_openclose_args_base); - if (part->local_openclose_args == NULL) { - dev_err(xpc_chan, "can't get memory for local connect args\n"); - kfree(part->remote_GPs_base); - part->remote_GPs = NULL; - kfree(part->local_GPs_base); - part->local_GPs = NULL; - kfree(part->channels); - part->channels = NULL; - return xpcNoMemory; - } - - part->remote_openclose_args = xpc_kzalloc_cacheline_aligned( - XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL, - &part->remote_openclose_args_base); - if (part->remote_openclose_args == NULL) { - dev_err(xpc_chan, "can't get memory for remote connect args\n"); - kfree(part->local_openclose_args_base); - part->local_openclose_args = NULL; - kfree(part->remote_GPs_base); - part->remote_GPs = NULL; - kfree(part->local_GPs_base); - part->local_GPs = NULL; - kfree(part->channels); - part->channels = NULL; - return xpcNoMemory; - } - - - xpc_initialize_channels(part, partid); - - atomic_set(&part->nchannels_active, 0); - atomic_set(&part->nchannels_engaged, 0); - - - /* local_IPI_amo were set to 0 by an earlier memset() */ - - /* Initialize this partitions AMO_t structure */ - part->local_IPI_amo_va = xpc_IPI_init(partid); - - spin_lock_init(&part->IPI_lock); - - atomic_set(&part->channel_mgr_requests, 1); - init_waitqueue_head(&part->channel_mgr_wq); - - sprintf(part->IPI_owner, "xpc%02d", partid); - ret = request_irq(SGI_XPC_NOTIFY, xpc_notify_IRQ_handler, IRQF_SHARED, - part->IPI_owner, (void *) (u64) partid); - if (ret != 0) { - dev_err(xpc_chan, "can't register NOTIFY IRQ handler, " - "errno=%d\n", -ret); - kfree(part->remote_openclose_args_base); - part->remote_openclose_args = NULL; - kfree(part->local_openclose_args_base); - part->local_openclose_args = NULL; - kfree(part->remote_GPs_base); - part->remote_GPs = NULL; - kfree(part->local_GPs_base); - part->local_GPs = NULL; - kfree(part->channels); - part->channels = NULL; - return xpcLackOfResources; - } - - /* Setup a timer to check for dropped IPIs */ - timer = &part->dropped_IPI_timer; - init_timer(timer); - timer->function = (void (*)(unsigned long)) xpc_dropped_IPI_check; - timer->data = (unsigned long) part; - timer->expires = jiffies + XPC_P_DROPPED_IPI_WAIT; - add_timer(timer); - - /* - * With the setting of the partition setup_state to XPC_P_SETUP, we're - * declaring that this partition is ready to go. 
- */ - part->setup_state = XPC_P_SETUP; - - - /* - * Setup the per partition specific variables required by the - * remote partition to establish channel connections with us. - * - * The setting of the magic # indicates that these per partition - * specific variables are ready to be used. - */ - xpc_vars_part[partid].GPs_pa = __pa(part->local_GPs); - xpc_vars_part[partid].openclose_args_pa = - __pa(part->local_openclose_args); - xpc_vars_part[partid].IPI_amo_pa = __pa(part->local_IPI_amo_va); - cpuid = raw_smp_processor_id(); /* any CPU in this partition will do */ - xpc_vars_part[partid].IPI_nasid = cpuid_to_nasid(cpuid); - xpc_vars_part[partid].IPI_phys_cpuid = cpu_physical_id(cpuid); - xpc_vars_part[partid].nchannels = part->nchannels; - xpc_vars_part[partid].magic = XPC_VP_MAGIC1; - - return xpcSuccess; -} - - -/* - * Create a wrapper that hides the underlying mechanism for pulling a cacheline - * (or multiple cachelines) from a remote partition. - * - * src must be a cacheline aligned physical address on the remote partition. - * dst must be a cacheline aligned virtual address on this partition. - * cnt must be an cacheline sized - */ -static enum xpc_retval -xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst, - const void *src, size_t cnt) -{ - bte_result_t bte_ret; - - - DBUG_ON((u64) src != L1_CACHE_ALIGN((u64) src)); - DBUG_ON((u64) dst != L1_CACHE_ALIGN((u64) dst)); - DBUG_ON(cnt != L1_CACHE_ALIGN(cnt)); - - if (part->act_state == XPC_P_DEACTIVATING) { - return part->reason; - } - - bte_ret = xp_bte_copy((u64) src, (u64) dst, (u64) cnt, - (BTE_NORMAL | BTE_WACQUIRE), NULL); - if (bte_ret == BTE_SUCCESS) { - return xpcSuccess; - } - - dev_dbg(xpc_chan, "xp_bte_copy() from partition %d failed, ret=%d\n", - XPC_PARTID(part), bte_ret); - - return xpc_map_bte_errors(bte_ret); -} - - -/* - * Pull the remote per partition specific variables from the specified - * partition. 
- */ -enum xpc_retval -xpc_pull_remote_vars_part(struct xpc_partition *part) -{ - u8 buffer[L1_CACHE_BYTES * 2]; - struct xpc_vars_part *pulled_entry_cacheline = - (struct xpc_vars_part *) L1_CACHE_ALIGN((u64) buffer); - struct xpc_vars_part *pulled_entry; - u64 remote_entry_cacheline_pa, remote_entry_pa; - partid_t partid = XPC_PARTID(part); - enum xpc_retval ret; - - - /* pull the cacheline that contains the variables we're interested in */ - - DBUG_ON(part->remote_vars_part_pa != - L1_CACHE_ALIGN(part->remote_vars_part_pa)); - DBUG_ON(sizeof(struct xpc_vars_part) != L1_CACHE_BYTES / 2); - - remote_entry_pa = part->remote_vars_part_pa + - sn_partition_id * sizeof(struct xpc_vars_part); - - remote_entry_cacheline_pa = (remote_entry_pa & ~(L1_CACHE_BYTES - 1)); - - pulled_entry = (struct xpc_vars_part *) ((u64) pulled_entry_cacheline + - (remote_entry_pa & (L1_CACHE_BYTES - 1))); - - ret = xpc_pull_remote_cachelines(part, pulled_entry_cacheline, - (void *) remote_entry_cacheline_pa, - L1_CACHE_BYTES); - if (ret != xpcSuccess) { - dev_dbg(xpc_chan, "failed to pull XPC vars_part from " - "partition %d, ret=%d\n", partid, ret); - return ret; - } - - - /* see if they've been set up yet */ - - if (pulled_entry->magic != XPC_VP_MAGIC1 && - pulled_entry->magic != XPC_VP_MAGIC2) { - - if (pulled_entry->magic != 0) { - dev_dbg(xpc_chan, "partition %d's XPC vars_part for " - "partition %d has bad magic value (=0x%lx)\n", - partid, sn_partition_id, pulled_entry->magic); - return xpcBadMagic; - } - - /* they've not been initialized yet */ - return xpcRetry; - } - - if (xpc_vars_part[partid].magic == XPC_VP_MAGIC1) { - - /* validate the variables */ - - if (pulled_entry->GPs_pa == 0 || - pulled_entry->openclose_args_pa == 0 || - pulled_entry->IPI_amo_pa == 0) { - - dev_err(xpc_chan, "partition %d's XPC vars_part for " - "partition %d are not valid\n", partid, - sn_partition_id); - return xpcInvalidAddress; - } - - /* the variables we imported look to be valid */ - - part->remote_GPs_pa = pulled_entry->GPs_pa; - part->remote_openclose_args_pa = - pulled_entry->openclose_args_pa; - part->remote_IPI_amo_va = - (AMO_t *) __va(pulled_entry->IPI_amo_pa); - part->remote_IPI_nasid = pulled_entry->IPI_nasid; - part->remote_IPI_phys_cpuid = pulled_entry->IPI_phys_cpuid; - - if (part->nchannels > pulled_entry->nchannels) { - part->nchannels = pulled_entry->nchannels; - } - - /* let the other side know that we've pulled their variables */ - - xpc_vars_part[partid].magic = XPC_VP_MAGIC2; - } - - if (pulled_entry->magic == XPC_VP_MAGIC1) { - return xpcRetry; - } - - return xpcSuccess; -} - - -/* - * Get the IPI flags and pull the openclose args and/or remote GPs as needed. - */ -static u64 -xpc_get_IPI_flags(struct xpc_partition *part) -{ - unsigned long irq_flags; - u64 IPI_amo; - enum xpc_retval ret; - - - /* - * See if there are any IPI flags to be handled. 
- */ - - spin_lock_irqsave(&part->IPI_lock, irq_flags); - if ((IPI_amo = part->local_IPI_amo) != 0) { - part->local_IPI_amo = 0; - } - spin_unlock_irqrestore(&part->IPI_lock, irq_flags); - - - if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_amo)) { - ret = xpc_pull_remote_cachelines(part, - part->remote_openclose_args, - (void *) part->remote_openclose_args_pa, - XPC_OPENCLOSE_ARGS_SIZE); - if (ret != xpcSuccess) { - XPC_DEACTIVATE_PARTITION(part, ret); - - dev_dbg(xpc_chan, "failed to pull openclose args from " - "partition %d, ret=%d\n", XPC_PARTID(part), - ret); - - /* don't bother processing IPIs anymore */ - IPI_amo = 0; - } - } - - if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_amo)) { - ret = xpc_pull_remote_cachelines(part, part->remote_GPs, - (void *) part->remote_GPs_pa, - XPC_GP_SIZE); - if (ret != xpcSuccess) { - XPC_DEACTIVATE_PARTITION(part, ret); - - dev_dbg(xpc_chan, "failed to pull GPs from partition " - "%d, ret=%d\n", XPC_PARTID(part), ret); - - /* don't bother processing IPIs anymore */ - IPI_amo = 0; - } - } - - return IPI_amo; -} - - -/* - * Allocate the local message queue and the notify queue. - */ -static enum xpc_retval -xpc_allocate_local_msgqueue(struct xpc_channel *ch) -{ - unsigned long irq_flags; - int nentries; - size_t nbytes; - - - // >>> may want to check for ch->flags & XPC_C_DISCONNECTING between - // >>> iterations of the for-loop, bail if set? - - // >>> should we impose a minimum #of entries? like 4 or 8? - for (nentries = ch->local_nentries; nentries > 0; nentries--) { - - nbytes = nentries * ch->msg_size; - ch->local_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes, - GFP_KERNEL, - &ch->local_msgqueue_base); - if (ch->local_msgqueue == NULL) { - continue; - } - - nbytes = nentries * sizeof(struct xpc_notify); - ch->notify_queue = kzalloc(nbytes, GFP_KERNEL); - if (ch->notify_queue == NULL) { - kfree(ch->local_msgqueue_base); - ch->local_msgqueue = NULL; - continue; - } - - spin_lock_irqsave(&ch->lock, irq_flags); - if (nentries < ch->local_nentries) { - dev_dbg(xpc_chan, "nentries=%d local_nentries=%d, " - "partid=%d, channel=%d\n", nentries, - ch->local_nentries, ch->partid, ch->number); - - ch->local_nentries = nentries; - } - spin_unlock_irqrestore(&ch->lock, irq_flags); - return xpcSuccess; - } - - dev_dbg(xpc_chan, "can't get memory for local message queue and notify " - "queue, partid=%d, channel=%d\n", ch->partid, ch->number); - return xpcNoMemory; -} - - -/* - * Allocate the cached remote message queue. - */ -static enum xpc_retval -xpc_allocate_remote_msgqueue(struct xpc_channel *ch) -{ - unsigned long irq_flags; - int nentries; - size_t nbytes; - - - DBUG_ON(ch->remote_nentries <= 0); - - // >>> may want to check for ch->flags & XPC_C_DISCONNECTING between - // >>> iterations of the for-loop, bail if set? - - // >>> should we impose a minimum #of entries? like 4 or 8? 
- for (nentries = ch->remote_nentries; nentries > 0; nentries--) { - - nbytes = nentries * ch->msg_size; - ch->remote_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes, - GFP_KERNEL, - &ch->remote_msgqueue_base); - if (ch->remote_msgqueue == NULL) { - continue; - } - - spin_lock_irqsave(&ch->lock, irq_flags); - if (nentries < ch->remote_nentries) { - dev_dbg(xpc_chan, "nentries=%d remote_nentries=%d, " - "partid=%d, channel=%d\n", nentries, - ch->remote_nentries, ch->partid, ch->number); - - ch->remote_nentries = nentries; - } - spin_unlock_irqrestore(&ch->lock, irq_flags); - return xpcSuccess; - } - - dev_dbg(xpc_chan, "can't get memory for cached remote message queue, " - "partid=%d, channel=%d\n", ch->partid, ch->number); - return xpcNoMemory; -} - - -/* - * Allocate message queues and other stuff associated with a channel. - * - * Note: Assumes all of the channel sizes are filled in. - */ -static enum xpc_retval -xpc_allocate_msgqueues(struct xpc_channel *ch) -{ - unsigned long irq_flags; - enum xpc_retval ret; - - - DBUG_ON(ch->flags & XPC_C_SETUP); - - if ((ret = xpc_allocate_local_msgqueue(ch)) != xpcSuccess) { - return ret; - } - - if ((ret = xpc_allocate_remote_msgqueue(ch)) != xpcSuccess) { - kfree(ch->local_msgqueue_base); - ch->local_msgqueue = NULL; - kfree(ch->notify_queue); - ch->notify_queue = NULL; - return ret; - } - - spin_lock_irqsave(&ch->lock, irq_flags); - ch->flags |= XPC_C_SETUP; - spin_unlock_irqrestore(&ch->lock, irq_flags); - - return xpcSuccess; -} - - -/* - * Process a connect message from a remote partition. - * - * Note: xpc_process_connect() is expecting to be called with the - * spin_lock_irqsave held and will leave it locked upon return. - */ -static void -xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags) -{ - enum xpc_retval ret; - - - DBUG_ON(!spin_is_locked(&ch->lock)); - - if (!(ch->flags & XPC_C_OPENREQUEST) || - !(ch->flags & XPC_C_ROPENREQUEST)) { - /* nothing more to do for now */ - return; - } - DBUG_ON(!(ch->flags & XPC_C_CONNECTING)); - - if (!(ch->flags & XPC_C_SETUP)) { - spin_unlock_irqrestore(&ch->lock, *irq_flags); - ret = xpc_allocate_msgqueues(ch); - spin_lock_irqsave(&ch->lock, *irq_flags); - - if (ret != xpcSuccess) { - XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags); - } - if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING)) { - return; - } - - DBUG_ON(!(ch->flags & XPC_C_SETUP)); - DBUG_ON(ch->local_msgqueue == NULL); - DBUG_ON(ch->remote_msgqueue == NULL); - } - - if (!(ch->flags & XPC_C_OPENREPLY)) { - ch->flags |= XPC_C_OPENREPLY; - xpc_IPI_send_openreply(ch, irq_flags); - } - - if (!(ch->flags & XPC_C_ROPENREPLY)) { - return; - } - - DBUG_ON(ch->remote_msgqueue_pa == 0); - - ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP); /* clear all else */ - - dev_info(xpc_chan, "channel %d to partition %d connected\n", - ch->number, ch->partid); - - spin_unlock_irqrestore(&ch->lock, *irq_flags); - xpc_create_kthreads(ch, 1, 0); - spin_lock_irqsave(&ch->lock, *irq_flags); -} - - -/* - * Notify those who wanted to be notified upon delivery of their message. - */ -static void -xpc_notify_senders(struct xpc_channel *ch, enum xpc_retval reason, s64 put) -{ - struct xpc_notify *notify; - u8 notify_type; - s64 get = ch->w_remote_GP.get - 1; - - - while (++get < put && atomic_read(&ch->n_to_notify) > 0) { - - notify = &ch->notify_queue[get % ch->local_nentries]; - - /* - * See if the notify entry indicates it was associated with - * a message who's sender wants to be notified. 
It is possible - * that it is, but someone else is doing or has done the - * notification. - */ - notify_type = notify->type; - if (notify_type == 0 || - cmpxchg(¬ify->type, notify_type, 0) != - notify_type) { - continue; - } - - DBUG_ON(notify_type != XPC_N_CALL); - - atomic_dec(&ch->n_to_notify); - - if (notify->func != NULL) { - dev_dbg(xpc_chan, "notify->func() called, notify=0x%p, " - "msg_number=%ld, partid=%d, channel=%d\n", - (void *) notify, get, ch->partid, ch->number); - - notify->func(reason, ch->partid, ch->number, - notify->key); - - dev_dbg(xpc_chan, "notify->func() returned, " - "notify=0x%p, msg_number=%ld, partid=%d, " - "channel=%d\n", (void *) notify, get, - ch->partid, ch->number); - } - } |