author | Jes Sorensen <jes@sgi.com> | 2006-01-17 12:52:21 -0500
---|---|---
committer | Tony Luck <tony.luck@intel.com> | 2006-01-17 13:53:24 -0800
commit | f9e505a9a03df5acace6e758c8d12982635a1c64 (patch)
tree | c336b36fd18dd1fc1f316cb55ad52c67da2ffd8a /arch/ia64
parent | ac354a899b91239aac4d5893fc4288bc400e82b4 (diff)
[IA64-SGI] sn2 mutex conversion
Migrate sn2 code to use mutex and completion events rather than
semaphores.
Signed-off-by: Jes Sorensen <jes@sgi.com>
Acked-by: Dean Nelson <dcn@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
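Editor's note: the patch below applies two conversion patterns. A semaphore used as a binary lock becomes a struct mutex (DECLARE_MUTEX/down/up become DEFINE_MUTEX/mutex_lock/mutex_unlock), and a semaphore used as a one-shot event (declared locked and released exactly once) becomes a completion (DECLARE_COMPLETION/wait_for_completion/complete). The sketch that follows is illustrative only, not code from the patch; the identifiers example_lock, worker_exited, example_worker and example_wait_for_worker are made up for this example.

```c
/*
 * Minimal sketch of the two conversions the commit performs.
 * Illustrative only; these names do not appear in the patch.
 */
#include <linux/mutex.h>
#include <linux/completion.h>

/* was: static DECLARE_MUTEX(example_lock);           -- semaphore used as a lock */
static DEFINE_MUTEX(example_lock);

/* was: static DECLARE_MUTEX_LOCKED(worker_exited);   -- semaphore used as an event */
static DECLARE_COMPLETION(worker_exited);

static int example_worker(void *ignore)
{
	/* was: down(&example_lock); ... up(&example_lock); */
	mutex_lock(&example_lock);
	/* ... critical section ... */
	mutex_unlock(&example_lock);

	/* was: up(&worker_exited); -- signal that this thread is done */
	complete(&worker_exited);
	return 0;
}

static void example_wait_for_worker(void)
{
	/* was: down(&worker_exited); -- block until the worker signals */
	wait_for_completion(&worker_exited);
}
```

Two details worth noting in the diff itself: down_trylock() returns 0 on success while mutex_trylock() returns nonzero on success, which is why the test in xpc_connect_channel() is inverted; and complete() may be called while holding a spinlock, which lets xpc_process_disconnect() drop the unlock/relock dance it previously needed around up().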
Diffstat (limited to 'arch/ia64')
-rw-r--r-- | arch/ia64/sn/kernel/mca.c | 7
-rw-r--r-- | arch/ia64/sn/kernel/xp_main.c | 17
-rw-r--r-- | arch/ia64/sn/kernel/xpc_channel.c | 34
-rw-r--r-- | arch/ia64/sn/kernel/xpc_main.c | 17
4 files changed, 36 insertions, 39 deletions
diff --git a/arch/ia64/sn/kernel/mca.c b/arch/ia64/sn/kernel/mca.c
index 6546db6abdb..9ab684d1bb5 100644
--- a/arch/ia64/sn/kernel/mca.c
+++ b/arch/ia64/sn/kernel/mca.c
@@ -10,6 +10,7 @@
 #include <linux/kernel.h>
 #include <linux/timer.h>
 #include <linux/vmalloc.h>
+#include <linux/mutex.h>
 #include <asm/mca.h>
 #include <asm/sal.h>
 #include <asm/sn/sn_sal.h>
@@ -27,7 +28,7 @@ void sn_init_cpei_timer(void);
 /* Printing oemdata from mca uses data that is not passed through SAL, it is
  * global.  Only one user at a time.
  */
-static DECLARE_MUTEX(sn_oemdata_mutex);
+static DEFINE_MUTEX(sn_oemdata_mutex);
 static u8 **sn_oemdata;
 static u64 *sn_oemdata_size, sn_oemdata_bufsize;
 
@@ -89,7 +90,7 @@ static int
 sn_platform_plat_specific_err_print(const u8 * sect_header, u8 ** oemdata,
                                     u64 * oemdata_size)
 {
-        down(&sn_oemdata_mutex);
+        mutex_lock(&sn_oemdata_mutex);
         sn_oemdata = oemdata;
         sn_oemdata_size = oemdata_size;
         sn_oemdata_bufsize = 0;
@@ -107,7 +108,7 @@ sn_platform_plat_specific_err_print(const u8 * sect_header, u8 ** oemdata,
                 *sn_oemdata_size = 0;
                 ia64_sn_plat_specific_err_print(print_hook, (char *)sect_header);
         }
-        up(&sn_oemdata_mutex);
+        mutex_unlock(&sn_oemdata_mutex);
         return 0;
 }
diff --git a/arch/ia64/sn/kernel/xp_main.c b/arch/ia64/sn/kernel/xp_main.c
index 3be52a34c80..b7ea46645e1 100644
--- a/arch/ia64/sn/kernel/xp_main.c
+++ b/arch/ia64/sn/kernel/xp_main.c
@@ -19,6 +19,7 @@
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/mutex.h>
 #include <asm/sn/intr.h>
 #include <asm/sn/sn_sal.h>
 #include <asm/sn/xp.h>
@@ -136,13 +137,13 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
 
         registration = &xpc_registrations[ch_number];
 
-        if (down_interruptible(&registration->sema) != 0) {
+        if (mutex_lock_interruptible(&registration->mutex) != 0) {
                 return xpcInterrupted;
         }
 
         /* if XPC_CHANNEL_REGISTERED(ch_number) */
         if (registration->func != NULL) {
-                up(&registration->sema);
+                mutex_unlock(&registration->mutex);
                 return xpcAlreadyRegistered;
         }
 
@@ -154,7 +155,7 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
         registration->key = key;
         registration->func = func;
 
-        up(&registration->sema);
+        mutex_unlock(&registration->mutex);
 
         xpc_interface.connect(ch_number);
 
@@ -190,11 +191,11 @@ xpc_disconnect(int ch_number)
          * figured XPC's users will just turn around and call xpc_disconnect()
          * again anyways, so we might as well wait, if need be.
          */
-        down(&registration->sema);
+        mutex_lock(&registration->mutex);
 
         /* if !XPC_CHANNEL_REGISTERED(ch_number) */
         if (registration->func == NULL) {
-                up(&registration->sema);
+                mutex_unlock(&registration->mutex);
                 return;
         }
 
@@ -208,7 +209,7 @@ xpc_disconnect(int ch_number)
 
         xpc_interface.disconnect(ch_number);
 
-        up(&registration->sema);
+        mutex_unlock(&registration->mutex);
 
         return;
 }
@@ -250,9 +251,9 @@ xp_init(void)
                 xp_nofault_PIOR_target = SH1_IPI_ACCESS;
         }
 
-        /* initialize the connection registration semaphores */
+        /* initialize the connection registration mutex */
         for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++) {
-                sema_init(&xpc_registrations[ch_number].sema, 1);  /* mutex */
+                mutex_init(&xpc_registrations[ch_number].mutex);
         }
 
         return 0;
diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c
index 0c0a6890240..8d950c778bb 100644
--- a/arch/ia64/sn/kernel/xpc_channel.c
+++ b/arch/ia64/sn/kernel/xpc_channel.c
@@ -22,6 +22,8 @@
 #include <linux/cache.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
 #include <asm/sn/bte.h>
 #include <asm/sn/sn_sal.h>
 #include <asm/sn/xpc.h>
@@ -56,8 +58,8 @@ xpc_initialize_channels(struct xpc_partition *part, partid_t partid)
                 atomic_set(&ch->n_to_notify, 0);
 
                 spin_lock_init(&ch->lock);
-                sema_init(&ch->msg_to_pull_sema, 1);    /* mutex */
-                sema_init(&ch->wdisconnect_sema, 0);    /* event wait */
+                mutex_init(&ch->msg_to_pull_mutex);
+                init_completion(&ch->wdisconnect_wait);
 
                 atomic_set(&ch->n_on_msg_allocate_wq, 0);
                 init_waitqueue_head(&ch->msg_allocate_wq);
@@ -534,7 +536,6 @@ static enum xpc_retval
 xpc_allocate_msgqueues(struct xpc_channel *ch)
 {
         unsigned long irq_flags;
-        int i;
         enum xpc_retval ret;
 
 
@@ -552,11 +553,6 @@ xpc_allocate_msgqueues(struct xpc_channel *ch)
                 return ret;
         }
 
-        for (i = 0; i < ch->local_nentries; i++) {
-                /* use a semaphore as an event wait queue */
-                sema_init(&ch->notify_queue[i].sema, 0);
-        }
-
         spin_lock_irqsave(&ch->lock, irq_flags);
         ch->flags |= XPC_C_SETUP;
         spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -799,10 +795,8 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
         }
 
         if (ch->flags & XPC_C_WDISCONNECT) {
-                spin_unlock_irqrestore(&ch->lock, *irq_flags);
-                up(&ch->wdisconnect_sema);
-                spin_lock_irqsave(&ch->lock, *irq_flags);
-
+                /* we won't lose the CPU since we're holding ch->lock */
+                complete(&ch->wdisconnect_wait);
         } else if (ch->delayed_IPI_flags) {
                 if (part->act_state != XPC_P_DEACTIVATING) {
                         /* time to take action on any delayed IPI flags */
@@ -1092,12 +1086,12 @@ xpc_connect_channel(struct xpc_channel *ch)
         struct xpc_registration *registration =
                                         &xpc_registrations[ch->number];
 
 
-        if (down_trylock(&registration->sema) != 0) {
+        if (mutex_trylock(&registration->mutex) == 0) {
                 return xpcRetry;
         }
 
         if (!XPC_CHANNEL_REGISTERED(ch->number)) {
-                up(&registration->sema);
+                mutex_unlock(&registration->mutex);
                 return xpcUnregistered;
         }
 
@@ -1108,7 +1102,7 @@ xpc_connect_channel(struct xpc_channel *ch)
 
         if (ch->flags & XPC_C_DISCONNECTING) {
                 spin_unlock_irqrestore(&ch->lock, irq_flags);
-                up(&registration->sema);
+                mutex_unlock(&registration->mutex);
                 return ch->reason;
         }
 
@@ -1140,7 +1134,7 @@ xpc_connect_channel(struct xpc_channel *ch)
                  * channel lock be locked and will unlock and relock
                  * the channel lock as needed.
                  */
-                up(&registration->sema);
+                mutex_unlock(&registration->mutex);
                 XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes,
                                         &irq_flags);
                 spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -1155,7 +1149,7 @@ xpc_connect_channel(struct xpc_channel *ch)
                 atomic_inc(&xpc_partitions[ch->partid].nchannels_active);
         }
 
-        up(&registration->sema);
+        mutex_unlock(&registration->mutex);
 
         /* initiate the connection */
 
@@ -2089,7 +2083,7 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
         enum xpc_retval ret;
 
 
-        if (down_interruptible(&ch->msg_to_pull_sema) != 0) {
+        if (mutex_lock_interruptible(&ch->msg_to_pull_mutex) != 0) {
                 /* we were interrupted by a signal */
                 return NULL;
         }
@@ -2125,7 +2119,7 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
 
                         XPC_DEACTIVATE_PARTITION(part, ret);
 
-                        up(&ch->msg_to_pull_sema);
+                        mutex_unlock(&ch->msg_to_pull_mutex);
                         return NULL;
                 }
 
@@ -2134,7 +2128,7 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
                 ch->next_msg_to_pull += nmsgs;
         }
 
-        up(&ch->msg_to_pull_sema);
+        mutex_unlock(&ch->msg_to_pull_mutex);
 
         /* return the message we were looking for */
         msg_offset = (get % ch->remote_nentries) * ch->msg_size;
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c
index 8930586e0eb..c75f8aeefc2 100644
--- a/arch/ia64/sn/kernel/xpc_main.c
+++ b/arch/ia64/sn/kernel/xpc_main.c
@@ -55,6 +55,7 @@
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/reboot.h>
+#include <linux/completion.h>
 #include <asm/sn/intr.h>
 #include <asm/sn/sn_sal.h>
 #include <asm/kdebug.h>
@@ -177,10 +178,10 @@ static DECLARE_WAIT_QUEUE_HEAD(xpc_act_IRQ_wq);
 static unsigned long xpc_hb_check_timeout;
 
 /* notification that the xpc_hb_checker thread has exited */
-static DECLARE_MUTEX_LOCKED(xpc_hb_checker_exited);
+static DECLARE_COMPLETION(xpc_hb_checker_exited);
 
 /* notification that the xpc_discovery thread has exited */
-static DECLARE_MUTEX_LOCKED(xpc_discovery_exited);
+static DECLARE_COMPLETION(xpc_discovery_exited);
 
 static struct timer_list xpc_hb_timer;
 
@@ -321,7 +322,7 @@ xpc_hb_checker(void *ignore)
 
 
         /* mark this thread as having exited */
-        up(&xpc_hb_checker_exited);
+        complete(&xpc_hb_checker_exited);
         return 0;
 }
@@ -341,7 +342,7 @@ xpc_initiate_discovery(void *ignore)
         dev_dbg(xpc_part, "discovery thread is exiting\n");
 
         /* mark this thread as having exited */
-        up(&xpc_discovery_exited);
+        complete(&xpc_discovery_exited);
         return 0;
 }
@@ -893,7 +894,7 @@ xpc_disconnect_wait(int ch_number)
                         continue;
                 }
 
-                (void) down(&ch->wdisconnect_sema);
+                wait_for_completion(&ch->wdisconnect_wait);
 
                 spin_lock_irqsave(&ch->lock, irq_flags);
                 DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
@@ -946,10 +947,10 @@ xpc_do_exit(enum xpc_retval reason)
         free_irq(SGI_XPC_ACTIVATE, NULL);
 
         /* wait for the discovery thread to exit */
-        down(&xpc_discovery_exited);
+        wait_for_completion(&xpc_discovery_exited);
 
         /* wait for the heartbeat checker thread to exit */
-        down(&xpc_hb_checker_exited);
+        wait_for_completion(&xpc_hb_checker_exited);
 
         /* sleep for a 1/3 of a second or so */
@@ -1367,7 +1368,7 @@ xpc_init(void)
                 dev_err(xpc_part, "failed while forking discovery thread\n");
 
                 /* mark this new thread as a non-starter */
-                up(&xpc_discovery_exited);
+                complete(&xpc_discovery_exited);
 
                 xpc_do_exit(xpcUnloading);
                 return -EBUSY;