| author | H. Peter Anvin <hpa@zytor.com> | 2010-04-29 16:53:17 -0700 |
|---|---|---|
| committer | H. Peter Anvin <hpa@zytor.com> | 2010-04-29 16:53:17 -0700 |
| commit | d9c5841e22231e4e49fd0a1004164e6fce59b7a6 | |
| tree | e1f589c46b3ff79bbe7b1b2469f6362f94576da6 | net/tipc/ref.c |
| parent | b701a47ba48b698976fb2fe05fb285b0edc1d26a | |
| parent | 5967ed87ade85a421ef814296c3c7f182b08c225 | |
Merge branch 'x86/asm' into x86/atomic
Merge reason:
Conflict between LOCK_PREFIX_HERE and relative alternatives pointers
Resolved Conflicts:
arch/x86/include/asm/alternative.h
arch/x86/kernel/alternative.c
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'net/tipc/ref.c')
| -rw-r--r-- | net/tipc/ref.c | 26 |
1 file changed, 16 insertions, 10 deletions
```diff
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
index 414fc34b8be..8dea66500cf 100644
--- a/net/tipc/ref.c
+++ b/net/tipc/ref.c
@@ -153,11 +153,11 @@ void tipc_ref_table_stop(void)
 
 u32 tipc_ref_acquire(void *object, spinlock_t **lock)
 {
-        struct reference *entry;
         u32 index;
         u32 index_mask;
         u32 next_plus_upper;
         u32 ref;
+        struct reference *entry = NULL;
 
         if (!object) {
                 err("Attempt to acquire reference to non-existent object\n");
@@ -175,30 +175,36 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
                 index = tipc_ref_table.first_free;
                 entry = &(tipc_ref_table.entries[index]);
                 index_mask = tipc_ref_table.index_mask;
-                /* take lock in case a previous user of entry still holds it */
-                spin_lock_bh(&entry->lock);
                 next_plus_upper = entry->ref;
                 tipc_ref_table.first_free = next_plus_upper & index_mask;
                 ref = (next_plus_upper & ~index_mask) + index;
-                entry->ref = ref;
-                entry->object = object;
-                *lock = &entry->lock;
         } else if (tipc_ref_table.init_point < tipc_ref_table.capacity) {
                 index = tipc_ref_table.init_point++;
                 entry = &(tipc_ref_table.entries[index]);
                 spin_lock_init(&entry->lock);
-                spin_lock_bh(&entry->lock);
                 ref = tipc_ref_table.start_mask + index;
-                entry->ref = ref;
-                entry->object = object;
-                *lock = &entry->lock;
         } else {
                 ref = 0;
         }
         write_unlock_bh(&ref_table_lock);
+        /*
+         * Grab the lock so no one else can modify this entry
+         * While we assign its ref value & object pointer
+         */
+        if (entry) {
+                spin_lock_bh(&entry->lock);
+                entry->ref = ref;
+                entry->object = object;
+                *lock = &entry->lock;
+                /*
+                 * keep it locked, the caller is responsible
+                 * for unlocking this when they're done with it
+                 */
+        }
+
         return ref;
 }
```
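The change moves all entry initialization out from under `ref_table_lock`: a free slot is reserved (or a fresh one carved out) while the table write lock is held, and only after `write_unlock_bh()` is the per-entry lock taken to fill in the ref value and object pointer. On success that per-entry lock is left held, so the caller must drop it once its own setup is complete. A minimal caller-side sketch of that contract, assuming hypothetical `my_port` / `my_port_create` names that do not appear in this diff:

```c
/*
 * Hypothetical caller sketch -- illustrates only the locking contract
 * implied by the tipc_ref_acquire() change above, not actual TIPC code.
 */
#include <linux/types.h>
#include <linux/spinlock.h>
#include "ref.h"        /* TIPC-internal header declaring tipc_ref_acquire() */

struct my_port {
        u32 ref;                /* reference handle returned by the table */
        spinlock_t *lock;       /* points at the table entry's lock */
        /* ... other per-port state ... */
};

static u32 my_port_create(struct my_port *p)
{
        spinlock_t *entry_lock = NULL;

        /*
         * On success tipc_ref_acquire() returns with entry_lock held,
         * so nothing that looks the object up through its reference
         * can touch it before the initialization below completes.
         */
        p->ref = tipc_ref_acquire(p, &entry_lock);
        if (!p->ref)
                return 0;       /* reference table exhausted */

        p->lock = entry_lock;
        /* ... finish initializing *p while still protected ... */

        /* Publish: drop the lock taken inside tipc_ref_acquire(). */
        spin_unlock_bh(entry_lock);
        return p->ref;
}
```

The design point visible in the diff is ordering: `spin_lock_bh(&entry->lock)` now happens only after `write_unlock_bh(&ref_table_lock)`, so the per-entry lock is no longer nested inside the table's write lock, and a slot that yields no reference (`entry == NULL`) is never locked at all.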
