| author | Michal Marek <mmarek@suse.cz> | 2010-08-04 14:05:07 +0200 |
|---|---|---|
| committer | Michal Marek <mmarek@suse.cz> | 2010-08-04 14:05:07 +0200 |
| commit | 7a996d3ab150bb0e1b71fa182f70199a703efdd1 (patch) | |
| tree | 96a36947d90c9b96580899abd38cb3b70cd9d40b /net/tipc/ref.c | |
| parent | 7cf3d73b4360e91b14326632ab1aeda4cb26308d (diff) | |
| parent | 9fe6206f400646a2322096b56c59891d530e8d51 (diff) | |
Merge commit 'v2.6.35' into kbuild/kconfig
Conflicts:
scripts/kconfig/Makefile
Diffstat (limited to 'net/tipc/ref.c')
| -rw-r--r-- | net/tipc/ref.c | 26 |
1 file changed, 16 insertions, 10 deletions
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
index 414fc34b8be..8dea66500cf 100644
--- a/net/tipc/ref.c
+++ b/net/tipc/ref.c
@@ -153,11 +153,11 @@ void tipc_ref_table_stop(void)
 
 u32 tipc_ref_acquire(void *object, spinlock_t **lock)
 {
-	struct reference *entry;
 	u32 index;
 	u32 index_mask;
 	u32 next_plus_upper;
 	u32 ref;
+	struct reference *entry = NULL;
 
 	if (!object) {
 		err("Attempt to acquire reference to non-existent object\n");
@@ -175,30 +175,36 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
 		index = tipc_ref_table.first_free;
 		entry = &(tipc_ref_table.entries[index]);
 		index_mask = tipc_ref_table.index_mask;
-		/* take lock in case a previous user of entry still holds it */
-		spin_lock_bh(&entry->lock);
 		next_plus_upper = entry->ref;
 		tipc_ref_table.first_free = next_plus_upper & index_mask;
 		ref = (next_plus_upper & ~index_mask) + index;
-		entry->ref = ref;
-		entry->object = object;
-		*lock = &entry->lock;
 	}
 	else if (tipc_ref_table.init_point < tipc_ref_table.capacity) {
 		index = tipc_ref_table.init_point++;
 		entry = &(tipc_ref_table.entries[index]);
 		spin_lock_init(&entry->lock);
-		spin_lock_bh(&entry->lock);
 		ref = tipc_ref_table.start_mask + index;
-		entry->ref = ref;
-		entry->object = object;
-		*lock = &entry->lock;
 	}
 	else {
 		ref = 0;
 	}
 	write_unlock_bh(&ref_table_lock);
 
+	/*
+	 * Grab the lock so no one else can modify this entry
+	 * While we assign its ref value & object pointer
+	 */
+	if (entry) {
+		spin_lock_bh(&entry->lock);
+		entry->ref = ref;
+		entry->object = object;
+		*lock = &entry->lock;
+		/*
+		 * keep it locked, the caller is responsible
+		 * for unlocking this when they're done with it
+		 */
+	}
+
 	return ref;
 }
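The change moves the per-entry spin_lock_bh() out from under ref_table_lock: slot bookkeeping still happens with the table write-locked, but the ref value and object pointer are now published under the entry's own lock, which is taken only after the table lock is dropped and is handed back to the caller still held. Below is a minimal userspace sketch of that pattern, not part of the commit, with pthread mutexes standing in for the kernel spinlocks; names such as ref_acquire, struct entry, and TBL_CAPACITY are illustrative only.

```c
#include <pthread.h>
#include <stdio.h>

#define TBL_CAPACITY 16

struct entry {
	pthread_mutex_t lock;	/* stands in for entry->lock */
	unsigned int ref;
	void *object;
};

static struct entry table[TBL_CAPACITY];
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER; /* stands in for ref_table_lock */
static unsigned int init_point = 1;	/* slot 0 reserved so a return of 0 means failure */

/*
 * Hand out a reference to 'object'.  Slot bookkeeping is done under
 * table_lock only; the entry's own lock is taken after table_lock is
 * released, the ref/object fields are published under it, and the lock
 * is returned to the caller still held (mirroring the patched flow of
 * tipc_ref_acquire()).
 */
static unsigned int ref_acquire(void *object, pthread_mutex_t **lock)
{
	struct entry *e = NULL;
	unsigned int ref = 0;

	pthread_mutex_lock(&table_lock);
	if (init_point < TBL_CAPACITY) {
		unsigned int index = init_point++;

		e = &table[index];
		pthread_mutex_init(&e->lock, NULL);
		ref = index;
	}
	pthread_mutex_unlock(&table_lock);

	/* Grab the entry lock only now, with the table lock dropped,
	 * so the assignment cannot block other table users. */
	if (e) {
		pthread_mutex_lock(&e->lock);
		e->ref = ref;
		e->object = object;
		*lock = &e->lock;
		/* still locked: the caller unlocks when it is done */
	}
	return ref;
}

int main(void)
{
	pthread_mutex_t *lock = NULL;
	int payload = 42;
	unsigned int ref = ref_acquire(&payload, &lock);

	if (ref) {
		printf("acquired ref %u\n", ref);
		pthread_mutex_unlock(lock);	/* caller-side unlock, as the added comment requires */
	}
	return 0;
}
```

The reordering in the patch has the same effect as in this sketch: a possible wait on a still-held entry lock no longer happens while ref_table_lock is write-held, and a failed allocation (ref == 0) never touches an entry at all, since entry stays NULL.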
