| field | value | date |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2008-04-18 18:02:35 -0700 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-04-18 18:02:35 -0700 |
| commit | 334d094504c2fe1c44211ecb49146ae6bca8c321 (patch) | |
| tree | d3c0f68e4b9f8e3d2ccc39e7dfe5de0534a5fad9 /net/tipc/ref.c | |
| parent | d1a4be630fb068f251d64b62919f143c49ca8057 (diff) | |
| parent | d1643d24c61b725bef399cc1cf2944b4c9c23177 (diff) | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.26
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.26: (1090 commits)
  [NET]: Fix and allocate less memory for ->priv'less netdevices
  [IPV6]: Fix dangling references on error in fib6_add().
  [NETLABEL]: Fix NULL deref in netlbl_unlabel_staticlist_gen() if ifindex not found
  [PKT_SCHED]: Fix datalen check in tcf_simp_init().
  [INET]: Uninline the __inet_inherit_port call.
  [INET]: Drop the inet_inherit_port() call.
  SCTP: Initialize partial_bytes_acked to 0, when all of the data is acked.
  [netdrvr] forcedeth: internal simplifications; changelog removal
  phylib: factor out get_phy_id from within get_phy_device
  PHY: add BCM5464 support to broadcom PHY driver
  cxgb3: Fix __must_check warning with dev_dbg.
  tc35815: Statistics cleanup
  natsemi: fix MMIO for PPC 44x platforms
  [TIPC]: Cleanup of TIPC reference table code
  [TIPC]: Optimized initialization of TIPC reference table
  [TIPC]: Remove inlining of reference table locking routines
  e1000: convert uint16_t style integers to u16
  ixgb: convert uint16_t style integers to u16
  sb1000.c: make const arrays static
  sb1000.c: stop inlining largish static functions
  ...
Diffstat (limited to 'net/tipc/ref.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | net/tipc/ref.c | 211 |

1 files changed, 155 insertions, 56 deletions
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
index c38744c96ed..89cbab24d08 100644
--- a/net/tipc/ref.c
+++ b/net/tipc/ref.c
@@ -2,7 +2,7 @@
  * net/tipc/ref.c: TIPC object registry code
  *
  * Copyright (c) 1991-2006, Ericsson AB
- * Copyright (c) 2004-2005, Wind River Systems
+ * Copyright (c) 2004-2007, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -36,32 +36,60 @@
 #include "core.h"
 #include "ref.h"
-#include "port.h"
-#include "subscr.h"
-#include "name_distr.h"
-#include "name_table.h"
-#include "config.h"
-#include "discover.h"
-#include "bearer.h"
-#include "node.h"
-#include "bcast.h"
+
+/**
+ * struct reference - TIPC object reference entry
+ * @object: pointer to object associated with reference entry
+ * @lock: spinlock controlling access to object
+ * @ref: reference value for object (combines instance & array index info)
+ */
+
+struct reference {
+	void *object;
+	spinlock_t lock;
+	u32 ref;
+};
+
+/**
+ * struct tipc_ref_table - table of TIPC object reference entries
+ * @entries: pointer to array of reference entries
+ * @capacity: array index of first unusable entry
+ * @init_point: array index of first uninitialized entry
+ * @first_free: array index of first unused object reference entry
+ * @last_free: array index of last unused object reference entry
+ * @index_mask: bitmask for array index portion of reference values
+ * @start_mask: initial value for instance value portion of reference values
+ */
+
+struct ref_table {
+	struct reference *entries;
+	u32 capacity;
+	u32 init_point;
+	u32 first_free;
+	u32 last_free;
+	u32 index_mask;
+	u32 start_mask;
+};
 
 /*
  * Object reference table consists of 2**N entries.
  *
- * A used entry has object ptr != 0, reference == XXXX|own index
- *				     (XXXX changes each time entry is acquired)
- * A free entry has object ptr == 0, reference == YYYY|next free index
- *				     (YYYY is one more than last used XXXX)
+ * State	Object ptr	Reference
+ * -----        ----------      ---------
+ * In use        non-NULL       XXXX|own index
+ *				(XXXX changes each time entry is acquired)
+ * Free            NULL         YYYY|next free index
+ *				(YYYY is one more than last used XXXX)
+ * Uninitialized   NULL         0
+ *
- * Free list is initially chained from entry (2**N)-1 to entry 1.
- * Entry 0 is not used to allow index 0 to indicate the end of the free list.
+ * Entry 0 is not used; this allows index 0 to denote the end of the free list.
  *
- * Note: Any accidental reference of the form XXXX|0--0 won't match entry 0
- * because entry 0's reference field has the form XXXX|1--1.
+ * Note that a reference value of 0 does not necessarily indicate that an
+ * entry is uninitialized, since the last entry in the free list could also
+ * have a reference value of 0 (although this is unlikely).
  */
 
-struct ref_table tipc_ref_table = { NULL };
+static struct ref_table tipc_ref_table = { NULL };
 
 static DEFINE_RWLOCK(ref_table_lock);
@@ -72,29 +100,29 @@ static DEFINE_RWLOCK(ref_table_lock);
 int tipc_ref_table_init(u32 requested_size, u32 start)
 {
 	struct reference *table;
-	u32 sz = 1 << 4;
-	u32 index_mask;
-	int i;
+	u32 actual_size;
 
-	while (sz < requested_size) {
-		sz <<= 1;
-	}
-	table = vmalloc(sz * sizeof(*table));
+	/* account for unused entry, then round up size to a power of 2 */
+
+	requested_size++;
+	for (actual_size = 16; actual_size < requested_size; actual_size <<= 1)
+		/* do nothing */ ;
+
+	/* allocate table & mark all entries as uninitialized */
+
+	table = __vmalloc(actual_size * sizeof(struct reference),
+			  GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
 	if (table == NULL)
 		return -ENOMEM;
 
-	write_lock_bh(&ref_table_lock);
-	index_mask = sz - 1;
-	for (i = sz - 1; i >= 0; i--) {
-		table[i].object = NULL;
-		spin_lock_init(&table[i].lock);
-		table[i].data.next_plus_upper = (start & ~index_mask) + i - 1;
-	}
 	tipc_ref_table.entries = table;
-	tipc_ref_table.index_mask = index_mask;
-	tipc_ref_table.first_free = sz - 1;
-	tipc_ref_table.last_free = 1;
-	write_unlock_bh(&ref_table_lock);
+	tipc_ref_table.capacity = requested_size;
+	tipc_ref_table.init_point = 1;
+	tipc_ref_table.first_free = 0;
+	tipc_ref_table.last_free = 0;
+	tipc_ref_table.index_mask = actual_size - 1;
+	tipc_ref_table.start_mask = start & ~tipc_ref_table.index_mask;
+
 	return TIPC_OK;
 }
@@ -125,7 +153,7 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
 	u32 index;
 	u32 index_mask;
 	u32 next_plus_upper;
-	u32 reference = 0;
+	u32 ref;
 
 	if (!object) {
 		err("Attempt to acquire reference to non-existent object\n");
@@ -136,6 +164,8 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
 		return 0;
 	}
 
+	/* take a free entry, if available; otherwise initialize a new entry */
+
 	write_lock_bh(&ref_table_lock);
 	if (tipc_ref_table.first_free) {
 		index = tipc_ref_table.first_free;
@@ -143,17 +173,29 @@
 		index_mask = tipc_ref_table.index_mask;
 		/* take lock in case a previous user of entry still holds it */
 		spin_lock_bh(&entry->lock);
-		next_plus_upper = entry->data.next_plus_upper;
+		next_plus_upper = entry->ref;
 		tipc_ref_table.first_free = next_plus_upper & index_mask;
-		reference = (next_plus_upper & ~index_mask) + index;
-		entry->data.reference = reference;
+		ref = (next_plus_upper & ~index_mask) + index;
+		entry->ref = ref;
 		entry->object = object;
-		if (lock != NULL)
-			*lock = &entry->lock;
 		spin_unlock_bh(&entry->lock);
+		*lock = &entry->lock;
+	}
+	else if (tipc_ref_table.init_point < tipc_ref_table.capacity) {
+		index = tipc_ref_table.init_point++;
+		entry = &(tipc_ref_table.entries[index]);
+		spin_lock_init(&entry->lock);
+		ref = tipc_ref_table.start_mask + index;
+		entry->ref = ref;
+		entry->object = object;
+		*lock = &entry->lock;
+	}
+	else {
+		ref = 0;
 	}
 	write_unlock_bh(&ref_table_lock);
-	return reference;
+
+	return ref;
 }
 
 /**
@@ -169,42 +211,99 @@ void tipc_ref_discard(u32 ref)
 {
 	struct reference *entry;
 	u32 index;
 	u32 index_mask;
 
-	if (!ref) {
-		err("Attempt to discard reference 0\n");
-		return;
-	}
 	if (!tipc_ref_table.entries) {
 		err("Reference table not found during discard attempt\n");
 		return;
 	}
 
-	write_lock_bh(&ref_table_lock);
 	index_mask = tipc_ref_table.index_mask;
 	index = ref & index_mask;
 	entry = &(tipc_ref_table.entries[index]);
+
+	write_lock_bh(&ref_table_lock);
+
 	if (!entry->object) {
 		err("Attempt to discard reference to non-existent object\n");
 		goto exit;
 	}
-	if (entry->data.reference != ref) {
+	if (entry->ref != ref) {
 		err("Attempt to discard non-existent reference\n");
 		goto exit;
 	}
 
-	/* mark entry as unused */
+	/*
+	 * mark entry as unused; increment instance part of entry's reference
+	 * to invalidate any subsequent references
+	 */
+
 	entry->object = NULL;
+	entry->ref = (ref & ~index_mask) + (index_mask + 1);
+
+	/* append entry to free entry list */
+
 	if (tipc_ref_table.first_free == 0)
 		tipc_ref_table.first_free = index;
 	else
-		/* next_plus_upper is always XXXX|0--0 for last free entry */
-		tipc_ref_table.entries[tipc_ref_table.last_free].data.next_plus_upper
-			|= index;
+		tipc_ref_table.entries[tipc_ref_table.last_free].ref |= index;
 	tipc_ref_table.last_free = index;
 
-	/* increment upper bits of entry to invalidate subsequent references */
-	entry->data.next_plus_upper = (ref & ~index_mask) + (index_mask + 1);
 exit:
 	write_unlock_bh(&ref_table_lock);
 }
+
+/**
+ * tipc_ref_lock - lock referenced object and return pointer to it
+ */
+
+void *tipc_ref_lock(u32 ref)
+{
+	if (likely(tipc_ref_table.entries)) {
+		struct reference *entry;
+
+		entry = &tipc_ref_table.entries[ref &
+						tipc_ref_table.index_mask];
+		if (likely(entry->ref != 0)) {
+			spin_lock_bh(&entry->lock);
+			if (likely((entry->ref == ref) && (entry->object)))
+				return entry->object;
+			spin_unlock_bh(&entry->lock);
+		}
+	}
+	return NULL;
+}
+
+/**
+ * tipc_ref_unlock - unlock referenced object
+ */
+
+void tipc_ref_unlock(u32 ref)
+{
+	if (likely(tipc_ref_table.entries)) {
+		struct reference *entry;
+
+		entry = &tipc_ref_table.entries[ref &
+						tipc_ref_table.index_mask];
+		if (likely((entry->ref == ref) && (entry->object)))
+			spin_unlock_bh(&entry->lock);
+		else
+			err("Attempt to unlock non-existent reference\n");
+	}
+}
+
+/**
+ * tipc_ref_deref - return pointer referenced object (without locking it)
+ */
+
+void *tipc_ref_deref(u32 ref)
+{
+	if (likely(tipc_ref_table.entries)) {
+		struct reference *entry;
+
+		entry = &tipc_ref_table.entries[ref &
+						tipc_ref_table.index_mask];
+		if (likely(entry->ref == ref))
+			return entry->object;
+	}
+	return NULL;
+}
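The core of the rewritten registry above is the reference-value encoding: the low bits of a reference (selected by index_mask) give the entry's slot in the table, while the remaining high bits hold an instance value that tipc_ref_discard() advances by (index_mask + 1), so a stale reference to a recycled slot no longer matches entry->ref. The stand-alone C sketch below illustrates just that arithmetic; the table size, mask, and sample numbers are illustrative only and not taken from the patch.

```c
/*
 * Illustrative user-space sketch of the TIPC-style reference encoding:
 * low bits = table index, high bits = instance value. Not kernel code;
 * TABLE_SIZE and the sample values are arbitrary.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TABLE_SIZE	16u			/* must be a power of 2 */
#define INDEX_MASK	(TABLE_SIZE - 1u)

static uint32_t make_ref(uint32_t instance, uint32_t index)
{
	return (instance & ~INDEX_MASK) + index;
}

int main(void)
{
	uint32_t ref = make_ref(0x12340000u, 5u);

	/* decoding: low bits select the table slot, high bits the instance */
	assert((ref & INDEX_MASK) == 5u);
	assert((ref & ~INDEX_MASK) == 0x12340000u);

	/*
	 * discarding an entry bumps the instance part by (INDEX_MASK + 1),
	 * so the old reference no longer matches the value stored in the entry
	 */
	uint32_t stored_after_discard = (ref & ~INDEX_MASK) + (INDEX_MASK + 1u);
	assert(stored_after_discard != ref);

	printf("ref=0x%08x index=%u instance=0x%08x\n",
	       (unsigned)ref, (unsigned)(ref & INDEX_MASK),
	       (unsigned)(ref & ~INDEX_MASK));
	return 0;
}
```

This is why tipc_ref_lock() and tipc_ref_deref() can simply compare the caller's reference against entry->ref: a handle that outlives its object fails the comparison and yields NULL instead of reaching a recycled entry.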
