Diffstat (limited to 'net/tipc/node.c')
-rw-r--r--  net/tipc/node.c  119
1 file changed, 62 insertions(+), 57 deletions(-)
diff --git a/net/tipc/node.c b/net/tipc/node.c
index efe4d41bf11..1d3a4999a70 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -2,7 +2,7 @@
  * net/tipc/node.c: TIPC node management routines
  *
  * Copyright (c) 2000-2006, 2012 Ericsson AB
- * Copyright (c) 2005-2006, 2010-2011, Wind River Systems
+ * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -44,13 +44,11 @@
 static void node_lost_contact(struct tipc_node *n_ptr);
 static void node_established_contact(struct tipc_node *n_ptr);
 
-static DEFINE_SPINLOCK(node_create_lock);
-
 static struct hlist_head node_htable[NODE_HTABLE_SIZE];
 LIST_HEAD(tipc_node_list);
 static u32 tipc_num_nodes;
-
-static atomic_t tipc_num_links = ATOMIC_INIT(0);
+static u32 tipc_num_links;
+static DEFINE_SPINLOCK(node_list_lock);
 
 /*
  * A trivial power-of-two bitmask technique is used for speed, since this
@@ -73,37 +71,26 @@ struct tipc_node *tipc_node_find(u32 addr)
 	if (unlikely(!in_own_cluster_exact(addr)))
 		return NULL;
 
-	hlist_for_each_entry(node, &node_htable[tipc_hashfn(addr)], hash) {
-		if (node->addr == addr)
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(node, &node_htable[tipc_hashfn(addr)], hash) {
+		if (node->addr == addr) {
+			rcu_read_unlock();
 			return node;
+		}
 	}
+	rcu_read_unlock();
 	return NULL;
 }
 
-/**
- * tipc_node_create - create neighboring node
- *
- * Currently, this routine is called by neighbor discovery code, which holds
- * net_lock for reading only.  We must take node_create_lock to ensure a node
- * isn't created twice if two different bearers discover the node at the same
- * time.  (It would be preferable to switch to holding net_lock in write mode,
- * but this is a non-trivial change.)
- */
 struct tipc_node *tipc_node_create(u32 addr)
 {
 	struct tipc_node *n_ptr, *temp_node;
 
-	spin_lock_bh(&node_create_lock);
-
-	n_ptr = tipc_node_find(addr);
-	if (n_ptr) {
-		spin_unlock_bh(&node_create_lock);
-		return n_ptr;
-	}
+	spin_lock_bh(&node_list_lock);
 
 	n_ptr = kzalloc(sizeof(*n_ptr), GFP_ATOMIC);
 	if (!n_ptr) {
-		spin_unlock_bh(&node_create_lock);
+		spin_unlock_bh(&node_list_lock);
 		pr_warn("Node creation failed, no memory\n");
 		return NULL;
 	}
@@ -114,31 +101,41 @@ struct tipc_node *tipc_node_create(u32 addr)
 	INIT_LIST_HEAD(&n_ptr->list);
 	INIT_LIST_HEAD(&n_ptr->nsub);
 
-	hlist_add_head(&n_ptr->hash, &node_htable[tipc_hashfn(addr)]);
+	hlist_add_head_rcu(&n_ptr->hash, &node_htable[tipc_hashfn(addr)]);
 
-	list_for_each_entry(temp_node, &tipc_node_list, list) {
+	list_for_each_entry_rcu(temp_node, &tipc_node_list, list) {
 		if (n_ptr->addr < temp_node->addr)
 			break;
 	}
-	list_add_tail(&n_ptr->list, &temp_node->list);
+	list_add_tail_rcu(&n_ptr->list, &temp_node->list);
 	n_ptr->block_setup = WAIT_PEER_DOWN;
 	n_ptr->signature = INVALID_NODE_SIG;
 
 	tipc_num_nodes++;
 
-	spin_unlock_bh(&node_create_lock);
+	spin_unlock_bh(&node_list_lock);
 	return n_ptr;
 }
 
-void tipc_node_delete(struct tipc_node *n_ptr)
+static void tipc_node_delete(struct tipc_node *n_ptr)
 {
-	list_del(&n_ptr->list);
-	hlist_del(&n_ptr->hash);
-	kfree(n_ptr);
+	list_del_rcu(&n_ptr->list);
+	hlist_del_rcu(&n_ptr->hash);
+	kfree_rcu(n_ptr, rcu);
 
 	tipc_num_nodes--;
 }
 
+void tipc_node_stop(void)
+{
+	struct tipc_node *node, *t_node;
+
+	spin_lock_bh(&node_list_lock);
+	list_for_each_entry_safe(node, t_node, &tipc_node_list, list)
+		tipc_node_delete(node);
+	spin_unlock_bh(&node_list_lock);
+}
+
 /**
  * tipc_node_link_up - handle addition of link
  *
@@ -162,7 +159,7 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 		pr_info("New link <%s> becomes standby\n", l_ptr->name);
 		return;
 	}
-	tipc_link_dup_send_queue(active[0], l_ptr);
+	tipc_link_dup_queue_xmit(active[0], l_ptr);
 	if (l_ptr->priority == active[0]->priority) {
 		active[0] = l_ptr;
 		return;
@@ -243,15 +240,25 @@ int tipc_node_is_up(struct tipc_node *n_ptr)
 void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 {
 	n_ptr->links[l_ptr->b_ptr->identity] = l_ptr;
-	atomic_inc(&tipc_num_links);
+	spin_lock_bh(&node_list_lock);
+	tipc_num_links++;
+	spin_unlock_bh(&node_list_lock);
 	n_ptr->link_cnt++;
 }
 
 void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 {
-	n_ptr->links[l_ptr->b_ptr->identity] = NULL;
-	atomic_dec(&tipc_num_links);
-	n_ptr->link_cnt--;
+	int i;
+
+	for (i = 0; i < MAX_BEARERS; i++) {
+		if (l_ptr != n_ptr->links[i])
+			continue;
+		n_ptr->links[i] = NULL;
+		spin_lock_bh(&node_list_lock);
+		tipc_num_links--;
+		spin_unlock_bh(&node_list_lock);
+		n_ptr->link_cnt--;
+	}
 }
 
 static void node_established_contact(struct tipc_node *n_ptr)
@@ -335,27 +342,28 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
 		return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
 						   " (network address)");
 
-	read_lock_bh(&tipc_net_lock);
+	spin_lock_bh(&node_list_lock);
 	if (!tipc_num_nodes) {
-		read_unlock_bh(&tipc_net_lock);
+		spin_unlock_bh(&node_list_lock);
 		return tipc_cfg_reply_none();
 	}
 
 	/* For now, get space for all other nodes */
 	payload_size = TLV_SPACE(sizeof(node_info)) * tipc_num_nodes;
 	if (payload_size > 32768u) {
-		read_unlock_bh(&tipc_net_lock);
+		spin_unlock_bh(&node_list_lock);
 		return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
 						   " (too many nodes)");
 	}
+	spin_unlock_bh(&node_list_lock);
+
 	buf = tipc_cfg_reply_alloc(payload_size);
-	if (!buf) {
-		read_unlock_bh(&tipc_net_lock);
+	if (!buf)
 		return NULL;
-	}
 
 	/* Add TLVs for all nodes in scope */
-	list_for_each_entry(n_ptr, &tipc_node_list, list) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
 		if (!tipc_in_scope(domain, n_ptr->addr))
 			continue;
 		node_info.addr = htonl(n_ptr->addr);
@@ -363,8 +371,7 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
 		tipc_cfg_append_tlv(buf, TIPC_TLV_NODE_INFO,
 				    &node_info, sizeof(node_info));
 	}
-
-	read_unlock_bh(&tipc_net_lock);
+	rcu_read_unlock();
 
 	return buf;
 }
@@ -387,21 +394,19 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
 	if (!tipc_own_addr)
 		return tipc_cfg_reply_none();
 
-	read_lock_bh(&tipc_net_lock);
-
+	spin_lock_bh(&node_list_lock);
 	/* Get space for all unicast links + broadcast link */
-	payload_size = TLV_SPACE(sizeof(link_info)) *
-		(atomic_read(&tipc_num_links) + 1);
+	payload_size = TLV_SPACE((sizeof(link_info)) * (tipc_num_links + 1));
 	if (payload_size > 32768u) {
-		read_unlock_bh(&tipc_net_lock);
+		spin_unlock_bh(&node_list_lock);
 		return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
 						   " (too many links)");
 	}
+	spin_unlock_bh(&node_list_lock);
+
 	buf = tipc_cfg_reply_alloc(payload_size);
-	if (!buf) {
-		read_unlock_bh(&tipc_net_lock);
+	if (!buf)
 		return NULL;
-	}
 
 	/* Add TLV for broadcast link */
 	link_info.dest = htonl(tipc_cluster_mask(tipc_own_addr));
@@ -410,7 +415,8 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
 	tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info));
 
 	/* Add TLVs for any other links in scope */
-	list_for_each_entry(n_ptr, &tipc_node_list, list) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
 		u32 i;
 
 		if (!tipc_in_scope(domain, n_ptr->addr))
@@ -427,7 +433,6 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
 		}
 		tipc_node_unlock(n_ptr);
 	}
-
-	read_unlock_bh(&tipc_net_lock);
+	rcu_read_unlock();
 	return buf;
 }
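The conversion above follows the kernel's standard RCU + spinlock recipe: lookups walk the hash chains and the node list lock-free inside rcu_read_lock()/rcu_read_unlock(), a single node_list_lock serializes all writers, and tipc_node_delete() reclaims entries with kfree_rcu() so a reader traversing the list during a deletion never touches freed memory. Below is a minimal, self-contained sketch of that recipe, not code from the patch; every identifier in it (demo_node, demo_htable, demo_list_lock, demo_find/demo_create/demo_delete) is a hypothetical stand-in for the corresponding TIPC symbol.

/*
 * Sketch of the RCU-protected hash list pattern adopted by this patch.
 * All names are invented for illustration; only the API calls are real.
 */
#include <linux/types.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_node {
	u32 addr;
	struct hlist_node hash;
	struct rcu_head rcu;		/* required by kfree_rcu() below */
};

static struct hlist_head demo_htable[16];
static DEFINE_SPINLOCK(demo_list_lock);	/* plays the role of node_list_lock */

/* Read side: no lock taken, safe against concurrent add/remove */
static struct demo_node *demo_find(u32 addr)
{
	struct demo_node *n;

	rcu_read_lock();
	hlist_for_each_entry_rcu(n, &demo_htable[addr & 15], hash) {
		if (n->addr == addr) {
			rcu_read_unlock();
			return n;	/* caller must guarantee n outlives the read section */
		}
	}
	rcu_read_unlock();
	return NULL;
}

/* Write side: one spinlock serializes every list update */
static struct demo_node *demo_create(u32 addr)
{
	struct demo_node *n;

	spin_lock_bh(&demo_list_lock);
	n = kzalloc(sizeof(*n), GFP_ATOMIC);	/* atomic: allocating under the lock */
	if (n) {
		n->addr = addr;
		hlist_add_head_rcu(&n->hash, &demo_htable[addr & 15]);
	}
	spin_unlock_bh(&demo_list_lock);
	return n;
}

static void demo_delete(struct demo_node *n)
{
	spin_lock_bh(&demo_list_lock);
	hlist_del_rcu(&n->hash);	/* unlink; in-flight readers still see n */
	spin_unlock_bh(&demo_list_lock);
	kfree_rcu(n, rcu);		/* actual kfree() deferred past a grace period */
}

kfree_rcu() is what makes hlist_del_rcu() safe here: a reader that fetched the node just before it was unlinked may keep dereferencing it until its rcu_read_unlock(), because the memory is not returned to the allocator until a full grace period has elapsed. That same property is why the patch can drop the read-side tipc_net_lock from tipc_node_find(), tipc_node_get_nodes() and tipc_node_get_links() entirely, leaving only the short node_list_lock critical sections on the write side.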
