Diffstat (limited to 'net/tipc/node.c')
-rw-r--r--   net/tipc/node.c | 241
1 file changed, 144 insertions, 97 deletions
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 6e6c434872e..5b44c3041be 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -2,7 +2,7 @@
  * net/tipc/node.c: TIPC node management routines
  *
  * Copyright (c) 2000-2006, 2012 Ericsson AB
- * Copyright (c) 2005-2006, 2010-2011, Wind River Systems
+ * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -44,13 +44,11 @@
 static void node_lost_contact(struct tipc_node *n_ptr);
 static void node_established_contact(struct tipc_node *n_ptr);
 
-static DEFINE_SPINLOCK(node_create_lock);
-
 static struct hlist_head node_htable[NODE_HTABLE_SIZE];
 LIST_HEAD(tipc_node_list);
 static u32 tipc_num_nodes;
-
-static atomic_t tipc_num_links = ATOMIC_INIT(0);
+static u32 tipc_num_links;
+static DEFINE_SPINLOCK(node_list_lock);
 
 /*
  * A trivial power-of-two bitmask technique is used for speed, since this
@@ -73,37 +71,26 @@ struct tipc_node *tipc_node_find(u32 addr)
 	if (unlikely(!in_own_cluster_exact(addr)))
 		return NULL;
 
-	hlist_for_each_entry(node, &node_htable[tipc_hashfn(addr)], hash) {
-		if (node->addr == addr)
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(node, &node_htable[tipc_hashfn(addr)], hash) {
+		if (node->addr == addr) {
+			rcu_read_unlock();
 			return node;
+		}
 	}
+	rcu_read_unlock();
 	return NULL;
 }
 
-/**
- * tipc_node_create - create neighboring node
- *
- * Currently, this routine is called by neighbor discovery code, which holds
- * net_lock for reading only.  We must take node_create_lock to ensure a node
- * isn't created twice if two different bearers discover the node at the same
- * time.  (It would be preferable to switch to holding net_lock in write mode,
- * but this is a non-trivial change.)
- */
 struct tipc_node *tipc_node_create(u32 addr)
 {
 	struct tipc_node *n_ptr, *temp_node;
 
-	spin_lock_bh(&node_create_lock);
-
-	n_ptr = tipc_node_find(addr);
-	if (n_ptr) {
-		spin_unlock_bh(&node_create_lock);
-		return n_ptr;
-	}
+	spin_lock_bh(&node_list_lock);
 
 	n_ptr = kzalloc(sizeof(*n_ptr), GFP_ATOMIC);
 	if (!n_ptr) {
-		spin_unlock_bh(&node_create_lock);
+		spin_unlock_bh(&node_list_lock);
 		pr_warn("Node creation failed, no memory\n");
 		return NULL;
 	}
@@ -114,31 +101,41 @@ struct tipc_node *tipc_node_create(u32 addr)
 	INIT_LIST_HEAD(&n_ptr->list);
 	INIT_LIST_HEAD(&n_ptr->nsub);
 
-	hlist_add_head(&n_ptr->hash, &node_htable[tipc_hashfn(addr)]);
+	hlist_add_head_rcu(&n_ptr->hash, &node_htable[tipc_hashfn(addr)]);
 
-	list_for_each_entry(temp_node, &tipc_node_list, list) {
+	list_for_each_entry_rcu(temp_node, &tipc_node_list, list) {
 		if (n_ptr->addr < temp_node->addr)
 			break;
 	}
-	list_add_tail(&n_ptr->list, &temp_node->list);
-	n_ptr->block_setup = WAIT_PEER_DOWN;
+	list_add_tail_rcu(&n_ptr->list, &temp_node->list);
+	n_ptr->action_flags = TIPC_WAIT_PEER_LINKS_DOWN;
 	n_ptr->signature = INVALID_NODE_SIG;
 
 	tipc_num_nodes++;
 
-	spin_unlock_bh(&node_create_lock);
+	spin_unlock_bh(&node_list_lock);
 	return n_ptr;
 }
 
-void tipc_node_delete(struct tipc_node *n_ptr)
+static void tipc_node_delete(struct tipc_node *n_ptr)
 {
-	list_del(&n_ptr->list);
-	hlist_del(&n_ptr->hash);
-	kfree(n_ptr);
+	list_del_rcu(&n_ptr->list);
+	hlist_del_rcu(&n_ptr->hash);
+	kfree_rcu(n_ptr, rcu);
 
 	tipc_num_nodes--;
 }
 
+void tipc_node_stop(void)
+{
+	struct tipc_node *node, *t_node;
+
+	spin_lock_bh(&node_list_lock);
+	list_for_each_entry_safe(node, t_node, &tipc_node_list, list)
+		tipc_node_delete(node);
+	spin_unlock_bh(&node_list_lock);
+}
+
 /**
  * tipc_node_link_up - handle addition of link
  *
@@ -147,11 +144,13 @@ void tipc_node_delete(struct tipc_node *n_ptr)
 void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 {
 	struct tipc_link **active = &n_ptr->active_links[0];
+	u32 addr = n_ptr->addr;
 
 	n_ptr->working_links++;
-
+	tipc_nametbl_publish(TIPC_LINK_STATE, addr, addr, TIPC_NODE_SCOPE,
+			     l_ptr->bearer_id, addr);
 	pr_info("Established link <%s> on network plane %c\n",
-		l_ptr->name, l_ptr->b_ptr->net_plane);
+		l_ptr->name, l_ptr->net_plane);
 
 	if (!active[0]) {
 		active[0] = active[1] = l_ptr;
@@ -162,7 +161,7 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 		pr_info("New link <%s> becomes standby\n", l_ptr->name);
 		return;
 	}
-	tipc_link_send_duplicate(active[0], l_ptr);
+	tipc_link_dup_queue_xmit(active[0], l_ptr);
 	if (l_ptr->priority == active[0]->priority) {
 		active[0] = l_ptr;
 		return;
@@ -206,16 +205,18 @@ static void node_select_active_links(struct tipc_node *n_ptr)
 void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 {
 	struct tipc_link **active;
+	u32 addr = n_ptr->addr;
 
 	n_ptr->working_links--;
+	tipc_nametbl_withdraw(TIPC_LINK_STATE, addr, l_ptr->bearer_id, addr);
 
 	if (!tipc_link_is_active(l_ptr)) {
 		pr_info("Lost standby link <%s> on network plane %c\n",
-			l_ptr->name, l_ptr->b_ptr->net_plane);
+			l_ptr->name, l_ptr->net_plane);
 		return;
 	}
 	pr_info("Lost link <%s> on network plane %c\n",
-		l_ptr->name, l_ptr->b_ptr->net_plane);
+		l_ptr->name, l_ptr->net_plane);
 
 	active = &n_ptr->active_links[0];
 	if (active[0] == l_ptr)
@@ -225,7 +226,7 @@ void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 	if (active[0] == l_ptr)
 		node_select_active_links(n_ptr);
 	if (tipc_node_is_up(n_ptr))
-		tipc_link_changeover(l_ptr);
+		tipc_link_failover_send_queue(l_ptr);
 	else
 		node_lost_contact(n_ptr);
 }
@@ -235,11 +236,6 @@ int tipc_node_active_links(struct tipc_node *n_ptr)
 	return n_ptr->active_links[0] != NULL;
 }
 
-int tipc_node_redundant_links(struct tipc_node *n_ptr)
-{
-	return n_ptr->working_links > 1;
-}
-
 int tipc_node_is_up(struct tipc_node *n_ptr)
 {
 	return tipc_node_active_links(n_ptr);
@@ -247,40 +243,36 @@ int tipc_node_is_up(struct tipc_node *n_ptr)
 
 void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 {
-	n_ptr->links[l_ptr->b_ptr->identity] = l_ptr;
-	atomic_inc(&tipc_num_links);
+	n_ptr->links[l_ptr->bearer_id] = l_ptr;
+	spin_lock_bh(&node_list_lock);
+	tipc_num_links++;
+	spin_unlock_bh(&node_list_lock);
 	n_ptr->link_cnt++;
 }
 
 void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 {
-	n_ptr->links[l_ptr->b_ptr->identity] = NULL;
-	atomic_dec(&tipc_num_links);
-	n_ptr->link_cnt--;
+	int i;
+
+	for (i = 0; i < MAX_BEARERS; i++) {
+		if (l_ptr != n_ptr->links[i])
+			continue;
+		n_ptr->links[i] = NULL;
+		spin_lock_bh(&node_list_lock);
+		tipc_num_links--;
+		spin_unlock_bh(&node_list_lock);
+		n_ptr->link_cnt--;
+	}
 }
 
 static void node_established_contact(struct tipc_node *n_ptr)
 {
-	tipc_k_signal((Handler)tipc_named_node_up, n_ptr->addr);
+	n_ptr->action_flags |= TIPC_NOTIFY_NODE_UP;
 	n_ptr->bclink.oos_state = 0;
 	n_ptr->bclink.acked = tipc_bclink_get_last_sent();
 	tipc_bclink_add_node(n_ptr->addr);
 }
 
-static void node_name_purge_complete(unsigned long node_addr)
-{
-	struct tipc_node *n_ptr;
-
-	read_lock_bh(&tipc_net_lock);
-	n_ptr = tipc_node_find(node_addr);
-	if (n_ptr) {
-		tipc_node_lock(n_ptr);
-		n_ptr->block_setup &= ~WAIT_NAMES_GONE;
-		tipc_node_unlock(n_ptr);
-	}
-	read_unlock_bh(&tipc_net_lock);
-}
-
 static void node_lost_contact(struct tipc_node *n_ptr)
 {
 	char addr_string[16];
@@ -291,16 +283,12 @@ static void node_lost_contact(struct tipc_node *n_ptr)
 
 	/* Flush broadcast link info associated with lost node */
 	if (n_ptr->bclink.recv_permitted) {
-		while (n_ptr->bclink.deferred_head) {
-			struct sk_buff *buf = n_ptr->bclink.deferred_head;
-			n_ptr->bclink.deferred_head = buf->next;
-			kfree_skb(buf);
-		}
+		kfree_skb_list(n_ptr->bclink.deferred_head);
 		n_ptr->bclink.deferred_size = 0;
 
-		if (n_ptr->bclink.defragm) {
-			kfree_skb(n_ptr->bclink.defragm);
-			n_ptr->bclink.defragm = NULL;
+		if (n_ptr->bclink.reasm_buf) {
+			kfree_skb(n_ptr->bclink.reasm_buf);
+			n_ptr->bclink.reasm_buf = NULL;
 		}
 
 		tipc_bclink_remove_node(n_ptr->addr);
@@ -319,12 +307,13 @@ static void node_lost_contact(struct tipc_node *n_ptr)
 		tipc_link_reset_fragments(l_ptr);
 	}
 
-	/* Notify subscribers */
-	tipc_nodesub_notify(n_ptr);
+	n_ptr->action_flags &= ~TIPC_WAIT_OWN_LINKS_DOWN;
 
-	/* Prevent re-contact with node until cleanup is done */
-	n_ptr->block_setup = WAIT_PEER_DOWN | WAIT_NAMES_GONE;
-	tipc_k_signal((Handler)node_name_purge_complete, n_ptr->addr);
+	/* Notify subscribers and prevent re-contact with node until
+	 * cleanup is done.
+	 */
+	n_ptr->action_flags |= TIPC_WAIT_PEER_LINKS_DOWN |
+			       TIPC_NOTIFY_NODE_DOWN;
 }
 
 struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
@@ -343,27 +332,28 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
 		return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
 						   " (network address)");
 
-	read_lock_bh(&tipc_net_lock);
+	spin_lock_bh(&node_list_lock);
 	if (!tipc_num_nodes) {
-		read_unlock_bh(&tipc_net_lock);
+		spin_unlock_bh(&node_list_lock);
 		return tipc_cfg_reply_none();
 	}
 
 	/* For now, get space for all other nodes */
 	payload_size = TLV_SPACE(sizeof(node_info)) * tipc_num_nodes;
 	if (payload_size > 32768u) {
-		read_unlock_bh(&tipc_net_lock);
+		spin_unlock_bh(&node_list_lock);
 		return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
 						   " (too many nodes)");
 	}
+	spin_unlock_bh(&node_list_lock);
+
 	buf = tipc_cfg_reply_alloc(payload_size);
-	if (!buf) {
-		read_unlock_bh(&tipc_net_lock);
+	if (!buf)
 		return NULL;
-	}
 
 	/* Add TLVs for all nodes in scope */
-	list_for_each_entry(n_ptr, &tipc_node_list, list) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
 		if (!tipc_in_scope(domain, n_ptr->addr))
 			continue;
 		node_info.addr = htonl(n_ptr->addr);
@@ -371,8 +361,7 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
 		tipc_cfg_append_tlv(buf, TIPC_TLV_NODE_INFO,
 				    &node_info, sizeof(node_info));
 	}
-
-	read_unlock_bh(&tipc_net_lock);
+	rcu_read_unlock();
 	return buf;
 }
 
@@ -395,21 +384,19 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
 	if (!tipc_own_addr)
 		return tipc_cfg_reply_none();
 
-	read_lock_bh(&tipc_net_lock);
-
+	spin_lock_bh(&node_list_lock);
 	/* Get space for all unicast links + broadcast link */
-	payload_size = TLV_SPACE(sizeof(link_info)) *
-		(atomic_read(&tipc_num_links) + 1);
+	payload_size = TLV_SPACE((sizeof(link_info)) * (tipc_num_links + 1));
 	if (payload_size > 32768u) {
-		read_unlock_bh(&tipc_net_lock);
+		spin_unlock_bh(&node_list_lock);
 		return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
 						   " (too many links)");
 	}
+	spin_unlock_bh(&node_list_lock);
+
 	buf = tipc_cfg_reply_alloc(payload_size);
-	if (!buf) {
-		read_unlock_bh(&tipc_net_lock);
+	if (!buf)
 		return NULL;
-	}
 
 	/* Add TLV for broadcast link */
 	link_info.dest = htonl(tipc_cluster_mask(tipc_own_addr));
@@ -418,7 +405,8 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
 	tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info));
 
 	/* Add TLVs for any other links in scope */
-	list_for_each_entry(n_ptr, &tipc_node_list, list) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
 		u32 i;
 
 		if (!tipc_in_scope(domain, n_ptr->addr))
@@ -435,7 +423,66 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
 		}
 		tipc_node_unlock(n_ptr);
 	}
-
-	read_unlock_bh(&tipc_net_lock);
+	rcu_read_unlock();
 	return buf;
 }
+
+/**
+ * tipc_node_get_linkname - get the name of a link
+ *
+ * @bearer_id: id of the bearer
+ * @node: peer node address
+ * @linkname: link name output buffer
+ *
+ * Returns 0 on success
+ */
+int tipc_node_get_linkname(u32 bearer_id, u32 addr, char *linkname, size_t len)
+{
+	struct tipc_link *link;
+	struct tipc_node *node = tipc_node_find(addr);
+
+	if ((bearer_id >= MAX_BEARERS) || !node)
+		return -EINVAL;
+	tipc_node_lock(node);
+	link = node->links[bearer_id];
+	if (link) {
+		strncpy(linkname, link->name, len);
+		tipc_node_unlock(node);
+		return 0;
+	}
+	tipc_node_unlock(node);
+	return -EINVAL;
+}
+
+void tipc_node_unlock(struct tipc_node *node)
+{
+	LIST_HEAD(nsub_list);
+	struct tipc_link *link;
+	int pkt_sz = 0;
+	u32 addr = 0;
+
+	if (likely(!node->action_flags)) {
+		spin_unlock_bh(&node->lock);
+		return;
+	}
+
+	if (node->action_flags & TIPC_NOTIFY_NODE_DOWN) {
+		list_replace_init(&node->nsub, &nsub_list);
+		node->action_flags &= ~TIPC_NOTIFY_NODE_DOWN;
+	}
+	if (node->action_flags & TIPC_NOTIFY_NODE_UP) {
+		link = node->active_links[0];
+		node->action_flags &= ~TIPC_NOTIFY_NODE_UP;
+		if (link) {
+			pkt_sz = ((link->max_pkt - INT_H_SIZE) / ITEM_SIZE) *
+				  ITEM_SIZE;
+			addr = node->addr;
+		}
+	}
+	spin_unlock_bh(&node->lock);
+
+	if (!list_empty(&nsub_list))
+		tipc_nodesub_notify(&nsub_list);
+	if (pkt_sz)
+		tipc_named_node_up(pkt_sz, addr);
+}
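
Note on the lookup/update changes in tipc_node_find()/tipc_node_create()/tipc_node_delete() above: they follow the standard Linux read-mostly RCU pattern, with lock-free readers and a single spinlock (node_list_lock) serializing updaters. The following is a minimal, self-contained sketch of that pattern for reference; all names (demo_node, demo_htable, demo_list_lock, ...) are illustrative stand-ins, not part of the TIPC code.

	/* Sketch of an RCU-protected hash table: lock-free lookup,
	 * spinlock-serialized insert/delete, deferred free.
	 */
	#include <linux/list.h>
	#include <linux/rculist.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>
	#include <linux/types.h>

	#define DEMO_HTABLE_SIZE 32	/* power of two, as for node_htable */

	struct demo_node {
		u32 addr;
		struct hlist_node hash;
		struct rcu_head rcu;	/* required by kfree_rcu() below */
	};

	static struct hlist_head demo_htable[DEMO_HTABLE_SIZE];
	static DEFINE_SPINLOCK(demo_list_lock);	/* serializes updaters only */

	static unsigned int demo_hashfn(u32 addr)
	{
		return addr & (DEMO_HTABLE_SIZE - 1);
	}

	/* Reader side: no lock, just an RCU read-side critical section */
	static struct demo_node *demo_node_find(u32 addr)
	{
		struct demo_node *node;

		rcu_read_lock();
		hlist_for_each_entry_rcu(node, &demo_htable[demo_hashfn(addr)], hash) {
			if (node->addr == addr) {
				rcu_read_unlock();
				return node;	/* caller must otherwise guarantee lifetime */
			}
		}
		rcu_read_unlock();
		return NULL;
	}

	/* Updater side: spinlock protects against concurrent creators/destroyers */
	static struct demo_node *demo_node_create(u32 addr)
	{
		struct demo_node *node = kzalloc(sizeof(*node), GFP_ATOMIC);

		if (!node)
			return NULL;
		node->addr = addr;
		spin_lock_bh(&demo_list_lock);
		hlist_add_head_rcu(&node->hash, &demo_htable[demo_hashfn(addr)]);
		spin_unlock_bh(&demo_list_lock);
		return node;
	}

	static void demo_node_delete(struct demo_node *node)
	{
		spin_lock_bh(&demo_list_lock);
		hlist_del_rcu(&node->hash);
		spin_unlock_bh(&demo_list_lock);
		kfree_rcu(node, rcu);	/* free only after current readers finish */
	}

The point of the split is that readers never contend with each other or with updaters; only create/delete take demo_list_lock, and kfree_rcu() guarantees the memory is not reused while a reader may still be traversing it.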

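A second pattern worth noting is the new tipc_node_unlock(): pending work is recorded in action_flags while node->lock is held, and the actual notifications run only after the lock is dropped, replacing the old tipc_k_signal() deferral. Below is a minimal sketch of that "record under the lock, act after unlock" idea; the names (demo_node, demo_notify_up/down, DEMO_NOTIFY_*) are illustrative assumptions, not TIPC symbols.

	#include <linux/kernel.h>
	#include <linux/list.h>
	#include <linux/spinlock.h>
	#include <linux/types.h>

	#define DEMO_NOTIFY_UP   (1 << 0)
	#define DEMO_NOTIFY_DOWN (1 << 1)

	struct demo_node {
		spinlock_t lock;
		u32 addr;
		unsigned int action_flags;	/* pending work, set while lock is held */
		struct list_head subscribers;	/* entries to notify on node-down */
	};

	/* Stand-ins for the real notification routines */
	static void demo_notify_up(u32 addr) { }
	static void demo_notify_down(struct list_head *subs) { }

	static void demo_node_unlock(struct demo_node *node)
	{
		LIST_HEAD(down_list);
		unsigned int flags = node->action_flags;
		u32 addr = node->addr;

		if (likely(!flags)) {
			spin_unlock_bh(&node->lock);
			return;
		}

		/* Snapshot the work and clear the flags while still serialized... */
		if (flags & DEMO_NOTIFY_DOWN)
			list_replace_init(&node->subscribers, &down_list);
		node->action_flags &= ~(DEMO_NOTIFY_UP | DEMO_NOTIFY_DOWN);
		spin_unlock_bh(&node->lock);

		/* ...then run the callbacks without holding node->lock, so they
		 * can take other locks without creating ordering problems.
		 */
		if (flags & DEMO_NOTIFY_UP)
			demo_notify_up(addr);
		if (!list_empty(&down_list))
			demo_notify_down(&down_list);
	}

Issuing the callbacks outside node->lock is what lets the patch call tipc_nodesub_notify() and tipc_named_node_up() directly instead of bouncing them through a deferred signal handler.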