Diffstat (limited to 'net/tipc')
-rw-r--r--  net/tipc/Kconfig                                       78
-rw-r--r--  net/tipc/Makefile                                       9
-rw-r--r--  net/tipc/addr.c                                        26
-rw-r--r--  net/tipc/addr.h                                        40
-rw-r--r--  net/tipc/bcast.c                                      881
-rw-r--r--  net/tipc/bcast.h                                       38
-rw-r--r--  net/tipc/bearer.c                                     799
-rw-r--r--  net/tipc/bearer.h                                     216
-rw-r--r--  net/tipc/cluster.c                                    550
-rw-r--r--  net/tipc/cluster.h                                     92
-rw-r--r--  net/tipc/config.c                                     359
-rw-r--r--  net/tipc/config.h                                       5
-rw-r--r--  net/tipc/core.c                                       193
-rw-r--r--  net/tipc/core.h                                       231
-rw-r--r--  net/tipc/dbg.c                                        432
-rw-r--r--  net/tipc/discover.c                                   431
-rw-r--r--  net/tipc/discover.h                                    18
-rw-r--r--  net/tipc/eth_media.c                                  317
-rw-r--r--  net/tipc/handler.c                                    132
-rw-r--r--  net/tipc/ib_media.c                                   101
-rw-r--r--  net/tipc/link.c                                      2629
-rw-r--r--  net/tipc/link.h                                       207
-rw-r--r--  net/tipc/log.c (renamed from net/tipc/user_reg.h)      36
-rw-r--r--  net/tipc/msg.c                                        397
-rw-r--r--  net/tipc/msg.h                                        312
-rw-r--r--  net/tipc/name_distr.c                                 245
-rw-r--r--  net/tipc/name_distr.h                                  37
-rw-r--r--  net/tipc/name_table.c                                 696
-rw-r--r--  net/tipc/name_table.h                                  40
-rw-r--r--  net/tipc/net.c                                        177
-rw-r--r--  net/tipc/net.h                                         21
-rw-r--r--  net/tipc/netlink.c                                     31
-rw-r--r--  net/tipc/node.c                                       655
-rw-r--r--  net/tipc/node.h                                       151
-rw-r--r--  net/tipc/node_subscr.c                                 28
-rw-r--r--  net/tipc/node_subscr.h                                  4
-rw-r--r--  net/tipc/port.c                                      1249
-rw-r--r--  net/tipc/port.h                                       278
-rw-r--r--  net/tipc/ref.c                                         64
-rw-r--r--  net/tipc/ref.h                                          1
-rw-r--r--  net/tipc/server.c                                     600
-rw-r--r--  net/tipc/server.h                                      92
-rw-r--r--  net/tipc/socket.c                                    1598
-rw-r--r--  net/tipc/socket.h (renamed from net/tipc/zone.h)       56
-rw-r--r--  net/tipc/subscr.c                                     459
-rw-r--r--  net/tipc/subscr.h                                      33
-rw-r--r--  net/tipc/sysctl.c (renamed from net/tipc/dbg.h)        55
-rw-r--r--  net/tipc/user_reg.c                                   218
-rw-r--r--  net/tipc/zone.c                                       159
49 files changed, 5998 insertions, 9478 deletions
diff --git a/net/tipc/Kconfig b/net/tipc/Kconfig
index b74f78d0c03..c890848f9d5 100644
--- a/net/tipc/Kconfig
+++ b/net/tipc/Kconfig
@@ -3,8 +3,8 @@
#
menuconfig TIPC
- tristate "The TIPC Protocol (EXPERIMENTAL)"
- depends on INET && EXPERIMENTAL
+ tristate "The TIPC Protocol"
+ depends on INET
---help---
The Transparent Inter Process Communication (TIPC) protocol is
specially designed for intra cluster communication. This protocol
@@ -20,81 +20,21 @@ menuconfig TIPC
If in doubt, say N.
-if TIPC
-
-config TIPC_ADVANCED
- bool "Advanced TIPC configuration"
- default n
- help
- Saying Y here will open some advanced configuration for TIPC.
- Most users do not need to bother; if unsure, just say N.
-
-config TIPC_ZONES
- int "Maximum number of zones in a network"
- depends on TIPC_ADVANCED
- range 1 255
- default "3"
- help
- Specifies how many zones can be supported in a TIPC network.
- Can range from 1 to 255 zones; default is 3.
-
- Setting this to a smaller value saves some memory;
- setting it to a higher value allows for more zones.
-
-config TIPC_CLUSTERS
- int "Maximum number of clusters in a zone"
- depends on TIPC_ADVANCED
- range 1 1
- default "1"
- help
- Specifies how many clusters can be supported in a TIPC zone.
-
- *** Currently TIPC only supports a single cluster per zone. ***
-
-config TIPC_NODES
- int "Maximum number of nodes in a cluster"
- depends on TIPC_ADVANCED
- range 8 2047
- default "255"
- help
- Specifies how many nodes can be supported in a TIPC cluster.
- Can range from 8 to 2047 nodes; default is 255.
-
- Setting this to a smaller value saves some memory;
- setting it to higher allows for more nodes.
-
config TIPC_PORTS
int "Maximum number of ports in a node"
- depends on TIPC_ADVANCED
+ depends on TIPC
range 127 65535
default "8191"
help
Specifies how many ports can be supported by a node.
Can range from 127 to 65535 ports; default is 8191.
- Setting this to a smaller value saves some memory,
+ Setting this to a smaller value saves some memory,
setting it to higher allows for more ports.
-config TIPC_LOG
- int "Size of log buffer"
- depends on TIPC_ADVANCED
- range 0 32768
- default "0"
- help
- Size (in bytes) of TIPC's internal log buffer, which records the
- occurrence of significant events. Can range from 0 to 32768 bytes;
- default is 0.
-
- There is no need to enable the log buffer unless the node will be
- managed remotely via TIPC.
-
-config TIPC_DEBUG
- bool "Enable debug messages"
- default n
+config TIPC_MEDIA_IB
+ bool "InfiniBand media type support"
+ depends on TIPC && INFINIBAND_IPOIB
help
- This enables debugging of TIPC.
-
- Only say Y here if you are having trouble with TIPC. It will
- enable the display of detailed information about what is going on.
-
-endif # TIPC
+ Saying Y here will enable support for running TIPC on
+ IP-over-InfiniBand devices.
diff --git a/net/tipc/Makefile b/net/tipc/Makefile
index dceb7027946..a080c66d819 100644
--- a/net/tipc/Makefile
+++ b/net/tipc/Makefile
@@ -4,10 +4,11 @@
obj-$(CONFIG_TIPC) := tipc.o
-tipc-y += addr.o bcast.o bearer.o config.o cluster.o \
- core.o handler.o link.o discover.o msg.o \
+tipc-y += addr.o bcast.o bearer.o config.o \
+ core.o link.o discover.o msg.o \
name_distr.o subscr.o name_table.o net.o \
netlink.o node.o node_subscr.o port.o ref.o \
- socket.o user_reg.o zone.o dbg.o eth_media.o
+ socket.o log.o eth_media.o server.o
-# End of file
+tipc-$(CONFIG_TIPC_MEDIA_IB) += ib_media.o
+tipc-$(CONFIG_SYSCTL) += sysctl.o
diff --git a/net/tipc/addr.c b/net/tipc/addr.c
index 886715a7525..357b74b26f9 100644
--- a/net/tipc/addr.c
+++ b/net/tipc/addr.c
@@ -2,7 +2,7 @@
* net/tipc/addr.c: TIPC address utility routines
*
* Copyright (c) 2000-2006, Ericsson AB
- * Copyright (c) 2004-2005, Wind River Systems
+ * Copyright (c) 2004-2005, 2010-2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -36,33 +36,20 @@
#include "core.h"
#include "addr.h"
-#include "zone.h"
-#include "cluster.h"
/**
* tipc_addr_domain_valid - validates a network domain address
*
* Accepts <Z.C.N>, <Z.C.0>, <Z.0.0>, and <0.0.0>,
- * where Z, C, and N are non-zero and do not exceed the configured limits.
+ * where Z, C, and N are non-zero.
*
* Returns 1 if domain address is valid, otherwise 0
*/
-
int tipc_addr_domain_valid(u32 addr)
{
u32 n = tipc_node(addr);
u32 c = tipc_cluster(addr);
u32 z = tipc_zone(addr);
- u32 max_nodes = tipc_max_nodes;
-
- if (is_slave(addr))
- max_nodes = LOWEST_SLAVE + tipc_max_slaves;
- if (n > max_nodes)
- return 0;
- if (c > tipc_max_clusters)
- return 0;
- if (z > tipc_max_zones)
- return 0;
if (n && (!z || !c))
return 0;
@@ -74,12 +61,10 @@ int tipc_addr_domain_valid(u32 addr)
/**
* tipc_addr_node_valid - validates a proposed network address for this node
*
- * Accepts <Z.C.N>, where Z, C, and N are non-zero and do not exceed
- * the configured limits.
+ * Accepts <Z.C.N>, where Z, C, and N are non-zero.
*
* Returns 1 if address can be used, otherwise 0
*/
-
int tipc_addr_node_valid(u32 addr)
{
return tipc_addr_domain_valid(addr) && tipc_node(addr);
@@ -89,9 +74,9 @@ int tipc_in_scope(u32 domain, u32 addr)
{
if (!domain || (domain == addr))
return 1;
- if (domain == (addr & 0xfffff000u)) /* domain <Z.C.0> */
+ if (domain == tipc_cluster_mask(addr)) /* domain <Z.C.0> */
return 1;
- if (domain == (addr & 0xff000000u)) /* domain <Z.0.0> */
+ if (domain == tipc_zone_mask(addr)) /* domain <Z.0.0> */
return 1;
return 0;
}
@@ -99,7 +84,6 @@ int tipc_in_scope(u32 domain, u32 addr)
/**
* tipc_addr_scope - convert message lookup domain to a 2-bit scope value
*/
-
int tipc_addr_scope(u32 domain)
{
if (likely(!domain))
diff --git a/net/tipc/addr.h b/net/tipc/addr.h
index c1cc5724d8c..a74acf9ee80 100644
--- a/net/tipc/addr.h
+++ b/net/tipc/addr.h
@@ -37,34 +37,40 @@
#ifndef _TIPC_ADDR_H
#define _TIPC_ADDR_H
-static inline u32 own_node(void)
-{
- return tipc_node(tipc_own_addr);
-}
+#include "core.h"
+
+#define TIPC_ZONE_MASK 0xff000000u
+#define TIPC_CLUSTER_MASK 0xfffff000u
-static inline u32 own_cluster(void)
+static inline u32 tipc_zone_mask(u32 addr)
{
- return tipc_cluster(tipc_own_addr);
+ return addr & TIPC_ZONE_MASK;
}
-static inline u32 own_zone(void)
+static inline u32 tipc_cluster_mask(u32 addr)
{
- return tipc_zone(tipc_own_addr);
+ return addr & TIPC_CLUSTER_MASK;
}
-static inline int in_own_cluster(u32 addr)
+static inline int in_own_cluster_exact(u32 addr)
{
return !((addr ^ tipc_own_addr) >> 12);
}
-static inline int is_slave(u32 addr)
+/**
+ * in_own_node - test for node inclusion; <0.0.0> always matches
+ */
+static inline int in_own_node(u32 addr)
{
- return addr & 0x800;
+ return (addr == tipc_own_addr) || !addr;
}
-static inline int may_route(u32 addr)
+/**
+ * in_own_cluster - test for cluster inclusion; <0.0.0> always matches
+ */
+static inline int in_own_cluster(u32 addr)
{
- return(addr ^ tipc_own_addr) >> 11;
+ return in_own_cluster_exact(addr) || !addr;
}
/**
@@ -73,15 +79,13 @@ static inline int may_route(u32 addr)
* Needed when address of a named message must be looked up a second time
* after a network hop.
*/
-
-static inline int addr_domain(int sc)
+static inline u32 addr_domain(u32 sc)
{
if (likely(sc == TIPC_NODE_SCOPE))
return tipc_own_addr;
if (sc == TIPC_CLUSTER_SCOPE)
- return tipc_addr(tipc_zone(tipc_own_addr),
- tipc_cluster(tipc_own_addr), 0);
- return tipc_addr(tipc_zone(tipc_own_addr), 0, 0);
+ return tipc_cluster_mask(tipc_own_addr);
+ return tipc_zone_mask(tipc_own_addr);
}
int tipc_addr_domain_valid(u32);
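
Note on the two masks introduced above: a TIPC network address packs <Z.C.N> into one u32 (zone in the top 8 bits, cluster in the next 12, node in the low 12), which is why TIPC_ZONE_MASK keeps the top 8 bits and TIPC_CLUSTER_MASK the top 20. An illustrative user-space sketch, not part of the patch:

#include <stdio.h>

#define TIPC_ZONE_MASK    0xff000000u
#define TIPC_CLUSTER_MASK 0xfffff000u

/* Same packing the kernel uses: <Z.C.N> -> (Z << 24) | (C << 12) | N */
static unsigned int tipc_addr(unsigned int z, unsigned int c, unsigned int n)
{
        return (z << 24) | (c << 12) | n;
}

int main(void)
{
        unsigned int addr = tipc_addr(1, 1, 10);        /* node <1.1.10> */

        printf("node address   : 0x%08x\n", addr);
        printf("cluster domain : 0x%08x\n", addr & TIPC_CLUSTER_MASK); /* <1.1.0> */
        printf("zone domain    : 0x%08x\n", addr & TIPC_ZONE_MASK);    /* <1.0.0> */
        return 0;
}

This matches addr_domain() above, which now returns the cluster or zone mask of tipc_own_addr directly instead of rebuilding the address field by field.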
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 6d828d9eda4..55c6c9d3e1c 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -3,7 +3,7 @@
*
* Copyright (c) 2004-2006, Ericsson AB
* Copyright (c) 2004, Intel Corporation.
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -38,24 +38,15 @@
#include "core.h"
#include "link.h"
#include "port.h"
-#include "name_distr.h"
#include "bcast.h"
+#include "name_distr.h"
-#define MAX_PKT_DEFAULT_MCAST 1500 /* bcast link max packet size (fixed) */
-
-#define BCLINK_WIN_DEFAULT 20 /* bcast link window size (default) */
-
-#define BCLINK_LOG_BUF_SIZE 0
-
-/*
- * Loss rate for incoming broadcast frames; used to test retransmission code.
- * Set to N to cause every N'th frame to be discarded; 0 => don't discard any.
- */
-
-#define TIPC_BCAST_LOSS_RATE 0
+#define MAX_PKT_DEFAULT_MCAST 1500 /* bcast link max packet size (fixed) */
+#define BCLINK_WIN_DEFAULT 20 /* bcast link window size (default) */
+#define BCBEARER MAX_BEARERS
/**
- * struct bcbearer_pair - a pair of bearers used by broadcast link
+ * struct tipc_bcbearer_pair - a pair of bearers used by broadcast link
* @primary: pointer to primary bearer
* @secondary: pointer to secondary bearer
*
@@ -63,13 +54,13 @@
* to be paired.
*/
-struct bcbearer_pair {
- struct bearer *primary;
- struct bearer *secondary;
+struct tipc_bcbearer_pair {
+ struct tipc_bearer *primary;
+ struct tipc_bearer *secondary;
};
/**
- * struct bcbearer - bearer used by broadcast link
+ * struct tipc_bcbearer - bearer used by broadcast link
* @bearer: (non-standard) broadcast bearer structure
* @media: (non-standard) broadcast media structure
* @bpairs: array of bearer pairs
@@ -80,46 +71,76 @@ struct bcbearer_pair {
* Note: The fields labelled "temporary" are incorporated into the bearer
* to avoid consuming potentially limited stack space through the use of
* large local variables within multicast routines. Concurrent access is
- * prevented through use of the spinlock "bc_lock".
+ * prevented through use of the spinlock "bclink_lock".
*/
-
-struct bcbearer {
- struct bearer bearer;
- struct media media;
- struct bcbearer_pair bpairs[MAX_BEARERS];
- struct bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
+struct tipc_bcbearer {
+ struct tipc_bearer bearer;
+ struct tipc_media media;
+ struct tipc_bcbearer_pair bpairs[MAX_BEARERS];
+ struct tipc_bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
struct tipc_node_map remains;
struct tipc_node_map remains_new;
};
/**
- * struct bclink - link used for broadcast messages
+ * struct tipc_bclink - link used for broadcast messages
+ * @lock: spinlock governing access to structure
* @link: (non-standard) broadcast link structure
* @node: (non-standard) node structure representing b'cast link's peer node
+ * @flags: represent bclink states
+ * @bcast_nodes: map of broadcast-capable nodes
+ * @retransmit_to: node that most recently requested a retransmit
*
* Handles sequence numbering, fragmentation, bundling, etc.
*/
-
-struct bclink {
- struct link link;
+struct tipc_bclink {
+ spinlock_t lock;
+ struct tipc_link link;
struct tipc_node node;
+ unsigned int flags;
+ struct tipc_node_map bcast_nodes;
+ struct tipc_node *retransmit_to;
};
-
-static struct bcbearer *bcbearer = NULL;
-static struct bclink *bclink = NULL;
-static struct link *bcl = NULL;
-static DEFINE_SPINLOCK(bc_lock);
+static struct tipc_bcbearer *bcbearer;
+static struct tipc_bclink *bclink;
+static struct tipc_link *bcl;
const char tipc_bclink_name[] = "broadcast-link";
static void tipc_nmap_diff(struct tipc_node_map *nm_a,
struct tipc_node_map *nm_b,
struct tipc_node_map *nm_diff);
+static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
+static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);
-static u32 buf_seqno(struct sk_buff *buf)
+static void tipc_bclink_lock(void)
{
- return msg_seqno(buf_msg(buf));
+ spin_lock_bh(&bclink->lock);
+}
+
+static void tipc_bclink_unlock(void)
+{
+ struct tipc_node *node = NULL;
+
+ if (likely(!bclink->flags)) {
+ spin_unlock_bh(&bclink->lock);
+ return;
+ }
+
+ if (bclink->flags & TIPC_BCLINK_RESET) {
+ bclink->flags &= ~TIPC_BCLINK_RESET;
+ node = tipc_bclink_retransmit_to();
+ }
+ spin_unlock_bh(&bclink->lock);
+
+ if (node)
+ tipc_link_reset_all(node);
+}
+
+void tipc_bclink_set_flags(unsigned int flags)
+{
+ bclink->flags |= flags;
}
static u32 bcbuf_acks(struct sk_buff *buf)
@@ -137,6 +158,19 @@ static void bcbuf_decr_acks(struct sk_buff *buf)
bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
}
+void tipc_bclink_add_node(u32 addr)
+{
+ tipc_bclink_lock();
+ tipc_nmap_add(&bclink->bcast_nodes, addr);
+ tipc_bclink_unlock();
+}
+
+void tipc_bclink_remove_node(u32 addr)
+{
+ tipc_bclink_lock();
+ tipc_nmap_remove(&bclink->bcast_nodes, addr);
+ tipc_bclink_unlock();
+}
static void bclink_set_last_sent(void)
{
@@ -151,54 +185,37 @@ u32 tipc_bclink_get_last_sent(void)
return bcl->fsm_msg_cnt;
}
-/**
- * bclink_set_gap - set gap according to contents of current deferred pkt queue
- *
- * Called with 'node' locked, bc_lock unlocked
- */
-
-static void bclink_set_gap(struct tipc_node *n_ptr)
+static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
{
- struct sk_buff *buf = n_ptr->bclink.deferred_head;
-
- n_ptr->bclink.gap_after = n_ptr->bclink.gap_to =
- mod(n_ptr->bclink.last_in);
- if (unlikely(buf != NULL))
- n_ptr->bclink.gap_to = mod(buf_seqno(buf) - 1);
+ node->bclink.last_sent = less_eq(node->bclink.last_sent, seqno) ?
+ seqno : node->bclink.last_sent;
}
+
/**
- * bclink_ack_allowed - test if ACK or NACK message can be sent at this moment
+ * tipc_bclink_retransmit_to - get most recent node to request retransmission
*
- * This mechanism endeavours to prevent all nodes in network from trying
- * to ACK or NACK at the same time.
- *
- * Note: TIPC uses a different trigger to distribute ACKs than it does to
- * distribute NACKs, but tries to use the same spacing (divide by 16).
+ * Called with bclink_lock locked
*/
-
-static int bclink_ack_allowed(u32 n)
+struct tipc_node *tipc_bclink_retransmit_to(void)
{
- return (n % TIPC_MIN_LINK_WIN) == tipc_own_tag;
+ return bclink->retransmit_to;
}
-
/**
* bclink_retransmit_pkt - retransmit broadcast packets
* @after: sequence number of last packet to *not* retransmit
* @to: sequence number of last packet to retransmit
*
- * Called with bc_lock locked
+ * Called with bclink_lock locked
*/
-
static void bclink_retransmit_pkt(u32 after, u32 to)
{
struct sk_buff *buf;
buf = bcl->first_out;
- while (buf && less_eq(buf_seqno(buf), after)) {
+ while (buf && less_eq(buf_seqno(buf), after))
buf = buf->next;
- }
tipc_link_retransmit(bcl, buf, mod(to - after));
}
@@ -207,36 +224,63 @@ static void bclink_retransmit_pkt(u32 after, u32 to)
* @n_ptr: node that sent acknowledgement info
* @acked: broadcast sequence # that has been acknowledged
*
- * Node is locked, bc_lock unlocked.
+ * Node is locked, bclink_lock unlocked.
*/
-
void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
{
struct sk_buff *crs;
struct sk_buff *next;
unsigned int released = 0;
- if (less_eq(acked, n_ptr->bclink.acked))
- return;
+ tipc_bclink_lock();
+ /* Bail out if tx queue is empty (no clean up is required) */
+ crs = bcl->first_out;
+ if (!crs)
+ goto exit;
- spin_lock_bh(&bc_lock);
+ /* Determine which messages need to be acknowledged */
+ if (acked == INVALID_LINK_SEQ) {
+ /*
+ * Contact with specified node has been lost, so need to
+ * acknowledge sent messages only (if other nodes still exist)
+ * or both sent and unsent messages (otherwise)
+ */
+ if (bclink->bcast_nodes.count)
+ acked = bcl->fsm_msg_cnt;
+ else
+ acked = bcl->next_out_no;
+ } else {
+ /*
+ * Bail out if specified sequence number does not correspond
+ * to a message that has been sent and not yet acknowledged
+ */
+ if (less(acked, buf_seqno(crs)) ||
+ less(bcl->fsm_msg_cnt, acked) ||
+ less_eq(acked, n_ptr->bclink.acked))
+ goto exit;
+ }
/* Skip over packets that node has previously acknowledged */
-
- crs = bcl->first_out;
- while (crs && less_eq(buf_seqno(crs), n_ptr->bclink.acked)) {
+ while (crs && less_eq(buf_seqno(crs), n_ptr->bclink.acked))
crs = crs->next;
- }
/* Update packets that node is now acknowledging */
while (crs && less_eq(buf_seqno(crs), acked)) {
next = crs->next;
- bcbuf_decr_acks(crs);
+
+ if (crs != bcl->next_out)
+ bcbuf_decr_acks(crs);
+ else {
+ bcbuf_set_acks(crs, 0);
+ bcl->next_out = next;
+ bclink_set_last_sent();
+ }
+
if (bcbuf_acks(crs) == 0) {
bcl->first_out = next;
bcl->out_queue_size--;
- buf_discard(crs);
+ kfree_skb(crs);
released = 1;
}
crs = next;
@@ -251,297 +295,305 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
}
if (unlikely(released && !list_empty(&bcl->waiting_ports)))
tipc_link_wakeup_ports(bcl, 0);
- spin_unlock_bh(&bc_lock);
+exit:
+ tipc_bclink_unlock();
}
/**
- * bclink_send_ack - unicast an ACK msg
+ * tipc_bclink_update_link_state - update broadcast link state
*
- * tipc_net_lock and node lock set
+ * RCU and node lock set
*/
-
-static void bclink_send_ack(struct tipc_node *n_ptr)
+void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
{
- struct link *l_ptr = n_ptr->active_links[n_ptr->addr & 1];
+ struct sk_buff *buf;
- if (l_ptr != NULL)
- tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
-}
+ /* Ignore "stale" link state info */
-/**
- * bclink_send_nack- broadcast a NACK msg
- *
- * tipc_net_lock and node lock set
- */
+ if (less_eq(last_sent, n_ptr->bclink.last_in))
+ return;
-static void bclink_send_nack(struct tipc_node *n_ptr)
-{
- struct sk_buff *buf;
- struct tipc_msg *msg;
+ /* Update link synchronization state; quit if in sync */
- if (!less(n_ptr->bclink.gap_after, n_ptr->bclink.gap_to))
- return;
+ bclink_update_last_sent(n_ptr, last_sent);
- buf = tipc_buf_acquire(INT_H_SIZE);
- if (buf) {
- msg = buf_msg(buf);
- tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
- INT_H_SIZE, n_ptr->addr);
- msg_set_mc_netid(msg, tipc_net_id);
- msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in));
- msg_set_bcgap_after(msg, n_ptr->bclink.gap_after);
- msg_set_bcgap_to(msg, n_ptr->bclink.gap_to);
- msg_set_bcast_tag(msg, tipc_own_tag);
-
- if (tipc_bearer_send(&bcbearer->bearer, buf, NULL)) {
- bcl->stats.sent_nacks++;
- buf_discard(buf);
- } else {
- tipc_bearer_schedule(bcl->b_ptr, bcl);
- bcl->proto_msg_queue = buf;
- bcl->stats.bearer_congs++;
- }
+ if (n_ptr->bclink.last_sent == n_ptr->bclink.last_in)
+ return;
- /*
- * Ensure we doesn't send another NACK msg to the node
- * until 16 more deferred messages arrive from it
- * (i.e. helps prevent all nodes from NACK'ing at same time)
- */
+ /* Update out-of-sync state; quit if loss is still unconfirmed */
- n_ptr->bclink.nack_sync = tipc_own_tag;
+ if ((++n_ptr->bclink.oos_state) == 1) {
+ if (n_ptr->bclink.deferred_size < (TIPC_MIN_LINK_WIN / 2))
+ return;
+ n_ptr->bclink.oos_state++;
}
-}
-/**
- * tipc_bclink_check_gap - send a NACK if a sequence gap exists
- *
- * tipc_net_lock and node lock set
- */
+ /* Don't NACK if one has been recently sent (or seen) */
-void tipc_bclink_check_gap(struct tipc_node *n_ptr, u32 last_sent)
-{
- if (!n_ptr->bclink.supported ||
- less_eq(last_sent, mod(n_ptr->bclink.last_in)))
+ if (n_ptr->bclink.oos_state & 0x1)
return;
- bclink_set_gap(n_ptr);
- if (n_ptr->bclink.gap_after == n_ptr->bclink.gap_to)
- n_ptr->bclink.gap_to = last_sent;
- bclink_send_nack(n_ptr);
+ /* Send NACK */
+
+ buf = tipc_buf_acquire(INT_H_SIZE);
+ if (buf) {
+ struct tipc_msg *msg = buf_msg(buf);
+
+ tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
+ INT_H_SIZE, n_ptr->addr);
+ msg_set_non_seq(msg, 1);
+ msg_set_mc_netid(msg, tipc_net_id);
+ msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
+ msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
+ msg_set_bcgap_to(msg, n_ptr->bclink.deferred_head
+ ? buf_seqno(n_ptr->bclink.deferred_head) - 1
+ : n_ptr->bclink.last_sent);
+
+ tipc_bclink_lock();
+ tipc_bearer_send(MAX_BEARERS, buf, NULL);
+ bcl->stats.sent_nacks++;
+ tipc_bclink_unlock();
+ kfree_skb(buf);
+
+ n_ptr->bclink.oos_state++;
+ }
}
/**
- * tipc_bclink_peek_nack - process a NACK msg meant for another node
+ * bclink_peek_nack - monitor retransmission requests sent by other nodes
*
- * Only tipc_net_lock set.
+ * Delay any upcoming NACK by this node if another node has already
+ * requested the first message this node is going to ask for.
*/
-
-static void tipc_bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to)
+static void bclink_peek_nack(struct tipc_msg *msg)
{
- struct tipc_node *n_ptr = tipc_node_find(dest);
- u32 my_after, my_to;
+ struct tipc_node *n_ptr = tipc_node_find(msg_destnode(msg));
- if (unlikely(!n_ptr || !tipc_node_is_up(n_ptr)))
+ if (unlikely(!n_ptr))
return;
+
tipc_node_lock(n_ptr);
- /*
- * Modify gap to suppress unnecessary NACKs from this node
- */
- my_after = n_ptr->bclink.gap_after;
- my_to = n_ptr->bclink.gap_to;
-
- if (less_eq(gap_after, my_after)) {
- if (less(my_after, gap_to) && less(gap_to, my_to))
- n_ptr->bclink.gap_after = gap_to;
- else if (less_eq(my_to, gap_to))
- n_ptr->bclink.gap_to = n_ptr->bclink.gap_after;
- } else if (less_eq(gap_after, my_to)) {
- if (less_eq(my_to, gap_to))
- n_ptr->bclink.gap_to = gap_after;
- } else {
- /*
- * Expand gap if missing bufs not in deferred queue:
- */
- struct sk_buff *buf = n_ptr->bclink.deferred_head;
- u32 prev = n_ptr->bclink.gap_to;
- for (; buf; buf = buf->next) {
- u32 seqno = buf_seqno(buf);
+ if (n_ptr->bclink.recv_permitted &&
+ (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) &&
+ (n_ptr->bclink.last_in == msg_bcgap_after(msg)))
+ n_ptr->bclink.oos_state = 2;
- if (mod(seqno - prev) != 1) {
- buf = NULL;
- break;
- }
- if (seqno == gap_after)
- break;
- prev = seqno;
- }
- if (buf == NULL)
- n_ptr->bclink.gap_to = gap_after;
- }
- /*
- * Some nodes may send a complementary NACK now:
- */
- if (bclink_ack_allowed(sender_tag + 1)) {
- if (n_ptr->bclink.gap_to != n_ptr->bclink.gap_after) {
- bclink_send_nack(n_ptr);
- bclink_set_gap(n_ptr);
- }
- }
tipc_node_unlock(n_ptr);
}
-/**
- * tipc_bclink_send_msg - broadcast a packet to all nodes in cluster
+/*
+ * tipc_bclink_xmit - broadcast a packet to all nodes in cluster
*/
-
-int tipc_bclink_send_msg(struct sk_buff *buf)
+int tipc_bclink_xmit(struct sk_buff *buf)
{
int res;
- spin_lock_bh(&bc_lock);
+ tipc_bclink_lock();
- res = tipc_link_send_buf(bcl, buf);
- if (unlikely(res == -ELINKCONG))
- buf_discard(buf);
- else
- bclink_set_last_sent();
-
- if (bcl->out_queue_size > bcl->stats.max_queue_sz)
- bcl->stats.max_queue_sz = bcl->out_queue_size;
- bcl->stats.queue_sz_counts++;
- bcl->stats.accu_queue_sz += bcl->out_queue_size;
+ if (!bclink->bcast_nodes.count) {
+ res = msg_data_sz(buf_msg(buf));
+ kfree_skb(buf);
+ goto exit;
+ }
- spin_unlock_bh(&bc_lock);
+ res = __tipc_link_xmit(bcl, buf);
+ if (likely(res >= 0)) {
+ bclink_set_last_sent();
+ bcl->stats.queue_sz_counts++;
+ bcl->stats.accu_queue_sz += bcl->out_queue_size;
+ }
+exit:
+ tipc_bclink_unlock();
return res;
}
/**
- * tipc_bclink_recv_pkt - receive a broadcast packet, and deliver upwards
+ * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet
*
- * tipc_net_lock is read_locked, no other locks set
+ * Called with both sending node's lock and bclink_lock taken.
*/
+static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
+{
+ bclink_update_last_sent(node, seqno);
+ node->bclink.last_in = seqno;
+ node->bclink.oos_state = 0;
+ bcl->stats.recv_info++;
-void tipc_bclink_recv_pkt(struct sk_buff *buf)
+ /*
+ * Unicast an ACK periodically, ensuring that
+ * all nodes in the cluster don't ACK at the same time
+ */
+
+ if (((seqno - tipc_own_addr) % TIPC_MIN_LINK_WIN) == 0) {
+ tipc_link_proto_xmit(node->active_links[node->addr & 1],
+ STATE_MSG, 0, 0, 0, 0, 0);
+ bcl->stats.sent_acks++;
+ }
+}
+
+/**
+ * tipc_bclink_rcv - receive a broadcast packet, and deliver upwards
+ *
+ * RCU is locked, no other locks set
+ */
+void tipc_bclink_rcv(struct sk_buff *buf)
{
-#if (TIPC_BCAST_LOSS_RATE)
- static int rx_count = 0;
-#endif
struct tipc_msg *msg = buf_msg(buf);
- struct tipc_node* node = tipc_node_find(msg_prevnode(msg));
+ struct tipc_node *node;
u32 next_in;
u32 seqno;
- struct sk_buff *deferred;
+ int deferred;
- msg_dbg(msg, "<BC<<<");
+ /* Screen out unwanted broadcast messages */
- if (unlikely(!node || !tipc_node_is_up(node) || !node->bclink.supported ||
- (msg_mc_netid(msg) != tipc_net_id))) {
- buf_discard(buf);
- return;
- }
+ if (msg_mc_netid(msg) != tipc_net_id)
+ goto exit;
+
+ node = tipc_node_find(msg_prevnode(msg));
+ if (unlikely(!node))
+ goto exit;
+
+ tipc_node_lock(node);
+ if (unlikely(!node->bclink.recv_permitted))
+ goto unlock;
+
+ /* Handle broadcast protocol message */
if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
- msg_dbg(msg, "<BCNACK<<<");
+ if (msg_type(msg) != STATE_MSG)
+ goto unlock;
if (msg_destnode(msg) == tipc_own_addr) {
- tipc_node_lock(node);
tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
tipc_node_unlock(node);
- spin_lock_bh(&bc_lock);
+ tipc_bclink_lock();
bcl->stats.recv_nacks++;
- bcl->owner->next = node; /* remember requestor */
+ bclink->retransmit_to = node;
bclink_retransmit_pkt(msg_bcgap_after(msg),
msg_bcgap_to(msg));
- bcl->owner->next = NULL;
- spin_unlock_bh(&bc_lock);
+ tipc_bclink_unlock();
} else {
- tipc_bclink_peek_nack(msg_destnode(msg),
- msg_bcast_tag(msg),
- msg_bcgap_after(msg),
- msg_bcgap_to(msg));
+ tipc_node_unlock(node);
+ bclink_peek_nack(msg);
}
- buf_discard(buf);
- return;
+ goto exit;
}
-#if (TIPC_BCAST_LOSS_RATE)
- if (++rx_count == TIPC_BCAST_LOSS_RATE) {
- rx_count = 0;
- buf_discard(buf);
- return;
- }
-#endif
+ /* Handle in-sequence broadcast message */
- tipc_node_lock(node);
-receive:
- deferred = node->bclink.deferred_head;
- next_in = mod(node->bclink.last_in + 1);
seqno = msg_seqno(msg);
+ next_in = mod(node->bclink.last_in + 1);
if (likely(seqno == next_in)) {
- bcl->stats.recv_info++;
- node->bclink.last_in++;
- bclink_set_gap(node);
- if (unlikely(bclink_ack_allowed(seqno))) {
- bclink_send_ack(node);
- bcl->stats.sent_acks++;
- }
+receive:
+ /* Deliver message to destination */
+
if (likely(msg_isdata(msg))) {
+ tipc_bclink_lock();
+ bclink_accept_pkt(node, seqno);
+ tipc_bclink_unlock();
tipc_node_unlock(node);
- tipc_port_recv_mcast(buf, NULL);
+ if (likely(msg_mcast(msg)))
+ tipc_port_mcast_rcv(buf, NULL);
+ else
+ kfree_skb(buf);
} else if (msg_user(msg) == MSG_BUNDLER) {
+ tipc_bclink_lock();
+ bclink_accept_pkt(node, seqno);
bcl->stats.recv_bundles++;
bcl->stats.recv_bundled += msg_msgcnt(msg);
+ tipc_bclink_unlock();
tipc_node_unlock(node);
- tipc_link_recv_bundle(buf);
+ tipc_link_bundle_rcv(buf);
} else if (msg_user(msg) == MSG_FRAGMENTER) {
+ tipc_buf_append(&node->bclink.reasm_buf, &buf);
+ if (unlikely(!buf && !node->bclink.reasm_buf))
+ goto unlock;
+ tipc_bclink_lock();
+ bclink_accept_pkt(node, seqno);
bcl->stats.recv_fragments++;
- if (tipc_link_recv_fragment(&node->bclink.defragm,
- &buf, &msg))
+ if (buf) {
bcl->stats.recv_fragmented++;
+ msg = buf_msg(buf);
+ tipc_bclink_unlock();
+ goto receive;
+ }
+ tipc_bclink_unlock();
tipc_node_unlock(node);
- tipc_net_route_msg(buf);
+ } else if (msg_user(msg) == NAME_DISTRIBUTOR) {
+ tipc_bclink_lock();
+ bclink_accept_pkt(node, seqno);
+ tipc_bclink_unlock();
+ tipc_node_unlock(node);
+ tipc_named_rcv(buf);
} else {
+ tipc_bclink_lock();
+ bclink_accept_pkt(node, seqno);
+ tipc_bclink_unlock();
tipc_node_unlock(node);
- tipc_net_route_msg(buf);
- }
- if (deferred && (buf_seqno(deferred) == mod(next_in + 1))) {
- tipc_node_lock(node);
- buf = deferred;
- msg = buf_msg(buf);
- node->bclink.deferred_head = deferred->next;
- goto receive;
- }
- return;
- } else if (less(next_in, seqno)) {
- u32 gap_after = node->bclink.gap_after;
- u32 gap_to = node->bclink.gap_to;
-
- if (tipc_link_defer_pkt(&node->bclink.deferred_head,
- &node->bclink.deferred_tail,
- buf)) {
- node->bclink.nack_sync++;
- bcl->stats.deferred_recv++;
- if (seqno == mod(gap_after + 1))
- node->bclink.gap_after = seqno;
- else if (less(gap_after, seqno) && less(seqno, gap_to))
- node->bclink.gap_to = seqno;
+ kfree_skb(buf);
}
- if (bclink_ack_allowed(node->bclink.nack_sync)) {
- if (gap_to != gap_after)
- bclink_send_nack(node);
- bclink_set_gap(node);
+ buf = NULL;
+
+ /* Determine new synchronization state */
+
+ tipc_node_lock(node);
+ if (unlikely(!tipc_node_is_up(node)))
+ goto unlock;
+
+ if (node->bclink.last_in == node->bclink.last_sent)
+ goto unlock;
+
+ if (!node->bclink.deferred_head) {
+ node->bclink.oos_state = 1;
+ goto unlock;
}
- } else {
- bcl->stats.duplicates++;
- buf_discard(buf);
+
+ msg = buf_msg(node->bclink.deferred_head);
+ seqno = msg_seqno(msg);
+ next_in = mod(next_in + 1);
+ if (seqno != next_in)
+ goto unlock;
+
+ /* Take in-sequence message from deferred queue & deliver it */
+
+ buf = node->bclink.deferred_head;
+ node->bclink.deferred_head = buf->next;
+ buf->next = NULL;
+ node->bclink.deferred_size--;
+ goto receive;
}
+
+ /* Handle out-of-sequence broadcast message */
+
+ if (less(next_in, seqno)) {
+ deferred = tipc_link_defer_pkt(&node->bclink.deferred_head,
+ &node->bclink.deferred_tail,
+ buf);
+ node->bclink.deferred_size += deferred;
+ bclink_update_last_sent(node, seqno);
+ buf = NULL;
+ } else
+ deferred = 0;
+
+ tipc_bclink_lock();
+
+ if (deferred)
+ bcl->stats.deferred_recv++;
+ else
+ bcl->stats.duplicates++;
+
+ tipc_bclink_unlock();
+
+unlock:
tipc_node_unlock(node);
+exit:
+ kfree_skb(buf);
}
u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
{
- return (n_ptr->bclink.supported &&
+ return (n_ptr->bclink.recv_permitted &&
(tipc_bclink_get_last_sent() != n_ptr->bclink.acked));
}
@@ -549,98 +601,106 @@ u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
/**
* tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
*
- * Send through as many bearers as necessary to reach all nodes
- * that support TIPC multicasting.
+ * Send packet over as many bearers as necessary to reach all nodes
+ * that have joined the broadcast link.
*
- * Returns 0 if packet sent successfully, non-zero if not
+ * Returns 0 (packet sent successfully) under all circumstances,
+ * since the broadcast link's pseudo-bearer never blocks
*/
-
-static int tipc_bcbearer_send(struct sk_buff *buf,
- struct tipc_bearer *unused1,
+static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
struct tipc_media_addr *unused2)
{
int bp_index;
- /* Prepare buffer for broadcasting (if first time trying to send it) */
-
+ /* Prepare broadcast link message for reliable transmission,
+ * if first time trying to send it;
+ * preparation is skipped for broadcast link protocol messages
+ * since they are sent in an unreliable manner and don't need it
+ */
if (likely(!msg_non_seq(buf_msg(buf)))) {
struct tipc_msg *msg;
- assert(tipc_cltr_bcast_nodes.count != 0);
- bcbuf_set_acks(buf, tipc_cltr_bcast_nodes.count);
+ bcbuf_set_acks(buf, bclink->bcast_nodes.count);
msg = buf_msg(buf);
msg_set_non_seq(msg, 1);
msg_set_mc_netid(msg, tipc_net_id);
bcl->stats.sent_info++;
+
+ if (WARN_ON(!bclink->bcast_nodes.count)) {
+ dump_stack();
+ return 0;
+ }
}
/* Send buffer over bearers until all targets reached */
-
- bcbearer->remains = tipc_cltr_bcast_nodes;
+ bcbearer->remains = bclink->bcast_nodes;
for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
- struct bearer *p = bcbearer->bpairs[bp_index].primary;
- struct bearer *s = bcbearer->bpairs[bp_index].secondary;
+ struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
+ struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;
+ struct tipc_bearer *b = p;
+ struct sk_buff *tbuf;
if (!p)
- break; /* no more bearers to try */
+ break; /* No more bearers to try */
- tipc_nmap_diff(&bcbearer->remains, &p->nodes, &bcbearer->remains_new);
+ tipc_nmap_diff(&bcbearer->remains, &b->nodes,
+ &bcbearer->remains_new);
if (bcbearer->remains_new.count == bcbearer->remains.count)
- continue; /* bearer pair doesn't add anything */
-
- if (p->publ.blocked ||
- p->media->send_msg(buf, &p->publ, &p->media->bcast_addr)) {
- /* unable to send on primary bearer */
- if (!s || s->publ.blocked ||
- s->media->send_msg(buf, &s->publ,
- &s->media->bcast_addr)) {
- /* unable to send on either bearer */
- continue;
- }
+ continue; /* Nothing added by bearer pair */
+
+ if (bp_index == 0) {
+ /* Use original buffer for first bearer */
+ tipc_bearer_send(b->identity, buf, &b->bcast_addr);
+ } else {
+ /* Avoid concurrent buffer access */
+ tbuf = pskb_copy_for_clone(buf, GFP_ATOMIC);
+ if (!tbuf)
+ break;
+ tipc_bearer_send(b->identity, tbuf, &b->bcast_addr);
+ kfree_skb(tbuf); /* Bearer keeps a clone */
}
+ /* Swap bearers for next packet */
if (s) {
bcbearer->bpairs[bp_index].primary = s;
bcbearer->bpairs[bp_index].secondary = p;
}
if (bcbearer->remains_new.count == 0)
- return 0;
+ break; /* All targets reached */
bcbearer->remains = bcbearer->remains_new;
}
- /*
- * Unable to reach all targets (indicate success, since currently
- * there isn't code in place to properly block & unblock the
- * pseudo-bearer used by the broadcast link)
- */
-
- return TIPC_OK;
+ return 0;
}
/**
* tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
*/
-
-void tipc_bcbearer_sort(void)
+void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action)
{
- struct bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
- struct bcbearer_pair *bp_curr;
+ struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
+ struct tipc_bcbearer_pair *bp_curr;
+ struct tipc_bearer *b;
int b_index;
int pri;
- spin_lock_bh(&bc_lock);
+ tipc_bclink_lock();
- /* Group bearers by priority (can assume max of two per priority) */
+ if (action)
+ tipc_nmap_add(nm_ptr, node);
+ else
+ tipc_nmap_remove(nm_ptr, node);
+ /* Group bearers by priority (can assume max of two per priority) */
memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));
+ rcu_read_lock();
for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
- struct bearer *b = &tipc_bearers[b_index];
-
- if (!b->active || !b->nodes.count)
+ b = rcu_dereference_rtnl(bearer_list[b_index]);
+ if (!b || !b->nodes.count)
continue;
if (!bp_temp[b->priority].primary)
@@ -648,9 +708,9 @@ void tipc_bcbearer_sort(void)
else
bp_temp[b->priority].secondary = b;
}
+ rcu_read_unlock();
/* Create array of bearer pairs for broadcasting */
-
bp_curr = bcbearer->bpairs;
memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));
@@ -674,75 +734,49 @@ void tipc_bcbearer_sort(void)
bp_curr++;
}
- spin_unlock_bh(&bc_lock);
-}
-
-/**
- * tipc_bcbearer_push - resolve bearer congestion
- *
- * Forces bclink to push out any unsent packets, until all packets are gone
- * or congestion reoccurs.
- * No locks set when function called
- */
-
-void tipc_bcbearer_push(void)
-{
- struct bearer *b_ptr;
-
- spin_lock_bh(&bc_lock);
- b_ptr = &bcbearer->bearer;
- if (b_ptr->publ.blocked) {
- b_ptr->publ.blocked = 0;
- tipc_bearer_lock_push(b_ptr);
- }
- spin_unlock_bh(&bc_lock);
+ tipc_bclink_unlock();
}
int tipc_bclink_stats(char *buf, const u32 buf_size)
{
- struct print_buf pb;
+ int ret;
+ struct tipc_stats *s;
if (!bcl)
return 0;
- tipc_printbuf_init(&pb, buf, buf_size);
-
- spin_lock_bh(&bc_lock);
-
- tipc_printf(&pb, "Link <%s>\n"
- " Window:%u packets\n",
- bcl->name, bcl->queue_limit[0]);
- tipc_printf(&pb, " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
- bcl->stats.recv_info,
- bcl->stats.recv_fragments,
- bcl->stats.recv_fragmented,
- bcl->stats.recv_bundles,
- bcl->stats.recv_bundled);
- tipc_printf(&pb, " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
- bcl->stats.sent_info,
- bcl->stats.sent_fragments,
- bcl->stats.sent_fragmented,
- bcl->stats.sent_bundles,
- bcl->stats.sent_bundled);
- tipc_printf(&pb, " RX naks:%u defs:%u dups:%u\n",
- bcl->stats.recv_nacks,
- bcl->stats.deferred_recv,
- bcl->stats.duplicates);
- tipc_printf(&pb, " TX naks:%u acks:%u dups:%u\n",
- bcl->stats.sent_nacks,
- bcl->stats.sent_acks,
- bcl->stats.retransmitted);
- tipc_printf(&pb, " Congestion bearer:%u link:%u Send queue max:%u avg:%u\n",
- bcl->stats.bearer_congs,
- bcl->stats.link_congs,
- bcl->stats.max_queue_sz,
- bcl->stats.queue_sz_counts
- ? (bcl->stats.accu_queue_sz / bcl->stats.queue_sz_counts)
- : 0);
-
- spin_unlock_bh(&bc_lock);
- return tipc_printbuf_validate(&pb);
+ tipc_bclink_lock();
+
+ s = &bcl->stats;
+
+ ret = tipc_snprintf(buf, buf_size, "Link <%s>\n"
+ " Window:%u packets\n",
+ bcl->name, bcl->queue_limit[0]);
+ ret += tipc_snprintf(buf + ret, buf_size - ret,
+ " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
+ s->recv_info, s->recv_fragments,
+ s->recv_fragmented, s->recv_bundles,
+ s->recv_bundled);
+ ret += tipc_snprintf(buf + ret, buf_size - ret,
+ " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
+ s->sent_info, s->sent_fragments,
+ s->sent_fragmented, s->sent_bundles,
+ s->sent_bundled);
+ ret += tipc_snprintf(buf + ret, buf_size - ret,
+ " RX naks:%u defs:%u dups:%u\n",
+ s->recv_nacks, s->deferred_recv, s->duplicates);
+ ret += tipc_snprintf(buf + ret, buf_size - ret,
+ " TX naks:%u acks:%u dups:%u\n",
+ s->sent_nacks, s->sent_acks, s->retransmitted);
+ ret += tipc_snprintf(buf + ret, buf_size - ret,
+ " Congestion link:%u Send queue max:%u avg:%u\n",
+ s->link_congs, s->max_queue_sz,
+ s->queue_sz_counts ?
+ (s->accu_queue_sz / s->queue_sz_counts) : 0);
+
+ tipc_bclink_unlock();
+ return ret;
}
int tipc_bclink_reset_stats(void)
@@ -750,9 +784,9 @@ int tipc_bclink_reset_stats(void)
if (!bcl)
return -ENOPROTOOPT;
- spin_lock_bh(&bc_lock);
+ tipc_bclink_lock();
memset(&bcl->stats, 0, sizeof(bcl->stats));
- spin_unlock_bh(&bc_lock);
+ tipc_bclink_unlock();
return 0;
}
@@ -763,75 +797,59 @@ int tipc_bclink_set_queue_limits(u32 limit)
if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
return -EINVAL;
- spin_lock_bh(&bc_lock);
+ tipc_bclink_lock();
tipc_link_set_queue_limits(bcl, limit);
- spin_unlock_bh(&bc_lock);
+ tipc_bclink_unlock();
return 0;
}
int tipc_bclink_init(void)
{
bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
+ if (!bcbearer)
+ return -ENOMEM;
+
bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
- if (!bcbearer || !bclink) {
- nomem:
- warn("Multicast link creation failed, no memory\n");
+ if (!bclink) {
kfree(bcbearer);
- bcbearer = NULL;
- kfree(bclink);
- bclink = NULL;
return -ENOMEM;
}
- INIT_LIST_HEAD(&bcbearer->bearer.cong_links);
+ bcl = &bclink->link;
bcbearer->bearer.media = &bcbearer->media;
bcbearer->media.send_msg = tipc_bcbearer_send;
- sprintf(bcbearer->media.name, "tipc-multicast");
+ sprintf(bcbearer->media.name, "tipc-broadcast");
- bcl = &bclink->link;
+ spin_lock_init(&bclink->lock);
INIT_LIST_HEAD(&bcl->waiting_ports);
bcl->next_out_no = 1;
spin_lock_init(&bclink->node.lock);
bcl->owner = &bclink->node;
bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
- bcl->b_ptr = &bcbearer->bearer;
+ bcl->bearer_id = MAX_BEARERS;
+ rcu_assign_pointer(bearer_list[MAX_BEARERS], &bcbearer->bearer);
bcl->state = WORKING_WORKING;
strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
-
- if (BCLINK_LOG_BUF_SIZE) {
- char *pb = kmalloc(BCLINK_LOG_BUF_SIZE, GFP_ATOMIC);
-
- if (!pb)
- goto nomem;
- tipc_printbuf_init(&bcl->print_buf, pb, BCLINK_LOG_BUF_SIZE);
- }
-
return 0;
}
void tipc_bclink_stop(void)
{
- spin_lock_bh(&bc_lock);
- if (bcbearer) {
- tipc_link_stop(bcl);
- if (BCLINK_LOG_BUF_SIZE)
- kfree(bcl->print_buf.buf);
- bcl = NULL;
- kfree(bclink);
- bclink = NULL;
- kfree(bcbearer);
- bcbearer = NULL;
- }
- spin_unlock_bh(&bc_lock);
+ tipc_bclink_lock();
+ tipc_link_purge_queues(bcl);
+ tipc_bclink_unlock();
+
+ RCU_INIT_POINTER(bearer_list[BCBEARER], NULL);
+ synchronize_net();
+ kfree(bcbearer);
+ kfree(bclink);
}
-
/**
* tipc_nmap_add - add a node to a node map
*/
-
-void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
+static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
{
int n = tipc_node(node);
int w = n / WSIZE;
@@ -846,8 +864,7 @@ void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
/**
* tipc_nmap_remove - remove a node from a node map
*/
-
-void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
+static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
{
int n = tipc_node(node);
int w = n / WSIZE;
@@ -865,7 +882,6 @@ void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
* @nm_b: input node map B
* @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
*/
-
static void tipc_nmap_diff(struct tipc_node_map *nm_a,
struct tipc_node_map *nm_b,
struct tipc_node_map *nm_diff)
@@ -891,10 +907,9 @@ static void tipc_nmap_diff(struct tipc_node_map *nm_a,
/**
* tipc_port_list_add - add a port to a port list, ensuring no duplicates
*/
-
-void tipc_port_list_add(struct port_list *pl_ptr, u32 port)
+void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port)
{
- struct port_list *item = pl_ptr;
+ struct tipc_port_list *item = pl_ptr;
int i;
int item_sz = PLSIZE;
int cnt = pl_ptr->count;
@@ -913,7 +928,7 @@ void tipc_port_list_add(struct port_list *pl_ptr, u32 port)
if (!item->next) {
item->next = kmalloc(sizeof(*item), GFP_ATOMIC);
if (!item->next) {
- warn("Incomplete multicast delivery, no memory\n");
+ pr_warn("Incomplete multicast delivery, no memory\n");
return;
}
item->next->next = NULL;
@@ -925,15 +940,13 @@ void tipc_port_list_add(struct port_list *pl_ptr, u32 port)
* tipc_port_list_free - free dynamically created entries in port_list chain
*
*/
-
-void tipc_port_list_free(struct port_list *pl_ptr)
+void tipc_port_list_free(struct tipc_port_list *pl_ptr)
{
- struct port_list *item;
- struct port_list *next;
+ struct tipc_port_list *item;
+ struct tipc_port_list *next;
for (item = pl_ptr->next; item; item = next) {
next = item->next;
kfree(item);
}
}
-
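
One pattern in the rewritten bcast.c deserves a note: tipc_bclink_unlock() records any node that needs a full link reset while bclink->lock is held, drops the lock, and only then calls tipc_link_reset_all(), so the heavyweight reset never runs under the broadcast-link spinlock. A minimal user-space sketch of that deferred-work pattern (hypothetical names, a pthread mutex standing in for the kernel spinlock):

#include <pthread.h>
#include <stdio.h>

#define RESET_PENDING 0x1

static pthread_mutex_t bclink_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int bclink_flags;       /* protected by bclink_lock */
static int pending_node = -1;           /* node recorded under the lock */

static void reset_node(int node)
{
        /* Heavy work that must not run with bclink_lock held */
        printf("resetting links to node %d\n", node);
}

static void bclink_unlock(void)
{
        int node = -1;

        /* Snapshot and clear the deferred work while still locked */
        if (bclink_flags & RESET_PENDING) {
                bclink_flags &= ~RESET_PENDING;
                node = pending_node;
        }
        pthread_mutex_unlock(&bclink_lock);

        /* Perform the work only after the lock is dropped */
        if (node >= 0)
                reset_node(node);
}

int main(void)
{
        pthread_mutex_lock(&bclink_lock);
        bclink_flags |= RESET_PENDING;
        pending_node = 7;
        bclink_unlock();
        return 0;
}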
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 011c03f0a4a..00330c45df3 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -2,7 +2,7 @@
* net/tipc/bcast.h: Include file for TIPC broadcast code
*
* Copyright (c) 2003-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -39,31 +39,29 @@
#define MAX_NODES 4096
#define WSIZE 32
+#define TIPC_BCLINK_RESET 1
/**
* struct tipc_node_map - set of node identifiers
* @count: # of nodes in set
* @map: bitmap of node identifiers that are in the set
*/
-
struct tipc_node_map {
u32 count;
u32 map[MAX_NODES / WSIZE];
};
-
#define PLSIZE 32
/**
- * struct port_list - set of node local destination ports
+ * struct tipc_port_list - set of node local destination ports
* @count: # of ports in set (only valid for first entry in list)
* @next: pointer to next entry in list
* @ports: array of port references
*/
-
-struct port_list {
+struct tipc_port_list {
int count;
- struct port_list *next;
+ struct tipc_port_list *next;
u32 ports[PLSIZE];
};
@@ -72,33 +70,33 @@ struct tipc_node;
extern const char tipc_bclink_name[];
-void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
-void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);
-
/**
* tipc_nmap_equal - test for equality of node maps
*/
-
-static inline int tipc_nmap_equal(struct tipc_node_map *nm_a, struct tipc_node_map *nm_b)
+static inline int tipc_nmap_equal(struct tipc_node_map *nm_a,
+ struct tipc_node_map *nm_b)
{
return !memcmp(nm_a, nm_b, sizeof(*nm_a));
}
-void tipc_port_list_add(struct port_list *pl_ptr, u32 port);
-void tipc_port_list_free(struct port_list *pl_ptr);
+void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port);
+void tipc_port_list_free(struct tipc_port_list *pl_ptr);
-int tipc_bclink_init(void);
+int tipc_bclink_init(void);
void tipc_bclink_stop(void);
+void tipc_bclink_set_flags(unsigned int flags);
+void tipc_bclink_add_node(u32 addr);
+void tipc_bclink_remove_node(u32 addr);
+struct tipc_node *tipc_bclink_retransmit_to(void);
void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked);
-int tipc_bclink_send_msg(struct sk_buff *buf);
-void tipc_bclink_recv_pkt(struct sk_buff *buf);
+int tipc_bclink_xmit(struct sk_buff *buf);
+void tipc_bclink_rcv(struct sk_buff *buf);
u32 tipc_bclink_get_last_sent(void);
u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr);
-void tipc_bclink_check_gap(struct tipc_node *n_ptr, u32 seqno);
+void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent);
int tipc_bclink_stats(char *stats_buf, const u32 buf_size);
int tipc_bclink_reset_stats(void);
int tipc_bclink_set_queue_limits(u32 limit);
-void tipc_bcbearer_sort(void);
-void tipc_bcbearer_push(void);
+void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action);
#endif
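
The struct tipc_node_map declared here is a plain bitmap: up to MAX_NODES (4096) node numbers stored in u32 words of WSIZE (32) bits, plus a population count; this is the bookkeeping that the now-static tipc_nmap_add()/tipc_nmap_remove() in bcast.c perform. A stand-alone sketch of the same idea (illustrative only; nmap_contains is a made-up helper, not a kernel function):

#include <stdio.h>

#define MAX_NODES 4096
#define WSIZE     32

struct node_map {
        unsigned int count;
        unsigned int map[MAX_NODES / WSIZE];
};

static void nmap_add(struct node_map *nm, unsigned int node)
{
        unsigned int w = node / WSIZE;
        unsigned int mask = 1u << (node % WSIZE);

        if (!(nm->map[w] & mask)) {     /* only count new members */
                nm->map[w] |= mask;
                nm->count++;
        }
}

static int nmap_contains(const struct node_map *nm, unsigned int node)
{
        return (nm->map[node / WSIZE] >> (node % WSIZE)) & 1u;
}

int main(void)
{
        struct node_map nm = { 0 };

        nmap_add(&nm, 10);
        nmap_add(&nm, 10);              /* duplicate: count stays at 1 */
        printf("count=%u has(10)=%d has(11)=%d\n",
               nm.count, nmap_contains(&nm, 10), nmap_contains(&nm, 11));
        return 0;
}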
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 885da94be4a..264474394f9 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -1,8 +1,8 @@
/*
* net/tipc/bearer.c: TIPC bearer code
*
- * Copyright (c) 1996-2006, Ericsson AB
- * Copyright (c) 2004-2006, Wind River Systems
+ * Copyright (c) 1996-2006, 2013, Ericsson AB
+ * Copyright (c) 2004-2006, 2010-2013, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -37,204 +37,102 @@
#include "core.h"
#include "config.h"
#include "bearer.h"
-#include "port.h"
#include "discover.h"
-#define MAX_ADDR_STR 32
+#define MAX_ADDR_STR 60
-static struct media media_list[MAX_MEDIA];
-static u32 media_count = 0;
+static struct tipc_media * const media_info_array[] = {
+ &eth_media_info,
+#ifdef CONFIG_TIPC_MEDIA_IB
+ &ib_media_info,
+#endif
+ NULL
+};
-struct bearer tipc_bearers[MAX_BEARERS];
+struct tipc_bearer __rcu *bearer_list[MAX_BEARERS + 1];
-/**
- * media_name_valid - validate media name
- *
- * Returns 1 if media name is valid, otherwise 0.
- */
-
-static int media_name_valid(const char *name)
-{
- u32 len;
-
- len = strlen(name);
- if ((len + 1) > TIPC_MAX_MEDIA_NAME)
- return 0;
- return strspn(name, tipc_alphabet) == len;
-}
+static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down);
/**
- * media_find - locates specified media object by name
+ * tipc_media_find - locates specified media object by name
*/
-
-static struct media *media_find(const char *name)
+struct tipc_media *tipc_media_find(const char *name)
{
- struct media *m_ptr;
u32 i;
- for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
- if (!strcmp(m_ptr->name, name))
- return m_ptr;
+ for (i = 0; media_info_array[i] != NULL; i++) {
+ if (!strcmp(media_info_array[i]->name, name))
+ break;
}
- return NULL;
+ return media_info_array[i];
}
/**
- * tipc_register_media - register a media type
- *
- * Bearers for this media type must be activated separately at a later stage.
+ * media_find_id - locates specified media object by type identifier
*/
-
-int tipc_register_media(u32 media_type,
- char *name,
- int (*enable)(struct tipc_bearer *),
- void (*disable)(struct tipc_bearer *),
- int (*send_msg)(struct sk_buff *,
- struct tipc_bearer *,
- struct tipc_media_addr *),
- char *(*addr2str)(struct tipc_media_addr *a,
- char *str_buf, int str_size),
- struct tipc_media_addr *bcast_addr,
- const u32 bearer_priority,
- const u32 link_tolerance, /* [ms] */
- const u32 send_window_limit)
+static struct tipc_media *media_find_id(u8 type)
{
- struct media *m_ptr;
- u32 media_id;
u32 i;
- int res = -EINVAL;
-
- write_lock_bh(&tipc_net_lock);
-
- if (tipc_mode != TIPC_NET_MODE) {
- warn("Media <%s> rejected, not in networked mode yet\n", name);
- goto exit;
- }
- if (!media_name_valid(name)) {
- warn("Media <%s> rejected, illegal name\n", name);
- goto exit;
- }
- if (!bcast_addr) {
- warn("Media <%s> rejected, no broadcast address\n", name);
- goto exit;
- }
- if ((bearer_priority < TIPC_MIN_LINK_PRI) ||
- (bearer_priority > TIPC_MAX_LINK_PRI)) {
- warn("Media <%s> rejected, illegal priority (%u)\n", name,
- bearer_priority);
- goto exit;
- }
- if ((link_tolerance < TIPC_MIN_LINK_TOL) ||
- (link_tolerance > TIPC_MAX_LINK_TOL)) {
- warn("Media <%s> rejected, illegal tolerance (%u)\n", name,
- link_tolerance);
- goto exit;
- }
- media_id = media_count++;
- if (media_id >= MAX_MEDIA) {
- warn("Media <%s> rejected, media limit reached (%u)\n", name,
- MAX_MEDIA);
- media_count--;
- goto exit;
- }
- for (i = 0; i < media_id; i++) {
- if (media_list[i].type_id == media_type) {
- warn("Media <%s> rejected, duplicate type (%u)\n", name,
- media_type);
- media_count--;
- goto exit;
- }
- if (!strcmp(name, media_list[i].name)) {
- warn("Media <%s> rejected, duplicate name\n", name);
- media_count--;
- goto exit;
- }
+ for (i = 0; media_info_array[i] != NULL; i++) {
+ if (media_info_array[i]->type_id == type)
+ break;
}
-
- m_ptr = &media_list[media_id];
- m_ptr->type_id = media_type;
- m_ptr->send_msg = send_msg;
- m_ptr->enable_bearer = enable;
- m_ptr->disable_bearer = disable;
- m_ptr->addr2str = addr2str;
- memcpy(&m_ptr->bcast_addr, bcast_addr, sizeof(*bcast_addr));
- m_ptr->bcast = 1;
- strcpy(m_ptr->name, name);
- m_ptr->priority = bearer_priority;
- m_ptr->tolerance = link_tolerance;
- m_ptr->window = send_window_limit;
- dbg("Media <%s> registered\n", name);
- res = 0;
-exit:
- write_unlock_bh(&tipc_net_lock);
- return res;
+ return media_info_array[i];
}
/**
* tipc_media_addr_printf - record media address in print buffer
*/
-
-void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a)
+void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a)
{
- struct media *m_ptr;
- u32 media_type;
- u32 i;
-
- media_type = ntohl(a->type);
- for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
- if (m_ptr->type_id == media_type)
- break;
- }
+ char addr_str[MAX_ADDR_STR];
+ struct tipc_media *m_ptr;
+ int ret;
- if ((i < media_count) && (m_ptr->addr2str != NULL)) {
- char addr_str[MAX_ADDR_STR];
+ m_ptr = media_find_id(a->media_id);
- tipc_printf(pb, "%s(%s)", m_ptr->name,
- m_ptr->addr2str(a, addr_str, sizeof(addr_str)));
- } else {
- unchar *addr = (unchar *)&a->dev_addr;
+ if (m_ptr && !m_ptr->addr2str(a, addr_str, sizeof(addr_str)))
+ ret = tipc_snprintf(buf, len, "%s(%s)", m_ptr->name, addr_str);
+ else {
+ u32 i;
- tipc_printf(pb, "UNKNOWN(%u)", media_type);
- for (i = 0; i < (sizeof(*a) - sizeof(a->type)); i++) {
- tipc_printf(pb, "-%02x", addr[i]);
- }
+ ret = tipc_snprintf(buf, len, "UNKNOWN(%u)", a->media_id);
+ for (i = 0; i < sizeof(a->value); i++)
+ ret += tipc_snprintf(buf - ret, len + ret,
+ "-%02x", a->value[i]);
}
}
/**
* tipc_media_get_names - record names of registered media in buffer
*/
-
struct sk_buff *tipc_media_get_names(void)
{
struct sk_buff *buf;
- struct media *m_ptr;
int i;
buf = tipc_cfg_reply_alloc(MAX_MEDIA * TLV_SPACE(TIPC_MAX_MEDIA_NAME));
if (!buf)
return NULL;
- read_lock_bh(&tipc_net_lock);
- for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
- tipc_cfg_append_tlv(buf, TIPC_TLV_MEDIA_NAME, m_ptr->name,
- strlen(m_ptr->name) + 1);
+ for (i = 0; media_info_array[i] != NULL; i++) {
+ tipc_cfg_append_tlv(buf, TIPC_TLV_MEDIA_NAME,
+ media_info_array[i]->name,
+ strlen(media_info_array[i]->name) + 1);
}
- read_unlock_bh(&tipc_net_lock);
return buf;
}
/**
* bearer_name_validate - validate & (optionally) deconstruct bearer name
- * @name - ptr to bearer name string
- * @name_parts - ptr to area for bearer name components (or NULL if not needed)
+ * @name: ptr to bearer name string
+ * @name_parts: ptr to area for bearer name components (or NULL if not needed)
*
* Returns 1 if bearer name is valid, otherwise 0.
*/
-
static int bearer_name_validate(const char *name,
- struct bearer_name *name_parts)
+ struct tipc_bearer_names *name_parts)
{
char name_copy[TIPC_MAX_BEARER_NAME];
char *media_name;
@@ -243,7 +141,6 @@ static int bearer_name_validate(const char *name,
u32 if_len;
/* copy bearer name & ensure length is OK */
-
name_copy[TIPC_MAX_BEARER_NAME - 1] = 0;
/* need above in case non-Posix strncpy() doesn't pad with nulls */
strncpy(name_copy, name, TIPC_MAX_BEARER_NAME);
@@ -251,24 +148,20 @@ static int bearer_name_validate(const char *name,
return 0;
/* ensure all component parts of bearer name are present */
-
media_name = name_copy;
- if ((if_name = strchr(media_name, ':')) == NULL)
+ if_name = strchr(media_name, ':');
+ if (if_name == NULL)
return 0;
*(if_name++) = 0;
media_len = if_name - media_name;
if_len = strlen(if_name) + 1;
/* validate component parts of bearer name */
-
if ((media_len <= 1) || (media_len > TIPC_MAX_MEDIA_NAME) ||
- (if_len <= 1) || (if_len > TIPC_MAX_IF_NAME) ||
- (strspn(media_name, tipc_alphabet) != (media_len - 1)) ||
- (strspn(if_name, tipc_alphabet) != (if_len - 1)))
+ (if_len <= 1) || (if_len > TIPC_MAX_IF_NAME))
return 0;
/* return bearer name components, if necessary */
-
if (name_parts) {
strcpy(name_parts->media_name, media_name);
strcpy(name_parts->if_name, if_name);
@@ -277,36 +170,16 @@ static int bearer_name_validate(const char *name,
}
/**
- * bearer_find - locates bearer object with matching bearer name
+ * tipc_bearer_find - locates bearer object with matching bearer name
*/
-
-static struct bearer *bearer_find(const char *name)
+struct tipc_bearer *tipc_bearer_find(const char *name)
{
- struct bearer *b_ptr;
+ struct tipc_bearer *b_ptr;
u32 i;
- for (i = 0, b_ptr = tipc_bearers; i < MAX_BEARERS; i++, b_ptr++) {
- if (b_ptr->active && (!strcmp(b_ptr->publ.name, name)))
- return b_ptr;
- }
- return NULL;
-}
-
-/**
- * tipc_bearer_find_interface - locates bearer object with matching interface name
- */
-
-struct bearer *tipc_bearer_find_interface(const char *if_name)
-{
- struct bearer *b_ptr;
- char *b_if_name;
- u32 i;
-
- for (i = 0, b_ptr = tipc_bearers; i < MAX_BEARERS; i++, b_ptr++) {
- if (!b_ptr->active)
- continue;
- b_if_name = strchr(b_ptr->publ.name, ':') + 1;
- if (!strcmp(b_if_name, if_name))
+ for (i = 0; i < MAX_BEARERS; i++) {
+ b_ptr = rtnl_dereference(bearer_list[i]);
+ if (b_ptr && (!strcmp(b_ptr->name, name)))
return b_ptr;
}
return NULL;
@@ -315,208 +188,104 @@ struct bearer *tipc_bearer_find_interface(const char *if_name)
/**
* tipc_bearer_get_names - record names of bearers in buffer
*/
-
struct sk_buff *tipc_bearer_get_names(void)
{
struct sk_buff *buf;
- struct media *m_ptr;
- struct bearer *b_ptr;
+ struct tipc_bearer *b;
int i, j;
buf = tipc_cfg_reply_alloc(MAX_BEARERS * TLV_SPACE(TIPC_MAX_BEARER_NAME));
if (!buf)
return NULL;
- read_lock_bh(&tipc_net_lock);
- for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
+ for (i = 0; media_info_array[i] != NULL; i++) {
for (j = 0; j < MAX_BEARERS; j++) {
- b_ptr = &tipc_bearers[j];
- if (b_ptr->active && (b_ptr->media == m_ptr)) {
+ b = rtnl_dereference(bearer_list[j]);
+ if (!b)
+ continue;
+ if (b->media == media_info_array[i]) {
tipc_cfg_append_tlv(buf, TIPC_TLV_BEARER_NAME,
- b_ptr->publ.name,
- strlen(b_ptr->publ.name) + 1);
+ b->name,
+ strlen(b->name) + 1);
}
}
}
- read_unlock_bh(&tipc_net_lock);
return buf;
}
-void tipc_bearer_add_dest(struct bearer *b_ptr, u32 dest)
-{
- tipc_nmap_add(&b_ptr->nodes, dest);
- tipc_disc_update_link_req(b_ptr->link_req);
- tipc_bcbearer_sort();
-}
-
-void tipc_bearer_remove_dest(struct bearer *b_ptr, u32 dest)
-{
- tipc_nmap_remove(&b_ptr->nodes, dest);
- tipc_disc_update_link_req(b_ptr->link_req);
- tipc_bcbearer_sort();
-}
-
-/*
- * bearer_push(): Resolve bearer congestion. Force the waiting
- * links to push out their unsent packets, one packet per link
- * per iteration, until all packets are gone or congestion reoccurs.
- * 'tipc_net_lock' is read_locked when this function is called
- * bearer.lock must be taken before calling
- * Returns binary true(1) or false(0)
- */
-static int bearer_push(struct bearer *b_ptr)
+void tipc_bearer_add_dest(u32 bearer_id, u32 dest)
{
- u32 res = 0;
- struct link *ln, *tln;
-
- if (b_ptr->publ.blocked)
- return 0;
+ struct tipc_bearer *b_ptr;
- while (!list_empty(&b_ptr->cong_links) && (res != PUSH_FAILED)) {
- list_for_each_entry_safe(ln, tln, &b_ptr->cong_links, link_list) {
- res = tipc_link_push_packet(ln);
- if (res == PUSH_FAILED)
- break;
- if (res == PUSH_FINISHED)
- list_move_tail(&ln->link_list, &b_ptr->links);
- }
+ rcu_read_lock();
+ b_ptr = rcu_dereference_rtnl(bearer_list[bearer_id]);
+ if (b_ptr) {
+ tipc_bcbearer_sort(&b_ptr->nodes, dest, true);
+ tipc_disc_add_dest(b_ptr->link_req);
}
- return list_empty(&b_ptr->cong_links);
-}
-
-void tipc_bearer_lock_push(struct bearer *b_ptr)
-{
- int res;
-
- spin_lock_bh(&b_ptr->publ.lock);
- res = bearer_push(b_ptr);
- spin_unlock_bh(&b_ptr->publ.lock);
- if (res)
- tipc_bcbearer_push();
-}
-
-
-/*
- * Interrupt enabling new requests after bearer congestion or blocking:
- * See bearer_send().
- */
-void tipc_continue(struct tipc_bearer *tb_ptr)
-{
- struct bearer *b_ptr = (struct bearer *)tb_ptr;
-
- spin_lock_bh(&b_ptr->publ.lock);
- b_ptr->continue_count++;
- if (!list_empty(&b_ptr->cong_links))
- tipc_k_signal((Handler)tipc_bearer_lock_push, (unsigned long)b_ptr);
- b_ptr->publ.blocked = 0;
- spin_unlock_bh(&b_ptr->publ.lock);
-}
-
-/*
- * Schedule link for sending of messages after the bearer
- * has been deblocked by 'continue()'. This method is called
- * when somebody tries to send a message via this link while
- * the bearer is congested. 'tipc_net_lock' is in read_lock here
- * bearer.lock is busy
- */
-
-static void tipc_bearer_schedule_unlocked(struct bearer *b_ptr, struct link *l_ptr)
-{
- list_move_tail(&l_ptr->link_list, &b_ptr->cong_links);
-}
-
-/*
- * Schedule link for sending of messages after the bearer
- * has been deblocked by 'continue()'. This method is called
- * when somebody tries to send a message via this link while
- * the bearer is congested. 'tipc_net_lock' is in read_lock here,
- * bearer.lock is free
- */
-
-void tipc_bearer_schedule(struct bearer *b_ptr, struct link *l_ptr)
-{
- spin_lock_bh(&b_ptr->publ.lock);
- tipc_bearer_schedule_unlocked(b_ptr, l_ptr);
- spin_unlock_bh(&b_ptr->publ.lock);
+ rcu_read_unlock();
}
-
-/*
- * tipc_bearer_resolve_congestion(): Check if there is bearer congestion,
- * and if there is, try to resolve it before returning.
- * 'tipc_net_lock' is read_locked when this function is called
- */
-int tipc_bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr)
+void tipc_bearer_remove_dest(u32 bearer_id, u32 dest)
{
- int res = 1;
+ struct tipc_bearer *b_ptr;
- if (list_empty(&b_ptr->cong_links))
- return 1;
- spin_lock_bh(&b_ptr->publ.lock);
- if (!bearer_push(b_ptr)) {
- tipc_bearer_schedule_unlocked(b_ptr, l_ptr);
- res = 0;
+ rcu_read_lock();
+ b_ptr = rcu_dereference_rtnl(bearer_list[bearer_id]);
+ if (b_ptr) {
+ tipc_bcbearer_sort(&b_ptr->nodes, dest, false);
+ tipc_disc_remove_dest(b_ptr->link_req);
}
- spin_unlock_bh(&b_ptr->publ.lock);
- return res;
-}
-
-/**
- * tipc_bearer_congested - determines if bearer is currently congested
- */
-
-int tipc_bearer_congested(struct bearer *b_ptr, struct link *l_ptr)
-{
- if (unlikely(b_ptr->publ.blocked))
- return 1;
- if (likely(list_empty(&b_ptr->cong_links)))
- return 0;
- return !tipc_bearer_resolve_congestion(b_ptr, l_ptr);
+ rcu_read_unlock();
}
/**
* tipc_enable_bearer - enable bearer with the given name
*/
-
-int tipc_enable_bearer(const char *name, u32 bcast_scope, u32 priority)
+int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
{
- struct bearer *b_ptr;
- struct media *m_ptr;
- struct bearer_name b_name;
+ struct tipc_bearer *b_ptr;
+ struct tipc_media *m_ptr;
+ struct tipc_bearer_names b_names;
char addr_string[16];
u32 bearer_id;
u32 with_this_prio;
u32 i;
int res = -EINVAL;
- if (tipc_mode != TIPC_NET_MODE) {
- warn("Bearer <%s> rejected, not supported in standalone mode\n",
- name);
+ if (!tipc_own_addr) {
+ pr_warn("Bearer <%s> rejected, not supported in standalone mode\n",
+ name);
return -ENOPROTOOPT;
}
- if (!bearer_name_validate(name, &b_name)) {
- warn("Bearer <%s> rejected, illegal name\n", name);
+ if (!bearer_name_validate(name, &b_names)) {
+ pr_warn("Bearer <%s> rejected, illegal name\n", name);
return -EINVAL;
}
- if (!tipc_addr_domain_valid(bcast_scope) ||
- !tipc_in_scope(bcast_scope, tipc_own_addr)) {
- warn("Bearer <%s> rejected, illegal broadcast scope\n", name);
+ if (tipc_addr_domain_valid(disc_domain) &&
+ (disc_domain != tipc_own_addr)) {
+ if (tipc_in_scope(disc_domain, tipc_own_addr)) {
+ disc_domain = tipc_own_addr & TIPC_CLUSTER_MASK;
+ res = 0; /* accept any node in own cluster */
+ } else if (in_own_cluster_exact(disc_domain))
+ res = 0; /* accept specified node in own cluster */
+ }
+ if (res) {
+ pr_warn("Bearer <%s> rejected, illegal discovery domain\n",
+ name);
return -EINVAL;
}
- if ((priority < TIPC_MIN_LINK_PRI ||
- priority > TIPC_MAX_LINK_PRI) &&
+ if ((priority > TIPC_MAX_LINK_PRI) &&
(priority != TIPC_MEDIA_LINK_PRI)) {
- warn("Bearer <%s> rejected, illegal priority\n", name);
+ pr_warn("Bearer <%s> rejected, illegal priority\n", name);
return -EINVAL;
}
- write_lock_bh(&tipc_net_lock);
-
- m_ptr = media_find(b_name.media_name);
+ m_ptr = tipc_media_find(b_names.media_name);
if (!m_ptr) {
- warn("Bearer <%s> rejected, media <%s> not registered\n", name,
- b_name.media_name);
- goto failed;
+ pr_warn("Bearer <%s> rejected, media <%s> not registered\n",
+ name, b_names.media_name);
+ return -EINVAL;
}
if (priority == TIPC_MEDIA_LINK_PRI)
@@ -526,147 +295,335 @@ restart:
bearer_id = MAX_BEARERS;
with_this_prio = 1;
for (i = MAX_BEARERS; i-- != 0; ) {
- if (!tipc_bearers[i].active) {
+ b_ptr = rtnl_dereference(bearer_list[i]);
+ if (!b_ptr) {
bearer_id = i;
continue;
}
- if (!strcmp(name, tipc_bearers[i].publ.name)) {
- warn("Bearer <%s> rejected, already enabled\n", name);
- goto failed;
+ if (!strcmp(name, b_ptr->name)) {
+ pr_warn("Bearer <%s> rejected, already enabled\n",
+ name);
+ return -EINVAL;
}
- if ((tipc_bearers[i].priority == priority) &&
+ if ((b_ptr->priority == priority) &&
(++with_this_prio > 2)) {
if (priority-- == 0) {
- warn("Bearer <%s> rejected, duplicate priority\n",
- name);
- goto failed;
+ pr_warn("Bearer <%s> rejected, duplicate priority\n",
+ name);
+ return -EINVAL;
}
- warn("Bearer <%s> priority adjustment required %u->%u\n",
- name, priority + 1, priority);
+ pr_warn("Bearer <%s> priority adjustment required %u->%u\n",
+ name, priority + 1, priority);
goto restart;
}
}
if (bearer_id >= MAX_BEARERS) {
- warn("Bearer <%s> rejected, bearer limit reached (%u)\n",
- name, MAX_BEARERS);
- goto failed;
+ pr_warn("Bearer <%s> rejected, bearer limit reached (%u)\n",
+ name, MAX_BEARERS);
+ return -EINVAL;
}
- b_ptr = &tipc_bearers[bearer_id];
- strcpy(b_ptr->publ.name, name);
- res = m_ptr->enable_bearer(&b_ptr->publ);
+ b_ptr = kzalloc(sizeof(*b_ptr), GFP_ATOMIC);
+ if (!b_ptr)
+ return -ENOMEM;
+
+ strcpy(b_ptr->name, name);
+ b_ptr->media = m_ptr;
+ res = m_ptr->enable_media(b_ptr);
if (res) {
- warn("Bearer <%s> rejected, enable failure (%d)\n", name, -res);
- goto failed;
+ pr_warn("Bearer <%s> rejected, enable failure (%d)\n",
+ name, -res);
+ return -EINVAL;
}
b_ptr->identity = bearer_id;
- b_ptr->media = m_ptr;
+ b_ptr->tolerance = m_ptr->tolerance;
+ b_ptr->window = m_ptr->window;
+ b_ptr->domain = disc_domain;
b_ptr->net_plane = bearer_id + 'A';
- b_ptr->active = 1;
- b_ptr->detect_scope = bcast_scope;
b_ptr->priority = priority;
- INIT_LIST_HEAD(&b_ptr->cong_links);
- INIT_LIST_HEAD(&b_ptr->links);
- if (m_ptr->bcast) {
- b_ptr->link_req = tipc_disc_init_link_req(b_ptr, &m_ptr->bcast_addr,
- bcast_scope, 2);
+
+ res = tipc_disc_create(b_ptr, &b_ptr->bcast_addr);
+ if (res) {
+ bearer_disable(b_ptr, false);
+ pr_warn("Bearer <%s> rejected, discovery object creation failed\n",
+ name);
+ return -EINVAL;
}
- spin_lock_init(&b_ptr->publ.lock);
- write_unlock_bh(&tipc_net_lock);
- info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
- name, tipc_addr_string_fill(addr_string, bcast_scope), priority);
- return 0;
-failed:
- write_unlock_bh(&tipc_net_lock);
+
+ rcu_assign_pointer(bearer_list[bearer_id], b_ptr);
+
+ pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
+ name,
+ tipc_addr_string_fill(addr_string, disc_domain), priority);
return res;
}
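
The priority scan above enforces that at most two enabled bearers share a link priority: a third bearer requested at the same priority is bumped one step lower and the scan restarts, and the request is rejected if the priority would drop below zero. A small standalone sketch of that adjustment rule, using a plain array in place of bearer_list[]:

/* Standalone sketch of the "at most two bearers per priority" rule used
 * by tipc_enable_bearer(); the existing[] array stands in for the scan
 * over bearer_list[] and is not a kernel structure.
 */
#include <stdio.h>

/* Return the adjusted priority, or -1 if the bearer must be rejected. */
static int adjust_priority(const int *existing, int n, int prio)
{
	int i, with_this_prio;

restart:
	with_this_prio = 1;
	for (i = 0; i < n; i++) {
		if (existing[i] == prio && ++with_this_prio > 2) {
			if (prio-- == 0)
				return -1;	/* duplicate priority, reject */
			goto restart;		/* retry one step lower */
		}
	}
	return prio;
}

int main(void)
{
	int taken[] = { 10, 10 };	/* two bearers already at priority 10 */

	printf("new bearer gets priority %d\n",
	       adjust_priority(taken, 2, 10));	/* prints 9 */
	return 0;
}
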
/**
- * tipc_block_bearer(): Block the bearer with the given name,
- * and reset all its links
+ * tipc_reset_bearer - Reset all links established over this bearer
*/
-
-int tipc_block_bearer(const char *name)
+static int tipc_reset_bearer(struct tipc_bearer *b_ptr)
{
- struct bearer *b_ptr = NULL;
- struct link *l_ptr;
- struct link *temp_l_ptr;
-
- read_lock_bh(&tipc_net_lock);
- b_ptr = bearer_find(name);
- if (!b_ptr) {
- warn("Attempt to block unknown bearer <%s>\n", name);
- read_unlock_bh(&tipc_net_lock);
- return -EINVAL;
- }
-
- info("Blocking bearer <%s>\n", name);
- spin_lock_bh(&b_ptr->publ.lock);
- b_ptr->publ.blocked = 1;
- list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
- struct tipc_node *n_ptr = l_ptr->owner;
-
- spin_lock_bh(&n_ptr->lock);
- tipc_link_reset(l_ptr);
- spin_unlock_bh(&n_ptr->lock);
- }
- spin_unlock_bh(&b_ptr->publ.lock);
- read_unlock_bh(&tipc_net_lock);
+ pr_info("Resetting bearer <%s>\n", b_ptr->name);
+ tipc_link_reset_list(b_ptr->identity);
+ tipc_disc_reset(b_ptr);
return 0;
}
/**
- * bearer_disable -
+ * bearer_disable
*
- * Note: This routine assumes caller holds tipc_net_lock.
+ * Note: This routine assumes caller holds RTNL lock.
*/
-
-static void bearer_disable(struct bearer *b_ptr)
+static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down)
{
- struct link *l_ptr;
- struct link *temp_l_ptr;
-
- info("Disabling bearer <%s>\n", b_ptr->publ.name);
- tipc_disc_stop_link_req(b_ptr->link_req);
- spin_lock_bh(&b_ptr->publ.lock);
- b_ptr->link_req = NULL;
- b_ptr->publ.blocked = 1;
- b_ptr->media->disable_bearer(&b_ptr->publ);
- list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
- tipc_link_delete(l_ptr);
+ u32 i;
+
+ pr_info("Disabling bearer <%s>\n", b_ptr->name);
+ b_ptr->media->disable_media(b_ptr);
+
+ tipc_link_delete_list(b_ptr->identity, shutting_down);
+ if (b_ptr->link_req)
+ tipc_disc_delete(b_ptr->link_req);
+
+ for (i = 0; i < MAX_BEARERS; i++) {
+ if (b_ptr == rtnl_dereference(bearer_list[i])) {
+ RCU_INIT_POINTER(bearer_list[i], NULL);
+ break;
+ }
}
- spin_unlock_bh(&b_ptr->publ.lock);
- memset(b_ptr, 0, sizeof(struct bearer));
+ kfree_rcu(b_ptr, rcu);
}
int tipc_disable_bearer(const char *name)
{
- struct bearer *b_ptr;
+ struct tipc_bearer *b_ptr;
int res;
- write_lock_bh(&tipc_net_lock);
- b_ptr = bearer_find(name);
+ b_ptr = tipc_bearer_find(name);
if (b_ptr == NULL) {
- warn("Attempt to disable unknown bearer <%s>\n", name);
+ pr_warn("Attempt to disable unknown bearer <%s>\n", name);
res = -EINVAL;
} else {
- bearer_disable(b_ptr);
+ bearer_disable(b_ptr, false);
res = 0;
}
- write_unlock_bh(&tipc_net_lock);
return res;
}
+int tipc_enable_l2_media(struct tipc_bearer *b)
+{
+ struct net_device *dev;
+ char *driver_name = strchr((const char *)b->name, ':') + 1;
+
+ /* Find device with specified name */
+ dev = dev_get_by_name(&init_net, driver_name);
+ if (!dev)
+ return -ENODEV;
+
+ /* Associate TIPC bearer with L2 bearer */
+ rcu_assign_pointer(b->media_ptr, dev);
+ memset(&b->bcast_addr, 0, sizeof(b->bcast_addr));
+ memcpy(b->bcast_addr.value, dev->broadcast, b->media->hwaddr_len);
+ b->bcast_addr.media_id = b->media->type_id;
+ b->bcast_addr.broadcast = 1;
+ b->mtu = dev->mtu;
+ b->media->raw2addr(b, &b->addr, (char *)dev->dev_addr);
+ rcu_assign_pointer(dev->tipc_ptr, b);
+ return 0;
+}
+
+/* tipc_disable_l2_media - detach TIPC bearer from an L2 interface
+ *
+ * Unhook the bearer from its net device and clear the device's TIPC
+ * pointer, then wait for in-flight RCU readers to drain before dropping
+ * the reference to the device.
+ */
+void tipc_disable_l2_media(struct tipc_bearer *b)
+{
+ struct net_device *dev;
+
+ dev = (struct net_device *)rtnl_dereference(b->media_ptr);
+ RCU_INIT_POINTER(b->media_ptr, NULL);
+ RCU_INIT_POINTER(dev->tipc_ptr, NULL);
+ synchronize_net();
+ dev_put(dev);
+}
+
+/**
+ * tipc_l2_send_msg - send a TIPC packet out over an L2 interface
+ * @buf: the packet to be sent
+ * @b_ptr: the bearer through which the packet is to be sent
+ * @dest: peer destination address
+ */
+int tipc_l2_send_msg(struct sk_buff *buf, struct tipc_bearer *b,
+ struct tipc_media_addr *dest)
+{
+ struct sk_buff *clone;
+ struct net_device *dev;
+ int delta;
+ dev = (struct net_device *)rcu_dereference_rtnl(b->media_ptr);
+ if (!dev)
+ return 0;
+
+ clone = skb_clone(buf, GFP_ATOMIC);
+ if (!clone)
+ return 0;
+
+ delta = dev->hard_header_len - skb_headroom(buf);
+ if ((delta > 0) &&
+ pskb_expand_head(clone, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
+ kfree_skb(clone);
+ return 0;
+ }
+
+ skb_reset_network_header(clone);
+ clone->dev = dev;
+ clone->protocol = htons(ETH_P_TIPC);
+ dev_hard_header(clone, dev, ETH_P_TIPC, dest->value,
+ dev->dev_addr, clone->len);
+ dev_queue_xmit(clone);
+ return 0;
+}
+
+/* tipc_bearer_send - sends buffer to destination over bearer
+ *
+ * IMPORTANT:
+ * The media send routine must not alter the buffer being passed in
+ * as it may be needed for later retransmission!
+ */
+void tipc_bearer_send(u32 bearer_id, struct sk_buff *buf,
+ struct tipc_media_addr *dest)
+{
+ struct tipc_bearer *b_ptr;
+
+ rcu_read_lock();
+ b_ptr = rcu_dereference_rtnl(bearer_list[bearer_id]);
+ if (likely(b_ptr))
+ b_ptr->media->send_msg(buf, b_ptr, dest);
+ rcu_read_unlock();
+}
+
+/**
+ * tipc_l2_rcv_msg - handle incoming TIPC message from an interface
+ * @buf: the received packet
+ * @dev: the net device that the packet was received on
+ * @pt: the packet_type structure which was used to register this handler
+ * @orig_dev: the original receive net device in case the device is a bond
+ *
+ * Accept only packets explicitly sent to this node, or broadcast packets;
+ * ignores packets sent using interface multicast, and traffic sent to other
+ * nodes (which can happen if interface is running in promiscuous mode).
+ */
+static int tipc_l2_rcv_msg(struct sk_buff *buf, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev)
+{
+ struct tipc_bearer *b_ptr;
+
+ if (!net_eq(dev_net(dev), &init_net)) {
+ kfree_skb(buf);
+ return NET_RX_DROP;
+ }
+
+ rcu_read_lock();
+ b_ptr = rcu_dereference_rtnl(dev->tipc_ptr);
+ if (likely(b_ptr)) {
+ if (likely(buf->pkt_type <= PACKET_BROADCAST)) {
+ buf->next = NULL;
+ tipc_rcv(buf, b_ptr);
+ rcu_read_unlock();
+ return NET_RX_SUCCESS;
+ }
+ }
+ rcu_read_unlock();
+
+ kfree_skb(buf);
+ return NET_RX_DROP;
+}
+
+/**
+ * tipc_l2_device_event - handle device events from network device
+ * @nb: the context of the notification
+ * @evt: the type of event
+ * @ptr: the net device that the event was on
+ *
+ * This function is called via the netdevice notifier chain when the
+ * underlying device changes state (link, MTU, address, name, unregister).
+ */
+static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
+ void *ptr)
+{
+ struct tipc_bearer *b_ptr;
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+ if (!net_eq(dev_net(dev), &init_net))
+ return NOTIFY_DONE;
+
+ b_ptr = rtnl_dereference(dev->tipc_ptr);
+ if (!b_ptr)
+ return NOTIFY_DONE;
+
+ b_ptr->mtu = dev->mtu;
+
+ switch (evt) {
+ case NETDEV_CHANGE:
+ if (netif_carrier_ok(dev))
+ break;
+ case NETDEV_DOWN:
+ case NETDEV_CHANGEMTU:
+ tipc_reset_bearer(b_ptr);
+ break;
+ case NETDEV_CHANGEADDR:
+ b_ptr->media->raw2addr(b_ptr, &b_ptr->addr,
+ (char *)dev->dev_addr);
+ tipc_reset_bearer(b_ptr);
+ break;
+ case NETDEV_UNREGISTER:
+ case NETDEV_CHANGENAME:
+ bearer_disable(b_ptr, false);
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct packet_type tipc_packet_type __read_mostly = {
+ .type = htons(ETH_P_TIPC),
+ .func = tipc_l2_rcv_msg,
+};
+
+static struct notifier_block notifier = {
+ .notifier_call = tipc_l2_device_event,
+ .priority = 0,
+};
+
+int tipc_bearer_setup(void)
+{
+ int err;
+
+ err = register_netdevice_notifier(&notifier);
+ if (err)
+ return err;
+ dev_add_pack(&tipc_packet_type);
+ return 0;
+}
+
+void tipc_bearer_cleanup(void)
+{
+ unregister_netdevice_notifier(&notifier);
+ dev_remove_pack(&tipc_packet_type);
+}
void tipc_bearer_stop(void)
{
+ struct tipc_bearer *b_ptr;
u32 i;
for (i = 0; i < MAX_BEARERS; i++) {
- if (tipc_bearers[i].active)
- bearer_disable(&tipc_bearers[i]);
+ b_ptr = rtnl_dereference(bearer_list[i]);
+ if (b_ptr) {
+ bearer_disable(b_ptr, true);
+ bearer_list[i] = NULL;
+ }
}
- media_count = 0;
}
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 85f451d5aac..78fccc49de2 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -1,8 +1,8 @@
/*
* net/tipc/bearer.h: Include file for TIPC bearer code
*
- * Copyright (c) 1996-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 1996-2006, 2013, Ericsson AB
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -39,185 +39,161 @@
#include "bcast.h"
-#define MAX_BEARERS 8
-#define MAX_MEDIA 4
+#define MAX_BEARERS 2
+#define MAX_MEDIA 2
+
+/* Identifiers associated with TIPC message header media address info
+ * - address info field is 32 bytes long
+ * - the field's actual content and length are defined per media
+ * - remaining unused bytes in the field are set to zero
+ */
+#define TIPC_MEDIA_ADDR_SIZE 32
+#define TIPC_MEDIA_TYPE_OFFSET 3
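
To illustrate the layout described above, an Ethernet-style media could pack a 6-byte MAC into the 32-byte field roughly as follows; only the type offset (3) is defined here, so the MAC offset of 4 and the other constants are assumptions made for this sketch:

/* Illustrative sketch of packing an Ethernet MAC into the 32-byte
 * message-header address field.  The MAC offset (4) is an assumption;
 * unused bytes are left at zero as the comment above requires.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MEDIA_ADDR_SIZE   32
#define MEDIA_TYPE_OFFSET  3
#define MEDIA_TYPE_ETH     1
#define ETH_ALEN           6

static void eth_pack_addr(uint8_t *field, const uint8_t *mac)
{
	memset(field, 0, MEDIA_ADDR_SIZE);		/* unused bytes stay zero */
	field[MEDIA_TYPE_OFFSET] = MEDIA_TYPE_ETH;	/* media type identifier */
	memcpy(field + MEDIA_TYPE_OFFSET + 1, mac, ETH_ALEN);
}

int main(void)
{
	const uint8_t mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	uint8_t field[MEDIA_ADDR_SIZE];
	int i;

	eth_pack_addr(field, mac);
	for (i = 0; i < 12; i++)
		printf("%02x ", field[i]);
	printf("...\n");
	return 0;
}
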
/*
* Identifiers of supported TIPC media types
*/
#define TIPC_MEDIA_TYPE_ETH 1
+#define TIPC_MEDIA_TYPE_IB 2
-/*
- * Destination address structure used by TIPC bearers when sending messages
- *
- * IMPORTANT: The fields of this structure MUST be stored using the specified
- * byte order indicated below, as the structure is exchanged between nodes
- * as part of a link setup process.
+/**
+ * struct tipc_media_addr - destination address used by TIPC bearers
+ * @value: address info (format defined by media)
+ * @media_id: TIPC media type identifier
+ * @broadcast: non-zero if address is a broadcast address
*/
struct tipc_media_addr {
- __be32 type; /* bearer type (network byte order) */
- union {
- __u8 eth_addr[6]; /* 48 bit Ethernet addr (byte array) */
- } dev_addr;
+ u8 value[TIPC_MEDIA_ADDR_SIZE];
+ u8 media_id;
+ u8 broadcast;
};
-/**
- * struct tipc_bearer - TIPC bearer info available to media code
- * @usr_handle: pointer to additional media-specific information about bearer
- * @mtu: max packet size bearer can support
- * @blocked: non-zero if bearer is blocked
- * @lock: spinlock for controlling access to bearer
- * @addr: media-specific address associated with bearer
- * @name: bearer name (format = media:interface)
- *
- * Note: TIPC initializes "name" and "lock" fields; media code is responsible
- * for initializing all other fields when a bearer is enabled.
- */
-struct tipc_bearer {
- void *usr_handle;
- u32 mtu;
- int blocked;
- spinlock_t lock;
- struct tipc_media_addr addr;
- char name[TIPC_MAX_BEARER_NAME];
-};
+struct tipc_bearer;
/**
- * struct media - TIPC media information available to internal users
+ * struct tipc_media - Media specific info exposed to generic bearer layer
* @send_msg: routine which handles buffer transmission
- * @enable_bearer: routine which enables a bearer
- * @disable_bearer: routine which disables a bearer
- * @addr2str: routine which converts bearer's address to string form
- * @bcast_addr: media address used in broadcasting
- * @bcast: non-zero if media supports broadcasting [currently mandatory]
+ * @enable_media: routine which enables a media
+ * @disable_media: routine which disables a media
+ * @addr2str: convert media address format to string
+ * @addr2msg: convert from media addr format to discovery msg addr format
+ * @msg2addr: convert from discovery msg addr format to media addr format
+ * @raw2addr: convert from raw addr format to media addr format
* @priority: default link (and bearer) priority
* @tolerance: default time (in ms) before declaring link failure
* @window: default window (in packets) before declaring link congestion
* @type_id: TIPC media identifier
+ * @hwaddr_len: TIPC media address len
* @name: media name
*/
-
-struct media {
+struct tipc_media {
int (*send_msg)(struct sk_buff *buf,
struct tipc_bearer *b_ptr,
struct tipc_media_addr *dest);
- int (*enable_bearer)(struct tipc_bearer *b_ptr);
- void (*disable_bearer)(struct tipc_bearer *b_ptr);
- char *(*addr2str)(struct tipc_media_addr *a,
- char *str_buf, int str_size);
- struct tipc_media_addr bcast_addr;
- int bcast;
+ int (*enable_media)(struct tipc_bearer *b_ptr);
+ void (*disable_media)(struct tipc_bearer *b_ptr);
+ int (*addr2str)(struct tipc_media_addr *addr,
+ char *strbuf,
+ int bufsz);
+ int (*addr2msg)(char *msg, struct tipc_media_addr *addr);
+ int (*msg2addr)(struct tipc_bearer *b,
+ struct tipc_media_addr *addr,
+ char *msg);
+ int (*raw2addr)(struct tipc_bearer *b,
+ struct tipc_media_addr *addr,
+ char *raw);
u32 priority;
u32 tolerance;
u32 window;
u32 type_id;
+ u32 hwaddr_len;
char name[TIPC_MAX_MEDIA_NAME];
};
/**
- * struct bearer - TIPC bearer information available to internal users
- * @publ: bearer information available to privileged users
+ * struct tipc_bearer - Generic TIPC bearer structure
+ * @media_ptr: pointer to additional media-specific information about bearer
+ * @mtu: max packet size bearer can support
+ * @addr: media-specific address associated with bearer
+ * @name: bearer name (format = media:interface)
* @media: ptr to media structure associated with bearer
+ * @bcast_addr: media address used in broadcasting
+ * @rcu: rcu struct for tipc_bearer
* @priority: default link priority for bearer
- * @detect_scope: network address mask used during automatic link creation
+ * @window: default window size for bearer
+ * @tolerance: default link tolerance for bearer
+ * @domain: network domain to which links can be established
* @identity: array index of this bearer within TIPC bearer array
* @link_req: ptr to (optional) structure making periodic link setup requests
- * @links: list of non-congested links associated with bearer
- * @cong_links: list of congested links associated with bearer
- * @continue_count: # of times bearer has resumed after congestion or blocking
- * @active: non-zero if bearer structure represents an enabled bearer
* @net_plane: network plane ('A' through 'H') currently associated with bearer
* @nodes: indicates which nodes in cluster can be reached through bearer
+ *
+ * Note: media-specific code is responsible for initialization of the fields
+ * indicated below when a bearer is enabled; TIPC's generic bearer code takes
+ * care of initializing all other fields.
*/
-
-struct bearer {
- struct tipc_bearer publ;
- struct media *media;
+struct tipc_bearer {
+ void __rcu *media_ptr; /* initialized by media */
+ u32 mtu; /* initialized by media */
+ struct tipc_media_addr addr; /* initialized by media */
+ char name[TIPC_MAX_BEARER_NAME];
+ struct tipc_media *media;
+ struct tipc_media_addr bcast_addr;
+ struct rcu_head rcu;
u32 priority;
- u32 detect_scope;
+ u32 window;
+ u32 tolerance;
+ u32 domain;
u32 identity;
- struct link_req *link_req;
- struct list_head links;
- struct list_head cong_links;
- u32 continue_count;
- int active;
+ struct tipc_link_req *link_req;
char net_plane;
struct tipc_node_map nodes;
};
-struct bearer_name {
+struct tipc_bearer_names {
char media_name[TIPC_MAX_MEDIA_NAME];
char if_name[TIPC_MAX_IF_NAME];
};
-struct link;
+struct tipc_link;
-extern struct bearer tipc_bearers[];
+extern struct tipc_bearer __rcu *bearer_list[];
/*
* TIPC routines available to supported media types
*/
-int tipc_register_media(u32 media_type,
- char *media_name, int (*enable)(struct tipc_bearer *),
- void (*disable)(struct tipc_bearer *),
- int (*send_msg)(struct sk_buff *,
- struct tipc_bearer *, struct tipc_media_addr *),
- char *(*addr2str)(struct tipc_media_addr *a,
- char *str_buf, int str_size),
- struct tipc_media_addr *bcast_addr, const u32 bearer_priority,
- const u32 link_tolerance, /* [ms] */
- const u32 send_window_limit);
-
-void tipc_recv_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr);
-
-int tipc_block_bearer(const char *name);
-void tipc_continue(struct tipc_bearer *tb_ptr);
-
-int tipc_enable_bearer(const char *bearer_name, u32 bcast_scope, u32 priority);
+
+void tipc_rcv(struct sk_buff *buf, struct tipc_bearer *tb_ptr);
+int tipc_enable_bearer(const char *bearer_name, u32 disc_domain, u32 priority);
int tipc_disable_bearer(const char *name);
/*
* Routines made available to TIPC by supported media types
*/
-int tipc_eth_media_start(void);
-void tipc_eth_media_stop(void);
+extern struct tipc_media eth_media_info;
-void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a);
+#ifdef CONFIG_TIPC_MEDIA_IB
+extern struct tipc_media ib_media_info;
+#endif
+
+int tipc_media_set_priority(const char *name, u32 new_value);
+int tipc_media_set_window(const char *name, u32 new_value);
+void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a);
struct sk_buff *tipc_media_get_names(void);
+int tipc_enable_l2_media(struct tipc_bearer *b);
+void tipc_disable_l2_media(struct tipc_bearer *b);
+int tipc_l2_send_msg(struct sk_buff *buf, struct tipc_bearer *b,
+ struct tipc_media_addr *dest);
struct sk_buff *tipc_bearer_get_names(void);
-void tipc_bearer_add_dest(struct bearer *b_ptr, u32 dest);
-void tipc_bearer_remove_dest(struct bearer *b_ptr, u32 dest);
-void tipc_bearer_schedule(struct bearer *b_ptr, struct link *l_ptr);
-struct bearer *tipc_bearer_find_interface(const char *if_name);
-int tipc_bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr);
-int tipc_bearer_congested(struct bearer *b_ptr, struct link *l_ptr);
+void tipc_bearer_add_dest(u32 bearer_id, u32 dest);
+void tipc_bearer_remove_dest(u32 bearer_id, u32 dest);
+struct tipc_bearer *tipc_bearer_find(const char *name);
+struct tipc_media *tipc_media_find(const char *name);
+int tipc_bearer_setup(void);
+void tipc_bearer_cleanup(void);
void tipc_bearer_stop(void);
-void tipc_bearer_lock_push(struct bearer *b_ptr);
-
-
-/**
- * tipc_bearer_send - sends buffer to destination over bearer
- *
- * Returns true (1) if successful, or false (0) if unable to send
- *
- * IMPORTANT:
- * The media send routine must not alter the buffer being passed in
- * as it may be needed for later retransmission!
- *
- * If the media send routine returns a non-zero value (indicating that
- * it was unable to send the buffer), it must:
- * 1) mark the bearer as blocked,
- * 2) call tipc_continue() once the bearer is able to send again.
- * Media types that are unable to meet these two criteria must ensure their
- * send routine always returns success -- even if the buffer was not sent --
- * and let TIPC's link code deal with the undelivered message.
- */
-
-static inline int tipc_bearer_send(struct bearer *b_ptr, struct sk_buff *buf,
- struct tipc_media_addr *dest)
-{
- return !b_ptr->media->send_msg(buf, &b_ptr->publ, dest);
-}
+void tipc_bearer_send(u32 bearer_id, struct sk_buff *buf,
+ struct tipc_media_addr *dest);
#endif /* _TIPC_BEARER_H */
diff --git a/net/tipc/cluster.c b/net/tipc/cluster.c
deleted file mode 100644
index 405be87157b..00000000000
--- a/net/tipc/cluster.c
+++ /dev/null
@@ -1,550 +0,0 @@
-/*
- * net/tipc/cluster.c: TIPC cluster management routines
- *
- * Copyright (c) 2000-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "core.h"
-#include "cluster.h"
-#include "link.h"
-
-static void tipc_cltr_multicast(struct cluster *c_ptr, struct sk_buff *buf,
- u32 lower, u32 upper);
-
-struct tipc_node **tipc_local_nodes = NULL;
-struct tipc_node_map tipc_cltr_bcast_nodes = {0,{0,}};
-u32 tipc_highest_allowed_slave = 0;
-
-struct cluster *tipc_cltr_create(u32 addr)
-{
- struct _zone *z_ptr;
- struct cluster *c_ptr;
- int max_nodes;
-
- c_ptr = kzalloc(sizeof(*c_ptr), GFP_ATOMIC);
- if (c_ptr == NULL) {
- warn("Cluster creation failure, no memory\n");
- return NULL;
- }
-
- c_ptr->addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0);
- if (in_own_cluster(addr))
- max_nodes = LOWEST_SLAVE + tipc_max_slaves;
- else
- max_nodes = tipc_max_nodes + 1;
-
- c_ptr->nodes = kcalloc(max_nodes + 1, sizeof(void*), GFP_ATOMIC);
- if (c_ptr->nodes == NULL) {
- warn("Cluster creation failure, no memory for node area\n");
- kfree(c_ptr);
- return NULL;
- }
-
- if (in_own_cluster(addr))
- tipc_local_nodes = c_ptr->nodes;
- c_ptr->highest_slave = LOWEST_SLAVE - 1;
- c_ptr->highest_node = 0;
-
- z_ptr = tipc_zone_find(tipc_zone(addr));
- if (!z_ptr) {
- z_ptr = tipc_zone_create(addr);
- }
- if (!z_ptr) {
- kfree(c_ptr->nodes);
- kfree(c_ptr);
- return NULL;
- }
-
- tipc_zone_attach_cluster(z_ptr, c_ptr);
- c_ptr->owner = z_ptr;
- return c_ptr;
-}
-
-void tipc_cltr_delete(struct cluster *c_ptr)
-{
- u32 n_num;
-
- if (!c_ptr)
- return;
- for (n_num = 1; n_num <= c_ptr->highest_node; n_num++) {
- tipc_node_delete(c_ptr->nodes[n_num]);
- }
- for (n_num = LOWEST_SLAVE; n_num <= c_ptr->highest_slave; n_num++) {
- tipc_node_delete(c_ptr->nodes[n_num]);
- }
- kfree(c_ptr->nodes);
- kfree(c_ptr);
-}
-
-
-void tipc_cltr_attach_node(struct cluster *c_ptr, struct tipc_node *n_ptr)
-{
- u32 n_num = tipc_node(n_ptr->addr);
- u32 max_n_num = tipc_max_nodes;
-
- if (in_own_cluster(n_ptr->addr))
- max_n_num = tipc_highest_allowed_slave;
- assert(n_num > 0);
- assert(n_num <= max_n_num);
- assert(c_ptr->nodes[n_num] == NULL);
- c_ptr->nodes[n_num] = n_ptr;
- if (n_num > c_ptr->highest_node)
- c_ptr->highest_node = n_num;
-}
-
-/**
- * tipc_cltr_select_router - select router to a cluster
- *
- * Uses deterministic and fair algorithm.
- */
-
-u32 tipc_cltr_select_router(struct cluster *c_ptr, u32 ref)
-{
- u32 n_num;
- u32 ulim = c_ptr->highest_node;
- u32 mask;
- u32 tstart;
-
- assert(!in_own_cluster(c_ptr->addr));
- if (!ulim)
- return 0;
-
- /* Start entry must be random */
- mask = tipc_max_nodes;
- while (mask > ulim)
- mask >>= 1;
- tstart = ref & mask;
- n_num = tstart;
-
- /* Lookup upwards with wrap-around */
- do {
- if (tipc_node_is_up(c_ptr->nodes[n_num]))
- break;
- } while (++n_num <= ulim);
- if (n_num > ulim) {
- n_num = 1;
- do {
- if (tipc_node_is_up(c_ptr->nodes[n_num]))
- break;
- } while (++n_num < tstart);
- if (n_num == tstart)
- return 0;
- }
- assert(n_num <= ulim);
- return tipc_node_select_router(c_ptr->nodes[n_num], ref);
-}
-
-/**
- * tipc_cltr_select_node - select destination node within a remote cluster
- *
- * Uses deterministic and fair algorithm.
- */
-
-struct tipc_node *tipc_cltr_select_node(struct cluster *c_ptr, u32 selector)
-{
- u32 n_num;
- u32 mask = tipc_max_nodes;
- u32 start_entry;
-
- assert(!in_own_cluster(c_ptr->addr));
- if (!c_ptr->highest_node)
- return NULL;
-
- /* Start entry must be random */
- while (mask > c_ptr->highest_node) {
- mask >>= 1;
- }
- start_entry = (selector & mask) ? selector & mask : 1u;
- assert(start_entry <= c_ptr->highest_node);
-
- /* Lookup upwards with wrap-around */
- for (n_num = start_entry; n_num <= c_ptr->highest_node; n_num++) {
- if (tipc_node_has_active_links(c_ptr->nodes[n_num]))
- return c_ptr->nodes[n_num];
- }
- for (n_num = 1; n_num < start_entry; n_num++) {
- if (tipc_node_has_active_links(c_ptr->nodes[n_num]))
- return c_ptr->nodes[n_num];
- }
- return NULL;
-}
-
-/*
- * Routing table management: See description in node.c
- */
-
-static struct sk_buff *tipc_cltr_prepare_routing_msg(u32 data_size, u32 dest)
-{
- u32 size = INT_H_SIZE + data_size;
- struct sk_buff *buf = tipc_buf_acquire(size);
- struct tipc_msg *msg;
-
- if (buf) {
- msg = buf_msg(buf);
- memset((char *)msg, 0, size);
- tipc_msg_init(msg, ROUTE_DISTRIBUTOR, 0, INT_H_SIZE, dest);
- }
- return buf;
-}
-
-void tipc_cltr_bcast_new_route(struct cluster *c_ptr, u32 dest,
- u32 lower, u32 upper)
-{
- struct sk_buff *buf = tipc_cltr_prepare_routing_msg(0, c_ptr->addr);
- struct tipc_msg *msg;
-
- if (buf) {
- msg = buf_msg(buf);
- msg_set_remote_node(msg, dest);
- msg_set_type(msg, ROUTE_ADDITION);
- tipc_cltr_multicast(c_ptr, buf, lower, upper);
- } else {
- warn("Memory squeeze: broadcast of new route failed\n");
- }
-}
-
-void tipc_cltr_bcast_lost_route(struct cluster *c_ptr, u32 dest,
- u32 lower, u32 upper)
-{
- struct sk_buff *buf = tipc_cltr_prepare_routing_msg(0, c_ptr->addr);
- struct tipc_msg *msg;
-
- if (buf) {
- msg = buf_msg(buf);
- msg_set_remote_node(msg, dest);
- msg_set_type(msg, ROUTE_REMOVAL);
- tipc_cltr_multicast(c_ptr, buf, lower, upper);
- } else {
- warn("Memory squeeze: broadcast of lost route failed\n");
- }
-}
-
-void tipc_cltr_send_slave_routes(struct cluster *c_ptr, u32 dest)
-{
- struct sk_buff *buf;
- struct tipc_msg *msg;
- u32 highest = c_ptr->highest_slave;
- u32 n_num;
- int send = 0;
-
- assert(!is_slave(dest));
- assert(in_own_cluster(dest));
- assert(in_own_cluster(c_ptr->addr));
- if (highest <= LOWEST_SLAVE)
- return;
- buf = tipc_cltr_prepare_routing_msg(highest - LOWEST_SLAVE + 1,
- c_ptr->addr);
- if (buf) {
- msg = buf_msg(buf);
- msg_set_remote_node(msg, c_ptr->addr);
- msg_set_type(msg, SLAVE_ROUTING_TABLE);
- for (n_num = LOWEST_SLAVE; n_num <= highest; n_num++) {
- if (c_ptr->nodes[n_num] &&
- tipc_node_has_active_links(c_ptr->nodes[n_num])) {
- send = 1;
- msg_set_dataoctet(msg, n_num);
- }
- }
- if (send)
- tipc_link_send(buf, dest, dest);
- else
- buf_discard(buf);
- } else {
- warn("Memory squeeze: broadcast of lost route failed\n");
- }
-}
-
-void tipc_cltr_send_ext_routes(struct cluster *c_ptr, u32 dest)
-{
- struct sk_buff *buf;
- struct tipc_msg *msg;
- u32 highest = c_ptr->highest_node;
- u32 n_num;
- int send = 0;
-
- if (in_own_cluster(c_ptr->addr))
- return;
- assert(!is_slave(dest));
- assert(in_own_cluster(dest));
- highest = c_ptr->highest_node;
- buf = tipc_cltr_prepare_routing_msg(highest + 1, c_ptr->addr);
- if (buf) {
- msg = buf_msg(buf);
- msg_set_remote_node(msg, c_ptr->addr);
- msg_set_type(msg, EXT_ROUTING_TABLE);
- for (n_num = 1; n_num <= highest; n_num++) {
- if (c_ptr->nodes[n_num] &&
- tipc_node_has_active_links(c_ptr->nodes[n_num])) {
- send = 1;
- msg_set_dataoctet(msg, n_num);
- }
- }
- if (send)
- tipc_link_send(buf, dest, dest);
- else
- buf_discard(buf);
- } else {
- warn("Memory squeeze: broadcast of external route failed\n");
- }
-}
-
-void tipc_cltr_send_local_routes(struct cluster *c_ptr, u32 dest)
-{
- struct sk_buff *buf;
- struct tipc_msg *msg;
- u32 highest = c_ptr->highest_node;
- u32 n_num;
- int send = 0;
-
- assert(is_slave(dest));
- assert(in_own_cluster(c_ptr->addr));
- buf = tipc_cltr_prepare_routing_msg(highest, c_ptr->addr);
- if (buf) {
- msg = buf_msg(buf);
- msg_set_remote_node(msg, c_ptr->addr);
- msg_set_type(msg, LOCAL_ROUTING_TABLE);
- for (n_num = 1; n_num <= highest; n_num++) {
- if (c_ptr->nodes[n_num] &&
- tipc_node_has_active_links(c_ptr->nodes[n_num])) {
- send = 1;
- msg_set_dataoctet(msg, n_num);
- }
- }
- if (send)
- tipc_link_send(buf, dest, dest);
- else
- buf_discard(buf);
- } else {
- warn("Memory squeeze: broadcast of local route failed\n");
- }
-}
-
-void tipc_cltr_recv_routing_table(struct sk_buff *buf)
-{
- struct tipc_msg *msg = buf_msg(buf);
- struct cluster *c_ptr;
- struct tipc_node *n_ptr;
- unchar *node_table;
- u32 table_size;
- u32 router;
- u32 rem_node = msg_remote_node(msg);
- u32 z_num;
- u32 c_num;
- u32 n_num;
-
- c_ptr = tipc_cltr_find(rem_node);
- if (!c_ptr) {
- c_ptr = tipc_cltr_create(rem_node);
- if (!c_ptr) {
- buf_discard(buf);
- return;
- }
- }
-
- node_table = buf->data + msg_hdr_sz(msg);
- table_size = msg_size(msg) - msg_hdr_sz(msg);
- router = msg_prevnode(msg);
- z_num = tipc_zone(rem_node);
- c_num = tipc_cluster(rem_node);
-
- switch (msg_type(msg)) {
- case LOCAL_ROUTING_TABLE:
- assert(is_slave(tipc_own_addr));
- case EXT_ROUTING_TABLE:
- for (n_num = 1; n_num < table_size; n_num++) {
- if (node_table[n_num]) {
- u32 addr = tipc_addr(z_num, c_num, n_num);
- n_ptr = c_ptr->nodes[n_num];
- if (!n_ptr) {
- n_ptr = tipc_node_create(addr);
- }
- if (n_ptr)
- tipc_node_add_router(n_ptr, router);
- }
- }
- break;
- case SLAVE_ROUTING_TABLE:
- assert(!is_slave(tipc_own_addr));
- assert(in_own_cluster(c_ptr->addr));
- for (n_num = 1; n_num < table_size; n_num++) {
- if (node_table[n_num]) {
- u32 slave_num = n_num + LOWEST_SLAVE;
- u32 addr = tipc_addr(z_num, c_num, slave_num);
- n_ptr = c_ptr->nodes[slave_num];
- if (!n_ptr) {
- n_ptr = tipc_node_create(addr);
- }
- if (n_ptr)
- tipc_node_add_router(n_ptr, router);
- }
- }
- break;
- case ROUTE_ADDITION:
- if (!is_slave(tipc_own_addr)) {
- assert(!in_own_cluster(c_ptr->addr) ||
- is_slave(rem_node));
- } else {
- assert(in_own_cluster(c_ptr->addr) &&
- !is_slave(rem_node));
- }
- n_ptr = c_ptr->nodes[tipc_node(rem_node)];
- if (!n_ptr)
- n_ptr = tipc_node_create(rem_node);
- if (n_ptr)
- tipc_node_add_router(n_ptr, router);
- break;
- case ROUTE_REMOVAL:
- if (!is_slave(tipc_own_addr)) {
- assert(!in_own_cluster(c_ptr->addr) ||
- is_slave(rem_node));
- } else {
- assert(in_own_cluster(c_ptr->addr) &&
- !is_slave(rem_node));
- }
- n_ptr = c_ptr->nodes[tipc_node(rem_node)];
- if (n_ptr)
- tipc_node_remove_router(n_ptr, router);
- break;
- default:
- assert(!"Illegal routing manager message received\n");
- }
- buf_discard(buf);
-}
-
-void tipc_cltr_remove_as_router(struct cluster *c_ptr, u32 router)
-{
- u32 start_entry;
- u32 tstop;
- u32 n_num;
-
- if (is_slave(router))
- return; /* Slave nodes can not be routers */
-
- if (in_own_cluster(c_ptr->addr)) {
- start_entry = LOWEST_SLAVE;
- tstop = c_ptr->highest_slave;
- } else {
- start_entry = 1;
- tstop = c_ptr->highest_node;
- }
-
- for (n_num = start_entry; n_num <= tstop; n_num++) {
- if (c_ptr->nodes[n_num]) {
- tipc_node_remove_router(c_ptr->nodes[n_num], router);
- }
- }
-}
-
-/**
- * tipc_cltr_multicast - multicast message to local nodes
- */
-
-static void tipc_cltr_multicast(struct cluster *c_ptr, struct sk_buff *buf,
- u32 lower, u32 upper)
-{
- struct sk_buff *buf_copy;
- struct tipc_node *n_ptr;
- u32 n_num;
- u32 tstop;
-
- assert(lower <= upper);
- assert(((lower >= 1) && (lower <= tipc_max_nodes)) ||
- ((lower >= LOWEST_SLAVE) && (lower <= tipc_highest_allowed_slave)));
- assert(((upper >= 1) && (upper <= tipc_max_nodes)) ||
- ((upper >= LOWEST_SLAVE) && (upper <= tipc_highest_allowed_slave)));
- assert(in_own_cluster(c_ptr->addr));
-
- tstop = is_slave(upper) ? c_ptr->highest_slave : c_ptr->highest_node;
- if (tstop > upper)
- tstop = upper;
- for (n_num = lower; n_num <= tstop; n_num++) {
- n_ptr = c_ptr->nodes[n_num];
- if (n_ptr && tipc_node_has_active_links(n_ptr)) {
- buf_copy = skb_copy(buf, GFP_ATOMIC);
- if (buf_copy == NULL)
- break;
- msg_set_destnode(buf_msg(buf_copy), n_ptr->addr);
- tipc_link_send(buf_copy, n_ptr->addr, n_ptr->addr);
- }
- }
- buf_discard(buf);
-}
-
-/**
- * tipc_cltr_broadcast - broadcast message to all nodes within cluster
- */
-
-void tipc_cltr_broadcast(struct sk_buff *buf)
-{
- struct sk_buff *buf_copy;
- struct cluster *c_ptr;
- struct tipc_node *n_ptr;
- u32 n_num;
- u32 tstart;
- u32 tstop;
- u32 node_type;
-
- if (tipc_mode == TIPC_NET_MODE) {
- c_ptr = tipc_cltr_find(tipc_own_addr);
- assert(in_own_cluster(c_ptr->addr)); /* For now */
-
- /* Send to standard nodes, then repeat loop sending to slaves */
- tstart = 1;
- tstop = c_ptr->highest_node;
- for (node_type = 1; node_type <= 2; node_type++) {
- for (n_num = tstart; n_num <= tstop; n_num++) {
- n_ptr = c_ptr->nodes[n_num];
- if (n_ptr && tipc_node_has_active_links(n_ptr)) {
- buf_copy = skb_copy(buf, GFP_ATOMIC);
- if (buf_copy == NULL)
- goto exit;
- msg_set_destnode(buf_msg(buf_copy),
- n_ptr->addr);
- tipc_link_send(buf_copy, n_ptr->addr,
- n_ptr->addr);
- }
- }
- tstart = LOWEST_SLAVE;
- tstop = c_ptr->highest_slave;
- }
- }
-exit:
- buf_discard(buf);
-}
-
-int tipc_cltr_init(void)
-{
- tipc_highest_allowed_slave = LOWEST_SLAVE + tipc_max_slaves;
- return tipc_cltr_create(tipc_own_addr) ? 0 : -ENOMEM;
-}
-
diff --git a/net/tipc/cluster.h b/net/tipc/cluster.h
deleted file mode 100644
index 32636d98c9c..00000000000
--- a/net/tipc/cluster.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * net/tipc/cluster.h: Include file for TIPC cluster management routines
- *
- * Copyright (c) 2000-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _TIPC_CLUSTER_H
-#define _TIPC_CLUSTER_H
-
-#include "addr.h"
-#include "zone.h"
-
-#define LOWEST_SLAVE 2048u
-
-/**
- * struct cluster - TIPC cluster structure
- * @addr: network address of cluster
- * @owner: pointer to zone that cluster belongs to
- * @nodes: array of pointers to all nodes within cluster
- * @highest_node: id of highest numbered node within cluster
- * @highest_slave: (used for secondary node support)
- */
-
-struct cluster {
- u32 addr;
- struct _zone *owner;
- struct tipc_node **nodes;
- u32 highest_node;
- u32 highest_slave;
-};
-
-
-extern struct tipc_node **tipc_local_nodes;
-extern u32 tipc_highest_allowed_slave;
-extern struct tipc_node_map tipc_cltr_bcast_nodes;
-
-void tipc_cltr_remove_as_router(struct cluster *c_ptr, u32 router);
-void tipc_cltr_send_ext_routes(struct cluster *c_ptr, u32 dest);
-struct tipc_node *tipc_cltr_select_node(struct cluster *c_ptr, u32 selector);
-u32 tipc_cltr_select_router(struct cluster *c_ptr, u32 ref);
-void tipc_cltr_recv_routing_table(struct sk_buff *buf);
-struct cluster *tipc_cltr_create(u32 addr);
-void tipc_cltr_delete(struct cluster *c_ptr);
-void tipc_cltr_attach_node(struct cluster *c_ptr, struct tipc_node *n_ptr);
-void tipc_cltr_send_slave_routes(struct cluster *c_ptr, u32 dest);
-void tipc_cltr_broadcast(struct sk_buff *buf);
-int tipc_cltr_init(void);
-
-void tipc_cltr_bcast_new_route(struct cluster *c_ptr, u32 dest, u32 lo, u32 hi);
-void tipc_cltr_send_local_routes(struct cluster *c_ptr, u32 dest);
-void tipc_cltr_bcast_lost_route(struct cluster *c_ptr, u32 dest, u32 lo, u32 hi);
-
-static inline struct cluster *tipc_cltr_find(u32 addr)
-{
- struct _zone *z_ptr = tipc_zone_find(addr);
-
- if (z_ptr)
- return z_ptr->clusters[1];
- return NULL;
-}
-
-#endif
diff --git a/net/tipc/config.c b/net/tipc/config.c
index bdde39f0436..2b42403ad33 100644
--- a/net/tipc/config.c
+++ b/net/tipc/config.c
@@ -2,7 +2,7 @@
* net/tipc/config.c: TIPC configuration management code
*
* Copyright (c) 2002-2006, Ericsson AB
- * Copyright (c) 2004-2007, Wind River Systems
+ * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -36,25 +36,16 @@
#include "core.h"
#include "port.h"
-#include "link.h"
#include "name_table.h"
-#include "user_reg.h"
#include "config.h"
+#include "server.h"
-struct manager {
- u32 user_ref;
- u32 port_ref;
-};
-
-static struct manager mng = { 0};
-
-static DEFINE_SPINLOCK(config_lock);
+#define REPLY_TRUNCATED "<truncated>\n"
static const void *req_tlv_area; /* request message TLV area */
static int req_tlv_space; /* request message TLV area size */
static int rep_headroom; /* reply message headroom to use */
-
struct sk_buff *tipc_cfg_reply_alloc(int payload_size)
{
struct sk_buff *buf;
@@ -71,10 +62,8 @@ int tipc_cfg_append_tlv(struct sk_buff *buf, int tlv_type,
struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(buf);
int new_tlv_space = TLV_SPACE(tlv_data_size);
- if (skb_tailroom(buf) < new_tlv_space) {
- dbg("tipc_cfg_append_tlv unable to append TLV\n");
+ if (skb_tailroom(buf) < new_tlv_space)
return 0;
- }
skb_put(buf, new_tlv_space);
tlv->tlv_type = htons(tlv_type);
tlv->tlv_len = htons(TLV_LENGTH(tlv_data_size));
@@ -113,13 +102,12 @@ struct sk_buff *tipc_cfg_reply_string_type(u16 tlv_type, char *string)
return buf;
}
-#define MAX_STATS_INFO 2000
-
static struct sk_buff *tipc_show_stats(void)
{
struct sk_buff *buf;
struct tlv_desc *rep_tlv;
- struct print_buf pb;
+ char *pb;
+ int pb_len;
int str_len;
u32 value;
@@ -130,18 +118,16 @@ static struct sk_buff *tipc_show_stats(void)
if (value != 0)
return tipc_cfg_reply_error_string("unsupported argument");
- buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_STATS_INFO));
+ buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
if (buf == NULL)
return NULL;
rep_tlv = (struct tlv_desc *)buf->data;
- tipc_printbuf_init(&pb, (char *)TLV_DATA(rep_tlv), MAX_STATS_INFO);
-
- tipc_printf(&pb, "TIPC version " TIPC_MOD_VER "\n");
+ pb = TLV_DATA(rep_tlv);
+ pb_len = ULTRA_STRING_MAX_LEN;
- /* Use additional tipc_printf()'s to return more info ... */
-
- str_len = tipc_printbuf_validate(&pb);
+ str_len = tipc_snprintf(pb, pb_len, "TIPC version " TIPC_MOD_VER "\n");
+ str_len += 1; /* for "\0" */
skb_put(buf, TLV_SPACE(str_len));
TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
@@ -157,7 +143,7 @@ static struct sk_buff *cfg_enable_bearer(void)
args = (struct tipc_bearer_config *)TLV_DATA(req_tlv_area);
if (tipc_enable_bearer(args->name,
- ntohl(args->detect_scope),
+ ntohl(args->disc_domain),
ntohl(args->priority)))
return tipc_cfg_reply_error_string("unable to enable bearer");
@@ -188,66 +174,13 @@ static struct sk_buff *cfg_set_own_addr(void)
if (!tipc_addr_node_valid(addr))
return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
" (node address)");
- if (tipc_mode == TIPC_NET_MODE)
+ if (tipc_own_addr)
return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
" (cannot change node address once assigned)");
+ if (!tipc_net_start(addr))
+ return tipc_cfg_reply_none();
- /*
- * Must release all spinlocks before calling start_net() because
- * Linux version of TIPC calls eth_media_start() which calls
- * register_netdevice_notifier() which may block!
- *
- * Temporarily releasing the lock should be harmless for non-Linux TIPC,
- * but Linux version of eth_media_start() should really be reworked
- * so that it can be called with spinlocks held.
- */
-
- spin_unlock_bh(&config_lock);
- tipc_core_start_net(addr);
- spin_lock_bh(&config_lock);
- return tipc_cfg_reply_none();
-}
-
-static struct sk_buff *cfg_set_remote_mng(void)
-{
- u32 value;
-
- if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
- return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
-
- value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
- tipc_remote_management = (value != 0);
- return tipc_cfg_reply_none();
-}
-
-static struct sk_buff *cfg_set_max_publications(void)
-{
- u32 value;
-
- if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
- return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
-
- value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
- if (value != delimit(value, 1, 65535))
- return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
- " (max publications must be 1-65535)");
- tipc_max_publications = value;
- return tipc_cfg_reply_none();
-}
-
-static struct sk_buff *cfg_set_max_subscriptions(void)
-{
- u32 value;
-
- if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
- return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
-
- value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
- if (value != delimit(value, 1, 65535))
- return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
- " (max subscriptions must be 1-65535");
- tipc_max_subscriptions = value;
- return tipc_cfg_reply_none();
+ return tipc_cfg_reply_error_string("cannot change to network mode");
}
static struct sk_buff *cfg_set_max_ports(void)
@@ -259,78 +192,11 @@ static struct sk_buff *cfg_set_max_ports(void)
value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
if (value == tipc_max_ports)
return tipc_cfg_reply_none();
- if (value != delimit(value, 127, 65535))
+ if (value < 127 || value > 65535)
return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
" (max ports must be 127-65535)");
- if (tipc_mode != TIPC_NOT_RUNNING)
- return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
- " (cannot change max ports while TIPC is active)");
- tipc_max_ports = value;
- return tipc_cfg_reply_none();
-}
-
-static struct sk_buff *cfg_set_max_zones(void)
-{
- u32 value;
-
- if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
- return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
- value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
- if (value == tipc_max_zones)
- return tipc_cfg_reply_none();
- if (value != delimit(value, 1, 255))
- return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
- " (max zones must be 1-255)");
- if (tipc_mode == TIPC_NET_MODE)
- return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
- " (cannot change max zones once TIPC has joined a network)");
- tipc_max_zones = value;
- return tipc_cfg_reply_none();
-}
-
-static struct sk_buff *cfg_set_max_clusters(void)
-{
- u32 value;
-
- if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
- return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
- value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
- if (value != delimit(value, 1, 1))
- return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
- " (max clusters fixed at 1)");
- return tipc_cfg_reply_none();
-}
-
-static struct sk_buff *cfg_set_max_nodes(void)
-{
- u32 value;
-
- if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
- return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
- value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
- if (value == tipc_max_nodes)
- return tipc_cfg_reply_none();
- if (value != delimit(value, 8, 2047))
- return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
- " (max nodes must be 8-2047)");
- if (tipc_mode == TIPC_NET_MODE)
- return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
- " (cannot change max nodes once TIPC has joined a network)");
- tipc_max_nodes = value;
- return tipc_cfg_reply_none();
-}
-
-static struct sk_buff *cfg_set_max_slaves(void)
-{
- u32 value;
-
- if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
- return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
- value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
- if (value != 0)
- return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
- " (max secondary nodes fixed at 0)");
- return tipc_cfg_reply_none();
+ return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
+ " (cannot change max ports while TIPC is active)");
}
static struct sk_buff *cfg_set_netid(void)
@@ -342,10 +208,10 @@ static struct sk_buff *cfg_set_netid(void)
value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
if (value == tipc_net_id)
return tipc_cfg_reply_none();
- if (value != delimit(value, 1, 9999))
+ if (value < 1 || value > 9999)
return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
" (network id must be 1-9999)");
- if (tipc_mode == TIPC_NET_MODE)
+ if (tipc_own_addr)
return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
" (cannot change network id once TIPC has joined a network)");
tipc_net_id = value;
@@ -357,38 +223,23 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
{
struct sk_buff *rep_tlv_buf;
- spin_lock_bh(&config_lock);
+ rtnl_lock();
/* Save request and reply details in a well-known location */
-
req_tlv_area = request_area;
req_tlv_space = request_space;
rep_headroom = reply_headroom;
/* Check command authorization */
-
- if (likely(orig_node == tipc_own_addr)) {
+ if (likely(in_own_node(orig_node))) {
/* command is permitted */
- } else if (cmd >= 0x8000) {
+ } else {
rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
" (cannot be done remotely)");
goto exit;
- } else if (!tipc_remote_management) {
- rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NO_REMOTE);
- goto exit;
- }
- else if (cmd >= 0x4000) {
- u32 domain = 0;
-
- if ((tipc_nametbl_translate(TIPC_ZM_SRV, 0, &domain) == 0) ||
- (domain != orig_node)) {
- rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_ZONE_MSTR);
- goto exit;
- }
}
/* Call appropriate processing routine */
-
switch (cmd) {
case TIPC_CMD_NOOP:
rep_tlv_buf = tipc_cfg_reply_none();
@@ -417,12 +268,6 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
case TIPC_CMD_SHOW_PORTS:
rep_tlv_buf = tipc_port_get_ports();
break;
- case TIPC_CMD_SET_LOG_SIZE:
- rep_tlv_buf = tipc_log_resize_cmd(req_tlv_area, req_tlv_space);
- break;
- case TIPC_CMD_DUMP_LOG:
- rep_tlv_buf = tipc_log_dump();
- break;
case TIPC_CMD_SHOW_STATS:
rep_tlv_buf = tipc_show_stats();
break;
@@ -440,57 +285,15 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
case TIPC_CMD_SET_NODE_ADDR:
rep_tlv_buf = cfg_set_own_addr();
break;
- case TIPC_CMD_SET_REMOTE_MNG:
- rep_tlv_buf = cfg_set_remote_mng();
- break;
case TIPC_CMD_SET_MAX_PORTS:
rep_tlv_buf = cfg_set_max_ports();
break;
- case TIPC_CMD_SET_MAX_PUBL:
- rep_tlv_buf = cfg_set_max_publications();
- break;
- case TIPC_CMD_SET_MAX_SUBSCR:
- rep_tlv_buf = cfg_set_max_subscriptions();
- break;
- case TIPC_CMD_SET_MAX_ZONES:
- rep_tlv_buf = cfg_set_max_zones();
- break;
- case TIPC_CMD_SET_MAX_CLUSTERS:
- rep_tlv_buf = cfg_set_max_clusters();
- break;
- case TIPC_CMD_SET_MAX_NODES:
- rep_tlv_buf = cfg_set_max_nodes();
- break;
- case TIPC_CMD_SET_MAX_SLAVES:
- rep_tlv_buf = cfg_set_max_slaves();
- break;
case TIPC_CMD_SET_NETID:
rep_tlv_buf = cfg_set_netid();
break;
- case TIPC_CMD_GET_REMOTE_MNG:
- rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_remote_management);
- break;
case TIPC_CMD_GET_MAX_PORTS:
rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_ports);
break;
- case TIPC_CMD_GET_MAX_PUBL:
- rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_publications);
- break;
- case TIPC_CMD_GET_MAX_SUBSCR:
- rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_subscriptions);
- break;
- case TIPC_CMD_GET_MAX_ZONES:
- rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_zones);
- break;
- case TIPC_CMD_GET_MAX_CLUSTERS:
- rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_clusters);
- break;
- case TIPC_CMD_GET_MAX_NODES:
- rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_nodes);
- break;
- case TIPC_CMD_GET_MAX_SLAVES:
- rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_slaves);
- break;
case TIPC_CMD_GET_NETID:
rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_net_id);
break;
@@ -498,98 +301,42 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
rep_tlv_buf =
tipc_cfg_reply_error_string(TIPC_CFG_NOT_NET_ADMIN);
break;
+ case TIPC_CMD_SET_MAX_ZONES:
+ case TIPC_CMD_GET_MAX_ZONES:
+ case TIPC_CMD_SET_MAX_SLAVES:
+ case TIPC_CMD_GET_MAX_SLAVES:
+ case TIPC_CMD_SET_MAX_CLUSTERS:
+ case TIPC_CMD_GET_MAX_CLUSTERS:
+ case TIPC_CMD_SET_MAX_NODES:
+ case TIPC_CMD_GET_MAX_NODES:
+ case TIPC_CMD_SET_MAX_SUBSCR:
+ case TIPC_CMD_GET_MAX_SUBSCR:
+ case TIPC_CMD_SET_MAX_PUBL:
+ case TIPC_CMD_GET_MAX_PUBL:
+ case TIPC_CMD_SET_LOG_SIZE:
+ case TIPC_CMD_SET_REMOTE_MNG:
+ case TIPC_CMD_GET_REMOTE_MNG:
+ case TIPC_CMD_DUMP_LOG:
+ rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
+ " (obsolete command)");
+ break;
default:
rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
" (unknown command)");
break;
}
- /* Return reply buffer */
-exit:
- spin_unlock_bh(&config_lock);
- return rep_tlv_buf;
-}
-
-static void cfg_named_msg_event(void *userdata,
- u32 port_ref,
- struct sk_buff **buf,
- const unchar *msg,
- u32 size,
- u32 importance,
- struct tipc_portid const *orig,
- struct tipc_name_seq const *dest)
-{
- struct tipc_cfg_msg_hdr *req_hdr;
- struct tipc_cfg_msg_hdr *rep_hdr;
- struct sk_buff *rep_buf;
-
- /* Validate configuration message header (ignore invalid message) */
-
- req_hdr = (struct tipc_cfg_msg_hdr *)msg;
- if ((size < sizeof(*req_hdr)) ||
- (size != TCM_ALIGN(ntohl(req_hdr->tcm_len))) ||
- (ntohs(req_hdr->tcm_flags) != TCM_F_REQUEST)) {
- warn("Invalid configuration message discarded\n");
- return;
- }
+ WARN_ON(rep_tlv_buf->len > TLV_SPACE(ULTRA_STRING_MAX_LEN));
- /* Generate reply for request (if can't, return request) */
-
- rep_buf = tipc_cfg_do_cmd(orig->node,
- ntohs(req_hdr->tcm_type),
- msg + sizeof(*req_hdr),
- size - sizeof(*req_hdr),
- BUF_HEADROOM + MAX_H_SIZE + sizeof(*rep_hdr));
- if (rep_buf) {
- skb_push(rep_buf, sizeof(*rep_hdr));
- rep_hdr = (struct tipc_cfg_msg_hdr *)rep_buf->data;
- memcpy(rep_hdr, req_hdr, sizeof(*rep_hdr));
- rep_hdr->tcm_len = htonl(rep_buf->len);
- rep_hdr->tcm_flags &= htons(~TCM_F_REQUEST);
- } else {
- rep_buf = *buf;
- *buf = NULL;
+ /* Append an error message if we cannot return all requested data */
+ if (rep_tlv_buf->len == TLV_SPACE(ULTRA_STRING_MAX_LEN)) {
+ if (*(rep_tlv_buf->data + ULTRA_STRING_MAX_LEN) != '\0')
+ sprintf(rep_tlv_buf->data + rep_tlv_buf->len -
+ sizeof(REPLY_TRUNCATED) - 1, REPLY_TRUNCATED);
}
- /* NEED TO ADD CODE TO HANDLE FAILED SEND (SUCH AS CONGESTION) */
- tipc_send_buf2port(port_ref, orig, rep_buf, rep_buf->len);
-}
-
-int tipc_cfg_init(void)
-{
- struct tipc_name_seq seq;
- int res;
-
- res = tipc_attach(&mng.user_ref);
- if (res)
- goto failed;
-
- res = tipc_createport(mng.user_ref, NULL, TIPC_CRITICAL_IMPORTANCE,
- NULL, NULL, NULL,
- NULL, cfg_named_msg_event, NULL,
- NULL, &mng.port_ref);
- if (res)
- goto failed;
-
- seq.type = TIPC_CFG_SRV;
- seq.lower = seq.upper = tipc_own_addr;
- res = tipc_nametbl_publish_rsv(mng.port_ref, TIPC_ZONE_SCOPE, &seq);
- if (res)
- goto failed;
-
- return 0;
-
-failed:
- err("Unable to create configuration service\n");
- tipc_detach(mng.user_ref);
- mng.user_ref = 0;
- return res;
-}
-
-void tipc_cfg_stop(void)
-{
- if (mng.user_ref) {
- tipc_detach(mng.user_ref);
- mng.user_ref = 0;
- }
+ /* Return reply buffer */
+exit:
+ rtnl_unlock();
+ return rep_tlv_buf;
}
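
The rewritten tipc_cfg_do_cmd() above caps string replies at TLV_SPACE(ULTRA_STRING_MAX_LEN) and stamps a truncation marker over the tail when the limit is hit. The following user-space sketch restates that idea only; REPLY_MAX and TRUNC_MARK are illustrative stand-ins, not TIPC's actual limit or REPLY_TRUNCATED text.

/* Sketch of the reply-truncation idea: when a fixed-size string reply
 * fills up, overwrite its tail with a marker so the reader can tell
 * that data was dropped.  Sizes and marker text are assumptions.
 */
#include <stdio.h>
#include <string.h>

#define REPLY_MAX   64                      /* assumed reply limit  */
#define TRUNC_MARK  "[truncated]\n"         /* assumed marker text  */

static void mark_truncated(char *reply, size_t len)
{
	/* If the last byte is not NUL, the writer ran out of room;
	 * overwrite the tail so the marker (and its NUL) still fits.
	 */
	if (len == REPLY_MAX && reply[REPLY_MAX - 1] != '\0')
		memcpy(reply + REPLY_MAX - sizeof(TRUNC_MARK), TRUNC_MARK,
		       sizeof(TRUNC_MARK));
}

int main(void)
{
	char reply[REPLY_MAX];

	memset(reply, 'x', sizeof(reply));      /* simulate a full reply */
	mark_truncated(reply, sizeof(reply));
	printf("%s", reply + REPLY_MAX - sizeof(TRUNC_MARK));
	return 0;
}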
diff --git a/net/tipc/config.h b/net/tipc/config.h
index 443159a166f..47b1bf18161 100644
--- a/net/tipc/config.h
+++ b/net/tipc/config.h
@@ -64,9 +64,4 @@ static inline struct sk_buff *tipc_cfg_reply_ultra_string(char *string)
struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd,
const void *req_tlv_area, int req_tlv_space,
int headroom);
-
-void tipc_cfg_link_event(u32 addr, char *name, int up);
-int tipc_cfg_init(void);
-void tipc_cfg_stop(void);
-
#endif
diff --git a/net/tipc/core.c b/net/tipc/core.c
index f5d62c174de..676d18015dd 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -1,8 +1,8 @@
/*
* net/tipc/core.c: TIPC module code
*
- * Copyright (c) 2003-2006, Ericsson AB
- * Copyright (c) 2005-2006, Wind River Systems
+ * Copyright (c) 2003-2006, 2013, Ericsson AB
+ * Copyright (c) 2005-2006, 2010-2013, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -34,66 +34,23 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/random.h>
-
#include "core.h"
#include "ref.h"
-#include "net.h"
-#include "user_reg.h"
#include "name_table.h"
#include "subscr.h"
#include "config.h"
+#include "port.h"
-
-#ifndef CONFIG_TIPC_ZONES
-#define CONFIG_TIPC_ZONES 3
-#endif
-
-#ifndef CONFIG_TIPC_CLUSTERS
-#define CONFIG_TIPC_CLUSTERS 1
-#endif
-
-#ifndef CONFIG_TIPC_NODES
-#define CONFIG_TIPC_NODES 255
-#endif
-
-#ifndef CONFIG_TIPC_SLAVE_NODES
-#define CONFIG_TIPC_SLAVE_NODES 0
-#endif
-
-#ifndef CONFIG_TIPC_PORTS
-#define CONFIG_TIPC_PORTS 8191
-#endif
-
-#ifndef CONFIG_TIPC_LOG
-#define CONFIG_TIPC_LOG 0
-#endif
+#include <linux/module.h>
/* global variables used by multiple sub-systems within TIPC */
-
-int tipc_mode = TIPC_NOT_RUNNING;
-int tipc_random;
-atomic_t tipc_user_count = ATOMIC_INIT(0);
-
-const char tipc_alphabet[] =
- "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_.";
+int tipc_random __read_mostly;
/* configurable TIPC parameters */
-
-u32 tipc_own_addr;
-int tipc_max_zones;
-int tipc_max_clusters;
-int tipc_max_nodes;
-int tipc_max_slaves;
-int tipc_max_ports;
-int tipc_max_subscriptions;
-int tipc_max_publications;
-int tipc_net_id;
-int tipc_remote_management;
-
+u32 tipc_own_addr __read_mostly;
+int tipc_max_ports __read_mostly;
+int tipc_net_id __read_mostly;
+int sysctl_tipc_rmem[3] __read_mostly; /* min/default/max */
/**
* tipc_buf_acquire - creates a TIPC message buffer
@@ -104,7 +61,6 @@ int tipc_remote_management;
* NOTE: Headroom is reserved to allow prepending of a data link header.
* There may also be unrequested tailroom present at the buffer's end.
*/
-
struct sk_buff *tipc_buf_acquire(u32 size)
{
struct sk_buff *skb;
@@ -120,113 +76,102 @@ struct sk_buff *tipc_buf_acquire(u32 size)
}
/**
- * tipc_core_stop_net - shut down TIPC networking sub-systems
- */
-
-static void tipc_core_stop_net(void)
-{
- tipc_eth_media_stop();
- tipc_net_stop();
-}
-
-/**
- * start_net - start TIPC networking sub-systems
- */
-
-int tipc_core_start_net(unsigned long addr)
-{
- int res;
-
- if ((res = tipc_net_start(addr)) ||
- (res = tipc_eth_media_start())) {
- tipc_core_stop_net();
- }
- return res;
-}
-
-/**
* tipc_core_stop - switch TIPC from SINGLE NODE to NOT RUNNING mode
*/
-
static void tipc_core_stop(void)
{
- if (tipc_mode != TIPC_NODE_MODE)
- return;
-
- tipc_mode = TIPC_NOT_RUNNING;
-
+ tipc_net_stop();
+ tipc_bearer_cleanup();
tipc_netlink_stop();
- tipc_handler_stop();
- tipc_cfg_stop();
tipc_subscr_stop();
- tipc_reg_stop();
tipc_nametbl_stop();
tipc_ref_table_stop();
tipc_socket_stop();
- tipc_log_resize(0);
+ tipc_unregister_sysctl();
}
/**
* tipc_core_start - switch TIPC from NOT RUNNING to SINGLE NODE mode
*/
-
static int tipc_core_start(void)
{
- int res;
-
- if (tipc_mode != TIPC_NOT_RUNNING)
- return -ENOPROTOOPT;
+ int err;
get_random_bytes(&tipc_random, sizeof(tipc_random));
- tipc_mode = TIPC_NODE_MODE;
-
- if ((res = tipc_handler_start()) ||
- (res = tipc_ref_table_init(tipc_max_ports, tipc_random)) ||
- (res = tipc_reg_start()) ||
- (res = tipc_nametbl_init()) ||
- (res = tipc_k_signal((Handler)tipc_subscr_start, 0)) ||
- (res = tipc_k_signal((Handler)tipc_cfg_init, 0)) ||
- (res = tipc_netlink_start()) ||
- (res = tipc_socket_init())) {
- tipc_core_stop();
- }
- return res;
-}
+ err = tipc_ref_table_init(tipc_max_ports, tipc_random);
+ if (err)
+ goto out_reftbl;
+
+ err = tipc_nametbl_init();
+ if (err)
+ goto out_nametbl;
+
+ err = tipc_netlink_start();
+ if (err)
+ goto out_netlink;
+
+ err = tipc_socket_init();
+ if (err)
+ goto out_socket;
+
+ err = tipc_register_sysctl();
+ if (err)
+ goto out_sysctl;
+
+ err = tipc_subscr_start();
+ if (err)
+ goto out_subscr;
+
+ err = tipc_bearer_setup();
+ if (err)
+ goto out_bearer;
+
+ return 0;
+out_bearer:
+ tipc_subscr_stop();
+out_subscr:
+ tipc_unregister_sysctl();
+out_sysctl:
+ tipc_socket_stop();
+out_socket:
+ tipc_netlink_stop();
+out_netlink:
+ tipc_nametbl_stop();
+out_nametbl:
+ tipc_ref_table_stop();
+out_reftbl:
+ return err;
+}
static int __init tipc_init(void)
{
int res;
- if (tipc_log_resize(CONFIG_TIPC_LOG) != 0)
- warn("Unable to create log buffer\n");
-
- info("Activated (version " TIPC_MOD_VER
- " compiled " __DATE__ " " __TIME__ ")\n");
+ pr_info("Activated (version " TIPC_MOD_VER ")\n");
tipc_own_addr = 0;
- tipc_remote_management = 1;
- tipc_max_publications = 10000;
- tipc_max_subscriptions = 2000;
tipc_max_ports = CONFIG_TIPC_PORTS;
- tipc_max_zones = CONFIG_TIPC_ZONES;
- tipc_max_clusters = CONFIG_TIPC_CLUSTERS;
- tipc_max_nodes = CONFIG_TIPC_NODES;
- tipc_max_slaves = CONFIG_TIPC_SLAVE_NODES;
tipc_net_id = 4711;
- if ((res = tipc_core_start()))
- err("Unable to start in single node mode\n");
+ sysctl_tipc_rmem[0] = TIPC_CONN_OVERLOAD_LIMIT >> 4 <<
+ TIPC_LOW_IMPORTANCE;
+ sysctl_tipc_rmem[1] = TIPC_CONN_OVERLOAD_LIMIT >> 4 <<
+ TIPC_CRITICAL_IMPORTANCE;
+ sysctl_tipc_rmem[2] = TIPC_CONN_OVERLOAD_LIMIT;
+
+ res = tipc_core_start();
+ if (res)
+ pr_err("Unable to start in single node mode\n");
else
- info("Started in single node mode\n");
+ pr_info("Started in single node mode\n");
return res;
}
static void __exit tipc_exit(void)
{
- tipc_core_stop_net();
tipc_core_stop();
- info("Deactivated\n");
+ pr_info("Deactivated\n");
}
module_init(tipc_init);
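
tipc_core_start() above replaces the old chained-if initialization with the kernel's usual goto-based unwind. A minimal stand-alone sketch of that pattern follows, using placeholder step_*/undo_* functions rather than TIPC APIs; a failure at any step leaves the system as it was before the call, which is what tipc_init() relies on when it reports "Unable to start in single node mode".

/* Error-unwind sketch: each init step gets a matching label, and a
 * failure at step N unwinds steps N-1..1 in reverse order.
 */
#include <stdio.h>

static int step_a(void) { puts("a up");   return 0; }
static int step_b(void) { puts("b up");   return 0; }
static int step_c(void) { puts("c FAIL"); return -1; }
static void undo_a(void) { puts("a down"); }
static void undo_b(void) { puts("b down"); }

static int start_all(void)
{
	int err;

	err = step_a();
	if (err)
		goto out_a;
	err = step_b();
	if (err)
		goto out_b;
	err = step_c();
	if (err)
		goto out_c;
	return 0;

out_c:
	undo_b();	/* undo everything that did succeed ...      */
out_b:
	undo_a();	/* ... in reverse order of initialization    */
out_a:
	return err;
}

int main(void)
{
	return start_all() ? 1 : 0;
}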
diff --git a/net/tipc/core.h b/net/tipc/core.h
index ca7e171c104..bb26ed1ee96 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -1,8 +1,8 @@
/*
* net/tipc/core.h: Include file for TIPC global declarations
*
- * Copyright (c) 2005-2006, Ericsson AB
- * Copyright (c) 2005-2007, Wind River Systems
+ * Copyright (c) 2005-2006, 2013 Ericsson AB
+ * Copyright (c) 2005-2007, 2010-2013, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -37,6 +37,8 @@
#ifndef _TIPC_CORE_H
#define _TIPC_CORE_H
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/tipc.h>
#include <linux/tipc_config.h>
#include <linux/types.h>
@@ -45,195 +47,71 @@
#include <linux/mm.h>
#include <linux/timer.h>
#include <linux/string.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/interrupt.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/hardirq.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-
+#include <linux/rtnetlink.h>
+#include <linux/etherdevice.h>
#define TIPC_MOD_VER "2.0.0"
-struct tipc_msg; /* msg.h */
-struct print_buf; /* dbg.h */
-
-/*
- * TIPC sanity test macros
- */
-
-#define assert(i) BUG_ON(!(i))
-
-/*
- * TIPC system monitoring code
- */
-
-/*
- * TIPC's print buffer subsystem supports the following print buffers:
- *
- * TIPC_NULL : null buffer (i.e. print nowhere)
- * TIPC_CONS : system console
- * TIPC_LOG : TIPC log buffer
- * &buf : user-defined buffer (struct print_buf *)
- *
- * Note: TIPC_LOG is configured to echo its output to the system console;
- * user-defined buffers can be configured to do the same thing.
- */
-extern struct print_buf *const TIPC_NULL;
-extern struct print_buf *const TIPC_LOG;
-
-void tipc_printf(struct print_buf *, const char *fmt, ...);
+#define ULTRA_STRING_MAX_LEN 32768
+#define TIPC_MAX_SUBSCRIPTIONS 65535
+#define TIPC_MAX_PUBLICATIONS 65535
-/*
- * TIPC_OUTPUT is the destination print buffer for system messages.
- */
-
-#ifndef TIPC_OUTPUT
-#define TIPC_OUTPUT TIPC_LOG
-#endif
-
-/*
- * TIPC can be configured to send system messages to TIPC_OUTPUT
- * or to the system console only.
- */
-
-#ifdef CONFIG_TIPC_DEBUG
-
-#define err(fmt, arg...) tipc_printf(TIPC_OUTPUT, \
- KERN_ERR "TIPC: " fmt, ## arg)
-#define warn(fmt, arg...) tipc_printf(TIPC_OUTPUT, \
- KERN_WARNING "TIPC: " fmt, ## arg)
-#define info(fmt, arg...) tipc_printf(TIPC_OUTPUT, \
- KERN_NOTICE "TIPC: " fmt, ## arg)
-
-#else
-
-#define err(fmt, arg...) printk(KERN_ERR "TIPC: " fmt , ## arg)
-#define info(fmt, arg...) printk(KERN_INFO "TIPC: " fmt , ## arg)
-#define warn(fmt, arg...) printk(KERN_WARNING "TIPC: " fmt , ## arg)
-
-#endif
-
-/*
- * DBG_OUTPUT is the destination print buffer for debug messages.
- * It defaults to the null print buffer, but can be redefined
- * (typically in the individual .c files being debugged) to allow
- * selected debug messages to be generated where needed.
- */
-
-#ifndef DBG_OUTPUT
-#define DBG_OUTPUT TIPC_NULL
-#endif
-
-/*
- * TIPC can be configured to send debug messages to the specified print buffer
- * (typically DBG_OUTPUT) or to suppress them entirely.
- */
-
-#ifdef CONFIG_TIPC_DEBUG
-
-#define dbg(fmt, arg...) \
- do { \
- if (DBG_OUTPUT != TIPC_NULL) \
- tipc_printf(DBG_OUTPUT, fmt, ## arg); \
- } while (0)
-#define msg_dbg(msg, txt) \
- do { \
- if (DBG_OUTPUT != TIPC_NULL) \
- tipc_msg_dbg(DBG_OUTPUT, msg, txt); \
- } while (0)
-#define dump(fmt, arg...) \
- do { \
- if (DBG_OUTPUT != TIPC_NULL) \
- tipc_dump_dbg(DBG_OUTPUT, fmt, ##arg); \
- } while (0)
-
-void tipc_msg_dbg(struct print_buf *, struct tipc_msg *, const char *);
-void tipc_dump_dbg(struct print_buf *, const char *fmt, ...);
-
-#else
-
-#define dbg(fmt, arg...) do {} while (0)
-#define msg_dbg(msg, txt) do {} while (0)
-#define dump(fmt, arg...) do {} while (0)
-
-#define tipc_msg_dbg(...) do {} while (0)
-#define tipc_dump_dbg(...) do {} while (0)
-
-#endif
+struct tipc_msg; /* msg.h */
+int tipc_snprintf(char *buf, int len, const char *fmt, ...);
/*
* TIPC-specific error codes
*/
-
#define ELINKCONG EAGAIN /* link congestion <=> resource unavailable */
/*
- * TIPC operating mode routines
- */
-#define TIPC_NOT_RUNNING 0
-#define TIPC_NODE_MODE 1
-#define TIPC_NET_MODE 2
-
-/*
* Global configuration variables
*/
-
-extern u32 tipc_own_addr;
-extern int tipc_max_zones;
-extern int tipc_max_clusters;
-extern int tipc_max_nodes;
-extern int tipc_max_slaves;
-extern int tipc_max_ports;
-extern int tipc_max_subscriptions;
-extern int tipc_max_publications;
-extern int tipc_net_id;
-extern int tipc_remote_management;
+extern u32 tipc_own_addr __read_mostly;
+extern int tipc_max_ports __read_mostly;
+extern int tipc_net_id __read_mostly;
+extern int sysctl_tipc_rmem[3] __read_mostly;
/*
* Other global variables
*/
-
-extern int tipc_mode;
-extern int tipc_random;
-extern const char tipc_alphabet[];
-extern atomic_t tipc_user_count;
-
+extern int tipc_random __read_mostly;
/*
* Routines available to privileged subsystems
*/
-
-extern int tipc_core_start_net(unsigned long);
-extern int tipc_handler_start(void);
-extern void tipc_handler_stop(void);
-extern int tipc_netlink_start(void);
-extern void tipc_netlink_stop(void);
-extern int tipc_socket_init(void);
-extern void tipc_socket_stop(void);
-
-static inline int delimit(int val, int min, int max)
-{
- if (val > max)
- return max;
- if (val < min)
- return min;
- return val;
-}
-
+int tipc_netlink_start(void);
+void tipc_netlink_stop(void);
+int tipc_socket_init(void);
+void tipc_socket_stop(void);
+int tipc_sock_create_local(int type, struct socket **res);
+void tipc_sock_release_local(struct socket *sock);
+int tipc_sock_accept_local(struct socket *sock, struct socket **newsock,
+ int flags);
+
+#ifdef CONFIG_SYSCTL
+int tipc_register_sysctl(void);
+void tipc_unregister_sysctl(void);
+#else
+#define tipc_register_sysctl() 0
+#define tipc_unregister_sysctl()
+#endif
/*
- * TIPC timer and signal code
+ * TIPC timer code
*/
-
typedef void (*Handler) (unsigned long);
-u32 tipc_k_signal(Handler routine, unsigned long argument);
-
/**
* k_init_timer - initialize a timer
* @timer: pointer to timer structure
@@ -242,11 +120,9 @@ u32 tipc_k_signal(Handler routine, unsigned long argument);
*
* Timer must be initialized before use (and terminated when no longer needed).
*/
-
static inline void k_init_timer(struct timer_list *timer, Handler routine,
unsigned long argument)
{
- dbg("initializing timer %p\n", timer);
setup_timer(timer, routine, argument);
}
@@ -263,10 +139,8 @@ static inline void k_init_timer(struct timer_list *timer, Handler routine,
* then an additional jiffy is added to account for the fact that
* the starting time may be in the middle of the current jiffy.
*/
-
static inline void k_start_timer(struct timer_list *timer, unsigned long msec)
{
- dbg("starting timer %p for %u\n", timer, msec);
mod_timer(timer, jiffies + msecs_to_jiffies(msec) + 1);
}
@@ -280,10 +154,8 @@ static inline void k_start_timer(struct timer_list *timer, unsigned long msec)
* WARNING: Must not be called when holding locks required by the timer's
* timeout routine, otherwise deadlock can occur on SMP systems!
*/
-
static inline void k_cancel_timer(struct timer_list *timer)
{
- dbg("cancelling timer %p\n", timer);
del_timer_sync(timer);
}
@@ -298,13 +170,10 @@ static inline void k_cancel_timer(struct timer_list *timer)
* (Do not "enhance" this routine to automatically cancel an active timer,
* otherwise deadlock can arise when a timeout routine calls k_term_timer.)
*/
-
static inline void k_term_timer(struct timer_list *timer)
{
- dbg("terminating timer %p\n", timer);
}
-
/*
* TIPC message buffer code
*
@@ -314,45 +183,21 @@ static inline void k_term_timer(struct timer_list *timer)
* Note: Headroom should be a multiple of 4 to ensure the TIPC header fields
* are word aligned for quicker access
*/
-
#define BUF_HEADROOM LL_MAX_HEADER
struct tipc_skb_cb {
void *handle;
+ bool deferred;
+ struct sk_buff *tail;
};
#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
-
static inline struct tipc_msg *buf_msg(struct sk_buff *skb)
{
return (struct tipc_msg *)skb->data;
}
-extern struct sk_buff *tipc_buf_acquire(u32 size);
-
-/**
- * buf_discard - frees a TIPC message buffer
- * @skb: message buffer
- *
- * Frees a message buffer. If passed NULL, just returns.
- */
-
-static inline void buf_discard(struct sk_buff *skb)
-{
- kfree_skb(skb);
-}
-
-/**
- * buf_linearize - convert a TIPC message buffer into a single contiguous piece
- * @skb: message buffer
- *
- * Returns 0 on success.
- */
-
-static inline int buf_linearize(struct sk_buff *skb)
-{
- return skb_linearize(skb);
-}
+struct sk_buff *tipc_buf_acquire(u32 size);
#endif
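
The new struct tipc_skb_cb and the TIPC_SKB_CB() macro above overlay per-buffer state on the socket buffer's generic cb[] scratch area, avoiding a separate allocation. A stand-alone sketch of that overlay trick, assuming a 48-byte scratch array and invented struct names:

/* Control-block overlay sketch: private state is cast onto a fixed
 * scratch array inside the buffer descriptor.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct fake_skb {                /* stand-in for struct sk_buff     */
	char cb[48];             /* generic per-buffer scratch area */
};

struct my_skb_cb {               /* stand-in for struct tipc_skb_cb */
	void *handle;
	bool  deferred;
};

#define MY_SKB_CB(skb) ((struct my_skb_cb *)&((skb)->cb[0]))

int main(void)
{
	struct fake_skb skb;

	/* The private struct must fit inside the scratch area. */
	assert(sizeof(struct my_skb_cb) <= sizeof(skb.cb));

	memset(&skb, 0, sizeof(skb));
	MY_SKB_CB(&skb)->deferred = true;
	printf("deferred=%d\n", MY_SKB_CB(&skb)->deferred);
	return 0;
}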
diff --git a/net/tipc/dbg.c b/net/tipc/dbg.c
deleted file mode 100644
index 46f51d208e5..00000000000
--- a/net/tipc/dbg.c
+++ /dev/null
@@ -1,432 +0,0 @@
-/*
- * net/tipc/dbg.c: TIPC print buffer routines for debugging
- *
- * Copyright (c) 1996-2006, Ericsson AB
- * Copyright (c) 2005-2007, Wind River Systems
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "core.h"
-#include "config.h"
-#include "dbg.h"
-
-/*
- * TIPC pre-defines the following print buffers:
- *
- * TIPC_NULL : null buffer (i.e. print nowhere)
- * TIPC_CONS : system console
- * TIPC_LOG : TIPC log buffer
- *
- * Additional user-defined print buffers are also permitted.
- */
-
-static struct print_buf null_buf = { NULL, 0, NULL, 0 };
-struct print_buf *const TIPC_NULL = &null_buf;
-
-static struct print_buf cons_buf = { NULL, 0, NULL, 1 };
-static struct print_buf *const TIPC_CONS = &cons_buf;
-
-static struct print_buf log_buf = { NULL, 0, NULL, 1 };
-struct print_buf *const TIPC_LOG = &log_buf;
-
-/*
- * Locking policy when using print buffers.
- *
- * 1) tipc_printf() uses 'print_lock' to protect against concurrent access to
- * 'print_string' when writing to a print buffer. This also protects against
- * concurrent writes to the print buffer being written to.
- *
- * 2) tipc_dump() and tipc_log_XXX() leverage the aforementioned
- * use of 'print_lock' to protect against all types of concurrent operations
- * on their associated print buffer (not just write operations).
- *
- * Note: All routines of the form tipc_printbuf_XXX() are lock-free, and rely
- * on the caller to prevent simultaneous use of the print buffer(s) being
- * manipulated.
- */
-
-static char print_string[TIPC_PB_MAX_STR];
-static DEFINE_SPINLOCK(print_lock);
-
-static void tipc_printbuf_reset(struct print_buf *pb);
-static int tipc_printbuf_empty(struct print_buf *pb);
-static void tipc_printbuf_move(struct print_buf *pb_to,
- struct print_buf *pb_from);
-
-#define FORMAT(PTR,LEN,FMT) \
-{\
- va_list args;\
- va_start(args, FMT);\
- LEN = vsprintf(PTR, FMT, args);\
- va_end(args);\
- *(PTR + LEN) = '\0';\
-}
-
-/**
- * tipc_printbuf_init - initialize print buffer to empty
- * @pb: pointer to print buffer structure
- * @raw: pointer to character array used by print buffer
- * @size: size of character array
- *
- * Note: If the character array is too small (or absent), the print buffer
- * becomes a null device that discards anything written to it.
- */
-
-void tipc_printbuf_init(struct print_buf *pb, char *raw, u32 size)
-{
- pb->buf = raw;
- pb->crs = raw;
- pb->size = size;
- pb->echo = 0;
-
- if (size < TIPC_PB_MIN_SIZE) {
- pb->buf = NULL;
- } else if (raw) {
- pb->buf[0] = 0;
- pb->buf[size - 1] = ~0;
- }
-}
-
-/**
- * tipc_printbuf_reset - reinitialize print buffer to empty state
- * @pb: pointer to print buffer structure
- */
-
-static void tipc_printbuf_reset(struct print_buf *pb)
-{
- if (pb->buf) {
- pb->crs = pb->buf;
- pb->buf[0] = 0;
- pb->buf[pb->size - 1] = ~0;
- }
-}
-
-/**
- * tipc_printbuf_empty - test if print buffer is in empty state
- * @pb: pointer to print buffer structure
- *
- * Returns non-zero if print buffer is empty.
- */
-
-static int tipc_printbuf_empty(struct print_buf *pb)
-{
- return !pb->buf || (pb->crs == pb->buf);
-}
-
-/**
- * tipc_printbuf_validate - check for print buffer overflow
- * @pb: pointer to print buffer structure
- *
- * Verifies that a print buffer has captured all data written to it.
- * If data has been lost, linearize buffer and prepend an error message
- *
- * Returns length of print buffer data string (including trailing NUL)
- */
-
-int tipc_printbuf_validate(struct print_buf *pb)
-{
- char *err = "\n\n*** PRINT BUFFER OVERFLOW ***\n\n";
- char *cp_buf;
- struct print_buf cb;
-
- if (!pb->buf)
- return 0;
-
- if (pb->buf[pb->size - 1] == 0) {
- cp_buf = kmalloc(pb->size, GFP_ATOMIC);
- if (cp_buf) {
- tipc_printbuf_init(&cb, cp_buf, pb->size);
- tipc_printbuf_move(&cb, pb);
- tipc_printbuf_move(pb, &cb);
- kfree(cp_buf);
- memcpy(pb->buf, err, strlen(err));
- } else {
- tipc_printbuf_reset(pb);
- tipc_printf(pb, err);
- }
- }
- return pb->crs - pb->buf + 1;
-}
-
-/**
- * tipc_printbuf_move - move print buffer contents to another print buffer
- * @pb_to: pointer to destination print buffer structure
- * @pb_from: pointer to source print buffer structure
- *
- * Current contents of destination print buffer (if any) are discarded.
- * Source print buffer becomes empty if a successful move occurs.
- */
-
-static void tipc_printbuf_move(struct print_buf *pb_to,
- struct print_buf *pb_from)
-{
- int len;
-
- /* Handle the cases where contents can't be moved */
-
- if (!pb_to->buf)
- return;
-
- if (!pb_from->buf) {
- tipc_printbuf_reset(pb_to);
- return;
- }
-
- if (pb_to->size < pb_from->size) {
- strcpy(pb_to->buf, "*** PRINT BUFFER MOVE ERROR ***");
- pb_to->buf[pb_to->size - 1] = ~0;
- pb_to->crs = strchr(pb_to->buf, 0);
- return;
- }
-
- /* Copy data from char after cursor to end (if used) */
-
- len = pb_from->buf + pb_from->size - pb_from->crs - 2;
- if ((pb_from->buf[pb_from->size - 1] == 0) && (len > 0)) {
- strcpy(pb_to->buf, pb_from->crs + 1);
- pb_to->crs = pb_to->buf + len;
- } else
- pb_to->crs = pb_to->buf;
-
- /* Copy data from start to cursor (always) */
-
- len = pb_from->crs - pb_from->buf;
- strcpy(pb_to->crs, pb_from->buf);
- pb_to->crs += len;
-
- tipc_printbuf_reset(pb_from);
-}
-
-/**
- * tipc_printf - append formatted output to print buffer
- * @pb: pointer to print buffer
- * @fmt: formatted info to be printed
- */
-
-void tipc_printf(struct print_buf *pb, const char *fmt, ...)
-{
- int chars_to_add;
- int chars_left;
- char save_char;
-
- spin_lock_bh(&print_lock);
-
- FORMAT(print_string, chars_to_add, fmt);
- if (chars_to_add >= TIPC_PB_MAX_STR)
- strcpy(print_string, "*** PRINT BUFFER STRING TOO LONG ***");
-
- if (pb->buf) {
- chars_left = pb->buf + pb->size - pb->crs - 1;
- if (chars_to_add <= chars_left) {
- strcpy(pb->crs, print_string);
- pb->crs += chars_to_add;
- } else if (chars_to_add >= (pb->size - 1)) {
- strcpy(pb->buf, print_string + chars_to_add + 1
- - pb->size);
- pb->crs = pb->buf + pb->size - 1;
- } else {
- strcpy(pb->buf, print_string + chars_left);
- save_char = print_string[chars_left];
- print_string[chars_left] = 0;
- strcpy(pb->crs, print_string);
- print_string[chars_left] = save_char;
- pb->crs = pb->buf + chars_to_add - chars_left;
- }
- }
-
- if (pb->echo)
- printk("%s", print_string);
-
- spin_unlock_bh(&print_lock);
-}
-
-#ifdef CONFIG_TIPC_DEBUG
-
-/**
- * print_to_console - write string of bytes to console in multiple chunks
- */
-
-static void print_to_console(char *crs, int len)
-{
- int rest = len;
-
- while (rest > 0) {
- int sz = rest < TIPC_PB_MAX_STR ? rest : TIPC_PB_MAX_STR;
- char c = crs[sz];
-
- crs[sz] = 0;
- printk((const char *)crs);
- crs[sz] = c;
- rest -= sz;
- crs += sz;
- }
-}
-
-/**
- * printbuf_dump - write print buffer contents to console
- */
-
-static void printbuf_dump(struct print_buf *pb)
-{
- int len;
-
- if (!pb->buf) {
- printk("*** PRINT BUFFER NOT ALLOCATED ***");
- return;
- }
-
- /* Dump print buffer from char after cursor to end (if used) */
-
- len = pb->buf + pb->size - pb->crs - 2;
- if ((pb->buf[pb->size - 1] == 0) && (len > 0))
- print_to_console(pb->crs + 1, len);
-
- /* Dump print buffer from start to cursor (always) */
-
- len = pb->crs - pb->buf;
- print_to_console(pb->buf, len);
-}
-
-/**
- * tipc_dump_dbg - dump (non-console) print buffer to console
- * @pb: pointer to print buffer
- */
-
-void tipc_dump_dbg(struct print_buf *pb, const char *fmt, ...)
-{
- int len;
-
- if (pb == TIPC_CONS)
- return;
-
- spin_lock_bh(&print_lock);
-
- FORMAT(print_string, len, fmt);
- printk(print_string);
-
- printk("\n---- Start of %s log dump ----\n\n",
- (pb == TIPC_LOG) ? "global" : "local");
- printbuf_dump(pb);
- tipc_printbuf_reset(pb);
- printk("\n---- End of dump ----\n");
-
- spin_unlock_bh(&print_lock);
-}
-
-#endif
-
-/**
- * tipc_log_resize - change the size of the TIPC log buffer
- * @log_size: print buffer size to use
- */
-
-int tipc_log_resize(int log_size)
-{
- int res = 0;
-
- spin_lock_bh(&print_lock);
- if (TIPC_LOG->buf) {
- kfree(TIPC_LOG->buf);
- TIPC_LOG->buf = NULL;
- }
- if (log_size) {
- if (log_size < TIPC_PB_MIN_SIZE)
- log_size = TIPC_PB_MIN_SIZE;
- res = TIPC_LOG->echo;
- tipc_printbuf_init(TIPC_LOG, kmalloc(log_size, GFP_ATOMIC),
- log_size);
- TIPC_LOG->echo = res;
- res = !TIPC_LOG->buf;
- }
- spin_unlock_bh(&print_lock);
-
- return res;
-}
-
-/**
- * tipc_log_resize_cmd - reconfigure size of TIPC log buffer
- */
-
-struct sk_buff *tipc_log_resize_cmd(const void *req_tlv_area, int req_tlv_space)
-{
- u32 value;
-
- if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
- return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
-
- value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
- if (value != delimit(value, 0, 32768))
- return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
- " (log size must be 0-32768)");
- if (tipc_log_resize(value))
- return tipc_cfg_reply_error_string(
- "unable to create specified log (log size is now 0)");
- return tipc_cfg_reply_none();
-}
-
-/**
- * tipc_log_dump - capture TIPC log buffer contents in configuration message
- */
-
-struct sk_buff *tipc_log_dump(void)
-{
- struct sk_buff *reply;
-
- spin_lock_bh(&print_lock);
- if (!TIPC_LOG->buf) {
- spin_unlock_bh(&print_lock);
- reply = tipc_cfg_reply_ultra_string("log not activated\n");
- } else if (tipc_printbuf_empty(TIPC_LOG)) {
- spin_unlock_bh(&print_lock);
- reply = tipc_cfg_reply_ultra_string("log is empty\n");
- }
- else {
- struct tlv_desc *rep_tlv;
- struct print_buf pb;
- int str_len;
-
- str_len = min(TIPC_LOG->size, 32768u);
- spin_unlock_bh(&print_lock);
- reply = tipc_cfg_reply_alloc(TLV_SPACE(str_len));
- if (reply) {
- rep_tlv = (struct tlv_desc *)reply->data;
- tipc_printbuf_init(&pb, TLV_DATA(rep_tlv), str_len);
- spin_lock_bh(&print_lock);
- tipc_printbuf_move(&pb, TIPC_LOG);
- spin_unlock_bh(&print_lock);
- str_len = strlen(TLV_DATA(rep_tlv)) + 1;
- skb_put(reply, TLV_SPACE(str_len));
- TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
- }
- }
- return reply;
-}
-
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index f2ce36baf42..aa722a42ef8 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -1,8 +1,8 @@
/*
* net/tipc/discover.c
*
- * Copyright (c) 2003-2006, Ericsson AB
- * Copyright (c) 2005-2006, Wind River Systems
+ * Copyright (c) 2003-2006, 2014, Ericsson AB
+ * Copyright (c) 2005-2006, 2010-2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -37,30 +37,30 @@
#include "core.h"
#include "link.h"
#include "discover.h"
-#include "port.h"
-#include "name_table.h"
#define TIPC_LINK_REQ_INIT 125 /* min delay during bearer start up */
-#define TIPC_LINK_REQ_FAST 2000 /* normal delay if bearer has no links */
-#define TIPC_LINK_REQ_SLOW 600000 /* normal delay if bearer has links */
-
-/*
- * TODO: Most of the inter-cluster setup stuff should be
- * rewritten, and be made conformant with specification.
- */
+#define TIPC_LINK_REQ_FAST 1000 /* max delay if bearer has no links */
+#define TIPC_LINK_REQ_SLOW 60000 /* max delay if bearer has links */
+#define TIPC_LINK_REQ_INACTIVE 0xffffffff /* indicates no timer in use */
/**
- * struct link_req - information about an ongoing link setup request
- * @bearer: bearer issuing requests
+ * struct tipc_link_req - information about an ongoing link setup request
+ * @bearer_id: identity of bearer issuing requests
* @dest: destination address for request messages
+ * @domain: network domain to which links can be established
+ * @num_nodes: number of nodes currently discovered (i.e. with an active link)
+ * @lock: spinlock for controlling access to requests
* @buf: request message to be (repeatedly) sent
* @timer: timer governing period between requests
* @timer_intv: current interval between requests (in ms)
*/
-struct link_req {
- struct bearer *bearer;
+struct tipc_link_req {
+ u32 bearer_id;
struct tipc_media_addr dest;
+ u32 domain;
+ int num_nodes;
+ spinlock_t lock;
struct sk_buff *buf;
struct timer_list timer;
unsigned int timer_intv;
@@ -69,29 +69,21 @@ struct link_req {
/**
* tipc_disc_init_msg - initialize a link setup message
* @type: message type (request or response)
- * @req_links: number of links associated with message
- * @dest_domain: network domain of node(s) which should respond to message
* @b_ptr: ptr to bearer issuing message
*/
-
-static struct sk_buff *tipc_disc_init_msg(u32 type,
- u32 req_links,
- u32 dest_domain,
- struct bearer *b_ptr)
+static void tipc_disc_init_msg(struct sk_buff *buf, u32 type,
+ struct tipc_bearer *b_ptr)
{
- struct sk_buff *buf = tipc_buf_acquire(DSC_H_SIZE);
struct tipc_msg *msg;
-
- if (buf) {
- msg = buf_msg(buf);
- tipc_msg_init(msg, LINK_CONFIG, type, DSC_H_SIZE, dest_domain);
- msg_set_non_seq(msg, 1);
- msg_set_req_links(msg, req_links);
- msg_set_dest_domain(msg, dest_domain);
- msg_set_bc_netid(msg, tipc_net_id);
- msg_set_media_addr(msg, &b_ptr->publ.addr);
- }
- return buf;
+ u32 dest_domain = b_ptr->domain;
+
+ msg = buf_msg(buf);
+ tipc_msg_init(msg, LINK_CONFIG, type, INT_H_SIZE, dest_domain);
+ msg_set_non_seq(msg, 1);
+ msg_set_node_sig(msg, tipc_random);
+ msg_set_dest_domain(msg, dest_domain);
+ msg_set_bc_netid(msg, tipc_net_id);
+ b_ptr->media->addr2msg(msg_media_addr(msg), &b_ptr->addr);
}
/**
@@ -100,154 +92,205 @@ static struct sk_buff *tipc_disc_init_msg(u32 type,
* @node_addr: duplicated node address
* @media_addr: media address advertised by duplicated node
*/
-
-static void disc_dupl_alert(struct bearer *b_ptr, u32 node_addr,
+static void disc_dupl_alert(struct tipc_bearer *b_ptr, u32 node_addr,
struct tipc_media_addr *media_addr)
{
char node_addr_str[16];
char media_addr_str[64];
- struct print_buf pb;
tipc_addr_string_fill(node_addr_str, node_addr);
- tipc_printbuf_init(&pb, media_addr_str, sizeof(media_addr_str));
- tipc_media_addr_printf(&pb, media_addr);
- tipc_printbuf_validate(&pb);
- warn("Duplicate %s using %s seen on <%s>\n",
- node_addr_str, media_addr_str, b_ptr->publ.name);
+ tipc_media_addr_printf(media_addr_str, sizeof(media_addr_str),
+ media_addr);
+ pr_warn("Duplicate %s using %s seen on <%s>\n", node_addr_str,
+ media_addr_str, b_ptr->name);
}
/**
- * tipc_disc_recv_msg - handle incoming link setup message (request or response)
+ * tipc_disc_rcv - handle incoming discovery message (request or response)
* @buf: buffer containing message
- * @b_ptr: bearer that message arrived on
+ * @bearer: bearer that message arrived on
*/
-
-void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr)
+void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *bearer)
{
- struct link *link;
- struct tipc_media_addr media_addr;
+ struct tipc_node *node;
+ struct tipc_link *link;
+ struct tipc_media_addr maddr;
+ struct sk_buff *rbuf;
struct tipc_msg *msg = buf_msg(buf);
- u32 dest = msg_dest_domain(msg);
- u32 orig = msg_prevnode(msg);
+ u32 ddom = msg_dest_domain(msg);
+ u32 onode = msg_prevnode(msg);
u32 net_id = msg_bc_netid(msg);
- u32 type = msg_type(msg);
-
- msg_get_media_addr(msg,&media_addr);
- msg_dbg(msg, "RECV:");
- buf_discard(buf);
-
+ u32 mtyp = msg_type(msg);
+ u32 signature = msg_node_sig(msg);
+ bool addr_match = false;
+ bool sign_match = false;
+ bool link_up = false;
+ bool accept_addr = false;
+ bool accept_sign = false;
+ bool respond = false;
+
+ bearer->media->msg2addr(bearer, &maddr, msg_media_addr(msg));
+ kfree_skb(buf);
+
+ /* Ensure message from node is valid and communication is permitted */
if (net_id != tipc_net_id)
return;
- if (!tipc_addr_domain_valid(dest))
+ if (maddr.broadcast)
+ return;
+ if (!tipc_addr_domain_valid(ddom))
return;
- if (!tipc_addr_node_valid(orig))
+ if (!tipc_addr_node_valid(onode))
return;
- if (orig == tipc_own_addr) {
- if (memcmp(&media_addr, &b_ptr->publ.addr, sizeof(media_addr)))
- disc_dupl_alert(b_ptr, tipc_own_addr, &media_addr);
+
+ if (in_own_node(onode)) {
+ if (memcmp(&maddr, &bearer->addr, sizeof(maddr)))
+ disc_dupl_alert(bearer, tipc_own_addr, &maddr);
return;
}
- if (!tipc_in_scope(dest, tipc_own_addr))
+ if (!tipc_in_scope(ddom, tipc_own_addr))
return;
- if (is_slave(tipc_own_addr) && is_slave(orig))
+ if (!tipc_in_scope(bearer->domain, onode))
return;
- if (is_slave(orig) && !in_own_cluster(orig))
+
+ /* Locate, or if necessary, create, node: */
+ node = tipc_node_find(onode);
+ if (!node)
+ node = tipc_node_create(onode);
+ if (!node)
return;
- if (in_own_cluster(orig)) {
- /* Always accept link here */
- struct sk_buff *rbuf;
- struct tipc_media_addr *addr;
- struct tipc_node *n_ptr = tipc_node_find(orig);
- int link_fully_up;
-
- dbg(" in own cluster\n");
- if (n_ptr == NULL) {
- n_ptr = tipc_node_create(orig);
- if (!n_ptr)
- return;
- }
- spin_lock_bh(&n_ptr->lock);
- /* Don't talk to neighbor during cleanup after last session */
+ tipc_node_lock(node);
+ link = node->links[bearer->identity];
+
+ /* Prepare to validate requesting node's signature and media address */
+ sign_match = (signature == node->signature);
+ addr_match = link && !memcmp(&link->media_addr, &maddr, sizeof(maddr));
+ link_up = link && tipc_link_is_up(link);
+
+
+ /* These three flags give us eight permutations: */
+
+ if (sign_match && addr_match && link_up) {
+ /* All is fine. Do nothing. */
+ } else if (sign_match && addr_match && !link_up) {
+ /* Respond. The link will come up in due time */
+ respond = true;
+ } else if (sign_match && !addr_match && link_up) {
+ /* Peer has changed i/f address without rebooting.
+ * If so, the link will reset soon, and the next
+ * discovery will be accepted. So we can ignore it.
+	 * It may also be a cloned or malicious peer having
+ * chosen the same node address and signature as an
+ * existing one.
+ * Ignore requests until the link goes down, if ever.
+ */
+ disc_dupl_alert(bearer, onode, &maddr);
+ } else if (sign_match && !addr_match && !link_up) {
+ /* Peer link has changed i/f address without rebooting.
+ * It may also be a cloned or malicious peer; we can't
+ * distinguish between the two.
+ * The signature is correct, so we must accept.
+ */
+ accept_addr = true;
+ respond = true;
+ } else if (!sign_match && addr_match && link_up) {
+ /* Peer node rebooted. Two possibilities:
+ * - Delayed re-discovery; this link endpoint has already
+ * reset and re-established contact with the peer, before
+ * receiving a discovery message from that node.
+ * (The peer happened to receive one from this node first).
+ * - The peer came back so fast that our side has not
+ * discovered it yet. Probing from this side will soon
+ * reset the link, since there can be no working link
+ * endpoint at the peer end, and the link will re-establish.
+ * Accept the signature, since it comes from a known peer.
+ */
+ accept_sign = true;
+ } else if (!sign_match && addr_match && !link_up) {
+ /* The peer node has rebooted.
+ * Accept signature, since it is a known peer.
+ */
+ accept_sign = true;
+ respond = true;
+ } else if (!sign_match && !addr_match && link_up) {
+ /* Peer rebooted with new address, or a new/duplicate peer.
+ * Ignore until the link goes down, if ever.
+ */
+ disc_dupl_alert(bearer, onode, &maddr);
+ } else if (!sign_match && !addr_match && !link_up) {
+ /* Peer rebooted with new address, or it is a new peer.
+ * Accept signature and address.
+ */
+ accept_sign = true;
+ accept_addr = true;
+ respond = true;
+ }
- if (n_ptr->cleanup_required) {
- spin_unlock_bh(&n_ptr->lock);
- return;
- }
+ if (accept_sign)
+ node->signature = signature;
- link = n_ptr->links[b_ptr->identity];
- if (!link) {
- dbg("creating link\n");
- link = tipc_link_create(b_ptr, orig, &media_addr);
- if (!link) {
- spin_unlock_bh(&n_ptr->lock);
- return;
- }
- }
- addr = &link->media_addr;
- if (memcmp(addr, &media_addr, sizeof(*addr))) {
- if (tipc_link_is_up(link) || (!link->started)) {
- disc_dupl_alert(b_ptr, orig, &media_addr);
- spin_unlock_bh(&n_ptr->lock);
- return;
- }
- warn("Resetting link <%s>, peer interface address changed\n",
- link->name);
- memcpy(addr, &media_addr, sizeof(*addr));
+ if (accept_addr) {
+ if (!link)
+ link = tipc_link_create(node, bearer, &maddr);
+ if (link) {
+ memcpy(&link->media_addr, &maddr, sizeof(maddr));
tipc_link_reset(link);
+ } else {
+ respond = false;
}
- link_fully_up = link_working_working(link);
- spin_unlock_bh(&n_ptr->lock);
- if ((type == DSC_RESP_MSG) || link_fully_up)
- return;
- rbuf = tipc_disc_init_msg(DSC_RESP_MSG, 1, orig, b_ptr);
- if (rbuf != NULL) {
- msg_dbg(buf_msg(rbuf),"SEND:");
- b_ptr->media->send_msg(rbuf, &b_ptr->publ, &media_addr);
- buf_discard(rbuf);
+ }
+
+ /* Send response, if necessary */
+ if (respond && (mtyp == DSC_REQ_MSG)) {
+ rbuf = tipc_buf_acquire(INT_H_SIZE);
+ if (rbuf) {
+ tipc_disc_init_msg(rbuf, DSC_RESP_MSG, bearer);
+ tipc_bearer_send(bearer->identity, rbuf, &maddr);
+ kfree_skb(rbuf);
}
}
+ tipc_node_unlock(node);
}
/**
- * tipc_disc_stop_link_req - stop sending periodic link setup requests
+ * disc_update - update frequency of periodic link setup requests
* @req: ptr to link request structure
+ *
+ * Reinitiates discovery process if discovery object has no associated nodes
+ * and is either not currently searching or is searching at a slow rate
*/
-
-void tipc_disc_stop_link_req(struct link_req *req)
+static void disc_update(struct tipc_link_req *req)
{
- if (!req)
- return;
-
- k_cancel_timer(&req->timer);
- k_term_timer(&req->timer);
- buf_discard(req->buf);
- kfree(req);
+ if (!req->num_nodes) {
+ if ((req->timer_intv == TIPC_LINK_REQ_INACTIVE) ||
+ (req->timer_intv > TIPC_LINK_REQ_FAST)) {
+ req->timer_intv = TIPC_LINK_REQ_INIT;
+ k_start_timer(&req->timer, req->timer_intv);
+ }
+ }
}
/**
- * tipc_disc_update_link_req - update frequency of periodic link setup requests
+ * tipc_disc_add_dest - increment set of discovered nodes
* @req: ptr to link request structure
*/
-
-void tipc_disc_update_link_req(struct link_req *req)
+void tipc_disc_add_dest(struct tipc_link_req *req)
{
- if (!req)
- return;
+ spin_lock_bh(&req->lock);
+ req->num_nodes++;
+ spin_unlock_bh(&req->lock);
+}
- if (req->timer_intv == TIPC_LINK_REQ_SLOW) {
- if (!req->bearer->nodes.count) {
- req->timer_intv = TIPC_LINK_REQ_FAST;
- k_start_timer(&req->timer, req->timer_intv);
- }
- } else if (req->timer_intv == TIPC_LINK_REQ_FAST) {
- if (req->bearer->nodes.count) {
- req->timer_intv = TIPC_LINK_REQ_SLOW;
- k_start_timer(&req->timer, req->timer_intv);
- }
- } else {
- /* leave timer "as is" if haven't yet reached a "normal" rate */
- }
+/**
+ * tipc_disc_remove_dest - decrement set of discovered nodes
+ * @req: ptr to link request structure
+ */
+void tipc_disc_remove_dest(struct tipc_link_req *req)
+{
+ spin_lock_bh(&req->lock);
+ req->num_nodes--;
+ disc_update(req);
+ spin_unlock_bh(&req->lock);
}
/**
@@ -256,61 +299,105 @@ void tipc_disc_update_link_req(struct link_req *req)
*
* Called whenever a link setup request timer associated with a bearer expires.
*/
-
-static void disc_timeout(struct link_req *req)
+static void disc_timeout(struct tipc_link_req *req)
{
- spin_lock_bh(&req->bearer->publ.lock);
-
- req->bearer->media->send_msg(req->buf, &req->bearer->publ, &req->dest);
-
- if ((req->timer_intv == TIPC_LINK_REQ_SLOW) ||
- (req->timer_intv == TIPC_LINK_REQ_FAST)) {
- /* leave timer interval "as is" if already at a "normal" rate */
- } else {
- req->timer_intv *= 2;
- if (req->timer_intv > TIPC_LINK_REQ_FAST)
- req->timer_intv = TIPC_LINK_REQ_FAST;
- if ((req->timer_intv == TIPC_LINK_REQ_FAST) &&
- (req->bearer->nodes.count))
- req->timer_intv = TIPC_LINK_REQ_SLOW;
+ int max_delay;
+
+ spin_lock_bh(&req->lock);
+
+ /* Stop searching if only desired node has been found */
+ if (tipc_node(req->domain) && req->num_nodes) {
+ req->timer_intv = TIPC_LINK_REQ_INACTIVE;
+ goto exit;
}
- k_start_timer(&req->timer, req->timer_intv);
- spin_unlock_bh(&req->bearer->publ.lock);
+ /*
+ * Send discovery message, then update discovery timer
+ *
+ * Keep doubling time between requests until limit is reached;
+	 * hold at fast polling rate if we don't have any associated nodes,
+ * otherwise hold at slow polling rate
+ */
+ tipc_bearer_send(req->bearer_id, req->buf, &req->dest);
+
+
+ req->timer_intv *= 2;
+ if (req->num_nodes)
+ max_delay = TIPC_LINK_REQ_SLOW;
+ else
+ max_delay = TIPC_LINK_REQ_FAST;
+ if (req->timer_intv > max_delay)
+ req->timer_intv = max_delay;
+
+ k_start_timer(&req->timer, req->timer_intv);
+exit:
+ spin_unlock_bh(&req->lock);
}
/**
- * tipc_disc_init_link_req - start sending periodic link setup requests
+ * tipc_disc_create - create object to send periodic link setup requests
* @b_ptr: ptr to bearer issuing requests
* @dest: destination address for request messages
- * @dest_domain: network domain of node(s) which should respond to message
- * @req_links: max number of desired links
+ * @dest_domain: network domain to which links can be established
*
- * Returns pointer to link request structure, or NULL if unable to create.
+ * Returns 0 if successful, otherwise -errno.
*/
-
-struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr,
- const struct tipc_media_addr *dest,
- u32 dest_domain,
- u32 req_links)
+int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest)
{
- struct link_req *req;
+ struct tipc_link_req *req;
req = kmalloc(sizeof(*req), GFP_ATOMIC);
if (!req)
- return NULL;
+ return -ENOMEM;
- req->buf = tipc_disc_init_msg(DSC_REQ_MSG, req_links, dest_domain, b_ptr);
+ req->buf = tipc_buf_acquire(INT_H_SIZE);
if (!req->buf) {
kfree(req);
- return NULL;
+ return -ENOMEM;
}
+ tipc_disc_init_msg(req->buf, DSC_REQ_MSG, b_ptr);
memcpy(&req->dest, dest, sizeof(*dest));
- req->bearer = b_ptr;
+ req->bearer_id = b_ptr->identity;
+ req->domain = b_ptr->domain;
+ req->num_nodes = 0;
req->timer_intv = TIPC_LINK_REQ_INIT;
+ spin_lock_init(&req->lock);
k_init_timer(&req->timer, (Handler)disc_timeout, (unsigned long)req);
k_start_timer(&req->timer, req->timer_intv);
- return req;
+ b_ptr->link_req = req;
+ tipc_bearer_send(req->bearer_id, req->buf, &req->dest);
+ return 0;
}
+/**
+ * tipc_disc_delete - destroy object sending periodic link setup requests
+ * @req: ptr to link request structure
+ */
+void tipc_disc_delete(struct tipc_link_req *req)
+{
+ k_cancel_timer(&req->timer);
+ k_term_timer(&req->timer);
+ kfree_skb(req->buf);
+ kfree(req);
+}
+
+/**
+ * tipc_disc_reset - reset object to send periodic link setup requests
+ * @b_ptr: ptr to bearer issuing requests
+ * @dest_domain: network domain to which links can be established
+ */
+void tipc_disc_reset(struct tipc_bearer *b_ptr)
+{
+ struct tipc_link_req *req = b_ptr->link_req;
+
+ spin_lock_bh(&req->lock);
+ tipc_disc_init_msg(req->buf, DSC_REQ_MSG, b_ptr);
+ req->bearer_id = b_ptr->identity;
+ req->domain = b_ptr->domain;
+ req->num_nodes = 0;
+ req->timer_intv = TIPC_LINK_REQ_INIT;
+ k_start_timer(&req->timer, req->timer_intv);
+ tipc_bearer_send(req->bearer_id, req->buf, &req->dest);
+ spin_unlock_bh(&req->lock);
+}
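
The comment block in tipc_disc_rcv() above enumerates the eight permutations of signature match, address match and link state. Below is a stand-alone restatement of that decision table, with invented struct and function names, so the permutations can be exercised in isolation:

/* Decision-table sketch mirroring the comments in tipc_disc_rcv(). */
#include <stdbool.h>
#include <stdio.h>

struct disc_decision {
	bool accept_sign;   /* adopt the peer's new signature   */
	bool accept_addr;   /* adopt the peer's new media addr  */
	bool respond;       /* answer a discovery request       */
	bool dupl_alert;    /* warn about a possible duplicate  */
};

static struct disc_decision disc_decide(bool sign_match, bool addr_match,
					bool link_up)
{
	struct disc_decision d = {0};

	if (sign_match && addr_match && link_up) {
		/* all is fine, do nothing */
	} else if (sign_match && addr_match && !link_up) {
		d.respond = true;                 /* link will come up in time */
	} else if (sign_match && !addr_match && link_up) {
		d.dupl_alert = true;              /* ignore until link resets  */
	} else if (sign_match && !addr_match && !link_up) {
		d.accept_addr = d.respond = true; /* peer changed i/f address  */
	} else if (!sign_match && addr_match && link_up) {
		d.accept_sign = true;             /* peer rebooted, known addr */
	} else if (!sign_match && addr_match && !link_up) {
		d.accept_sign = d.respond = true; /* peer rebooted             */
	} else if (!sign_match && !addr_match && link_up) {
		d.dupl_alert = true;              /* new/duplicate peer        */
	} else {
		d.accept_sign = d.accept_addr = d.respond = true; /* new peer  */
	}
	return d;
}

int main(void)
{
	struct disc_decision d = disc_decide(false, false, false);

	printf("new peer: sign=%d addr=%d respond=%d\n",
	       d.accept_sign, d.accept_addr, d.respond);
	return 0;
}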
diff --git a/net/tipc/discover.h b/net/tipc/discover.h
index d2c3cffb79f..515b57392f4 100644
--- a/net/tipc/discover.h
+++ b/net/tipc/discover.h
@@ -2,7 +2,7 @@
* net/tipc/discover.h
*
* Copyright (c) 2003-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -37,15 +37,13 @@
#ifndef _TIPC_DISCOVER_H
#define _TIPC_DISCOVER_H
-struct link_req;
+struct tipc_link_req;
-struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr,
- const struct tipc_media_addr *dest,
- u32 dest_domain,
- u32 req_links);
-void tipc_disc_update_link_req(struct link_req *req);
-void tipc_disc_stop_link_req(struct link_req *req);
-
-void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr);
+int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest);
+void tipc_disc_delete(struct tipc_link_req *req);
+void tipc_disc_reset(struct tipc_bearer *b_ptr);
+void tipc_disc_add_dest(struct tipc_link_req *req);
+void tipc_disc_remove_dest(struct tipc_link_req *req);
+void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *b_ptr);
#endif
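
disc_timeout() in the discover.c hunk above doubles the request interval on each expiry and caps it at TIPC_LINK_REQ_FAST while no neighbours are known, or at TIPC_LINK_REQ_SLOW once at least one is. A small sketch of that backoff, reusing the millisecond constants introduced by this patch:

/* Backoff sketch: interval doubles per expiry up to a cap that depends
 * on whether any nodes have been discovered yet.
 */
#include <stdio.h>

#define REQ_INIT   125      /* min delay during bearer start up */
#define REQ_FAST   1000     /* max delay if bearer has no links */
#define REQ_SLOW   60000    /* max delay if bearer has links    */

static unsigned int next_interval(unsigned int intv, int num_nodes)
{
	unsigned int max_delay = num_nodes ? REQ_SLOW : REQ_FAST;

	intv *= 2;
	if (intv > max_delay)
		intv = max_delay;
	return intv;
}

int main(void)
{
	unsigned int intv = REQ_INIT;
	int i;

	for (i = 0; i < 8; i++) {
		printf("expiry %d: next interval %u ms\n", i, intv);
		intv = next_interval(intv, 0 /* num_nodes */);
	}
	return 0;
}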
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index ee683cc8f4b..5e1426f1751 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -1,8 +1,8 @@
/*
* net/tipc/eth_media.c: Ethernet bearer support for TIPC
*
- * Copyright (c) 2001-2007, Ericsson AB
- * Copyright (c) 2005-2007, Wind River Systems
+ * Copyright (c) 2001-2007, 2013-2014, Ericsson AB
+ * Copyright (c) 2005-2008, 2011-2013, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -34,293 +34,68 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#include <linux/netdevice.h>
-#include <linux/slab.h>
-#include <net/net_namespace.h>
-
#include "core.h"
#include "bearer.h"
-#define MAX_ETH_BEARERS 2
-#define ETH_LINK_PRIORITY TIPC_DEF_LINK_PRI
-#define ETH_LINK_TOLERANCE TIPC_DEF_LINK_TOL
-#define ETH_LINK_WINDOW TIPC_DEF_LINK_WIN
-
-/**
- * struct eth_bearer - Ethernet bearer data structure
- * @bearer: ptr to associated "generic" bearer structure
- * @dev: ptr to associated Ethernet network device
- * @tipc_packet_type: used in binding TIPC to Ethernet driver
- */
-
-struct eth_bearer {
- struct tipc_bearer *bearer;
- struct net_device *dev;
- struct packet_type tipc_packet_type;
-};
-
-static struct eth_bearer eth_bearers[MAX_ETH_BEARERS];
-static int eth_started = 0;
-static struct notifier_block notifier;
-
-/**
- * send_msg - send a TIPC message out over an Ethernet interface
- */
+#define ETH_ADDR_OFFSET 4 /* MAC addr position inside address field */
-static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
- struct tipc_media_addr *dest)
+/* Convert Ethernet address (media address format) to string */
+static int tipc_eth_addr2str(struct tipc_media_addr *addr,
+ char *strbuf, int bufsz)
{
- struct sk_buff *clone;
- struct net_device *dev;
- int delta;
+	if (bufsz < 18)	/* 18 = sizeof("aa:bb:cc:dd:ee:ff") */
+ return 1;
- clone = skb_clone(buf, GFP_ATOMIC);
- if (!clone)
- return 0;
-
- dev = ((struct eth_bearer *)(tb_ptr->usr_handle))->dev;
- delta = dev->hard_header_len - skb_headroom(buf);
-
- if ((delta > 0) &&
- pskb_expand_head(clone, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
- kfree_skb(clone);
- return 0;
- }
-
- skb_reset_network_header(clone);
- clone->dev = dev;
- dev_hard_header(clone, dev, ETH_P_TIPC, &dest->dev_addr.eth_addr,
- dev->dev_addr, clone->len);
- dev_queue_xmit(clone);
+ sprintf(strbuf, "%pM", addr->value);
return 0;
}
-/**
- * recv_msg - handle incoming TIPC message from an Ethernet interface
- *
- * Accept only packets explicitly sent to this node, or broadcast packets;
- * ignores packets sent using Ethernet multicast, and traffic sent to other
- * nodes (which can happen if interface is running in promiscuous mode).
- */
-
-static int recv_msg(struct sk_buff *buf, struct net_device *dev,
- struct packet_type *pt, struct net_device *orig_dev)
+/* Convert from media address format to discovery message addr format */
+static int tipc_eth_addr2msg(char *msg, struct tipc_media_addr *addr)
{
- struct eth_bearer *eb_ptr = (struct eth_bearer *)pt->af_packet_priv;
-
- if (!net_eq(dev_net(dev), &init_net)) {
- kfree_skb(buf);
- return 0;
- }
-
- if (likely(eb_ptr->bearer)) {
- if (likely(buf->pkt_type <= PACKET_BROADCAST)) {
- buf->next = NULL;
- tipc_recv_msg(buf, eb_ptr->bearer);
- return 0;
- }
- }
- kfree_skb(buf);
+ memset(msg, 0, TIPC_MEDIA_ADDR_SIZE);
+ msg[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_ETH;
+ memcpy(msg + ETH_ADDR_OFFSET, addr->value, ETH_ALEN);
return 0;
}
-/**
- * enable_bearer - attach TIPC bearer to an Ethernet interface
- */
-
-static int enable_bearer(struct tipc_bearer *tb_ptr)
+/* Convert raw mac address format to media addr format */
+static int tipc_eth_raw2addr(struct tipc_bearer *b,
+ struct tipc_media_addr *addr,
+ char *msg)
{
- struct net_device *dev = NULL;
- struct net_device *pdev = NULL;
- struct eth_bearer *eb_ptr = &eth_bearers[0];
- struct eth_bearer *stop = &eth_bearers[MAX_ETH_BEARERS];
- char *driver_name = strchr((const char *)tb_ptr->name, ':') + 1;
- int pending_dev = 0;
-
- /* Find unused Ethernet bearer structure */
-
- while (eb_ptr->dev) {
- if (!eb_ptr->bearer)
- pending_dev++;
- if (++eb_ptr == stop)
- return pending_dev ? -EAGAIN : -EDQUOT;
- }
-
- /* Find device with specified name */
-
- for_each_netdev(&init_net, pdev){
- if (!strncmp(pdev->name, driver_name, IFNAMSIZ)) {
- dev = pdev;
- break;
- }
- }
- if (!dev)
- return -ENODEV;
-
- /* Find Ethernet bearer for device (or create one) */
-
- for (;(eb_ptr != stop) && eb_ptr->dev && (eb_ptr->dev != dev); eb_ptr++);
- if (eb_ptr == stop)
- return -EDQUOT;
- if (!eb_ptr->dev) {
- eb_ptr->dev = dev;
- eb_ptr->tipc_packet_type.type = htons(ETH_P_TIPC);
- eb_ptr->tipc_packet_type.dev = dev;
- eb_ptr->tipc_packet_type.func = recv_msg;
- eb_ptr->tipc_packet_type.af_packet_priv = eb_ptr;
- INIT_LIST_HEAD(&(eb_ptr->tipc_packet_type.list));
- dev_hold(dev);
- dev_add_pack(&eb_ptr->tipc_packet_type);
- }
+ char bcast_mac[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- /* Associate TIPC bearer with Ethernet bearer */
-
- eb_ptr->bearer = tb_ptr;
- tb_ptr->usr_handle = (void *)eb_ptr;
- tb_ptr->mtu = dev->mtu;
- tb_ptr->blocked = 0;
- tb_ptr->addr.type = htonl(TIPC_MEDIA_TYPE_ETH);
- memcpy(&tb_ptr->addr.dev_addr, dev->dev_addr, ETH_ALEN);
+ memset(addr, 0, sizeof(*addr));
+ ether_addr_copy(addr->value, msg);
+ addr->media_id = TIPC_MEDIA_TYPE_ETH;
+ addr->broadcast = !memcmp(addr->value, bcast_mac, ETH_ALEN);
return 0;
}
-/**
- * disable_bearer - detach TIPC bearer from an Ethernet interface
- *
- * We really should do dev_remove_pack() here, but this function can not be
- * called at tasklet level. => Use eth_bearer->bearer as a flag to throw away
- * incoming buffers, & postpone dev_remove_pack() to eth_media_stop() on exit.
- */
-
-static void disable_bearer(struct tipc_bearer *tb_ptr)
-{
- ((struct eth_bearer *)tb_ptr->usr_handle)->bearer = NULL;
-}
-
-/**
- * recv_notification - handle device updates from OS
- *
- * Change the state of the Ethernet bearer (if any) associated with the
- * specified device.
- */
-
-static int recv_notification(struct notifier_block *nb, unsigned long evt,
- void *dv)
-{
- struct net_device *dev = (struct net_device *)dv;
- struct eth_bearer *eb_ptr = &eth_bearers[0];
- struct eth_bearer *stop = &eth_bearers[MAX_ETH_BEARERS];
-
- if (!net_eq(dev_net(dev), &init_net))
- return NOTIFY_DONE;
-
- while ((eb_ptr->dev != dev)) {
- if (++eb_ptr == stop)
- return NOTIFY_DONE; /* couldn't find device */
- }
- if (!eb_ptr->bearer)
- return NOTIFY_DONE; /* bearer had been disabled */
-
- eb_ptr->bearer->mtu = dev->mtu;
-
- switch (evt) {
- case NETDEV_CHANGE:
- if (netif_carrier_ok(dev))
- tipc_continue(eb_ptr->bearer);
- else
- tipc_block_bearer(eb_ptr->bearer->name);
- break;
- case NETDEV_UP:
- tipc_continue(eb_ptr->bearer);
- break;
- case NETDEV_DOWN:
- tipc_block_bearer(eb_ptr->bearer->name);
- break;
- case NETDEV_CHANGEMTU:
- case NETDEV_CHANGEADDR:
- tipc_block_bearer(eb_ptr->bearer->name);
- tipc_continue(eb_ptr->bearer);
- break;
- case NETDEV_UNREGISTER:
- case NETDEV_CHANGENAME:
- tipc_disable_bearer(eb_ptr->bearer->name);
- break;
- }
- return NOTIFY_OK;
-}
-
-/**
- * eth_addr2str - convert Ethernet address to string
- */
-
-static char *eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size)
+/* Convert discovery msg addr format to Ethernet media addr format */
+static int tipc_eth_msg2addr(struct tipc_bearer *b,
+ struct tipc_media_addr *addr,
+ char *msg)
{
- unchar *addr = (unchar *)&a->dev_addr;
-
- if (str_size < 18)
- *str_buf = '\0';
- else
- sprintf(str_buf, "%pM", addr);
- return str_buf;
+ /* Skip past preamble: */
+ msg += ETH_ADDR_OFFSET;
+ return tipc_eth_raw2addr(b, addr, msg);
}
-/**
- * tipc_eth_media_start - activate Ethernet bearer support
- *
- * Register Ethernet media type with TIPC bearer code. Also register
- * with OS for notifications about device state changes.
- */
-
-int tipc_eth_media_start(void)
-{
- struct tipc_media_addr bcast_addr;
- int res;
-
- if (eth_started)
- return -EINVAL;
-
- bcast_addr.type = htonl(TIPC_MEDIA_TYPE_ETH);
- memset(&bcast_addr.dev_addr, 0xff, ETH_ALEN);
-
- memset(eth_bearers, 0, sizeof(eth_bearers));
-
- res = tipc_register_media(TIPC_MEDIA_TYPE_ETH, "eth",
- enable_bearer, disable_bearer, send_msg,
- eth_addr2str, &bcast_addr, ETH_LINK_PRIORITY,
- ETH_LINK_TOLERANCE, ETH_LINK_WINDOW);
- if (res)
- return res;
-
- notifier.notifier_call = &recv_notification;
- notifier.priority = 0;
- res = register_netdevice_notifier(&notifier);
- if (!res)
- eth_started = 1;
- return res;
-}
-
-/**
- * tipc_eth_media_stop - deactivate Ethernet bearer support
- */
-
-void tipc_eth_media_stop(void)
-{
- int i;
-
- if (!eth_started)
- return;
-
- unregister_netdevice_notifier(&notifier);
- for (i = 0; i < MAX_ETH_BEARERS ; i++) {
- if (eth_bearers[i].bearer) {
- eth_bearers[i].bearer->blocked = 1;
- eth_bearers[i].bearer = NULL;
- }
- if (eth_bearers[i].dev) {
- dev_remove_pack(&eth_bearers[i].tipc_packet_type);
- dev_put(eth_bearers[i].dev);
- }
- }
- memset(&eth_bearers, 0, sizeof(eth_bearers));
- eth_started = 0;
-}
+/* Ethernet media registration info */
+struct tipc_media eth_media_info = {
+ .send_msg = tipc_l2_send_msg,
+ .enable_media = tipc_enable_l2_media,
+ .disable_media = tipc_disable_l2_media,
+ .addr2str = tipc_eth_addr2str,
+ .addr2msg = tipc_eth_addr2msg,
+ .msg2addr = tipc_eth_msg2addr,
+ .raw2addr = tipc_eth_raw2addr,
+ .priority = TIPC_DEF_LINK_PRI,
+ .tolerance = TIPC_DEF_LINK_TOL,
+ .window = TIPC_DEF_LINK_WIN,
+ .type_id = TIPC_MEDIA_TYPE_ETH,
+ .hwaddr_len = ETH_ALEN,
+ .name = "eth"
+};
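The registration table above is all that is left of the Ethernet-specific code: the generic l2 helpers handle sending and enabling, and the media callbacks reduce to address conversions. As an illustration of what tipc_eth_raw2addr() does, here is a minimal user-space sketch of the same conversion; struct media_addr_sketch and raw2addr_sketch are simplified stand-ins invented for this example, not the kernel's types:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define ETH_ALEN 6

/* Simplified stand-in for the kernel's struct tipc_media_addr */
struct media_addr_sketch {
	uint8_t value[ETH_ALEN];
	uint8_t media_id;
	bool broadcast;
};

/* Mirror of the raw2addr conversion: copy the MAC, tag the media type,
 * and mark the address as broadcast when it is all-ones. */
static int raw2addr_sketch(struct media_addr_sketch *addr, const uint8_t *mac,
			   uint8_t media_id)
{
	static const uint8_t bcast[ETH_ALEN] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	};

	memset(addr, 0, sizeof(*addr));
	memcpy(addr->value, mac, ETH_ALEN);
	addr->media_id = media_id;
	addr->broadcast = !memcmp(addr->value, bcast, ETH_ALEN);
	return 0;
}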
diff --git a/net/tipc/handler.c b/net/tipc/handler.c
deleted file mode 100644
index 0c70010a7df..00000000000
--- a/net/tipc/handler.c
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * net/tipc/handler.c: TIPC signal handling
- *
- * Copyright (c) 2000-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "core.h"
-
-struct queue_item {
- struct list_head next_signal;
- void (*handler) (unsigned long);
- unsigned long data;
-};
-
-static struct kmem_cache *tipc_queue_item_cache;
-static struct list_head signal_queue_head;
-static DEFINE_SPINLOCK(qitem_lock);
-static int handler_enabled = 0;
-
-static void process_signal_queue(unsigned long dummy);
-
-static DECLARE_TASKLET_DISABLED(tipc_tasklet, process_signal_queue, 0);
-
-
-unsigned int tipc_k_signal(Handler routine, unsigned long argument)
-{
- struct queue_item *item;
-
- if (!handler_enabled) {
- err("Signal request ignored by handler\n");
- return -ENOPROTOOPT;
- }
-
- spin_lock_bh(&qitem_lock);
- item = kmem_cache_alloc(tipc_queue_item_cache, GFP_ATOMIC);
- if (!item) {
- err("Signal queue out of memory\n");
- spin_unlock_bh(&qitem_lock);
- return -ENOMEM;
- }
- item->handler = routine;
- item->data = argument;
- list_add_tail(&item->next_signal, &signal_queue_head);
- spin_unlock_bh(&qitem_lock);
- tasklet_schedule(&tipc_tasklet);
- return 0;
-}
-
-static void process_signal_queue(unsigned long dummy)
-{
- struct queue_item *__volatile__ item;
- struct list_head *l, *n;
-
- spin_lock_bh(&qitem_lock);
- list_for_each_safe(l, n, &signal_queue_head) {
- item = list_entry(l, struct queue_item, next_signal);
- list_del(&item->next_signal);
- spin_unlock_bh(&qitem_lock);
- item->handler(item->data);
- spin_lock_bh(&qitem_lock);
- kmem_cache_free(tipc_queue_item_cache, item);
- }
- spin_unlock_bh(&qitem_lock);
-}
-
-int tipc_handler_start(void)
-{
- tipc_queue_item_cache =
- kmem_cache_create("tipc_queue_items", sizeof(struct queue_item),
- 0, SLAB_HWCACHE_ALIGN, NULL);
- if (!tipc_queue_item_cache)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&signal_queue_head);
- tasklet_enable(&tipc_tasklet);
- handler_enabled = 1;
- return 0;
-}
-
-void tipc_handler_stop(void)
-{
- struct list_head *l, *n;
- struct queue_item *item;
-
- if (!handler_enabled)
- return;
-
- handler_enabled = 0;
- tasklet_disable(&tipc_tasklet);
- tasklet_kill(&tipc_tasklet);
-
- spin_lock_bh(&qitem_lock);
- list_for_each_safe(l, n, &signal_queue_head) {
- item = list_entry(l, struct queue_item, next_signal);
- list_del(&item->next_signal);
- kmem_cache_free(tipc_queue_item_cache, item);
- }
- spin_unlock_bh(&qitem_lock);
-
- kmem_cache_destroy(tipc_queue_item_cache);
-}
-
diff --git a/net/tipc/ib_media.c b/net/tipc/ib_media.c
new file mode 100644
index 00000000000..8522eef9c13
--- /dev/null
+++ b/net/tipc/ib_media.c
@@ -0,0 +1,101 @@
+/*
+ * net/tipc/ib_media.c: Infiniband bearer support for TIPC
+ *
+ * Copyright (c) 2013 Patrick McHardy <kaber@trash.net>
+ *
+ * Based on eth_media.c, which carries the following copyright notice:
+ *
+ * Copyright (c) 2001-2007, Ericsson AB
+ * Copyright (c) 2005-2008, 2011, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/if_infiniband.h>
+#include "core.h"
+#include "bearer.h"
+
+/* Convert InfiniBand address (media address format) to string */
+static int tipc_ib_addr2str(struct tipc_media_addr *a, char *str_buf,
+ int str_size)
+{
+ if (str_size < 60) /* 60 = 19 * strlen("xx:") + strlen("xx\0") */
+ return 1;
+
+ sprintf(str_buf, "%20phC", a->value);
+
+ return 0;
+}
+
+/* Convert from media address format to discovery message addr format */
+static int tipc_ib_addr2msg(char *msg, struct tipc_media_addr *addr)
+{
+ memset(msg, 0, TIPC_MEDIA_ADDR_SIZE);
+ memcpy(msg, addr->value, INFINIBAND_ALEN);
+ return 0;
+}
+
+/* Convert raw InfiniBand address format to media addr format */
+static int tipc_ib_raw2addr(struct tipc_bearer *b,
+ struct tipc_media_addr *addr,
+ char *msg)
+{
+ memset(addr, 0, sizeof(*addr));
+ memcpy(addr->value, msg, INFINIBAND_ALEN);
+ addr->media_id = TIPC_MEDIA_TYPE_IB;
+ addr->broadcast = !memcmp(msg, b->bcast_addr.value,
+ INFINIBAND_ALEN);
+ return 0;
+}
+
+/* Convert discovery msg addr format to InfiniBand media addr format */
+static int tipc_ib_msg2addr(struct tipc_bearer *b,
+ struct tipc_media_addr *addr,
+ char *msg)
+{
+ return tipc_ib_raw2addr(b, addr, msg);
+}
+
+/* InfiniBand media registration info */
+struct tipc_media ib_media_info = {
+ .send_msg = tipc_l2_send_msg,
+ .enable_media = tipc_enable_l2_media,
+ .disable_media = tipc_disable_l2_media,
+ .addr2str = tipc_ib_addr2str,
+ .addr2msg = tipc_ib_addr2msg,
+ .msg2addr = tipc_ib_msg2addr,
+ .raw2addr = tipc_ib_raw2addr,
+ .priority = TIPC_DEF_LINK_PRI,
+ .tolerance = TIPC_DEF_LINK_TOL,
+ .window = TIPC_DEF_LINK_WIN,
+ .type_id = TIPC_MEDIA_TYPE_IB,
+ .hwaddr_len = INFINIBAND_ALEN,
+ .name = "ib"
+};
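The 60-byte minimum enforced by tipc_ib_addr2str() comes from printing a 20-byte (INFINIBAND_ALEN) hardware address as colon-separated hex: 19 groups of "xx:" plus a final "xx\0" is 19 * 3 + 3 = 60 characters. The kernel produces this with the %phC printk extension; the sketch below is a portable, illustrative equivalent (ib_addr2str_sketch is a name made up for this example):

#include <stdio.h>
#include <stdint.h>

#define INFINIBAND_ALEN 20
/* 19 * strlen("xx:") + strlen("xx\0") = 19 * 3 + 3 = 60 */
#define IB_ADDR_STR_SIZE 60

/* Portable equivalent of the kernel's "%20phC" specifier: print the
 * 20-byte address as colon-separated hex pairs. */
static int ib_addr2str_sketch(const uint8_t *value, char *buf, int size)
{
	int i, n = 0;

	if (size < IB_ADDR_STR_SIZE)
		return 1;	/* same failure convention as the kernel helper */
	for (i = 0; i < INFINIBAND_ALEN; i++)
		n += sprintf(buf + n, i ? ":%02x" : "%02x", (unsigned)value[i]);
	return 0;
}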
diff --git a/net/tipc/link.c b/net/tipc/link.c
index cf414cf05e7..ad2c57f5868 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1,8 +1,8 @@
/*
* net/tipc/link.c: TIPC link code
*
- * Copyright (c) 1996-2007, Ericsson AB
- * Copyright (c) 2004-2007, Wind River Systems
+ * Copyright (c) 1996-2007, 2012-2014, Ericsson AB
+ * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -37,21 +37,28 @@
#include "core.h"
#include "link.h"
#include "port.h"
+#include "socket.h"
#include "name_distr.h"
#include "discover.h"
#include "config.h"
+#include <linux/pkt_sched.h>
/*
- * Out-of-range value for link session numbers
+ * Error message prefixes
*/
+static const char *link_co_err = "Link changeover error, ";
+static const char *link_rst_msg = "Resetting link ";
+static const char *link_unk_evt = "Unknown link event ";
+/*
+ * Out-of-range value for link session numbers
+ */
#define INVALID_SESSION 0x10000
/*
* Link state events:
*/
-
#define STARTING_EVT 856384768 /* link processing trigger */
#define TRAFFIC_MSG_EVT 560815u /* rx'd ??? */
#define TIMEOUT_EVT 560817u /* link timer expired */
@@ -67,118 +74,46 @@
/*
* State value stored in 'exp_msg_count'
*/
-
#define START_CHANGEOVER 100000u
-/**
- * struct link_name - deconstructed link name
- * @addr_local: network address of node at this end
- * @if_local: name of interface at this end
- * @addr_peer: network address of node at far end
- * @if_peer: name of interface at far end
- */
-
-struct link_name {
- u32 addr_local;
- char if_local[TIPC_MAX_IF_NAME];
- u32 addr_peer;
- char if_peer[TIPC_MAX_IF_NAME];
-};
-
-static void link_handle_out_of_seq_msg(struct link *l_ptr,
+static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
struct sk_buff *buf);
-static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf);
-static int link_recv_changeover_msg(struct link **l_ptr, struct sk_buff **buf);
-static void link_set_supervision_props(struct link *l_ptr, u32 tolerance);
-static int link_send_sections_long(struct port *sender,
- struct iovec const *msg_sect,
- u32 num_sect, u32 destnode);
-static void link_check_defragm_bufs(struct link *l_ptr);
-static void link_state_event(struct link *l_ptr, u32 event);
-static void link_reset_statistics(struct link *l_ptr);
-static void link_print(struct link *l_ptr, struct print_buf *buf,
- const char *str);
-static void link_start(struct link *l_ptr);
-static int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf);
-
-
-/*
- * Debugging code used by link routines only
- *
- * When debugging link problems on a system that has multiple links,
- * the standard TIPC debugging routines may not be useful since they
- * allow the output from multiple links to be intermixed. For this reason
- * routines of the form "dbg_link_XXX()" have been created that will capture
- * debug info into a link's personal print buffer, which can then be dumped
- * into the TIPC system log (TIPC_LOG) upon request.
- *
- * To enable per-link debugging, use LINK_LOG_BUF_SIZE to specify the size
- * of the print buffer used by each link. If LINK_LOG_BUF_SIZE is set to 0,
- * the dbg_link_XXX() routines simply send their output to the standard
- * debug print buffer (DBG_OUTPUT), if it has been defined; this can be useful
- * when there is only a single link in the system being debugged.
- *
- * Notes:
- * - When enabled, LINK_LOG_BUF_SIZE should be set to at least TIPC_PB_MIN_SIZE
- * - "l_ptr" must be valid when using dbg_link_XXX() macros
- */
-
-#define LINK_LOG_BUF_SIZE 0
-
-#define dbg_link(fmt, arg...) \
- do { \
- if (LINK_LOG_BUF_SIZE) \
- tipc_printf(&l_ptr->print_buf, fmt, ## arg); \
- } while (0)
-#define dbg_link_msg(msg, txt) \
- do { \
- if (LINK_LOG_BUF_SIZE) \
- tipc_msg_dbg(&l_ptr->print_buf, msg, txt); \
- } while (0)
-#define dbg_link_state(txt) \
- do { \
- if (LINK_LOG_BUF_SIZE) \
- link_print(l_ptr, &l_ptr->print_buf, txt); \
- } while (0)
-#define dbg_link_dump() do { \
- if (LINK_LOG_BUF_SIZE) { \
- tipc_printf(LOG, "\n\nDumping link <%s>:\n", l_ptr->name); \
- tipc_printbuf_move(LOG, &l_ptr->print_buf); \
- } \
-} while (0)
-
-static void dbg_print_link(struct link *l_ptr, const char *str)
-{
- if (DBG_OUTPUT != TIPC_NULL)
- link_print(l_ptr, DBG_OUTPUT, str);
-}
-
-static void dbg_print_buf_chain(struct sk_buff *root_buf)
-{
- if (DBG_OUTPUT != TIPC_NULL) {
- struct sk_buff *buf = root_buf;
-
- while (buf) {
- msg_dbg(buf_msg(buf), "In chain: ");
- buf = buf->next;
- }
- }
-}
+static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf);
+static int tipc_link_tunnel_rcv(struct tipc_node *n_ptr,
+ struct sk_buff **buf);
+static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance);
+static int tipc_link_iovec_long_xmit(struct tipc_port *sender,
+ struct iovec const *msg_sect,
+ unsigned int len, u32 destnode);
+static void link_state_event(struct tipc_link *l_ptr, u32 event);
+static void link_reset_statistics(struct tipc_link *l_ptr);
+static void link_print(struct tipc_link *l_ptr, const char *str);
+static int tipc_link_frag_xmit(struct tipc_link *l_ptr, struct sk_buff *buf);
+static void tipc_link_sync_xmit(struct tipc_link *l);
+static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
/*
* Simple link routines
*/
-
static unsigned int align(unsigned int i)
{
return (i + 3) & ~3u;
}
-static void link_init_max_pkt(struct link *l_ptr)
+static void link_init_max_pkt(struct tipc_link *l_ptr)
{
+ struct tipc_bearer *b_ptr;
u32 max_pkt;
- max_pkt = (l_ptr->b_ptr->publ.mtu & ~3);
+ rcu_read_lock();
+ b_ptr = rcu_dereference_rtnl(bearer_list[l_ptr->bearer_id]);
+ if (!b_ptr) {
+ rcu_read_unlock();
+ return;
+ }
+ max_pkt = (b_ptr->mtu & ~3);
+ rcu_read_unlock();
+
if (max_pkt > MAX_MSG_SIZE)
max_pkt = MAX_MSG_SIZE;
@@ -191,14 +126,14 @@ static void link_init_max_pkt(struct link *l_ptr)
l_ptr->max_pkt_probes = 0;
}
-static u32 link_next_sent(struct link *l_ptr)
+static u32 link_next_sent(struct tipc_link *l_ptr)
{
if (l_ptr->next_out)
- return msg_seqno(buf_msg(l_ptr->next_out));
+ return buf_seqno(l_ptr->next_out);
return mod(l_ptr->next_out_no);
}
-static u32 link_last_sent(struct link *l_ptr)
+static u32 link_last_sent(struct tipc_link *l_ptr)
{
return mod(link_next_sent(l_ptr) - 1);
}
@@ -206,111 +141,31 @@ static u32 link_last_sent(struct link *l_ptr)
/*
* Simple non-static link routines (i.e. referenced outside this file)
*/
-
-int tipc_link_is_up(struct link *l_ptr)
+int tipc_link_is_up(struct tipc_link *l_ptr)
{
if (!l_ptr)
return 0;
return link_working_working(l_ptr) || link_working_unknown(l_ptr);
}
-int tipc_link_is_active(struct link *l_ptr)
+int tipc_link_is_active(struct tipc_link *l_ptr)
{
return (l_ptr->owner->active_links[0] == l_ptr) ||
(l_ptr->owner->active_links[1] == l_ptr);
}
/**
- * link_name_validate - validate & (optionally) deconstruct link name
- * @name - ptr to link name string
- * @name_parts - ptr to area for link name components (or NULL if not needed)
- *
- * Returns 1 if link name is valid, otherwise 0.
- */
-
-static int link_name_validate(const char *name, struct link_name *name_parts)
-{
- char name_copy[TIPC_MAX_LINK_NAME];
- char *addr_local;
- char *if_local;
- char *addr_peer;
- char *if_peer;
- char dummy;
- u32 z_local, c_local, n_local;
- u32 z_peer, c_peer, n_peer;
- u32 if_local_len;
- u32 if_peer_len;
-
- /* copy link name & ensure length is OK */
-
- name_copy[TIPC_MAX_LINK_NAME - 1] = 0;
- /* need above in case non-Posix strncpy() doesn't pad with nulls */
- strncpy(name_copy, name, TIPC_MAX_LINK_NAME);
- if (name_copy[TIPC_MAX_LINK_NAME - 1] != 0)
- return 0;
-
- /* ensure all component parts of link name are present */
-
- addr_local = name_copy;
- if ((if_local = strchr(addr_local, ':')) == NULL)
- return 0;
- *(if_local++) = 0;
- if ((addr_peer = strchr(if_local, '-')) == NULL)
- return 0;
- *(addr_peer++) = 0;
- if_local_len = addr_peer - if_local;
- if ((if_peer = strchr(addr_peer, ':')) == NULL)
- return 0;
- *(if_peer++) = 0;
- if_peer_len = strlen(if_peer) + 1;
-
- /* validate component parts of link name */
-
- if ((sscanf(addr_local, "%u.%u.%u%c",
- &z_local, &c_local, &n_local, &dummy) != 3) ||
- (sscanf(addr_peer, "%u.%u.%u%c",
- &z_peer, &c_peer, &n_peer, &dummy) != 3) ||
- (z_local > 255) || (c_local > 4095) || (n_local > 4095) ||
- (z_peer > 255) || (c_peer > 4095) || (n_peer > 4095) ||
- (if_local_len <= 1) || (if_local_len > TIPC_MAX_IF_NAME) ||
- (if_peer_len <= 1) || (if_peer_len > TIPC_MAX_IF_NAME) ||
- (strspn(if_local, tipc_alphabet) != (if_local_len - 1)) ||
- (strspn(if_peer, tipc_alphabet) != (if_peer_len - 1)))
- return 0;
-
- /* return link name components, if necessary */
-
- if (name_parts) {
- name_parts->addr_local = tipc_addr(z_local, c_local, n_local);
- strcpy(name_parts->if_local, if_local);
- name_parts->addr_peer = tipc_addr(z_peer, c_peer, n_peer);
- strcpy(name_parts->if_peer, if_peer);
- }
- return 1;
-}
-
-/**
* link_timeout - handle expiration of link timer
* @l_ptr: pointer to link
- *
- * This routine must not grab "tipc_net_lock" to avoid a potential deadlock conflict
- * with tipc_link_delete(). (There is no risk that the node will be deleted by
- * another thread because tipc_link_delete() always cancels the link timer before
- * tipc_node_delete() is called.)
*/
-
-static void link_timeout(struct link *l_ptr)
+static void link_timeout(struct tipc_link *l_ptr)
{
tipc_node_lock(l_ptr->owner);
/* update counters used in statistical profiling of send traffic */
-
l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
l_ptr->stats.queue_sz_counts++;
- if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz)
- l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
-
if (l_ptr->first_out) {
struct tipc_msg *msg = buf_msg(l_ptr->first_out);
u32 length = msg_size(msg);
@@ -341,8 +196,6 @@ static void link_timeout(struct link *l_ptr)
/* do all other link processing performed on a periodic basis */
- link_check_defragm_bufs(l_ptr);
-
link_state_event(l_ptr, TIMEOUT_EVT);
if (l_ptr->next_out)
@@ -351,56 +204,62 @@ static void link_timeout(struct link *l_ptr)
tipc_node_unlock(l_ptr->owner);
}
-static void link_set_timer(struct link *l_ptr, u32 time)
+static void link_set_timer(struct tipc_link *l_ptr, u32 time)
{
k_start_timer(&l_ptr->timer, time);
}
/**
* tipc_link_create - create a new link
+ * @n_ptr: pointer to associated node
* @b_ptr: pointer to associated bearer
- * @peer: network address of node at other end of link
* @media_addr: media address to use when sending messages over link
*
* Returns pointer to link.
*/
-
-struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
- const struct tipc_media_addr *media_addr)
+struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
+ struct tipc_bearer *b_ptr,
+ const struct tipc_media_addr *media_addr)
{
- struct link *l_ptr;
+ struct tipc_link *l_ptr;
struct tipc_msg *msg;
char *if_name;
+ char addr_string[16];
+ u32 peer = n_ptr->addr;
- l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
- if (!l_ptr) {
- warn("Link creation failed, no memory\n");
+ if (n_ptr->link_cnt >= 2) {
+ tipc_addr_string_fill(addr_string, n_ptr->addr);
+ pr_err("Attempt to establish third link to %s\n", addr_string);
return NULL;
}
- if (LINK_LOG_BUF_SIZE) {
- char *pb = kmalloc(LINK_LOG_BUF_SIZE, GFP_ATOMIC);
+ if (n_ptr->links[b_ptr->identity]) {
+ tipc_addr_string_fill(addr_string, n_ptr->addr);
+ pr_err("Attempt to establish second link on <%s> to %s\n",
+ b_ptr->name, addr_string);
+ return NULL;
+ }
- if (!pb) {
- kfree(l_ptr);
- warn("Link creation failed, no memory for print buffer\n");
- return NULL;
- }
- tipc_printbuf_init(&l_ptr->print_buf, pb, LINK_LOG_BUF_SIZE);
+ l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
+ if (!l_ptr) {
+ pr_warn("Link creation failed, no memory\n");
+ return NULL;
}
l_ptr->addr = peer;
- if_name = strchr(b_ptr->publ.name, ':') + 1;
- sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:",
+ if_name = strchr(b_ptr->name, ':') + 1;
+ sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
tipc_node(tipc_own_addr),
if_name,
tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
- /* note: peer i/f is appended to link name by reset/activate */
+ /* note: peer i/f name is updated by reset/activate message */
memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
+ l_ptr->owner = n_ptr;
l_ptr->checkpoint = 1;
- l_ptr->b_ptr = b_ptr;
- link_set_supervision_props(l_ptr, b_ptr->media->tolerance);
+ l_ptr->peer_session = INVALID_SESSION;
+ l_ptr->bearer_id = b_ptr->identity;
+ link_set_supervision_props(l_ptr, b_ptr->tolerance);
l_ptr->state = RESET_UNKNOWN;
l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
@@ -412,8 +271,9 @@ struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
strcpy((char *)msg_data(msg), if_name);
l_ptr->priority = b_ptr->priority;
- tipc_link_set_queue_limits(l_ptr, b_ptr->media->window);
+ tipc_link_set_queue_limits(l_ptr, b_ptr->window);
+ l_ptr->net_plane = b_ptr->net_plane;
link_init_max_pkt(l_ptr);
l_ptr->next_out_no = 1;
@@ -421,60 +281,46 @@ struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
link_reset_statistics(l_ptr);
- l_ptr->owner = tipc_node_attach_link(l_ptr);
- if (!l_ptr->owner) {
- if (LINK_LOG_BUF_SIZE)
- kfree(l_ptr->print_buf.buf);
- kfree(l_ptr);
- return NULL;
- }
+ tipc_node_attach_link(n_ptr, l_ptr);
- k_init_timer(&l_ptr->timer, (Handler)link_timeout, (unsigned long)l_ptr);
- list_add_tail(&l_ptr->link_list, &b_ptr->links);
- tipc_k_signal((Handler)link_start, (unsigned long)l_ptr);
+ k_init_timer(&l_ptr->timer, (Handler)link_timeout,
+ (unsigned long)l_ptr);
- dbg("tipc_link_create(): tolerance = %u,cont intv = %u, abort_limit = %u\n",
- l_ptr->tolerance, l_ptr->continuity_interval, l_ptr->abort_limit);
+ link_state_event(l_ptr, STARTING_EVT);
return l_ptr;
}
-/**
- * tipc_link_delete - delete a link
- * @l_ptr: pointer to link
- *
- * Note: 'tipc_net_lock' is write_locked, bearer is locked.
- * This routine must not grab the node lock until after link timer cancellation
- * to avoid a potential deadlock situation.
- */
-
-void tipc_link_delete(struct link *l_ptr)
+void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down)
{
- if (!l_ptr) {
- err("Attempt to delete non-existent link\n");
- return;
- }
-
- dbg("tipc_link_delete()\n");
-
- k_cancel_timer(&l_ptr->timer);
+ struct tipc_link *l_ptr;
+ struct tipc_node *n_ptr;
- tipc_node_lock(l_ptr->owner);
- tipc_link_reset(l_ptr);
- tipc_node_detach_link(l_ptr->owner, l_ptr);
- tipc_link_stop(l_ptr);
- list_del_init(&l_ptr->link_list);
- if (LINK_LOG_BUF_SIZE)
- kfree(l_ptr->print_buf.buf);
- tipc_node_unlock(l_ptr->owner);
- k_term_timer(&l_ptr->timer);
- kfree(l_ptr);
-}
+ rcu_read_lock();
+ list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
+ tipc_node_lock(n_ptr);
+ l_ptr = n_ptr->links[bearer_id];
+ if (l_ptr) {
+ tipc_link_reset(l_ptr);
+ if (shutting_down || !tipc_node_is_up(n_ptr)) {
+ tipc_node_detach_link(l_ptr->owner, l_ptr);
+ tipc_link_reset_fragments(l_ptr);
+ tipc_node_unlock(n_ptr);
-static void link_start(struct link *l_ptr)
-{
- dbg("link_start %x\n", l_ptr);
- link_state_event(l_ptr, STARTING_EVT);
+ /* Nobody else can access this link now: */
+ del_timer_sync(&l_ptr->timer);
+ kfree(l_ptr);
+ } else {
+ /* Detach/delete when failover is finished: */
+ l_ptr->flags |= LINK_STOPPED;
+ tipc_node_unlock(n_ptr);
+ del_timer_sync(&l_ptr->timer);
+ }
+ continue;
+ }
+ tipc_node_unlock(n_ptr);
+ }
+ rcu_read_unlock();
}
/**
@@ -486,19 +332,16 @@ static void link_start(struct link *l_ptr)
* Schedules port for renewed sending of messages after link congestion
* has abated.
*/
-
-static int link_schedule_port(struct link *l_ptr, u32 origport, u32 sz)
+static int link_schedule_port(struct tipc_link *l_ptr, u32 origport, u32 sz)
{
- struct port *p_ptr;
+ struct tipc_port *p_ptr;
spin_lock_bh(&tipc_port_list_lock);
p_ptr = tipc_port_lock(origport);
if (p_ptr) {
- if (!p_ptr->wakeup)
- goto exit;
if (!list_empty(&p_ptr->wait_list))
goto exit;
- p_ptr->publ.congested = 1;
+ p_ptr->congested = 1;
p_ptr->waiting_pkts = 1 + ((sz - 1) / l_ptr->max_pkt);
list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
l_ptr->stats.link_congs++;
@@ -509,10 +352,10 @@ exit:
return -ELINKCONG;
}
-void tipc_link_wakeup_ports(struct link *l_ptr, int all)
+void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all)
{
- struct port *p_ptr;
- struct port *temp_p_ptr;
+ struct tipc_port *p_ptr;
+ struct tipc_port *temp_p_ptr;
int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size;
if (all)
@@ -528,11 +371,11 @@ void tipc_link_wakeup_ports(struct link *l_ptr, int all)
if (win <= 0)
break;
list_del_init(&p_ptr->wait_list);
- spin_lock_bh(p_ptr->publ.lock);
- p_ptr->publ.congested = 0;
- p_ptr->wakeup(&p_ptr->publ);
+ spin_lock_bh(p_ptr->lock);
+ p_ptr->congested = 0;
+ tipc_port_wakeup(p_ptr);
win -= p_ptr->waiting_pkts;
- spin_unlock_bh(p_ptr->publ.lock);
+ spin_unlock_bh(p_ptr->lock);
}
exit:
@@ -543,17 +386,9 @@ exit:
* link_release_outqueue - purge link's outbound message queue
* @l_ptr: pointer to link
*/
-
-static void link_release_outqueue(struct link *l_ptr)
+static void link_release_outqueue(struct tipc_link *l_ptr)
{
- struct sk_buff *buf = l_ptr->first_out;
- struct sk_buff *next;
-
- while (buf) {
- next = buf->next;
- buf_discard(buf);
- buf = next;
- }
+ kfree_skb_list(l_ptr->first_out);
l_ptr->first_out = NULL;
l_ptr->out_queue_size = 0;
}
@@ -562,56 +397,27 @@ static void link_release_outqueue(struct link *l_ptr)
* tipc_link_reset_fragments - purge link's inbound message fragments queue
* @l_ptr: pointer to link
*/
-
-void tipc_link_reset_fragments(struct link *l_ptr)
+void tipc_link_reset_fragments(struct tipc_link *l_ptr)
{
- struct sk_buff *buf = l_ptr->defragm_buf;
- struct sk_buff *next;
-
- while (buf) {
- next = buf->next;
- buf_discard(buf);
- buf = next;
- }
- l_ptr->defragm_buf = NULL;
+ kfree_skb(l_ptr->reasm_buf);
+ l_ptr->reasm_buf = NULL;
}
/**
- * tipc_link_stop - purge all inbound and outbound messages associated with link
+ * tipc_link_purge_queues - purge all pkt queues associated with link
* @l_ptr: pointer to link
*/
-
-void tipc_link_stop(struct link *l_ptr)
+void tipc_link_purge_queues(struct tipc_link *l_ptr)
{
- struct sk_buff *buf;
- struct sk_buff *next;
-
- buf = l_ptr->oldest_deferred_in;
- while (buf) {
- next = buf->next;
- buf_discard(buf);
- buf = next;
- }
-
- buf = l_ptr->first_out;
- while (buf) {
- next = buf->next;
- buf_discard(buf);
- buf = next;
- }
-
+ kfree_skb_list(l_ptr->oldest_deferred_in);
+ kfree_skb_list(l_ptr->first_out);
tipc_link_reset_fragments(l_ptr);
-
- buf_discard(l_ptr->proto_msg_queue);
+ kfree_skb(l_ptr->proto_msg_queue);
l_ptr->proto_msg_queue = NULL;
}
-/* LINK EVENT CODE IS NOT SUPPORTED AT PRESENT */
-#define link_send_event(fcn, l_ptr, up) do { } while (0)
-
-void tipc_link_reset(struct link *l_ptr)
+void tipc_link_reset(struct tipc_link *l_ptr)
{
- struct sk_buff *buf;
u32 prev_state = l_ptr->state;
u32 checkpoint = l_ptr->next_in_no;
int was_active_link = tipc_link_is_active(l_ptr);
@@ -625,31 +431,23 @@ void tipc_link_reset(struct link *l_ptr)
link_init_max_pkt(l_ptr);
l_ptr->state = RESET_UNKNOWN;
- dbg_link_state("Resetting Link\n");
if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
return;
tipc_node_link_down(l_ptr->owner, l_ptr);
- tipc_bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr);
+ tipc_bearer_remove_dest(l_ptr->bearer_id, l_ptr->addr);
- if (was_active_link && tipc_node_has_active_links(l_ptr->owner) &&
- l_ptr->owner->permit_changeover) {
+ if (was_active_link && tipc_node_active_links(l_ptr->owner)) {
l_ptr->reset_checkpoint = checkpoint;
l_ptr->exp_msg_count = START_CHANGEOVER;
}
/* Clean up all queues: */
-
link_release_outqueue(l_ptr);
- buf_discard(l_ptr->proto_msg_queue);
+ kfree_skb(l_ptr->proto_msg_queue);
l_ptr->proto_msg_queue = NULL;
- buf = l_ptr->oldest_deferred_in;
- while (buf) {
- struct sk_buff *next = buf->next;
- buf_discard(buf);
- buf = next;
- }
+ kfree_skb_list(l_ptr->oldest_deferred_in);
if (!list_empty(&l_ptr->waiting_ports))
tipc_link_wakeup_ports(l_ptr, 1);
@@ -667,21 +465,29 @@ void tipc_link_reset(struct link *l_ptr)
l_ptr->fsm_msg_cnt = 0;
l_ptr->stale_count = 0;
link_reset_statistics(l_ptr);
-
- link_send_event(tipc_cfg_link_event, l_ptr, 0);
- if (!in_own_cluster(l_ptr->addr))
- link_send_event(tipc_disc_link_event, l_ptr, 0);
}
+void tipc_link_reset_list(unsigned int bearer_id)
+{
+ struct tipc_link *l_ptr;
+ struct tipc_node *n_ptr;
-static void link_activate(struct link *l_ptr)
+ rcu_read_lock();
+ list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
+ tipc_node_lock(n_ptr);
+ l_ptr = n_ptr->links[bearer_id];
+ if (l_ptr)
+ tipc_link_reset(l_ptr);
+ tipc_node_unlock(n_ptr);
+ }
+ rcu_read_unlock();
+}
+
+static void link_activate(struct tipc_link *l_ptr)
{
l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
tipc_node_link_up(l_ptr->owner, l_ptr);
- tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
- link_send_event(tipc_cfg_link_event, l_ptr, 1);
- if (!in_own_cluster(l_ptr->addr))
- link_send_event(tipc_disc_link_event, l_ptr, 1);
+ tipc_bearer_add_dest(l_ptr->bearer_id, l_ptr->addr);
}
/**
@@ -689,211 +495,184 @@ static void link_activate(struct link *l_ptr)
* @l_ptr: pointer to link
* @event: state machine event to process
*/
-
-static void link_state_event(struct link *l_ptr, unsigned event)
+static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
{
- struct link *other;
+ struct tipc_link *other;
u32 cont_intv = l_ptr->continuity_interval;
- if (!l_ptr->started && (event != STARTING_EVT))
+ if (l_ptr->flags & LINK_STOPPED)
+ return;
+
+ if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT))
return; /* Not yet. */
- if (link_blocked(l_ptr)) {
- if (event == TIMEOUT_EVT) {
+ /* Check whether changeover is going on */
+ if (l_ptr->exp_msg_count) {
+ if (event == TIMEOUT_EVT)
link_set_timer(l_ptr, cont_intv);
- }
- return; /* Changeover going on */
+ return;
}
- dbg_link("STATE_EV: <%s> ", l_ptr->name);
switch (l_ptr->state) {
case WORKING_WORKING:
- dbg_link("WW/");
switch (event) {
case TRAFFIC_MSG_EVT:
- dbg_link("TRF-");
- /* fall through */
case ACTIVATE_MSG:
- dbg_link("ACT\n");
break;
case TIMEOUT_EVT:
- dbg_link("TIM ");
if (l_ptr->next_in_no != l_ptr->checkpoint) {
l_ptr->checkpoint = l_ptr->next_in_no;
if (tipc_bclink_acks_missing(l_ptr->owner)) {
- tipc_link_send_proto_msg(l_ptr, STATE_MSG,
- 0, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, STATE_MSG,
+ 0, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
} else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
- tipc_link_send_proto_msg(l_ptr, STATE_MSG,
- 1, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, STATE_MSG,
+ 1, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
}
link_set_timer(l_ptr, cont_intv);
break;
}
- dbg_link(" -> WU\n");
l_ptr->state = WORKING_UNKNOWN;
l_ptr->fsm_msg_cnt = 0;
- tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
link_set_timer(l_ptr, cont_intv / 4);
break;
case RESET_MSG:
- dbg_link("RES -> RR\n");
- info("Resetting link <%s>, requested by peer\n",
- l_ptr->name);
+ pr_info("%s<%s>, requested by peer\n", link_rst_msg,
+ l_ptr->name);
tipc_link_reset(l_ptr);
l_ptr->state = RESET_RESET;
l_ptr->fsm_msg_cnt = 0;
- tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
+ 0, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
link_set_timer(l_ptr, cont_intv);
break;
default:
- err("Unknown link event %u in WW state\n", event);
+ pr_err("%s%u in WW state\n", link_unk_evt, event);
}
break;
case WORKING_UNKNOWN:
- dbg_link("WU/");
switch (event) {
case TRAFFIC_MSG_EVT:
- dbg_link("TRF-");
case ACTIVATE_MSG:
- dbg_link("ACT -> WW\n");
l_ptr->state = WORKING_WORKING;
l_ptr->fsm_msg_cnt = 0;
link_set_timer(l_ptr, cont_intv);
break;
case RESET_MSG:
- dbg_link("RES -> RR\n");
- info("Resetting link <%s>, requested by peer "
- "while probing\n", l_ptr->name);
+ pr_info("%s<%s>, requested by peer while probing\n",
+ link_rst_msg, l_ptr->name);
tipc_link_reset(l_ptr);
l_ptr->state = RESET_RESET;
l_ptr->fsm_msg_cnt = 0;
- tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
+ 0, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
link_set_timer(l_ptr, cont_intv);
break;
case TIMEOUT_EVT:
- dbg_link("TIM ");
if (l_ptr->next_in_no != l_ptr->checkpoint) {
- dbg_link("-> WW\n");
l_ptr->state = WORKING_WORKING;
l_ptr->fsm_msg_cnt = 0;
l_ptr->checkpoint = l_ptr->next_in_no;
if (tipc_bclink_acks_missing(l_ptr->owner)) {
- tipc_link_send_proto_msg(l_ptr, STATE_MSG,
- 0, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, STATE_MSG,
+ 0, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
}
link_set_timer(l_ptr, cont_intv);
} else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
- dbg_link("Probing %u/%u,timer = %u ms)\n",
- l_ptr->fsm_msg_cnt, l_ptr->abort_limit,
- cont_intv / 4);
- tipc_link_send_proto_msg(l_ptr, STATE_MSG,
- 1, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, STATE_MSG,
+ 1, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
link_set_timer(l_ptr, cont_intv / 4);
} else { /* Link has failed */
- dbg_link("-> RU (%u probes unanswered)\n",
- l_ptr->fsm_msg_cnt);
- warn("Resetting link <%s>, peer not responding\n",
- l_ptr->name);
+ pr_warn("%s<%s>, peer not responding\n",
+ link_rst_msg, l_ptr->name);
tipc_link_reset(l_ptr);
l_ptr->state = RESET_UNKNOWN;
l_ptr->fsm_msg_cnt = 0;
- tipc_link_send_proto_msg(l_ptr, RESET_MSG,
- 0, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, RESET_MSG,
+ 0, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
link_set_timer(l_ptr, cont_intv);
}
break;
default:
- err("Unknown link event %u in WU state\n", event);
+ pr_err("%s%u in WU state\n", link_unk_evt, event);
}
break;
case RESET_UNKNOWN:
- dbg_link("RU/");
switch (event) {
case TRAFFIC_MSG_EVT:
- dbg_link("TRF-\n");
break;
case ACTIVATE_MSG:
other = l_ptr->owner->active_links[0];
- if (other && link_working_unknown(other)) {
- dbg_link("ACT\n");
+ if (other && link_working_unknown(other))
break;
- }
- dbg_link("ACT -> WW\n");
l_ptr->state = WORKING_WORKING;
l_ptr->fsm_msg_cnt = 0;
link_activate(l_ptr);
- tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
+ if (l_ptr->owner->working_links == 1)
+ tipc_link_sync_xmit(l_ptr);
link_set_timer(l_ptr, cont_intv);
break;
case RESET_MSG:
- dbg_link("RES\n");
- dbg_link(" -> RR\n");
l_ptr->state = RESET_RESET;
l_ptr->fsm_msg_cnt = 0;
- tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 1, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
+ 1, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
link_set_timer(l_ptr, cont_intv);
break;
case STARTING_EVT:
- dbg_link("START-");
- l_ptr->started = 1;
+ l_ptr->flags |= LINK_STARTED;
/* fall through */
case TIMEOUT_EVT:
- dbg_link("TIM\n");
- tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
link_set_timer(l_ptr, cont_intv);
break;
default:
- err("Unknown link event %u in RU state\n", event);
+ pr_err("%s%u in RU state\n", link_unk_evt, event);
}
break;
case RESET_RESET:
- dbg_link("RR/ ");
switch (event) {
case TRAFFIC_MSG_EVT:
- dbg_link("TRF-");
- /* fall through */
case ACTIVATE_MSG:
other = l_ptr->owner->active_links[0];
- if (other && link_working_unknown(other)) {
- dbg_link("ACT\n");
+ if (other && link_working_unknown(other))
break;
- }
- dbg_link("ACT -> WW\n");
l_ptr->state = WORKING_WORKING;
l_ptr->fsm_msg_cnt = 0;
link_activate(l_ptr);
- tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
+ if (l_ptr->owner->working_links == 1)
+ tipc_link_sync_xmit(l_ptr);
link_set_timer(l_ptr, cont_intv);
break;
case RESET_MSG:
- dbg_link("RES\n");
break;
case TIMEOUT_EVT:
- dbg_link("TIM\n");
- tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
+ 0, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
link_set_timer(l_ptr, cont_intv);
- dbg_link("fsm_msg_cnt %u\n", l_ptr->fsm_msg_cnt);
break;
default:
- err("Unknown link event %u in RR state\n", event);
+ pr_err("%s%u in RR state\n", link_unk_evt, event);
}
break;
default:
- err("Unknown link state %u/%u\n", l_ptr->state, event);
+ pr_err("Unknown link state %u/%u\n", l_ptr->state, event);
}
}
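The state machine above is easier to review as a transition table. The sketch below condenses the switch statement into (state, event) -> next-state rules; it deliberately omits the guards the real code applies (traffic-since-checkpoint checks, probe counting against abort_limit, the test for a second link already in WORKING_UNKNOWN, and the broadcast-sync transmit on the first working link), so treat it as a reading aid, not a replacement:

/* Condensed view of link_state_event(); guards and timer handling omitted */
enum link_fsm_state { WW, WU, RU, RR };	/* WORKING_WORKING, WORKING_UNKNOWN,
					 * RESET_UNKNOWN, RESET_RESET */
enum link_fsm_event { EV_TRAFFIC, EV_ACTIVATE, EV_RESET, EV_TIMEOUT };

struct link_fsm_rule {
	enum link_fsm_state state;
	enum link_fsm_event event;
	enum link_fsm_state next;
	const char *action;	/* protocol message sent, if any */
};

static const struct link_fsm_rule link_fsm_sketch[] = {
	{ WW, EV_TIMEOUT,  WU, "STATE probe (no traffic since checkpoint)" },
	{ WW, EV_RESET,    RR, "reset link, send ACTIVATE" },
	{ WU, EV_TRAFFIC,  WW, "" },
	{ WU, EV_ACTIVATE, WW, "" },
	{ WU, EV_RESET,    RR, "reset link, send ACTIVATE" },
	{ WU, EV_TIMEOUT,  RU, "reset link, send RESET (probes exhausted)" },
	{ RU, EV_ACTIVATE, WW, "activate link, send STATE" },
	{ RU, EV_RESET,    RR, "send ACTIVATE" },
	{ RU, EV_TIMEOUT,  RU, "send RESET" },
	{ RR, EV_TRAFFIC,  WW, "activate link, send STATE" },
	{ RR, EV_ACTIVATE, WW, "activate link, send STATE" },
	{ RR, EV_TIMEOUT,  RR, "send ACTIVATE" },
};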
@@ -901,9 +680,7 @@ static void link_state_event(struct link *l_ptr, unsigned event)
* link_bundle_buf(): Append contents of a buffer to
* the tail of an existing one.
*/
-
-static int link_bundle_buf(struct link *l_ptr,
- struct sk_buff *bundler,
+static int link_bundle_buf(struct tipc_link *l_ptr, struct sk_buff *bundler,
struct sk_buff *buf)
{
struct tipc_msg *bundler_msg = buf_msg(bundler);
@@ -926,15 +703,12 @@ static int link_bundle_buf(struct link *l_ptr,
skb_copy_to_linear_data_offset(bundler, to_pos, buf->data, size);
msg_set_size(bundler_msg, to_pos + size);
msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1);
- dbg("Packed msg # %u(%u octets) into pos %u in buf(#%u)\n",
- msg_msgcnt(bundler_msg), size, to_pos, msg_seqno(bundler_msg));
- msg_dbg(msg, "PACKD:");
- buf_discard(buf);
+ kfree_skb(buf);
l_ptr->stats.sent_bundled++;
return 1;
}
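link_bundle_buf() packs a complete small message, header included, at the next aligned offset of an existing bundle buffer, then grows the bundle's size field and bumps its message counter. A minimal sketch of that packing step, using a toy header layout invented for this example (the real code reuses the ordinary TIPC message header and its msgcnt field):

#include <stdint.h>
#include <string.h>

/* Toy bundle header; b->size must start at the header size (8 bytes here)
 * when the bundle is created. */
struct bundle_sketch {
	uint32_t size;		/* bytes used, including this header */
	uint32_t msgcnt;	/* number of messages packed so far */
	uint8_t data[1500];
};

/* Append one complete message (header + payload) at the next 4-byte
 * aligned offset, as link_bundle_buf() does; return 0 if it will not fit. */
static int bundle_append(struct bundle_sketch *b, const void *msg, uint32_t size)
{
	uint32_t to_pos = (b->size + 3) & ~3u;	/* align(bundle_size) */

	if (to_pos + size > sizeof(*b))
		return 0;	/* caller must start a new bundle instead */
	memcpy((uint8_t *)b + to_pos, msg, size);
	b->size = to_pos + size;
	b->msgcnt += 1;
	return 1;
}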
-static void link_add_to_outqueue(struct link *l_ptr,
+static void link_add_to_outqueue(struct tipc_link *l_ptr,
struct sk_buff *buf,
struct tipc_msg *msg)
{
@@ -949,16 +723,37 @@ static void link_add_to_outqueue(struct link *l_ptr,
l_ptr->last_out = buf;
} else
l_ptr->first_out = l_ptr->last_out = buf;
+
l_ptr->out_queue_size++;
+ if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz)
+ l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
+}
+
+static void link_add_chain_to_outqueue(struct tipc_link *l_ptr,
+ struct sk_buff *buf_chain,
+ u32 long_msgno)
+{
+ struct sk_buff *buf;
+ struct tipc_msg *msg;
+
+ if (!l_ptr->next_out)
+ l_ptr->next_out = buf_chain;
+ while (buf_chain) {
+ buf = buf_chain;
+ buf_chain = buf_chain->next;
+
+ msg = buf_msg(buf);
+ msg_set_long_msgno(msg, long_msgno);
+ link_add_to_outqueue(l_ptr, buf, msg);
+ }
}
/*
- * tipc_link_send_buf() is the 'full path' for messages, called from
- * inside TIPC when the 'fast path' in tipc_send_buf
+ * tipc_link_xmit() is the 'full path' for messages, called from
+ * inside TIPC when the 'fast path' in tipc_send_xmit
* has failed, and from link_send()
*/
-
-int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf)
+int __tipc_link_xmit(struct tipc_link *l_ptr, struct sk_buff *buf)
{
struct tipc_msg *msg = buf_msg(buf);
u32 size = msg_size(msg);
@@ -968,62 +763,44 @@ int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf)
u32 queue_limit = l_ptr->queue_limit[imp];
u32 max_packet = l_ptr->max_pkt;
- msg_set_prevnode(msg, tipc_own_addr); /* If routed message */
-
/* Match msg importance against queue limits: */
-
if (unlikely(queue_size >= queue_limit)) {
if (imp <= TIPC_CRITICAL_IMPORTANCE) {
- return link_schedule_port(l_ptr, msg_origport(msg),
- size);
+ link_schedule_port(l_ptr, msg_origport(msg), size);
+ kfree_skb(buf);
+ return -ELINKCONG;
}
- msg_dbg(msg, "TIPC: Congestion, throwing away\n");
- buf_discard(buf);
+ kfree_skb(buf);
if (imp > CONN_MANAGER) {
- warn("Resetting link <%s>, send queue full", l_ptr->name);
+ pr_warn("%s<%s>, send queue full", link_rst_msg,
+ l_ptr->name);
tipc_link_reset(l_ptr);
}
return dsz;
}
/* Fragmentation needed ? */
-
if (size > max_packet)
- return link_send_long_buf(l_ptr, buf);
-
- /* Packet can be queued or sent: */
+ return tipc_link_frag_xmit(l_ptr, buf);
- if (queue_size > l_ptr->stats.max_queue_sz)
- l_ptr->stats.max_queue_sz = queue_size;
-
- if (likely(!tipc_bearer_congested(l_ptr->b_ptr, l_ptr) &&
- !link_congested(l_ptr))) {
+ /* Packet can be queued or sent. */
+ if (likely(!link_congested(l_ptr))) {
link_add_to_outqueue(l_ptr, buf, msg);
- if (likely(tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr))) {
- l_ptr->unacked_window = 0;
- } else {
- tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
- l_ptr->stats.bearer_congs++;
- l_ptr->next_out = buf;
- }
+ tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
+ l_ptr->unacked_window = 0;
return dsz;
}
- /* Congestion: can message be bundled ?: */
-
+ /* Congestion: can message be bundled ? */
if ((msg_user(msg) != CHANGEOVER_PROTOCOL) &&
(msg_user(msg) != MSG_FRAGMENTER)) {
/* Try adding message to an existing bundle */
-
if (l_ptr->next_out &&
- link_bundle_buf(l_ptr, l_ptr->last_out, buf)) {
- tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
+ link_bundle_buf(l_ptr, l_ptr->last_out, buf))
return dsz;
- }
/* Try creating a new bundle */
-
if (size <= max_packet * 2 / 3) {
struct sk_buff *bundler = tipc_buf_acquire(max_packet);
struct tipc_msg bundler_hdr;
@@ -1044,133 +821,155 @@ int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf)
if (!l_ptr->next_out)
l_ptr->next_out = buf;
link_add_to_outqueue(l_ptr, buf, msg);
- tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
return dsz;
}
/*
- * tipc_link_send(): same as tipc_link_send_buf(), but the link to use has
- * not been selected yet, and the the owner node is not locked
+ * tipc_link_xmit(): same as __tipc_link_xmit(), but the link to use
+ * has not been selected yet, and the owner node is not locked
* Called by TIPC internal users, e.g. the name distributor
*/
-
-int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
+int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector)
{
- struct link *l_ptr;
+ struct tipc_link *l_ptr;
struct tipc_node *n_ptr;
int res = -ELINKCONG;
- read_lock_bh(&tipc_net_lock);
- n_ptr = tipc_node_select(dest, selector);
+ n_ptr = tipc_node_find(dest);
if (n_ptr) {
tipc_node_lock(n_ptr);
l_ptr = n_ptr->active_links[selector & 1];
- if (l_ptr) {
- dbg("tipc_link_send: found link %x for dest %x\n", l_ptr, dest);
- res = tipc_link_send_buf(l_ptr, buf);
- } else {
- dbg("Attempt to send msg to unreachable node:\n");
- msg_dbg(buf_msg(buf),">>>");
- buf_discard(buf);
- }
+ if (l_ptr)
+ res = __tipc_link_xmit(l_ptr, buf);
+ else
+ kfree_skb(buf);
tipc_node_unlock(n_ptr);
} else {
- dbg("Attempt to send msg to unknown node:\n");
- msg_dbg(buf_msg(buf),">>>");
- buf_discard(buf);
+ kfree_skb(buf);
}
- read_unlock_bh(&tipc_net_lock);
return res;
}
/*
- * link_send_buf_fast: Entry for data messages where the
- * destination link is known and the header is complete,
- * inclusive total message length. Very time critical.
- * Link is locked. Returns user data length.
+ * tipc_link_sync_xmit - synchronize broadcast link endpoints.
+ *
+ * Give a newly added peer node the sequence number where it should
+ * start receiving and acking broadcast packets.
+ *
+ * Called with node locked
*/
+static void tipc_link_sync_xmit(struct tipc_link *l)
+{
+ struct sk_buff *buf;
+ struct tipc_msg *msg;
+
+ buf = tipc_buf_acquire(INT_H_SIZE);
+ if (!buf)
+ return;
+
+ msg = buf_msg(buf);
+ tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, l->addr);
+ msg_set_last_bcast(msg, l->owner->bclink.acked);
+ link_add_chain_to_outqueue(l, buf, 0);
+ tipc_link_push_queue(l);
+}
-static int link_send_buf_fast(struct link *l_ptr, struct sk_buff *buf,
- u32 *used_max_pkt)
+/*
+ * tipc_link_sync_rcv - synchronize broadcast link endpoints.
+ * Receive the sequence number where we should start receiving and
+ * acking broadcast packets from a newly added peer node, and open
+ * up for reception of such packets.
+ *
+ * Called with node locked
+ */
+static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf)
{
struct tipc_msg *msg = buf_msg(buf);
- int res = msg_data_sz(msg);
- if (likely(!link_congested(l_ptr))) {
- if (likely(msg_size(msg) <= l_ptr->max_pkt)) {
- if (likely(list_empty(&l_ptr->b_ptr->cong_links))) {
- link_add_to_outqueue(l_ptr, buf, msg);
- if (likely(tipc_bearer_send(l_ptr->b_ptr, buf,
- &l_ptr->media_addr))) {
- l_ptr->unacked_window = 0;
- msg_dbg(msg,"SENT_FAST:");
- return res;
- }
- dbg("failed sent fast...\n");
- tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
- l_ptr->stats.bearer_congs++;
- l_ptr->next_out = buf;
- return res;
- }
- }
- else
- *used_max_pkt = l_ptr->max_pkt;
- }
- return tipc_link_send_buf(l_ptr, buf); /* All other cases */
+ n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg);
+ n->bclink.recv_permitted = true;
+ kfree_skb(buf);
}
/*
- * tipc_send_buf_fast: Entry for data messages where the
- * destination node is known and the header is complete,
- * inclusive total message length.
- * Returns user data length.
+ * tipc_link_names_xmit - send name table entries to new neighbor
+ *
+ * Send routine for bulk delivery of name table messages when contact
+ * with a new neighbor occurs. No link congestion checking is performed
+ * because name table messages *must* be delivered. The messages must be
+ * small enough not to require fragmentation.
+ * Called without any locks held.
*/
-int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
+void tipc_link_names_xmit(struct list_head *message_list, u32 dest)
{
- struct link *l_ptr;
struct tipc_node *n_ptr;
- int res;
- u32 selector = msg_origport(buf_msg(buf)) & 1;
- u32 dummy;
+ struct tipc_link *l_ptr;
+ struct sk_buff *buf;
+ struct sk_buff *temp_buf;
- if (destnode == tipc_own_addr)
- return tipc_port_recv_msg(buf);
+ if (list_empty(message_list))
+ return;
- read_lock_bh(&tipc_net_lock);
- n_ptr = tipc_node_select(destnode, selector);
- if (likely(n_ptr)) {
+ n_ptr = tipc_node_find(dest);
+ if (n_ptr) {
tipc_node_lock(n_ptr);
- l_ptr = n_ptr->active_links[selector];
- dbg("send_fast: buf %x selected %x, destnode = %x\n",
- buf, l_ptr, destnode);
- if (likely(l_ptr)) {
- res = link_send_buf_fast(l_ptr, buf, &dummy);
- tipc_node_unlock(n_ptr);
- read_unlock_bh(&tipc_net_lock);
- return res;
+ l_ptr = n_ptr->active_links[0];
+ if (l_ptr) {
+ /* convert circular list to linear list */
+ ((struct sk_buff *)message_list->prev)->next = NULL;
+ link_add_chain_to_outqueue(l_ptr,
+ (struct sk_buff *)message_list->next, 0);
+ tipc_link_push_queue(l_ptr);
+ INIT_LIST_HEAD(message_list);
}
tipc_node_unlock(n_ptr);
}
- read_unlock_bh(&tipc_net_lock);
- res = msg_data_sz(buf_msg(buf));
- tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
- return res;
+
+ /* discard the messages if they couldn't be sent */
+ list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) {
+ list_del((struct list_head *)buf);
+ kfree_skb(buf);
+ }
}
+/*
+ * tipc_link_xmit_fast: Entry for data messages where the
+ * destination link is known and the header is complete,
+ * inclusive total message length. Very time critical.
+ * Link is locked. Returns user data length.
+ */
+static int tipc_link_xmit_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
+ u32 *used_max_pkt)
+{
+ struct tipc_msg *msg = buf_msg(buf);
+ int res = msg_data_sz(msg);
+
+ if (likely(!link_congested(l_ptr))) {
+ if (likely(msg_size(msg) <= l_ptr->max_pkt)) {
+ link_add_to_outqueue(l_ptr, buf, msg);
+ tipc_bearer_send(l_ptr->bearer_id, buf,
+ &l_ptr->media_addr);
+ l_ptr->unacked_window = 0;
+ return res;
+ }
+ else
+ *used_max_pkt = l_ptr->max_pkt;
+ }
+ return __tipc_link_xmit(l_ptr, buf); /* All other cases */
+}
/*
- * tipc_link_send_sections_fast: Entry for messages where the
+ * tipc_link_iovec_xmit_fast: Entry for messages where the
* destination processor is known and the header is complete,
* except for total message length.
* Returns user data length or errno.
*/
-int tipc_link_send_sections_fast(struct port *sender,
- struct iovec const *msg_sect,
- const u32 num_sect,
- u32 destaddr)
+int tipc_link_iovec_xmit_fast(struct tipc_port *sender,
+ struct iovec const *msg_sect,
+ unsigned int len, u32 destaddr)
{
- struct tipc_msg *hdr = &sender->publ.phdr;
- struct link *l_ptr;
+ struct tipc_msg *hdr = &sender->phdr;
+ struct tipc_link *l_ptr;
struct sk_buff *buf;
struct tipc_node *node;
int res;
@@ -1181,38 +980,28 @@ again:
* Try building message using port's max_pkt hint.
* (Must not hold any locks while building message.)
*/
+ res = tipc_msg_build(hdr, msg_sect, len, sender->max_pkt, &buf);
+ /* Exit if build request was invalid */
+ if (unlikely(res < 0))
+ return res;
- res = tipc_msg_build(hdr, msg_sect, num_sect, sender->publ.max_pkt,
- !sender->user_port, &buf);
-
- read_lock_bh(&tipc_net_lock);
- node = tipc_node_select(destaddr, selector);
+ node = tipc_node_find(destaddr);
if (likely(node)) {
tipc_node_lock(node);
l_ptr = node->active_links[selector];
if (likely(l_ptr)) {
if (likely(buf)) {
- res = link_send_buf_fast(l_ptr, buf,
- &sender->publ.max_pkt);
- if (unlikely(res < 0))
- buf_discard(buf);
+ res = tipc_link_xmit_fast(l_ptr, buf,
+ &sender->max_pkt);
exit:
tipc_node_unlock(node);
- read_unlock_bh(&tipc_net_lock);
return res;
}
- /* Exit if build request was invalid */
-
- if (unlikely(res < 0))
- goto exit;
-
/* Exit if link (or bearer) is congested */
-
- if (link_congested(l_ptr) ||
- !list_empty(&l_ptr->b_ptr->cong_links)) {
+ if (link_congested(l_ptr)) {
res = link_schedule_port(l_ptr,
- sender->publ.ref, res);
+ sender->ref, res);
goto exit;
}
@@ -1220,34 +1009,27 @@ exit:
* Message size exceeds max_pkt hint; update hint,
* then re-try fast path or fragment the message
*/
-
- sender->publ.max_pkt = l_ptr->max_pkt;
+ sender->max_pkt = l_ptr->max_pkt;
tipc_node_unlock(node);
- read_unlock_bh(&tipc_net_lock);
- if ((msg_hdr_sz(hdr) + res) <= sender->publ.max_pkt)
+ if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
goto again;
- return link_send_sections_long(sender, msg_sect,
- num_sect, destaddr);
+ return tipc_link_iovec_long_xmit(sender, msg_sect,
+ len, destaddr);
}
tipc_node_unlock(node);
}
- read_unlock_bh(&tipc_net_lock);
/* Couldn't find a link to the destination node */
-
- if (buf)
- return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
- if (res >= 0)
- return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
- TIPC_ERR_NO_NODE);
- return res;
+ kfree_skb(buf);
+ tipc_port_iovec_reject(sender, hdr, msg_sect, len, TIPC_ERR_NO_NODE);
+ return -ENETUNREACH;
}
/*
- * link_send_sections_long(): Entry for long messages where the
+ * tipc_link_iovec_long_xmit(): Entry for long messages where the
* destination node is known and the header is complete,
* inclusive total message length.
* Link and bearer congestion status have been checked to be ok,
@@ -1260,26 +1042,26 @@ exit:
*
* Returns user data length or errno.
*/
-static int link_send_sections_long(struct port *sender,
- struct iovec const *msg_sect,
- u32 num_sect,
- u32 destaddr)
+static int tipc_link_iovec_long_xmit(struct tipc_port *sender,
+ struct iovec const *msg_sect,
+ unsigned int len, u32 destaddr)
{
- struct link *l_ptr;
+ struct tipc_link *l_ptr;
struct tipc_node *node;
- struct tipc_msg *hdr = &sender->publ.phdr;
- u32 dsz = msg_data_sz(hdr);
- u32 max_pkt,fragm_sz,rest;
+ struct tipc_msg *hdr = &sender->phdr;
+ u32 dsz = len;
+ u32 max_pkt, fragm_sz, rest;
struct tipc_msg fragm_hdr;
- struct sk_buff *buf,*buf_chain,*prev;
- u32 fragm_crs,fragm_rest,hsz,sect_rest;
- const unchar *sect_crs;
+ struct sk_buff *buf, *buf_chain, *prev;
+ u32 fragm_crs, fragm_rest, hsz, sect_rest;
+ const unchar __user *sect_crs;
int curr_sect;
u32 fragm_no;
+ int res = 0;
again:
fragm_no = 1;
- max_pkt = sender->publ.max_pkt - INT_H_SIZE;
+ max_pkt = sender->max_pkt - INT_H_SIZE;
/* leave room for tunnel header in case of link changeover */
fragm_sz = max_pkt - INT_H_SIZE;
/* leave room for fragmentation header in each fragment */
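The sizing above reserves one internal header's worth of space for a possible changeover tunnel header and another for the per-fragment header, so each fragment carries sender->max_pkt minus twice INT_H_SIZE bytes of payload. A quick worked example, assuming INT_H_SIZE is 40 bytes (its value in net/tipc/msg.h) and ignoring the user header that also rides in the first fragment:

#include <stdio.h>

#define INT_H_SIZE 40	/* internal TIPC header size (assumed, see msg.h) */

int main(void)
{
	unsigned int sender_max_pkt = 1500;	/* example link MTU hint */
	unsigned int dsz = 10000;		/* example user data length */

	/* leave room for tunnel header in case of link changeover */
	unsigned int max_pkt = sender_max_pkt - INT_H_SIZE;
	/* leave room for fragmentation header in each fragment */
	unsigned int fragm_sz = max_pkt - INT_H_SIZE;
	unsigned int fragments = (dsz + fragm_sz - 1) / fragm_sz;

	/* 1500 - 40 - 40 = 1420 payload bytes per fragment -> 8 fragments */
	printf("%u bytes -> %u fragments of up to %u payload bytes each\n",
	       dsz, fragments, fragm_sz);
	return 0;
}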
@@ -1290,17 +1072,13 @@ again:
sect_crs = NULL;
curr_sect = -1;
- /* Prepare reusable fragment header: */
-
- msg_dbg(hdr, ">FRAGMENTING>");
+ /* Prepare reusable fragment header */
tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
INT_H_SIZE, msg_destnode(hdr));
- msg_set_link_selector(&fragm_hdr, sender->publ.ref);
msg_set_size(&fragm_hdr, max_pkt);
msg_set_fragm_no(&fragm_hdr, 1);
- /* Prepare header of first fragment: */
-
+ /* Prepare header of first fragment */
buf_chain = buf = tipc_buf_acquire(max_pkt);
if (!buf)
return -ENOMEM;
@@ -1308,10 +1086,8 @@ again:
skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
hsz = msg_hdr_sz(hdr);
skb_copy_to_linear_data_offset(buf, INT_H_SIZE, hdr, hsz);
- msg_dbg(buf_msg(buf), ">BUILD>");
-
- /* Chop up message: */
+ /* Chop up message */
fragm_crs = INT_H_SIZE + hsz;
fragm_rest = fragm_sz - hsz;
@@ -1320,7 +1096,7 @@ again:
if (!sect_rest) {
sect_rest = msg_sect[++curr_sect].iov_len;
- sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
+ sect_crs = msg_sect[curr_sect].iov_base;
}
if (sect_rest < fragm_rest)
@@ -1328,18 +1104,12 @@ again:
else
sz = fragm_rest;
- if (likely(!sender->user_port)) {
- if (copy_from_user(buf->data + fragm_crs, sect_crs, sz)) {
+ if (copy_from_user(buf->data + fragm_crs, sect_crs, sz)) {
+ res = -EFAULT;
error:
- for (; buf_chain; buf_chain = buf) {
- buf = buf_chain->next;
- buf_discard(buf_chain);
- }
- return -EFAULT;
- }
- } else
- skb_copy_to_linear_data_offset(buf, fragm_crs,
- sect_crs, sz);
+ kfree_skb_list(buf_chain);
+ return res;
+ }
sect_crs += sz;
sect_rest -= sz;
fragm_crs += sz;
@@ -1351,7 +1121,7 @@ error:
/* Initiate new fragment: */
if (rest <= fragm_sz) {
fragm_sz = rest;
- msg_set_type(&fragm_hdr,LAST_FRAGMENT);
+ msg_set_type(&fragm_hdr, LAST_FRAGMENT);
} else {
msg_set_type(&fragm_hdr, FRAGMENT);
}
@@ -1359,70 +1129,50 @@ error:
msg_set_fragm_no(&fragm_hdr, ++fragm_no);
prev = buf;
buf = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
- if (!buf)
+ if (!buf) {
+ res = -ENOMEM;
goto error;
+ }
buf->next = NULL;
prev->next = buf;
skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
fragm_crs = INT_H_SIZE;
fragm_rest = fragm_sz;
- msg_dbg(buf_msg(buf)," >BUILD>");
}
- }
- while (rest > 0);
+ } while (rest > 0);
/*
* Now we have a buffer chain. Select a link and check
* that packet size is still OK
*/
- node = tipc_node_select(destaddr, sender->publ.ref & 1);
+ node = tipc_node_find(destaddr);
if (likely(node)) {
tipc_node_lock(node);
- l_ptr = node->active_links[sender->publ.ref & 1];
+ l_ptr = node->active_links[sender->ref & 1];
if (!l_ptr) {
tipc_node_unlock(node);
goto reject;
}
if (l_ptr->max_pkt < max_pkt) {
- sender->publ.max_pkt = l_ptr->max_pkt;
+ sender->max_pkt = l_ptr->max_pkt;
tipc_node_unlock(node);
- for (; buf_chain; buf_chain = buf) {
- buf = buf_chain->next;
- buf_discard(buf_chain);
- }
+ kfree_skb_list(buf_chain);
goto again;
}
} else {
reject:
- for (; buf_chain; buf_chain = buf) {
- buf = buf_chain->next;
- buf_discard(buf_chain);
- }
- return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
- TIPC_ERR_NO_NODE);
+ kfree_skb_list(buf_chain);
+ tipc_port_iovec_reject(sender, hdr, msg_sect, len,
+ TIPC_ERR_NO_NODE);
+ return -ENETUNREACH;
}
- /* Append whole chain to send queue: */
-
- buf = buf_chain;
- l_ptr->long_msg_seq_no = mod(l_ptr->long_msg_seq_no + 1);
- if (!l_ptr->next_out)
- l_ptr->next_out = buf_chain;
+ /* Append chain of fragments to send queue & send them */
+ l_ptr->long_msg_seq_no++;
+ link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
+ l_ptr->stats.sent_fragments += fragm_no;
l_ptr->stats.sent_fragmented++;
- while (buf) {
- struct sk_buff *next = buf->next;
- struct tipc_msg *msg = buf_msg(buf);
-
- l_ptr->stats.sent_fragments++;
- msg_set_long_msgno(msg, l_ptr->long_msg_seq_no);
- link_add_to_outqueue(l_ptr, buf, msg);
- msg_dbg(msg, ">ADD>");
- buf = next;
- }
-
- /* Send it, if possible: */
-
tipc_link_push_queue(l_ptr);
tipc_node_unlock(node);
return dsz;
@@ -1431,7 +1181,7 @@ reject:
/*
* tipc_link_push_packet: Push one unsent packet to the media
*/
-u32 tipc_link_push_packet(struct link *l_ptr)
+static u32 tipc_link_push_packet(struct tipc_link *l_ptr)
{
struct sk_buff *buf = l_ptr->first_out;
u32 r_q_size = l_ptr->retransm_queue_size;
@@ -1439,11 +1189,10 @@ u32 tipc_link_push_packet(struct link *l_ptr)
/* Step to position where retransmission failed, if any, */
/* consider that buffers may have been released in meantime */
-
if (r_q_size && buf) {
u32 last = lesser(mod(r_q_head + r_q_size),
link_last_sent(l_ptr));
- u32 first = msg_seqno(buf_msg(buf));
+ u32 first = buf_seqno(buf);
while (buf && less(first, r_q_head)) {
first = mod(first + 1);
@@ -1454,164 +1203,124 @@ u32 tipc_link_push_packet(struct link *l_ptr)
}
/* Continue retransmission now, if there is anything: */
-
if (r_q_size && buf) {
msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
- if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
- msg_dbg(buf_msg(buf), ">DEF-RETR>");
- l_ptr->retransm_queue_head = mod(++r_q_head);
- l_ptr->retransm_queue_size = --r_q_size;
- l_ptr->stats.retransmitted++;
- return 0;
- } else {
- l_ptr->stats.bearer_congs++;
- msg_dbg(buf_msg(buf), "|>DEF-RETR>");
- return PUSH_FAILED;
- }
+ tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
+ l_ptr->retransm_queue_head = mod(++r_q_head);
+ l_ptr->retransm_queue_size = --r_q_size;
+ l_ptr->stats.retransmitted++;
+ return 0;
}
/* Send deferred protocol message, if any: */
-
buf = l_ptr->proto_msg_queue;
if (buf) {
msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
- msg_set_bcast_ack(buf_msg(buf),l_ptr->owner->bclink.last_in);
- if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
- msg_dbg(buf_msg(buf), ">DEF-PROT>");
- l_ptr->unacked_window = 0;
- buf_discard(buf);
- l_ptr->proto_msg_queue = NULL;
- return 0;
- } else {
- msg_dbg(buf_msg(buf), "|>DEF-PROT>");
- l_ptr->stats.bearer_congs++;
- return PUSH_FAILED;
- }
+ msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
+ tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
+ l_ptr->unacked_window = 0;
+ kfree_skb(buf);
+ l_ptr->proto_msg_queue = NULL;
+ return 0;
}
/* Send one deferred data message, if send window not full: */
-
buf = l_ptr->next_out;
if (buf) {
struct tipc_msg *msg = buf_msg(buf);
u32 next = msg_seqno(msg);
- u32 first = msg_seqno(buf_msg(l_ptr->first_out));
+ u32 first = buf_seqno(l_ptr->first_out);
if (mod(next - first) < l_ptr->queue_limit[0]) {
msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
- if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
- if (msg_user(msg) == MSG_BUNDLER)
- msg_set_type(msg, CLOSED_MSG);
- msg_dbg(msg, ">PUSH-DATA>");
- l_ptr->next_out = buf->next;
- return 0;
- } else {
- msg_dbg(msg, "|PUSH-DATA|");
- l_ptr->stats.bearer_congs++;
- return PUSH_FAILED;
- }
+ tipc_bearer_send(l_ptr->bearer_id, buf,
+ &l_ptr->media_addr);
+ if (msg_user(msg) == MSG_BUNDLER)
+ msg_set_type(msg, CLOSED_MSG);
+ l_ptr->next_out = buf->next;
+ return 0;
}
}
- return PUSH_FINISHED;
+ return 1;
}
/*
* push_queue(): push out the unsent messages of a link where
* congestion has abated. Node is locked
*/
-void tipc_link_push_queue(struct link *l_ptr)
+void tipc_link_push_queue(struct tipc_link *l_ptr)
{
u32 res;
- if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr))
- return;
-
do {
res = tipc_link_push_packet(l_ptr);
} while (!res);
-
- if (res == PUSH_FAILED)
- tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
}
-static void link_reset_all(unsigned long addr)
+void tipc_link_reset_all(struct tipc_node *node)
{
- struct tipc_node *n_ptr;
char addr_string[16];
u32 i;
- read_lock_bh(&tipc_net_lock);
- n_ptr = tipc_node_find((u32)addr);
- if (!n_ptr) {
- read_unlock_bh(&tipc_net_lock);
- return; /* node no longer exists */
- }
-
- tipc_node_lock(n_ptr);
+ tipc_node_lock(node);
- warn("Resetting all links to %s\n",
- tipc_addr_string_fill(addr_string, n_ptr->addr));
+ pr_warn("Resetting all links to %s\n",
+ tipc_addr_string_fill(addr_string, node->addr));
for (i = 0; i < MAX_BEARERS; i++) {
- if (n_ptr->links[i]) {
- link_print(n_ptr->links[i], TIPC_OUTPUT,
- "Resetting link\n");
- tipc_link_reset(n_ptr->links[i]);
+ if (node->links[i]) {
+ link_print(node->links[i], "Resetting link\n");
+ tipc_link_reset(node->links[i]);
}
}
- tipc_node_unlock(n_ptr);
- read_unlock_bh(&tipc_net_lock);
+ tipc_node_unlock(node);
}
-static void link_retransmit_failure(struct link *l_ptr, struct sk_buff *buf)
+static void link_retransmit_failure(struct tipc_link *l_ptr,
+ struct sk_buff *buf)
{
struct tipc_msg *msg = buf_msg(buf);
- warn("Retransmission failure on link <%s>\n", l_ptr->name);
- tipc_msg_dbg(TIPC_OUTPUT, msg, ">RETR-FAIL>");
+ pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);
if (l_ptr->addr) {
-
/* Handle failure on standard link */
-
- link_print(l_ptr, TIPC_OUTPUT, "Resetting link\n");
+ link_print(l_ptr, "Resetting link\n");
tipc_link_reset(l_ptr);
} else {
-
/* Handle failure on broadcast link */
-
struct tipc_node *n_ptr;
char addr_string[16];
- tipc_printf(TIPC_OUTPUT, "Msg seq number: %u, ", msg_seqno(msg));
- tipc_printf(TIPC_OUTPUT, "Outstanding acks: %lu\n",
- (unsigned long) TIPC_SKB_CB(buf)->handle);
+ pr_info("Msg seq number: %u, ", msg_seqno(msg));
+ pr_cont("Outstanding acks: %lu\n",
+ (unsigned long) TIPC_SKB_CB(buf)->handle);
- n_ptr = l_ptr->owner->next;
+ n_ptr = tipc_bclink_retransmit_to();
tipc_node_lock(n_ptr);
tipc_addr_string_fill(addr_string, n_ptr->addr);
- tipc_printf(TIPC_OUTPUT, "Multicast link info for %s\n", addr_string);
- tipc_printf(TIPC_OUTPUT, "Supported: %d, ", n_ptr->bclink.supported);
- tipc_printf(TIPC_OUTPUT, "Acked: %u\n", n_ptr->bclink.acked);
- tipc_printf(TIPC_OUTPUT, "Last in: %u, ", n_ptr->bclink.last_in);
- tipc_printf(TIPC_OUTPUT, "Gap after: %u, ", n_ptr->bclink.gap_after);
- tipc_printf(TIPC_OUTPUT, "Gap to: %u\n", n_ptr->bclink.gap_to);
- tipc_printf(TIPC_OUTPUT, "Nack sync: %u\n\n", n_ptr->bclink.nack_sync);
-
- tipc_k_signal((Handler)link_reset_all, (unsigned long)n_ptr->addr);
+ pr_info("Broadcast link info for %s\n", addr_string);
+ pr_info("Reception permitted: %d, Acked: %u\n",
+ n_ptr->bclink.recv_permitted,
+ n_ptr->bclink.acked);
+ pr_info("Last in: %u, Oos state: %u, Last sent: %u\n",
+ n_ptr->bclink.last_in,
+ n_ptr->bclink.oos_state,
+ n_ptr->bclink.last_sent);
tipc_node_unlock(n_ptr);
+ tipc_bclink_set_flags(TIPC_BCLINK_RESET);
l_ptr->stale_count = 0;
}
}
-void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
+void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
u32 retransmits)
{
struct tipc_msg *msg;
@@ -1621,49 +1330,25 @@ void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
msg = buf_msg(buf);
- dbg("Retransmitting %u in link %x\n", retransmits, l_ptr);
-
- if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
- if (l_ptr->retransm_queue_size == 0) {
- msg_dbg(msg, ">NO_RETR->BCONG>");
- dbg_print_link(l_ptr, " ");
- l_ptr->retransm_queue_head = msg_seqno(msg);
- l_ptr->retransm_queue_size = retransmits;
- } else {
- err("Unexpected retransmit on link %s (qsize=%d)\n",
- l_ptr->name, l_ptr->retransm_queue_size);
+ /* Detect repeated retransmit failures */
+ if (l_ptr->last_retransmitted == msg_seqno(msg)) {
+ if (++l_ptr->stale_count > 100) {
+ link_retransmit_failure(l_ptr, buf);
+ return;
}
- return;
} else {
- /* Detect repeated retransmit failures on uncongested bearer */
-
- if (l_ptr->last_retransmitted == msg_seqno(msg)) {
- if (++l_ptr->stale_count > 100) {
- link_retransmit_failure(l_ptr, buf);
- return;
- }
- } else {
- l_ptr->last_retransmitted = msg_seqno(msg);
- l_ptr->stale_count = 1;
- }
+ l_ptr->last_retransmitted = msg_seqno(msg);
+ l_ptr->stale_count = 1;
}
while (retransmits && (buf != l_ptr->next_out) && buf) {
msg = buf_msg(buf);
msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
- if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
- msg_dbg(buf_msg(buf), ">RETR>");
- buf = buf->next;
- retransmits--;
- l_ptr->stats.retransmitted++;
- } else {
- tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
- l_ptr->stats.bearer_congs++;
- l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf));
- l_ptr->retransm_queue_size = retransmits;
- return;
- }
+ tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
+ buf = buf->next;
+ retransmits--;
+ l_ptr->stats.retransmitted++;
}
l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
@@ -1672,8 +1357,7 @@ void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
/**
* link_insert_deferred_queue - insert deferred messages back into receive chain
*/
-
-static struct sk_buff *link_insert_deferred_queue(struct link *l_ptr,
+static struct sk_buff *link_insert_deferred_queue(struct tipc_link *l_ptr,
struct sk_buff *buf)
{
u32 seq_no;
@@ -1681,7 +1365,7 @@ static struct sk_buff *link_insert_deferred_queue(struct link *l_ptr,
if (l_ptr->oldest_deferred_in == NULL)
return buf;
- seq_no = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
+ seq_no = buf_seqno(l_ptr->oldest_deferred_in);
if (seq_no == mod(l_ptr->next_in_no)) {
l_ptr->newest_deferred_in->next = buf;
buf = l_ptr->oldest_deferred_in;
@@ -1703,11 +1387,10 @@ static struct sk_buff *link_insert_deferred_queue(struct link *l_ptr,
* TIPC will ignore the excess, under the assumption that it is optional info
* introduced by a later release of the protocol.
*/
-
static int link_recv_buf_validate(struct sk_buff *buf)
{
static u32 min_data_hdr_size[8] = {
- SHORT_H_SIZE, MCAST_H_SIZE, LONG_H_SIZE, DIR_MSG_H_SIZE,
+ SHORT_H_SIZE, MCAST_H_SIZE, NAMED_H_SIZE, BASIC_H_SIZE,
MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE
};
@@ -1717,6 +1400,12 @@ static int link_recv_buf_validate(struct sk_buff *buf)
u32 hdr_size;
u32 min_hdr_size;
+ /* If this packet comes from the defer queue, the skb has already
+ * been validated
+ */
+ if (unlikely(TIPC_SKB_CB(buf)->deferred))
+ return 1;
+
if (unlikely(buf->len < MIN_H_SIZE))
return 0;
@@ -1742,112 +1431,87 @@ static int link_recv_buf_validate(struct sk_buff *buf)
}
/**
- * tipc_recv_msg - process TIPC messages arriving from off-node
+ * tipc_rcv - process TIPC packets/messages arriving from off-node
* @head: pointer to message buffer chain
- * @tb_ptr: pointer to bearer message arrived on
+ * @b_ptr: pointer to bearer message arrived on
*
* Invoked with no locks held. Bearer pointer must point to a valid bearer
* structure (i.e. cannot be NULL), but bearer can be inactive.
*/
-
-void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
+void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
{
- read_lock_bh(&tipc_net_lock);
while (head) {
- struct bearer *b_ptr = (struct bearer *)tb_ptr;
struct tipc_node *n_ptr;
- struct link *l_ptr;
+ struct tipc_link *l_ptr;
struct sk_buff *crs;
struct sk_buff *buf = head;
struct tipc_msg *msg;
u32 seq_no;
u32 ackd;
u32 released = 0;
- int type;
head = head->next;
-
- /* Ensure bearer is still enabled */
-
- if (unlikely(!b_ptr->active))
- goto cont;
+ buf->next = NULL;
/* Ensure message is well-formed */
-
if (unlikely(!link_recv_buf_validate(buf)))
- goto cont;
+ goto discard;
/* Ensure message data is a single contiguous unit */
-
- if (unlikely(buf_linearize(buf))) {
- goto cont;
- }
+ if (unlikely(skb_linearize(buf)))
+ goto discard;
/* Handle arrival of a non-unicast link message */
-
msg = buf_msg(buf);
if (unlikely(msg_non_seq(msg))) {
if (msg_user(msg) == LINK_CONFIG)
- tipc_disc_recv_msg(buf, b_ptr);
+ tipc_disc_rcv(buf, b_ptr);
else
- tipc_bclink_recv_pkt(buf);
+ tipc_bclink_rcv(buf);
continue;
}
+ /* Discard unicast link messages destined for another node */
if (unlikely(!msg_short(msg) &&
(msg_destnode(msg) != tipc_own_addr)))
- goto cont;
-
- /* Discard non-routeable messages destined for another node */
-
- if (unlikely(!msg_isdata(msg) &&
- (msg_destnode(msg) != tipc_own_addr))) {
- if ((msg_user(msg) != CONN_MANAGER) &&
- (msg_user(msg) != MSG_FRAGMENTER))
- goto cont;
- }
+ goto discard;
/* Locate neighboring node that sent message */
-
n_ptr = tipc_node_find(msg_prevnode(msg));
if (unlikely(!n_ptr))
- goto cont;
+ goto discard;
tipc_node_lock(n_ptr);
- /* Don't talk to neighbor during cleanup after last session */
-
- if (n_ptr->cleanup_required) {
- tipc_node_unlock(n_ptr);
- goto cont;
- }
-
/* Locate unicast link endpoint that should handle message */
-
l_ptr = n_ptr->links[b_ptr->identity];
- if (unlikely(!l_ptr)) {
- tipc_node_unlock(n_ptr);
- goto cont;
- }
+ if (unlikely(!l_ptr))
+ goto unlock_discard;
- /* Validate message sequence number info */
+ /* Verify that communication with node is currently allowed */
+ if ((n_ptr->action_flags & TIPC_WAIT_PEER_LINKS_DOWN) &&
+ msg_user(msg) == LINK_PROTOCOL &&
+ (msg_type(msg) == RESET_MSG ||
+ msg_type(msg) == ACTIVATE_MSG) &&
+ !msg_redundant_link(msg))
+ n_ptr->action_flags &= ~TIPC_WAIT_PEER_LINKS_DOWN;
+
+ if (tipc_node_blocked(n_ptr))
+ goto unlock_discard;
+ /* Validate message sequence number info */
seq_no = msg_seqno(msg);
ackd = msg_ack(msg);
/* Release acked messages */
-
- if (less(n_ptr->bclink.acked, msg_bcast_ack(msg))) {
- if (tipc_node_is_up(n_ptr) && n_ptr->bclink.supported)
- tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
- }
+ if (n_ptr->bclink.recv_permitted)
+ tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
crs = l_ptr->first_out;
while ((crs != l_ptr->next_out) &&
- less_eq(msg_seqno(buf_msg(crs)), ackd)) {
+ less_eq(buf_seqno(crs), ackd)) {
struct sk_buff *next = crs->next;
-
- buf_discard(crs);
+ kfree_skb(crs);
crs = next;
released++;
}
@@ -1857,119 +1521,120 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
}
/* Try sending any messages link endpoint has pending */
-
if (unlikely(l_ptr->next_out))
tipc_link_push_queue(l_ptr);
+
if (unlikely(!list_empty(&l_ptr->waiting_ports)))
tipc_link_wakeup_ports(l_ptr, 0);
+
if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
l_ptr->stats.sent_acks++;
- tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
}
- /* Now (finally!) process the incoming message */
-
-protocol_check:
- if (likely(link_working_working(l_ptr))) {
- if (likely(seq_no == mod(l_ptr->next_in_no))) {
- l_ptr->next_in_no++;
- if (unlikely(l_ptr->oldest_deferred_in))
- head = link_insert_deferred_queue(l_ptr,
- head);
- if (likely(msg_is_dest(msg, tipc_own_addr))) {
-deliver:
- if (likely(msg_isdata(msg))) {
- tipc_node_unlock(n_ptr);
- tipc_port_recv_msg(buf);
- continue;
- }
- switch (msg_user(msg)) {
- case MSG_BUNDLER:
- l_ptr->stats.recv_bundles++;
- l_ptr->stats.recv_bundled +=
- msg_msgcnt(msg);
- tipc_node_unlock(n_ptr);
- tipc_link_recv_bundle(buf);
- continue;
- case ROUTE_DISTRIBUTOR:
- tipc_node_unlock(n_ptr);
- tipc_cltr_recv_routing_table(buf);
- continue;
- case NAME_DISTRIBUTOR:
- tipc_node_unlock(n_ptr);
- tipc_named_recv(buf);
- continue;
- case CONN_MANAGER:
- tipc_node_unlock(n_ptr);
- tipc_port_recv_proto_msg(buf);
- continue;
- case MSG_FRAGMENTER:
- l_ptr->stats.recv_fragments++;
- if (tipc_link_recv_fragment(&l_ptr->defragm_buf,
- &buf, &msg)) {
- l_ptr->stats.recv_fragmented++;
- goto deliver;
- }
- break;
- case CHANGEOVER_PROTOCOL:
- type = msg_type(msg);
- if (link_recv_changeover_msg(&l_ptr, &buf)) {
- msg = buf_msg(buf);
- seq_no = msg_seqno(msg);
- if (type == ORIGINAL_MSG)
- goto deliver;
- goto protocol_check;
- }
- break;
- }
- }
+ /* Process the incoming packet */
+ if (unlikely(!link_working_working(l_ptr))) {
+ if (msg_user(msg) == LINK_PROTOCOL) {
+ tipc_link_proto_rcv(l_ptr, buf);
+ head = link_insert_deferred_queue(l_ptr, head);
+ tipc_node_unlock(n_ptr);
+ continue;
+ }
+
+ /* Traffic message. Conditionally activate link */
+ link_state_event(l_ptr, TRAFFIC_MSG_EVT);
+
+ if (link_working_working(l_ptr)) {
+ /* Re-insert buffer in front of queue */
+ buf->next = head;
+ head = buf;
tipc_node_unlock(n_ptr);
- tipc_net_route_msg(buf);
continue;
}
+ goto unlock_discard;
+ }
+
+ /* Link is now in state WORKING_WORKING */
+ if (unlikely(seq_no != mod(l_ptr->next_in_no))) {
link_handle_out_of_seq_msg(l_ptr, buf);
head = link_insert_deferred_queue(l_ptr, head);
tipc_node_unlock(n_ptr);
continue;
}
-
- if (msg_user(msg) == LINK_PROTOCOL) {
- link_recv_proto_msg(l_ptr, buf);
+ l_ptr->next_in_no++;
+ if (unlikely(l_ptr->oldest_deferred_in))
head = link_insert_deferred_queue(l_ptr, head);
- tipc_node_unlock(n_ptr);
- continue;
+
+ /* Deliver packet/message to correct user: */
+ if (unlikely(msg_user(msg) == CHANGEOVER_PROTOCOL)) {
+ if (!tipc_link_tunnel_rcv(n_ptr, &buf)) {
+ tipc_node_unlock(n_ptr);
+ continue;
+ }
+ msg = buf_msg(buf);
+ } else if (msg_user(msg) == MSG_FRAGMENTER) {
+ l_ptr->stats.recv_fragments++;
+ if (tipc_buf_append(&l_ptr->reasm_buf, &buf)) {
+ l_ptr->stats.recv_fragmented++;
+ msg = buf_msg(buf);
+ } else {
+ if (!l_ptr->reasm_buf)
+ tipc_link_reset(l_ptr);
+ tipc_node_unlock(n_ptr);
+ continue;
+ }
}
- msg_dbg(msg,"NSEQ<REC<");
- link_state_event(l_ptr, TRAFFIC_MSG_EVT);
- if (link_working_working(l_ptr)) {
- /* Re-insert in front of queue */
- msg_dbg(msg,"RECV-REINS:");
- buf->next = head;
- head = buf;
+ switch (msg_user(msg)) {
+ case TIPC_LOW_IMPORTANCE:
+ case TIPC_MEDIUM_IMPORTANCE:
+ case TIPC_HIGH_IMPORTANCE:
+ case TIPC_CRITICAL_IMPORTANCE:
+ tipc_node_unlock(n_ptr);
+ tipc_sk_rcv(buf);
+ continue;
+ case MSG_BUNDLER:
+ l_ptr->stats.recv_bundles++;
+ l_ptr->stats.recv_bundled += msg_msgcnt(msg);
+ tipc_node_unlock(n_ptr);
+ tipc_link_bundle_rcv(buf);
+ continue;
+ case NAME_DISTRIBUTOR:
+ n_ptr->bclink.recv_permitted = true;
tipc_node_unlock(n_ptr);
+ tipc_named_rcv(buf);
continue;
+ case CONN_MANAGER:
+ tipc_node_unlock(n_ptr);
+ tipc_port_proto_rcv(buf);
+ continue;
+ case BCAST_PROTOCOL:
+ tipc_link_sync_rcv(n_ptr, buf);
+ break;
+ default:
+ kfree_skb(buf);
+ break;
}
tipc_node_unlock(n_ptr);
-cont:
- buf_discard(buf);
+ continue;
+unlock_discard:
+ tipc_node_unlock(n_ptr);
+discard:
+ kfree_skb(buf);
}
- read_unlock_bh(&tipc_net_lock);
}
-/*
- * link_defer_buf(): Sort a received out-of-sequence packet
- * into the deferred reception queue.
- * Returns the increase of the queue length,i.e. 0 or 1
+/**
+ * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
+ *
+ * Returns increase in queue length (i.e. 0 or 1)
*/
-
-u32 tipc_link_defer_pkt(struct sk_buff **head,
- struct sk_buff **tail,
+u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
struct sk_buff *buf)
{
- struct sk_buff *prev = NULL;
- struct sk_buff *crs = *head;
- u32 seq_no = msg_seqno(buf_msg(buf));
+ struct sk_buff *queue_buf;
+ struct sk_buff **prev;
+ u32 seq_no = buf_seqno(buf);
buf->next = NULL;
@@ -1980,67 +1645,58 @@ u32 tipc_link_defer_pkt(struct sk_buff **head,
}
/* Last ? */
- if (less(msg_seqno(buf_msg(*tail)), seq_no)) {
+ if (less(buf_seqno(*tail), seq_no)) {
(*tail)->next = buf;
*tail = buf;
return 1;
}
- /* Scan through queue and sort it in */
- do {
- struct tipc_msg *msg = buf_msg(crs);
+ /* Locate insertion point in queue, then insert; discard if duplicate */
+ prev = head;
+ queue_buf = *head;
+ for (;;) {
+ u32 curr_seqno = buf_seqno(queue_buf);
- if (less(seq_no, msg_seqno(msg))) {
- buf->next = crs;
- if (prev)
- prev->next = buf;
- else
- *head = buf;
- return 1;
+ if (seq_no == curr_seqno) {
+ kfree_skb(buf);
+ return 0;
}
- if (seq_no == msg_seqno(msg)) {
+
+ if (less(seq_no, curr_seqno))
break;
- }
- prev = crs;
- crs = crs->next;
- }
- while (crs);
- /* Message is a duplicate of an existing message */
+ prev = &queue_buf->next;
+ queue_buf = queue_buf->next;
+ }
- buf_discard(buf);
- return 0;
+ buf->next = queue_buf;
+ *prev = buf;
+ return 1;
}
-/**
+/*
* link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
*/
-
-static void link_handle_out_of_seq_msg(struct link *l_ptr,
+static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
struct sk_buff *buf)
{
- u32 seq_no = msg_seqno(buf_msg(buf));
+ u32 seq_no = buf_seqno(buf);
if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
- link_recv_proto_msg(l_ptr, buf);
+ tipc_link_proto_rcv(l_ptr, buf);
return;
}
- dbg("rx OOS msg: seq_no %u, expecting %u (%u)\n",
- seq_no, mod(l_ptr->next_in_no), l_ptr->next_in_no);
-
/* Record OOS packet arrival (force mismatch on next timeout) */
-
l_ptr->checkpoint--;
/*
* Discard packet if a duplicate; otherwise add it to deferred queue
* and notify peer of gap as per protocol specification
*/
-
if (less(seq_no, mod(l_ptr->next_in_no))) {
l_ptr->stats.duplicates++;
- buf_discard(buf);
+ kfree_skb(buf);
return;
}
@@ -2048,8 +1704,9 @@ static void link_handle_out_of_seq_msg(struct link *l_ptr,
&l_ptr->newest_deferred_in, buf)) {
l_ptr->deferred_inqueue_sz++;
l_ptr->stats.deferred_recv++;
+ TIPC_SKB_CB(buf)->deferred = true;
if ((l_ptr->deferred_inqueue_sz % 16) == 1)
- tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
} else
l_ptr->stats.duplicates++;
}
@@ -2057,18 +1714,32 @@ static void link_handle_out_of_seq_msg(struct link *l_ptr,
/*
* Send protocol message to the other endpoint.
*/
-void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
- u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
+void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
+ u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
{
struct sk_buff *buf = NULL;
struct tipc_msg *msg = l_ptr->pmsg;
u32 msg_size = sizeof(l_ptr->proto_msg);
+ int r_flag;
- if (link_blocked(l_ptr))
+ /* Discard any previous message that was deferred due to congestion */
+ if (l_ptr->proto_msg_queue) {
+ kfree_skb(l_ptr->proto_msg_queue);
+ l_ptr->proto_msg_queue = NULL;
+ }
+
+ /* Don't send protocol message during link changeover */
+ if (l_ptr->exp_msg_count)
return;
+
+ /* Abort non-RESET send if communication with node is prohibited */
+ if ((tipc_node_blocked(l_ptr->owner)) && (msg_typ != RESET_MSG))
+ return;
+
+ /* Create protocol message with "out-of-sequence" sequence number */
msg_set_type(msg, msg_typ);
- msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
- msg_set_bcast_ack(msg, mod(l_ptr->owner->bclink.last_in));
+ msg_set_net_plane(msg, l_ptr->net_plane);
+ msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
msg_set_last_bcast(msg, tipc_bclink_get_last_sent());
if (msg_typ == STATE_MSG) {
@@ -2077,10 +1748,10 @@ void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
if (!tipc_link_is_up(l_ptr))
return;
if (l_ptr->next_out)
- next_sent = msg_seqno(buf_msg(l_ptr->next_out));
+ next_sent = buf_seqno(l_ptr->next_out);
msg_set_next_sent(msg, next_sent);
if (l_ptr->oldest_deferred_in) {
- u32 rec = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
+ u32 rec = buf_seqno(l_ptr->oldest_deferred_in);
gap = mod(rec - mod(l_ptr->next_in_no));
}
msg_set_seq_gap(msg, gap);
@@ -2113,58 +1784,29 @@ void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
msg_set_seq_gap(msg, 0);
msg_set_next_sent(msg, 1);
+ msg_set_probe(msg, 0);
msg_set_link_tolerance(msg, l_ptr->tolerance);
msg_set_linkprio(msg, l_ptr->priority);
msg_set_max_pkt(msg, l_ptr->max_pkt_target);
}
- if (tipc_node_has_redundant_links(l_ptr->owner)) {
- msg_set_redundant_link(msg);
- } else {
- msg_clear_redundant_link(msg);
- }
+ r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
+ msg_set_redundant_link(msg, r_flag);
msg_set_linkprio(msg, l_ptr->priority);
-
- /* Ensure sequence number will not fit : */
+ msg_set_size(msg, msg_size);
msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));
- /* Congestion? */
-
- if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
- if (!l_ptr->proto_msg_queue) {
- l_ptr->proto_msg_queue =
- tipc_buf_acquire(sizeof(l_ptr->proto_msg));
- }
- buf = l_ptr->proto_msg_queue;
- if (!buf)
- return;
- skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
- return;
- }
- msg_set_timestamp(msg, jiffies_to_msecs(jiffies));
-
- /* Message can be sent */
-
- msg_dbg(msg, ">>");
-
buf = tipc_buf_acquire(msg_size);
if (!buf)
return;
skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
- msg_set_size(buf_msg(buf), msg_size);
-
- if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
- l_ptr->unacked_window = 0;
- buf_discard(buf);
- return;
- }
+ buf->priority = TC_PRIO_CONTROL;
- /* New congestion */
- tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
- l_ptr->proto_msg_queue = buf;
- l_ptr->stats.bearer_congs++;
+ tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
+ l_ptr->unacked_window = 0;
+ kfree_skb(buf);
}
/*
@@ -2172,8 +1814,7 @@ void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
* Note that network plane id propagates through the network, and may
* change at any time. The node with lowest address rules
*/
-
-static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
+static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf)
{
u32 rec_gap = 0;
u32 max_pkt_info;
@@ -2181,40 +1822,41 @@ static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
u32 msg_tol;
struct tipc_msg *msg = buf_msg(buf);
- dbg("AT(%u):", jiffies_to_msecs(jiffies));
- msg_dbg(msg, "<<");
- if (link_blocked(l_ptr))
+ /* Discard protocol message during link changeover */
+ if (l_ptr->exp_msg_count)
goto exit;
- /* record unnumbered packet arrival (force mismatch on next timeout) */
-
- l_ptr->checkpoint--;
-
- if (l_ptr->b_ptr->net_plane != msg_net_plane(msg))
+ if (l_ptr->net_plane != msg_net_plane(msg))
if (tipc_own_addr > msg_prevnode(msg))
- l_ptr->b_ptr->net_plane = msg_net_plane(msg);
-
- l_ptr->owner->permit_changeover = msg_redundant_link(msg);
+ l_ptr->net_plane = msg_net_plane(msg);
switch (msg_type(msg)) {
case RESET_MSG:
if (!link_working_unknown(l_ptr) &&
(l_ptr->peer_session != INVALID_SESSION)) {
- if (msg_session(msg) == l_ptr->peer_session) {
- dbg("Duplicate RESET: %u<->%u\n",
- msg_session(msg), l_ptr->peer_session);
- break; /* duplicate: ignore */
- }
+ if (less_eq(msg_session(msg), l_ptr->peer_session))
+ break; /* duplicate or old reset: ignore */
+ }
+
+ if (!msg_redundant_link(msg) && (link_working_working(l_ptr) ||
+ link_working_unknown(l_ptr))) {
+ /*
+ * peer has lost contact -- don't allow peer's links
+ * to reactivate before we recognize loss & clean up
+ */
+ l_ptr->owner->action_flags |= TIPC_WAIT_OWN_LINKS_DOWN;
}
+
+ link_state_event(l_ptr, RESET_MSG);
+
/* fall thru' */
case ACTIVATE_MSG:
/* Update link settings according other endpoint's values */
-
strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));
- if ((msg_tol = msg_link_tolerance(msg)) &&
- (msg_tol > l_ptr->tolerance))
+ msg_tol = msg_link_tolerance(msg);
+ if (msg_tol > l_ptr->tolerance)
link_set_supervision_props(l_ptr, msg_tol);
if (msg_linkprio(msg) > l_ptr->priority)
@@ -2229,31 +1871,40 @@ static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
} else {
l_ptr->max_pkt = l_ptr->max_pkt_target;
}
- l_ptr->owner->bclink.supported = (max_pkt_info != 0);
- link_state_event(l_ptr, msg_type(msg));
+ /* Synchronize broadcast link info, if not done previously */
+ if (!tipc_node_is_up(l_ptr->owner)) {
+ l_ptr->owner->bclink.last_sent =
+ l_ptr->owner->bclink.last_in =
+ msg_last_bcast(msg);
+ l_ptr->owner->bclink.oos_state = 0;
+ }
l_ptr->peer_session = msg_session(msg);
l_ptr->peer_bearer_id = msg_bearer_id(msg);
- /* Synchronize broadcast sequence numbers */
- if (!tipc_node_has_redundant_links(l_ptr->owner)) {
- l_ptr->owner->bclink.last_in = mod(msg_last_bcast(msg));
- }
+ if (msg_type(msg) == ACTIVATE_MSG)
+ link_state_event(l_ptr, ACTIVATE_MSG);
break;
case STATE_MSG:
- if ((msg_tol = msg_link_tolerance(msg)))
+ msg_tol = msg_link_tolerance(msg);
+ if (msg_tol)
link_set_supervision_props(l_ptr, msg_tol);
if (msg_linkprio(msg) &&
(msg_linkprio(msg) != l_ptr->priority)) {
- warn("Resetting link <%s>, priority change %u->%u\n",
- l_ptr->name, l_ptr->priority, msg_linkprio(msg));
+ pr_warn("%s<%s>, priority change %u->%u\n",
+ link_rst_msg, l_ptr->name, l_ptr->priority,
+ msg_linkprio(msg));
l_ptr->priority = msg_linkprio(msg);
tipc_link_reset(l_ptr); /* Enforce change to take effect */
break;
}
+
+ /* Record reception; force mismatch at next timeout: */
+ l_ptr->checkpoint--;
+
link_state_event(l_ptr, TRAFFIC_MSG_EVT);
l_ptr->stats.recv_states++;
if (link_reset_unknown(l_ptr))
@@ -2266,8 +1917,6 @@ static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
max_pkt_ack = msg_max_pkt(msg);
if (max_pkt_ack > l_ptr->max_pkt) {
- dbg("Link <%s> updated MTU %u -> %u\n",
- l_ptr->name, l_ptr->max_pkt, max_pkt_ack);
l_ptr->max_pkt = max_pkt_ack;
l_ptr->max_pkt_probes = 0;
}
@@ -2275,96 +1924,81 @@ static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
max_pkt_ack = 0;
if (msg_probe(msg)) {
l_ptr->stats.recv_probes++;
- if (msg_size(msg) > sizeof(l_ptr->proto_msg)) {
+ if (msg_size(msg) > sizeof(l_ptr->proto_msg))
max_pkt_ack = msg_size(msg);
- }
}
/* Protocol message before retransmits, reduce loss risk */
-
- tipc_bclink_check_gap(l_ptr->owner, msg_last_bcast(msg));
+ if (l_ptr->owner->bclink.recv_permitted)
+ tipc_bclink_update_link_state(l_ptr->owner,
+ msg_last_bcast(msg));
if (rec_gap || (msg_probe(msg))) {
- tipc_link_send_proto_msg(l_ptr, STATE_MSG,
- 0, rec_gap, 0, 0, max_pkt_ack);
+ tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, rec_gap, 0,
+ 0, max_pkt_ack);
}
if (msg_seq_gap(msg)) {
- msg_dbg(msg, "With Gap:");
l_ptr->stats.recv_nacks++;
tipc_link_retransmit(l_ptr, l_ptr->first_out,
msg_seq_gap(msg));
}
break;
- default:
- msg_dbg(buf_msg(buf), "<DISCARDING UNKNOWN<");
}
exit:
- buf_discard(buf);
+ kfree_skb(buf);
}
-/*
- * tipc_link_tunnel(): Send one message via a link belonging to
- * another bearer. Owner node is locked.
+/* tipc_link_tunnel_xmit(): Tunnel one packet via a link belonging to
+ * a different bearer. Owner node is locked.
*/
-static void tipc_link_tunnel(struct link *l_ptr,
- struct tipc_msg *tunnel_hdr,
- struct tipc_msg *msg,
- u32 selector)
+static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
+ struct tipc_msg *tunnel_hdr,
+ struct tipc_msg *msg,
+ u32 selector)
{
- struct link *tunnel;
+ struct tipc_link *tunnel;
struct sk_buff *buf;
u32 length = msg_size(msg);
tunnel = l_ptr->owner->active_links[selector & 1];
if (!tipc_link_is_up(tunnel)) {
- warn("Link changeover error, "
- "tunnel link no longer available\n");
+ pr_warn("%stunnel link no longer available\n", link_co_err);
return;
}
msg_set_size(tunnel_hdr, length + INT_H_SIZE);
buf = tipc_buf_acquire(length + INT_H_SIZE);
if (!buf) {
- warn("Link changeover error, "
- "unable to send tunnel msg\n");
+ pr_warn("%sunable to send tunnel msg\n", link_co_err);
return;
}
skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE);
skb_copy_to_linear_data_offset(buf, INT_H_SIZE, msg, length);
- dbg("%c->%c:", l_ptr->b_ptr->net_plane, tunnel->b_ptr->net_plane);
- msg_dbg(buf_msg(buf), ">SEND>");
- tipc_link_send_buf(tunnel, buf);
+ __tipc_link_xmit(tunnel, buf);
}
-
-/*
- * changeover(): Send whole message queue via the remaining link
- * Owner node is locked.
+/* tipc_link_failover_send_queue(): A link has gone down, but a second
+ * link is still active. We can do failover. Tunnel the failing link's
+ * whole send queue via the remaining link. This way, we don't lose
+ * any packets, and sequence order is preserved for subsequent traffic
+ * sent over the remaining link. Owner node is locked.
*/
-
-void tipc_link_changeover(struct link *l_ptr)
+void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
{
u32 msgcount = l_ptr->out_queue_size;
struct sk_buff *crs = l_ptr->first_out;
- struct link *tunnel = l_ptr->owner->active_links[0];
+ struct tipc_link *tunnel = l_ptr->owner->active_links[0];
struct tipc_msg tunnel_hdr;
int split_bundles;
if (!tunnel)
return;
- if (!l_ptr->owner->permit_changeover) {
- warn("Link changeover error, "
- "peer did not permit changeover\n");
- return;
- }
-
tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
msg_set_msgcnt(&tunnel_hdr, msgcount);
- dbg("Link changeover requires %u tunnel messages\n", msgcount);
if (!l_ptr->first_out) {
struct sk_buff *buf;
@@ -2373,13 +2007,10 @@ void tipc_link_changeover(struct link *l_ptr)
if (buf) {
skb_copy_to_linear_data(buf, &tunnel_hdr, INT_H_SIZE);
msg_set_size(&tunnel_hdr, INT_H_SIZE);
- dbg("%c->%c:", l_ptr->b_ptr->net_plane,
- tunnel->b_ptr->net_plane);
- msg_dbg(&tunnel_hdr, "EMPTY>SEND>");
- tipc_link_send_buf(tunnel, buf);
+ __tipc_link_xmit(tunnel, buf);
} else {
- warn("Link changeover error, "
- "unable to send changeover msg\n");
+ pr_warn("%sunable to send changeover msg\n",
+ link_co_err);
}
return;
}
@@ -2392,25 +2023,35 @@ void tipc_link_changeover(struct link *l_ptr)
if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
struct tipc_msg *m = msg_get_wrapped(msg);
- unchar* pos = (unchar*)m;
+ unchar *pos = (unchar *)m;
msgcount = msg_msgcnt(msg);
while (msgcount--) {
- msg_set_seqno(m,msg_seqno(msg));
- tipc_link_tunnel(l_ptr, &tunnel_hdr, m,
- msg_link_selector(m));
+ msg_set_seqno(m, msg_seqno(msg));
+ tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, m,
+ msg_link_selector(m));
pos += align(msg_size(m));
m = (struct tipc_msg *)pos;
}
} else {
- tipc_link_tunnel(l_ptr, &tunnel_hdr, msg,
- msg_link_selector(msg));
+ tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg,
+ msg_link_selector(msg));
}
crs = crs->next;
}
}
-void tipc_link_send_duplicate(struct link *l_ptr, struct link *tunnel)
+/* tipc_link_dup_queue_xmit(): A second link has become active. Tunnel a
+ * duplicate of the first link's send queue via the new link. This way, we
+ * are guaranteed that currently queued packets from a socket are delivered
+ * before future traffic from the same socket, even if this is using the
+ * new link. The last arriving copy of each duplicate packet is dropped at
+ * the receiving end by the regular protocol check, so packet cardinality
+ * and sequence order is preserved per sender/receiver socket pair.
+ * Owner node is locked.
+ */
+void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr,
+ struct tipc_link *tunnel)
{
struct sk_buff *iter;
struct tipc_msg tunnel_hdr;
@@ -2432,25 +2073,20 @@ void tipc_link_send_duplicate(struct link *l_ptr, struct link *tunnel)
msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
outbuf = tipc_buf_acquire(length + INT_H_SIZE);
if (outbuf == NULL) {
- warn("Link changeover error, "
- "unable to send duplicate msg\n");
+ pr_warn("%sunable to send duplicate msg\n",
+ link_co_err);
return;
}
skb_copy_to_linear_data(outbuf, &tunnel_hdr, INT_H_SIZE);
skb_copy_to_linear_data_offset(outbuf, INT_H_SIZE, iter->data,
length);
- dbg("%c->%c:", l_ptr->b_ptr->net_plane,
- tunnel->b_ptr->net_plane);
- msg_dbg(buf_msg(outbuf), ">SEND>");
- tipc_link_send_buf(tunnel, outbuf);
+ __tipc_link_xmit(tunnel, outbuf);
if (!tipc_link_is_up(l_ptr))
return;
iter = iter->next;
}
}
-
-
/**
* buf_extract - extracts embedded TIPC message from another message
* @skb: encapsulating message buffer
@@ -2459,7 +2095,6 @@ void tipc_link_send_duplicate(struct link *l_ptr, struct link *tunnel)
* Returns a new message buffer containing an embedded message. The
* encapsulating message itself is left unchanged.
*/
-
static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
{
struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos);
@@ -2472,134 +2107,142 @@ static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
return eb;
}
-/*
- * link_recv_changeover_msg(): Receive tunneled packet sent
- * via other link. Node is locked. Return extracted buffer.
+
+
+/* tipc_link_dup_rcv(): Receive a tunnelled DUPLICATE_MSG packet.
+ * Owner node is locked.
*/
+static void tipc_link_dup_rcv(struct tipc_link *l_ptr,
+ struct sk_buff *t_buf)
+{
+ struct sk_buff *buf;
-static int link_recv_changeover_msg(struct link **l_ptr,
- struct sk_buff **buf)
+ if (!tipc_link_is_up(l_ptr))
+ return;
+
+ buf = buf_extract(t_buf, INT_H_SIZE);
+ if (buf == NULL) {
+ pr_warn("%sfailed to extract inner dup pkt\n", link_co_err);
+ return;
+ }
+
+ /* Add buffer to deferred queue, if applicable: */
+ link_handle_out_of_seq_msg(l_ptr, buf);
+}
+
+/* tipc_link_failover_rcv(): Receive a tunnelled ORIGINAL_MSG packet
+ * Owner node is locked.
+ */
+static struct sk_buff *tipc_link_failover_rcv(struct tipc_link *l_ptr,
+ struct sk_buff *t_buf)
{
- struct sk_buff *tunnel_buf = *buf;
- struct link *dest_link;
+ struct tipc_msg *t_msg = buf_msg(t_buf);
+ struct sk_buff *buf = NULL;
struct tipc_msg *msg;
- struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf);
- u32 msg_typ = msg_type(tunnel_msg);
- u32 msg_count = msg_msgcnt(tunnel_msg);
- dest_link = (*l_ptr)->owner->links[msg_bearer_id(tunnel_msg)];
- if (!dest_link) {
- msg_dbg(tunnel_msg, "NOLINK/<REC<");
- goto exit;
- }
- if (dest_link == *l_ptr) {
- err("Unexpected changeover message on link <%s>\n",
- (*l_ptr)->name);
- goto exit;
- }
- dbg("%c<-%c:", dest_link->b_ptr->net_plane,
- (*l_ptr)->b_ptr->net_plane);
- *l_ptr = dest_link;
- msg = msg_get_wrapped(tunnel_msg);
-
- if (msg_typ == DUPLICATE_MSG) {
- if (less(msg_seqno(msg), mod(dest_link->next_in_no))) {
- msg_dbg(tunnel_msg, "DROP/<REC<");
+ if (tipc_link_is_up(l_ptr))
+ tipc_link_reset(l_ptr);
+
+ /* First failover packet? */
+ if (l_ptr->exp_msg_count == START_CHANGEOVER)
+ l_ptr->exp_msg_count = msg_msgcnt(t_msg);
+
+ /* Should there be an inner packet? */
+ if (l_ptr->exp_msg_count) {
+ l_ptr->exp_msg_count--;
+ buf = buf_extract(t_buf, INT_H_SIZE);
+ if (buf == NULL) {
+ pr_warn("%sno inner failover pkt\n", link_co_err);
goto exit;
}
- *buf = buf_extract(tunnel_buf,INT_H_SIZE);
- if (*buf == NULL) {
- warn("Link changeover error, duplicate msg dropped\n");
+ msg = buf_msg(buf);
+
+ if (less(msg_seqno(msg), l_ptr->reset_checkpoint)) {
+ kfree_skb(buf);
+ buf = NULL;
goto exit;
}
- msg_dbg(tunnel_msg, "TNL<REC<");
- buf_discard(tunnel_buf);
- return 1;
+ if (msg_user(msg) == MSG_FRAGMENTER) {
+ l_ptr->stats.recv_fragments++;
+ tipc_buf_append(&l_ptr->reasm_buf, &buf);
+ }
}
-
- /* First original message ?: */
-
- if (tipc_link_is_up(dest_link)) {
- msg_dbg(tunnel_msg, "UP/FIRST/<REC<");
- info("Resetting link <%s>, changeover initiated by peer\n",
- dest_link->name);
- tipc_link_reset(dest_link);
- dest_link->exp_msg_count = msg_count;
- dbg("Expecting %u tunnelled messages\n", msg_count);
- if (!msg_count)
- goto exit;
- } else if (dest_link->exp_msg_count == START_CHANGEOVER) {
- msg_dbg(tunnel_msg, "BLK/FIRST/<REC<");
- dest_link->exp_msg_count = msg_count;
- dbg("Expecting %u tunnelled messages\n", msg_count);
- if (!msg_count)
- goto exit;
+exit:
+ if ((l_ptr->exp_msg_count == 0) && (l_ptr->flags & LINK_STOPPED)) {
+ tipc_node_detach_link(l_ptr->owner, l_ptr);
+ kfree(l_ptr);
}
+ return buf;
+}
- /* Receive original message */
+/* tipc_link_tunnel_rcv(): Receive a tunnelled packet, sent
+ * via other link as result of a failover (ORIGINAL_MSG) or
+ * a new active link (DUPLICATE_MSG). Failover packets are
+ * returned to the active link for delivery upwards.
+ * Owner node is locked.
+ */
+static int tipc_link_tunnel_rcv(struct tipc_node *n_ptr,
+ struct sk_buff **buf)
+{
+ struct sk_buff *t_buf = *buf;
+ struct tipc_link *l_ptr;
+ struct tipc_msg *t_msg = buf_msg(t_buf);
+ u32 bearer_id = msg_bearer_id(t_msg);
- if (dest_link->exp_msg_count == 0) {
- warn("Link switchover error, "
- "got too many tunnelled messages\n");
- msg_dbg(tunnel_msg, "OVERDUE/DROP/<REC<");
- dbg_print_link(dest_link, "LINK:");
+ *buf = NULL;
+
+ if (bearer_id >= MAX_BEARERS)
goto exit;
- }
- dest_link->exp_msg_count--;
- if (less(msg_seqno(msg), dest_link->reset_checkpoint)) {
- msg_dbg(tunnel_msg, "DROP/DUPL/<REC<");
+
+ l_ptr = n_ptr->links[bearer_id];
+ if (!l_ptr)
goto exit;
- } else {
- *buf = buf_extract(tunnel_buf, INT_H_SIZE);
- if (*buf != NULL) {
- msg_dbg(tunnel_msg, "TNL<REC<");
- buf_discard(tunnel_buf);
- return 1;
- } else {
- warn("Link changeover error, original msg dropped\n");
- }
- }
+
+ if (msg_type(t_msg) == DUPLICATE_MSG)
+ tipc_link_dup_rcv(l_ptr, t_buf);
+ else if (msg_type(t_msg) == ORIGINAL_MSG)
+ *buf = tipc_link_failover_rcv(l_ptr, t_buf);
+ else
+ pr_warn("%sunknown tunnel pkt received\n", link_co_err);
exit:
- *buf = NULL;
- buf_discard(tunnel_buf);
- return 0;
+ kfree_skb(t_buf);
+ return *buf != NULL;
}
/*
* Bundler functionality:
*/
-void tipc_link_recv_bundle(struct sk_buff *buf)
+void tipc_link_bundle_rcv(struct sk_buff *buf)
{
u32 msgcount = msg_msgcnt(buf_msg(buf));
u32 pos = INT_H_SIZE;
struct sk_buff *obuf;
- msg_dbg(buf_msg(buf), "<BNDL<: ");
while (msgcount--) {
obuf = buf_extract(buf, pos);
if (obuf == NULL) {
- warn("Link unable to unbundle message(s)\n");
+ pr_warn("Link unable to unbundle message(s)\n");
break;
}
pos += align(msg_size(buf_msg(obuf)));
- msg_dbg(buf_msg(obuf), " /");
tipc_net_route_msg(obuf);
}
- buf_discard(buf);
+ kfree_skb(buf);
}
/*
* Fragmentation/defragmentation:
*/
-
/*
- * link_send_long_buf: Entry for buffers needing fragmentation.
+ * tipc_link_frag_xmit: Entry for buffers needing fragmentation.
* The buffer is complete, inclusive total message length.
* Returns user data length.
*/
-static int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
+static int tipc_link_frag_xmit(struct tipc_link *l_ptr, struct sk_buff *buf)
{
+ struct sk_buff *buf_chain = NULL;
+ struct sk_buff *buf_chain_tail = (struct sk_buff *)&buf_chain;
struct tipc_msg *inmsg = buf_msg(buf);
struct tipc_msg fragm_hdr;
u32 insize = msg_size(inmsg);
@@ -2608,7 +2251,7 @@ static int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
u32 rest = insize;
u32 pack_sz = l_ptr->max_pkt;
u32 fragm_sz = pack_sz - INT_H_SIZE;
- u32 fragm_no = 1;
+ u32 fragm_no = 0;
u32 destaddr;
if (msg_short(inmsg))
@@ -2616,20 +2259,11 @@ static int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
else
destaddr = msg_destnode(inmsg);
- if (msg_routed(inmsg))
- msg_set_prevnode(inmsg, tipc_own_addr);
-
/* Prepare reusable fragment header: */
-
tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
INT_H_SIZE, destaddr);
- msg_set_link_selector(&fragm_hdr, msg_link_selector(inmsg));
- msg_set_long_msgno(&fragm_hdr, mod(l_ptr->long_msg_seq_no++));
- msg_set_fragm_no(&fragm_hdr, fragm_no);
- l_ptr->stats.sent_fragmented++;
/* Chop up message: */
-
while (rest > 0) {
struct sk_buff *fragm;
@@ -2639,207 +2273,47 @@ static int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
}
fragm = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
if (fragm == NULL) {
- warn("Link unable to fragment message\n");
- dsz = -ENOMEM;
- goto exit;
+ kfree_skb(buf);
+ kfree_skb_list(buf_chain);
+ return -ENOMEM;
}
msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
+ fragm_no++;
+ msg_set_fragm_no(&fragm_hdr, fragm_no);
skb_copy_to_linear_data(fragm, &fragm_hdr, INT_H_SIZE);
skb_copy_to_linear_data_offset(fragm, INT_H_SIZE, crs,
fragm_sz);
- /* Send queued messages first, if any: */
+ buf_chain_tail->next = fragm;
+ buf_chain_tail = fragm;
- l_ptr->stats.sent_fragments++;
- tipc_link_send_buf(l_ptr, fragm);
- if (!tipc_link_is_up(l_ptr))
- return dsz;
- msg_set_fragm_no(&fragm_hdr, ++fragm_no);
rest -= fragm_sz;
crs += fragm_sz;
msg_set_type(&fragm_hdr, FRAGMENT);
}
-exit:
- buf_discard(buf);
- return dsz;
-}
-
-/*
- * A pending message being re-assembled must store certain values
- * to handle subsequent fragments correctly. The following functions
- * help storing these values in unused, available fields in the
- * pending message. This makes dynamic memory allocation unecessary.
- */
-
-static void set_long_msg_seqno(struct sk_buff *buf, u32 seqno)
-{
- msg_set_seqno(buf_msg(buf), seqno);
-}
-
-static u32 get_fragm_size(struct sk_buff *buf)
-{
- return msg_ack(buf_msg(buf));
-}
-
-static void set_fragm_size(struct sk_buff *buf, u32 sz)
-{
- msg_set_ack(buf_msg(buf), sz);
-}
-
-static u32 get_expected_frags(struct sk_buff *buf)
-{
- return msg_bcast_ack(buf_msg(buf));
-}
-
-static void set_expected_frags(struct sk_buff *buf, u32 exp)
-{
- msg_set_bcast_ack(buf_msg(buf), exp);
-}
-
-static u32 get_timer_cnt(struct sk_buff *buf)
-{
- return msg_reroute_cnt(buf_msg(buf));
-}
+ kfree_skb(buf);
-static void incr_timer_cnt(struct sk_buff *buf)
-{
- msg_incr_reroute_cnt(buf_msg(buf));
-}
-
-/*
- * tipc_link_recv_fragment(): Called with node lock on. Returns
- * the reassembled buffer if message is complete.
- */
-int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
- struct tipc_msg **m)
-{
- struct sk_buff *prev = NULL;
- struct sk_buff *fbuf = *fb;
- struct tipc_msg *fragm = buf_msg(fbuf);
- struct sk_buff *pbuf = *pending;
- u32 long_msg_seq_no = msg_long_msgno(fragm);
-
- *fb = NULL;
- msg_dbg(fragm,"FRG<REC<");
-
- /* Is there an incomplete message waiting for this fragment? */
-
- while (pbuf && ((msg_seqno(buf_msg(pbuf)) != long_msg_seq_no) ||
- (msg_orignode(fragm) != msg_orignode(buf_msg(pbuf))))) {
- prev = pbuf;
- pbuf = pbuf->next;
- }
+ /* Append chain of fragments to send queue & send them */
+ l_ptr->long_msg_seq_no++;
+ link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
+ l_ptr->stats.sent_fragments += fragm_no;
+ l_ptr->stats.sent_fragmented++;
+ tipc_link_push_queue(l_ptr);
- if (!pbuf && (msg_type(fragm) == FIRST_FRAGMENT)) {
- struct tipc_msg *imsg = (struct tipc_msg *)msg_data(fragm);
- u32 msg_sz = msg_size(imsg);
- u32 fragm_sz = msg_data_sz(fragm);
- u32 exp_fragm_cnt = msg_sz/fragm_sz + !!(msg_sz % fragm_sz);
- u32 max = TIPC_MAX_USER_MSG_SIZE + LONG_H_SIZE;
- if (msg_type(imsg) == TIPC_MCAST_MSG)
- max = TIPC_MAX_USER_MSG_SIZE + MCAST_H_SIZE;
- if (msg_size(imsg) > max) {
- msg_dbg(fragm,"<REC<Oversized: ");
- buf_discard(fbuf);
- return 0;
- }
- pbuf = tipc_buf_acquire(msg_size(imsg));
- if (pbuf != NULL) {
- pbuf->next = *pending;
- *pending = pbuf;
- skb_copy_to_linear_data(pbuf, imsg,
- msg_data_sz(fragm));
- /* Prepare buffer for subsequent fragments. */
-
- set_long_msg_seqno(pbuf, long_msg_seq_no);
- set_fragm_size(pbuf,fragm_sz);
- set_expected_frags(pbuf,exp_fragm_cnt - 1);
- } else {
- warn("Link unable to reassemble fragmented message\n");
- }
- buf_discard(fbuf);
- return 0;
- } else if (pbuf && (msg_type(fragm) != FIRST_FRAGMENT)) {
- u32 dsz = msg_data_sz(fragm);
- u32 fsz = get_fragm_size(pbuf);
- u32 crs = ((msg_fragm_no(fragm) - 1) * fsz);
- u32 exp_frags = get_expected_frags(pbuf) - 1;
- skb_copy_to_linear_data_offset(pbuf, crs,
- msg_data(fragm), dsz);
- buf_discard(fbuf);
-
- /* Is message complete? */
-
- if (exp_frags == 0) {
- if (prev)
- prev->next = pbuf->next;
- else
- *pending = pbuf->next;
- msg_reset_reroute_cnt(buf_msg(pbuf));
- *fb = pbuf;
- *m = buf_msg(pbuf);
- return 1;
- }
- set_expected_frags(pbuf,exp_frags);
- return 0;
- }
- dbg(" Discarding orphan fragment %x\n",fbuf);
- msg_dbg(fragm,"ORPHAN:");
- dbg("Pending long buffers:\n");
- dbg_print_buf_chain(*pending);
- buf_discard(fbuf);
- return 0;
+ return dsz;
}
-/**
- * link_check_defragm_bufs - flush stale incoming message fragments
- * @l_ptr: pointer to link
- */
-
-static void link_check_defragm_bufs(struct link *l_ptr)
+static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance)
{
- struct sk_buff *prev = NULL;
- struct sk_buff *next = NULL;
- struct sk_buff *buf = l_ptr->defragm_buf;
-
- if (!buf)
- return;
- if (!link_working_working(l_ptr))
+ if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL))
return;
- while (buf) {
- u32 cnt = get_timer_cnt(buf);
-
- next = buf->next;
- if (cnt < 4) {
- incr_timer_cnt(buf);
- prev = buf;
- } else {
- dbg(" Discarding incomplete long buffer\n");
- msg_dbg(buf_msg(buf), "LONG:");
- dbg_print_link(l_ptr, "curr:");
- dbg("Pending long buffers:\n");
- dbg_print_buf_chain(l_ptr->defragm_buf);
- if (prev)
- prev->next = buf->next;
- else
- l_ptr->defragm_buf = buf->next;
- buf_discard(buf);
- }
- buf = next;
- }
-}
-
-
-static void link_set_supervision_props(struct link *l_ptr, u32 tolerance)
-{
l_ptr->tolerance = tolerance;
l_ptr->continuity_interval =
((tolerance / 4) > 500) ? 500 : tolerance / 4;
l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4);
}
-
-void tipc_link_set_queue_limits(struct link *l_ptr, u32 window)
+void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
{
/* Data messages from this node, inclusive FIRST_FRAGM */
l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window;
@@ -2852,46 +2326,155 @@ void tipc_link_set_queue_limits(struct link *l_ptr, u32 window)
l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE + 4] = 900;
l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE + 4] = 1200;
l_ptr->queue_limit[CONN_MANAGER] = 1200;
- l_ptr->queue_limit[ROUTE_DISTRIBUTOR] = 1200;
l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500;
l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000;
/* FRAGMENT and LAST_FRAGMENT packets */
l_ptr->queue_limit[MSG_FRAGMENTER] = 4000;
}
+/* tipc_link_find_owner - locate owner node of link by link's name
+ * @name: pointer to link name string
+ * @bearer_id: pointer to index in 'node->links' array where the link was found.
+ *
+ * Returns pointer to node owning the link, or 0 if no matching link is found.
+ */
+static struct tipc_node *tipc_link_find_owner(const char *link_name,
+ unsigned int *bearer_id)
+{
+ struct tipc_link *l_ptr;
+ struct tipc_node *n_ptr;
+ struct tipc_node *found_node = 0;
+ int i;
+
+ *bearer_id = 0;
+ rcu_read_lock();
+ list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
+ tipc_node_lock(n_ptr);
+ for (i = 0; i < MAX_BEARERS; i++) {
+ l_ptr = n_ptr->links[i];
+ if (l_ptr && !strcmp(l_ptr->name, link_name)) {
+ *bearer_id = i;
+ found_node = n_ptr;
+ break;
+ }
+ }
+ tipc_node_unlock(n_ptr);
+ if (found_node)
+ break;
+ }
+ rcu_read_unlock();
+
+ return found_node;
+}
+
/**
- * link_find_link - locate link by name
- * @name - ptr to link name string
- * @node - ptr to area to be filled with ptr to associated node
+ * link_value_is_valid -- validate proposed link tolerance/priority/window
*
- * Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted;
- * this also prevents link deletion.
+ * @cmd: value type (TIPC_CMD_SET_LINK_*)
+ * @new_value: the new value
*
- * Returns pointer to link (or 0 if invalid link name).
+ * Returns 1 if value is within range, 0 if not.
*/
-
-static struct link *link_find_link(const char *name, struct tipc_node **node)
+static int link_value_is_valid(u16 cmd, u32 new_value)
{
- struct link_name link_name_parts;
- struct bearer *b_ptr;
- struct link *l_ptr;
-
- if (!link_name_validate(name, &link_name_parts))
- return NULL;
+ switch (cmd) {
+ case TIPC_CMD_SET_LINK_TOL:
+ return (new_value >= TIPC_MIN_LINK_TOL) &&
+ (new_value <= TIPC_MAX_LINK_TOL);
+ case TIPC_CMD_SET_LINK_PRI:
+ return (new_value <= TIPC_MAX_LINK_PRI);
+ case TIPC_CMD_SET_LINK_WINDOW:
+ return (new_value >= TIPC_MIN_LINK_WIN) &&
+ (new_value <= TIPC_MAX_LINK_WIN);
+ }
+ return 0;
+}
- b_ptr = tipc_bearer_find_interface(link_name_parts.if_local);
- if (!b_ptr)
- return NULL;
+/**
+ * link_cmd_set_value - change priority/tolerance/window for link/bearer/media
+ * @name: ptr to link, bearer, or media name
+ * @new_value: new value of link, bearer, or media setting
+ * @cmd: which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*)
+ *
+ * Caller must hold RTNL lock to ensure link/bearer/media is not deleted.
+ *
+ * Returns 0 if value updated and negative value on error.
+ */
+static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
+{
+ struct tipc_node *node;
+ struct tipc_link *l_ptr;
+ struct tipc_bearer *b_ptr;
+ struct tipc_media *m_ptr;
+ int bearer_id;
+ int res = 0;
+
+ node = tipc_link_find_owner(name, &bearer_id);
+ if (node) {
+ tipc_node_lock(node);
+ l_ptr = node->links[bearer_id];
- *node = tipc_node_find(link_name_parts.addr_peer);
- if (!*node)
- return NULL;
+ if (l_ptr) {
+ switch (cmd) {
+ case TIPC_CMD_SET_LINK_TOL:
+ link_set_supervision_props(l_ptr, new_value);
+ tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0,
+ new_value, 0, 0);
+ break;
+ case TIPC_CMD_SET_LINK_PRI:
+ l_ptr->priority = new_value;
+ tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0,
+ 0, new_value, 0);
+ break;
+ case TIPC_CMD_SET_LINK_WINDOW:
+ tipc_link_set_queue_limits(l_ptr, new_value);
+ break;
+ default:
+ res = -EINVAL;
+ break;
+ }
+ }
+ tipc_node_unlock(node);
+ return res;
+ }
- l_ptr = (*node)->links[b_ptr->identity];
- if (!l_ptr || strcmp(l_ptr->name, name))
- return NULL;
+ b_ptr = tipc_bearer_find(name);
+ if (b_ptr) {
+ switch (cmd) {
+ case TIPC_CMD_SET_LINK_TOL:
+ b_ptr->tolerance = new_value;
+ break;
+ case TIPC_CMD_SET_LINK_PRI:
+ b_ptr->priority = new_value;
+ break;
+ case TIPC_CMD_SET_LINK_WINDOW:
+ b_ptr->window = new_value;
+ break;
+ default:
+ res = -EINVAL;
+ break;
+ }
+ return res;
+ }
- return l_ptr;
+ m_ptr = tipc_media_find(name);
+ if (!m_ptr)
+ return -ENODEV;
+ switch (cmd) {
+ case TIPC_CMD_SET_LINK_TOL:
+ m_ptr->tolerance = new_value;
+ break;
+ case TIPC_CMD_SET_LINK_PRI:
+ m_ptr->priority = new_value;
+ break;
+ case TIPC_CMD_SET_LINK_WINDOW:
+ m_ptr->window = new_value;
+ break;
+ default:
+ res = -EINVAL;
+ break;
+ }
+ return res;
}
struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space,
@@ -2899,8 +2482,6 @@ struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space
{
struct tipc_link_config *args;
u32 new_value;
- struct link *l_ptr;
- struct tipc_node *node;
int res;
if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG))
@@ -2909,6 +2490,10 @@ struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space
args = (struct tipc_link_config *)TLV_DATA(req_tlv_area);
new_value = ntohl(args->value);
+ if (!link_value_is_valid(cmd, new_value))
+ return tipc_cfg_reply_error_string(
+ "cannot change, value invalid");
+
if (!strcmp(args->name, tipc_bclink_name)) {
if ((cmd == TIPC_CMD_SET_LINK_WINDOW) &&
(tipc_bclink_set_queue_limits(new_value) == 0))
@@ -2917,45 +2502,7 @@ struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space
" (cannot change setting on broadcast link)");
}
- read_lock_bh(&tipc_net_lock);
- l_ptr = link_find_link(args->name, &node);
- if (!l_ptr) {
- read_unlock_bh(&tipc_net_lock);
- return tipc_cfg_reply_error_string("link not found");
- }
-
- tipc_node_lock(node);
- res = -EINVAL;
- switch (cmd) {
- case TIPC_CMD_SET_LINK_TOL:
- if ((new_value >= TIPC_MIN_LINK_TOL) &&
- (new_value <= TIPC_MAX_LINK_TOL)) {
- link_set_supervision_props(l_ptr, new_value);
- tipc_link_send_proto_msg(l_ptr, STATE_MSG,
- 0, 0, new_value, 0, 0);
- res = 0;
- }
- break;
- case TIPC_CMD_SET_LINK_PRI:
- if ((new_value >= TIPC_MIN_LINK_PRI) &&
- (new_value <= TIPC_MAX_LINK_PRI)) {
- l_ptr->priority = new_value;
- tipc_link_send_proto_msg(l_ptr, STATE_MSG,
- 0, 0, 0, new_value, 0);
- res = 0;
- }
- break;
- case TIPC_CMD_SET_LINK_WINDOW:
- if ((new_value >= TIPC_MIN_LINK_WIN) &&
- (new_value <= TIPC_MAX_LINK_WIN)) {
- tipc_link_set_queue_limits(l_ptr, new_value);
- res = 0;
- }
- break;
- }
- tipc_node_unlock(node);
-
- read_unlock_bh(&tipc_net_lock);
+ res = link_cmd_set_value(args->name, new_value, cmd);
if (res)
return tipc_cfg_reply_error_string("cannot change link setting");
@@ -2966,8 +2513,7 @@ struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space
* link_reset_statistics - reset link statistics
* @l_ptr: pointer to link
*/
-
-static void link_reset_statistics(struct link *l_ptr)
+static void link_reset_statistics(struct tipc_link *l_ptr)
{
memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
l_ptr->stats.sent_info = l_ptr->next_out_no;
@@ -2977,8 +2523,9 @@ static void link_reset_statistics(struct link *l_ptr)
struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space)
{
char *link_name;
- struct link *l_ptr;
+ struct tipc_link *l_ptr;
struct tipc_node *node;
+ unsigned int bearer_id;
if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
@@ -2989,25 +2536,24 @@ struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_
return tipc_cfg_reply_error_string("link not found");
return tipc_cfg_reply_none();
}
+ node = tipc_link_find_owner(link_name, &bearer_id);
+ if (!node)
+ return tipc_cfg_reply_error_string("link not found");
- read_lock_bh(&tipc_net_lock);
- l_ptr = link_find_link(link_name, &node);
+ tipc_node_lock(node);
+ l_ptr = node->links[bearer_id];
if (!l_ptr) {
- read_unlock_bh(&tipc_net_lock);
+ tipc_node_unlock(node);
return tipc_cfg_reply_error_string("link not found");
}
-
- tipc_node_lock(node);
link_reset_statistics(l_ptr);
tipc_node_unlock(node);
- read_unlock_bh(&tipc_net_lock);
return tipc_cfg_reply_none();
}
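Both tipc_link_cmd_reset_stats() above and tipc_link_stats() below now follow the same locking discipline: the global tipc_net_lock read lock is gone, so the link is looked up via tipc_link_find_owner() and then re-validated under the owning node's lock, since the links[] slot can be cleared between the two steps. A hedged sketch of that pattern in isolation (demo_with_link() is illustrative, not part of the patch):

/* Minimal sketch of the find-owner / node-lock pattern used above. */
static int demo_with_link(const char *name, void (*op)(struct tipc_link *l))
{
	struct tipc_node *node;
	struct tipc_link *l_ptr;
	unsigned int bearer_id;

	node = tipc_link_find_owner(name, &bearer_id);
	if (!node)
		return -ENODEV;

	tipc_node_lock(node);
	l_ptr = node->links[bearer_id];		/* may have vanished */
	if (!l_ptr) {
		tipc_node_unlock(node);
		return -ENODEV;
	}
	op(l_ptr);				/* e.g. link_reset_statistics() */
	tipc_node_unlock(node);
	return 0;
}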
/**
* percent - convert count to a percentage of total (rounding up or down)
*/
-
static u32 percent(u32 count, u32 total)
{
return (count * 100 + (total / 2)) / total;
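The helper rounds to the nearest whole percent rather than truncating, because total/2 is added before the integer division. A quick stand-alone check of the arithmetic (not part of the patch):

#include <stdio.h>
#include <stdint.h>

/* Same arithmetic as percent() above: adding total/2 before dividing
 * makes the integer division round to the nearest percent.
 */
static uint32_t demo_percent(uint32_t count, uint32_t total)
{
	return (count * 100 + (total / 2)) / total;
}

int main(void)
{
	printf("%u\n", demo_percent(1, 3));	/* 33 (33.3 rounds down) */
	printf("%u\n", demo_percent(2, 3));	/* 67 (66.7 rounds up) */
	printf("%u\n", demo_percent(1, 2));	/* 50 */
	return 0;
}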
@@ -3021,115 +2567,121 @@ static u32 percent(u32 count, u32 total)
*
* Returns length of print buffer data string (or 0 if error)
*/
-
static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
{
- struct print_buf pb;
- struct link *l_ptr;
+ struct tipc_link *l;
+ struct tipc_stats *s;
struct tipc_node *node;
char *status;
u32 profile_total = 0;
+ unsigned int bearer_id;
+ int ret;
if (!strcmp(name, tipc_bclink_name))
return tipc_bclink_stats(buf, buf_size);
- tipc_printbuf_init(&pb, buf, buf_size);
+ node = tipc_link_find_owner(name, &bearer_id);
+ if (!node)
+ return 0;
- read_lock_bh(&tipc_net_lock);
- l_ptr = link_find_link(name, &node);
- if (!l_ptr) {
- read_unlock_bh(&tipc_net_lock);
+ tipc_node_lock(node);
+
+ l = node->links[bearer_id];
+ if (!l) {
+ tipc_node_unlock(node);
return 0;
}
- tipc_node_lock(node);
- if (tipc_link_is_active(l_ptr))
+ s = &l->stats;
+
+ if (tipc_link_is_active(l))
status = "ACTIVE";
- else if (tipc_link_is_up(l_ptr))
+ else if (tipc_link_is_up(l))
status = "STANDBY";
else
status = "DEFUNCT";
- tipc_printf(&pb, "Link <%s>\n"
- " %s MTU:%u Priority:%u Tolerance:%u ms"
- " Window:%u packets\n",
- l_ptr->name, status, l_ptr->max_pkt,
- l_ptr->priority, l_ptr->tolerance, l_ptr->queue_limit[0]);
- tipc_printf(&pb, " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
- l_ptr->next_in_no - l_ptr->stats.recv_info,
- l_ptr->stats.recv_fragments,
- l_ptr->stats.recv_fragmented,
- l_ptr->stats.recv_bundles,
- l_ptr->stats.recv_bundled);
- tipc_printf(&pb, " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
- l_ptr->next_out_no - l_ptr->stats.sent_info,
- l_ptr->stats.sent_fragments,
- l_ptr->stats.sent_fragmented,
- l_ptr->stats.sent_bundles,
- l_ptr->stats.sent_bundled);
- profile_total = l_ptr->stats.msg_length_counts;
+
+ ret = tipc_snprintf(buf, buf_size, "Link <%s>\n"
+ " %s MTU:%u Priority:%u Tolerance:%u ms"
+ " Window:%u packets\n",
+ l->name, status, l->max_pkt, l->priority,
+ l->tolerance, l->queue_limit[0]);
+
+ ret += tipc_snprintf(buf + ret, buf_size - ret,
+ " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
+ l->next_in_no - s->recv_info, s->recv_fragments,
+ s->recv_fragmented, s->recv_bundles,
+ s->recv_bundled);
+
+ ret += tipc_snprintf(buf + ret, buf_size - ret,
+ " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
+ l->next_out_no - s->sent_info, s->sent_fragments,
+ s->sent_fragmented, s->sent_bundles,
+ s->sent_bundled);
+
+ profile_total = s->msg_length_counts;
if (!profile_total)
profile_total = 1;
- tipc_printf(&pb, " TX profile sample:%u packets average:%u octets\n"
- " 0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% "
- "-16354:%u%% -32768:%u%% -66000:%u%%\n",
- l_ptr->stats.msg_length_counts,
- l_ptr->stats.msg_lengths_total / profile_total,
- percent(l_ptr->stats.msg_length_profile[0], profile_total),
- percent(l_ptr->stats.msg_length_profile[1], profile_total),
- percent(l_ptr->stats.msg_length_profile[2], profile_total),
- percent(l_ptr->stats.msg_length_profile[3], profile_total),
- percent(l_ptr->stats.msg_length_profile[4], profile_total),
- percent(l_ptr->stats.msg_length_profile[5], profile_total),
- percent(l_ptr->stats.msg_length_profile[6], profile_total));
- tipc_printf(&pb, " RX states:%u probes:%u naks:%u defs:%u dups:%u\n",
- l_ptr->stats.recv_states,
- l_ptr->stats.recv_probes,
- l_ptr->stats.recv_nacks,
- l_ptr->stats.deferred_recv,
- l_ptr->stats.duplicates);
- tipc_printf(&pb, " TX states:%u probes:%u naks:%u acks:%u dups:%u\n",
- l_ptr->stats.sent_states,
- l_ptr->stats.sent_probes,
- l_ptr->stats.sent_nacks,
- l_ptr->stats.sent_acks,
- l_ptr->stats.retransmitted);
- tipc_printf(&pb, " Congestion bearer:%u link:%u Send queue max:%u avg:%u\n",
- l_ptr->stats.bearer_congs,
- l_ptr->stats.link_congs,
- l_ptr->stats.max_queue_sz,
- l_ptr->stats.queue_sz_counts
- ? (l_ptr->stats.accu_queue_sz / l_ptr->stats.queue_sz_counts)
- : 0);
+
+ ret += tipc_snprintf(buf + ret, buf_size - ret,
+ " TX profile sample:%u packets average:%u octets\n"
+ " 0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% "
+ "-16384:%u%% -32768:%u%% -66000:%u%%\n",
+ s->msg_length_counts,
+ s->msg_lengths_total / profile_total,
+ percent(s->msg_length_profile[0], profile_total),
+ percent(s->msg_length_profile[1], profile_total),
+ percent(s->msg_length_profile[2], profile_total),
+ percent(s->msg_length_profile[3], profile_total),
+ percent(s->msg_length_profile[4], profile_total),
+ percent(s->msg_length_profile[5], profile_total),
+ percent(s->msg_length_profile[6], profile_total));
+
+ ret += tipc_snprintf(buf + ret, buf_size - ret,
+ " RX states:%u probes:%u naks:%u defs:%u"
+ " dups:%u\n", s->recv_states, s->recv_probes,
+ s->recv_nacks, s->deferred_recv, s->duplicates);
+
+ ret += tipc_snprintf(buf + ret, buf_size - ret,
+ " TX states:%u probes:%u naks:%u acks:%u"
+ " dups:%u\n", s->sent_states, s->sent_probes,
+ s->sent_nacks, s->sent_acks, s->retransmitted);
+
+ ret += tipc_snprintf(buf + ret, buf_size - ret,
+ " Congestion link:%u Send queue"
+ " max:%u avg:%u\n", s->link_congs,
+ s->max_queue_sz, s->queue_sz_counts ?
+ (s->accu_queue_sz / s->queue_sz_counts) : 0);
tipc_node_unlock(node);
- read_unlock_bh(&tipc_net_lock);
- return tipc_printbuf_validate(&pb);
+ return ret;
}
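The rewritten tipc_link_stats() replaces the print_buf machinery with plain offset accumulation: every tipc_snprintf() call returns the number of characters it actually wrote, so the next call appends at buf + ret with buf_size - ret bytes remaining. A minimal user-space analogue of that pattern (demo_snprintf() only mimics the clamped-return behaviour; the link values printed are made up):

#include <stdarg.h>
#include <stdio.h>

/* Stand-in for tipc_snprintf(): never writes more than len - 1 chars
 * and returns the number actually written, so accumulating offsets
 * cannot run past the end of the buffer.
 */
static int demo_snprintf(char *buf, int len, const char *fmt, ...)
{
	va_list args;
	int n;

	if (len <= 0)
		return 0;
	va_start(args, fmt);
	n = vsnprintf(buf, len, fmt, args);
	va_end(args);
	if (n < 0)
		return 0;
	return n < len ? n : len - 1;	/* clamp, scnprintf-style */
}

int main(void)
{
	char buf[128];
	int ret;

	ret = demo_snprintf(buf, sizeof(buf), "Link <%s>\n", "demo-link");
	ret += demo_snprintf(buf + ret, sizeof(buf) - ret,
			     "  RX packets:%u\n", 42u);
	ret += demo_snprintf(buf + ret, sizeof(buf) - ret,
			     "  TX packets:%u\n", 17u);
	fputs(buf, stdout);
	return 0;
}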
-#define MAX_LINK_STATS_INFO 2000
-
struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space)
{
struct sk_buff *buf;
struct tlv_desc *rep_tlv;
int str_len;
+ int pb_len;
+ char *pb;
if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
- buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_LINK_STATS_INFO));
+ buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
if (!buf)
return NULL;
rep_tlv = (struct tlv_desc *)buf->data;
-
+ pb = TLV_DATA(rep_tlv);
+ pb_len = ULTRA_STRING_MAX_LEN;
str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area),
- (char *)TLV_DATA(rep_tlv), MAX_LINK_STATS_INFO);
+ pb, pb_len);
if (!str_len) {
- buf_discard(buf);
+ kfree_skb(buf);
return tipc_cfg_reply_error_string("link not found");
}
-
+ str_len += 1; /* for "\0" */
skb_put(buf, TLV_SPACE(str_len));
TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
@@ -3143,18 +2695,16 @@ struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_s
*
* If no active link can be found, uses default maximum packet size.
*/
-
u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
{
struct tipc_node *n_ptr;
- struct link *l_ptr;
+ struct tipc_link *l_ptr;
u32 res = MAX_PKT_DEFAULT;
if (dest == tipc_own_addr)
return MAX_MSG_SIZE;
- read_lock_bh(&tipc_net_lock);
- n_ptr = tipc_node_select(dest, selector);
+ n_ptr = tipc_node_find(dest);
if (n_ptr) {
tipc_node_lock(n_ptr);
l_ptr = n_ptr->active_links[selector & 1];
@@ -3162,70 +2712,27 @@ u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
res = l_ptr->max_pkt;
tipc_node_unlock(n_ptr);
}
- read_unlock_bh(&tipc_net_lock);
return res;
}
-static void link_dump_send_queue(struct link *l_ptr)
+static void link_print(struct tipc_link *l_ptr, const char *str)
{
- if (l_ptr->next_out) {
- info("\nContents of unsent queue:\n");
- dbg_print_buf_chain(l_ptr->next_out);
- }
- info("\nContents of send queue:\n");
- if (l_ptr->first_out) {
- dbg_print_buf_chain(l_ptr->first_out);
- }
- info("Empty send queue\n");
-}
+ struct tipc_bearer *b_ptr;
+
+ rcu_read_lock();
+ b_ptr = rcu_dereference_rtnl(bearer_list[l_ptr->bearer_id]);
+ if (b_ptr)
+ pr_info("%s Link %x<%s>:", str, l_ptr->addr, b_ptr->name);
+ rcu_read_unlock();
-static void link_print(struct link *l_ptr, struct print_buf *buf,
- const char *str)
-{
- tipc_printf(buf, str);
- if (link_reset_reset(l_ptr) || link_reset_unknown(l_ptr))
- return;
- tipc_printf(buf, "Link %x<%s>:",
- l_ptr->addr, l_ptr->b_ptr->publ.name);
- tipc_printf(buf, ": NXO(%u):", mod(l_ptr->next_out_no));
- tipc_printf(buf, "NXI(%u):", mod(l_ptr->next_in_no));
- tipc_printf(buf, "SQUE");
- if (l_ptr->first_out) {
- tipc_printf(buf, "[%u..", msg_seqno(buf_msg(l_ptr->first_out)));
- if (l_ptr->next_out)
- tipc_printf(buf, "%u..",
- msg_seqno(buf_msg(l_ptr->next_out)));
- tipc_printf(buf, "%u]", msg_seqno(buf_msg(l_ptr->last_out)));
- if ((mod(msg_seqno(buf_msg(l_ptr->last_out)) -
- msg_seqno(buf_msg(l_ptr->first_out)))
- != (l_ptr->out_queue_size - 1)) ||
- (l_ptr->last_out->next != NULL)) {
- tipc_printf(buf, "\nSend queue inconsistency\n");
- tipc_printf(buf, "first_out= %x ", l_ptr->first_out);
- tipc_printf(buf, "next_out= %x ", l_ptr->next_out);
- tipc_printf(buf, "last_out= %x ", l_ptr->last_out);
- link_dump_send_queue(l_ptr);
- }
- } else
- tipc_printf(buf, "[]");
- tipc_printf(buf, "SQSIZ(%u)", l_ptr->out_queue_size);
- if (l_ptr->oldest_deferred_in) {
- u32 o = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
- u32 n = msg_seqno(buf_msg(l_ptr->newest_deferred_in));
- tipc_printf(buf, ":RQUE[%u..%u]", o, n);
- if (l_ptr->deferred_inqueue_sz != mod((n + 1) - o)) {
- tipc_printf(buf, ":RQSIZ(%u)",
- l_ptr->deferred_inqueue_sz);
- }
- }
if (link_working_unknown(l_ptr))
- tipc_printf(buf, ":WU");
- if (link_reset_reset(l_ptr))
- tipc_printf(buf, ":RR");
- if (link_reset_unknown(l_ptr))
- tipc_printf(buf, ":RU");
- if (link_working_working(l_ptr))
- tipc_printf(buf, ":WW");
- tipc_printf(buf, "\n");
+ pr_cont(":WU\n");
+ else if (link_reset_reset(l_ptr))
+ pr_cont(":RR\n");
+ else if (link_reset_unknown(l_ptr))
+ pr_cont(":RU\n");
+ else if (link_working_working(l_ptr))
+ pr_cont(":WW\n");
+ else
+ pr_cont("\n");
}
-
diff --git a/net/tipc/link.h b/net/tipc/link.h
index c562888d25d..200d518b218 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -1,8 +1,8 @@
/*
* net/tipc/link.h: Include file for TIPC link code
*
- * Copyright (c) 1995-2006, Ericsson AB
- * Copyright (c) 2004-2005, Wind River Systems
+ * Copyright (c) 1995-2006, 2013, Ericsson AB
+ * Copyright (c) 2004-2005, 2010-2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -37,51 +37,81 @@
#ifndef _TIPC_LINK_H
#define _TIPC_LINK_H
-#include "dbg.h"
#include "msg.h"
#include "node.h"
-#define PUSH_FAILED 1
-#define PUSH_FINISHED 2
-
-/*
- * Link states
+/* Out-of-range value for link sequence numbers
*/
+#define INVALID_LINK_SEQ 0x10000
+/* Link working states
+ */
#define WORKING_WORKING 560810u
#define WORKING_UNKNOWN 560811u
#define RESET_UNKNOWN 560812u
#define RESET_RESET 560813u
-/*
- * Starting value for maximum packet size negotiation on unicast links
- * (unless bearer MTU is less)
+/* Link endpoint execution states
*/
+#define LINK_STARTED 0x0001
+#define LINK_STOPPED 0x0002
+/* Starting value for maximum packet size negotiation on unicast links
+ * (unless bearer MTU is less)
+ */
#define MAX_PKT_DEFAULT 1500
+struct tipc_stats {
+ u32 sent_info; /* used in counting # sent packets */
+ u32 recv_info; /* used in counting # recv'd packets */
+ u32 sent_states;
+ u32 recv_states;
+ u32 sent_probes;
+ u32 recv_probes;
+ u32 sent_nacks;
+ u32 recv_nacks;
+ u32 sent_acks;
+ u32 sent_bundled;
+ u32 sent_bundles;
+ u32 recv_bundled;
+ u32 recv_bundles;
+ u32 retransmitted;
+ u32 sent_fragmented;
+ u32 sent_fragments;
+ u32 recv_fragmented;
+ u32 recv_fragments;
+ u32 link_congs; /* # port sends blocked by congestion */
+ u32 deferred_recv;
+ u32 duplicates;
+ u32 max_queue_sz; /* send queue size high water mark */
+ u32 accu_queue_sz; /* used for send queue size profiling */
+ u32 queue_sz_counts; /* used for send queue size profiling */
+ u32 msg_length_counts; /* used for message length profiling */
+ u32 msg_lengths_total; /* used for message length profiling */
+ u32 msg_length_profile[7]; /* used for msg. length profiling */
+};
+
/**
- * struct link - TIPC link data structure
+ * struct tipc_link - TIPC link data structure
* @addr: network address of link's peer node
* @name: link name character string
* @media_addr: media address to use when sending messages over link
* @timer: link timer
* @owner: pointer to peer node
- * @link_list: adjacent links in bearer's list of links
- * @started: indicates if link has been started
+ * @flags: execution state flags for link endpoint instance
* @checkpoint: reference point for triggering link continuity checking
* @peer_session: link session # being used by peer end of link
* @peer_bearer_id: bearer id used by link's peer endpoint
- * @b_ptr: pointer to bearer used by link
+ * @bearer_id: local bearer id used by link
* @tolerance: minimum link continuity loss needed to reset link [in ms]
* @continuity_interval: link continuity testing interval [in ms]
* @abort_limit: # of unacknowledged continuity probes needed to reset link
* @state: current state of link FSM
- * @blocked: indicates if link has been administratively blocked
* @fsm_msg_cnt: # of protocol messages link FSM has sent in current state
* @proto_msg: template for control messages generated by link
* @pmsg: convenience pointer to "proto_msg" field
* @priority: current link priority
+ * @net_plane: current link network plane ('A' through 'H')
* @queue_limit: outbound message queue congestion thresholds (indexed by user)
* @exp_msg_count: # of tunnelled messages expected during link changeover
* @reset_checkpoint: seq # of last acknowledged message at time of link reset
@@ -105,30 +135,26 @@
* @next_out: ptr to first unsent outbound message in queue
* @waiting_ports: linked list of ports waiting for link congestion to abate
* @long_msg_seq_no: next identifier to use for outbound fragmented messages
- * @defragm_buf: list of partially reassembled inbound message fragments
+ * @reasm_buf: head of partially reassembled inbound message fragments
* @stats: collects statistics regarding link activity
- * @print_buf: print buffer used to log link activity
*/
-
-struct link {
+struct tipc_link {
u32 addr;
char name[TIPC_MAX_LINK_NAME];
struct tipc_media_addr media_addr;
struct timer_list timer;
struct tipc_node *owner;
- struct list_head link_list;
/* Management and link supervision data */
- int started;
+ unsigned int flags;
u32 checkpoint;
u32 peer_session;
u32 peer_bearer_id;
- struct bearer *b_ptr;
+ u32 bearer_id;
u32 tolerance;
u32 continuity_interval;
u32 abort_limit;
int state;
- int blocked;
u32 fsm_msg_cnt;
struct {
unchar hdr[INT_H_SIZE];
@@ -136,6 +162,7 @@ struct link {
} proto_msg;
struct tipc_msg *pmsg;
u32 priority;
+ char net_plane;
u32 queue_limit[15]; /* queue_limit[0]==window limit */
/* Changeover */
@@ -169,90 +196,62 @@ struct link {
struct sk_buff *next_out;
struct list_head waiting_ports;
- /* Fragmentation/defragmentation */
+ /* Fragmentation/reassembly */
u32 long_msg_seq_no;
- struct sk_buff *defragm_buf;
+ struct sk_buff *reasm_buf;
/* Statistics */
- struct {
- u32 sent_info; /* used in counting # sent packets */
- u32 recv_info; /* used in counting # recv'd packets */
- u32 sent_states;
- u32 recv_states;
- u32 sent_probes;
- u32 recv_probes;
- u32 sent_nacks;
- u32 recv_nacks;
- u32 sent_acks;
- u32 sent_bundled;
- u32 sent_bundles;
- u32 recv_bundled;
- u32 recv_bundles;
- u32 retransmitted;
- u32 sent_fragmented;
- u32 sent_fragments;
- u32 recv_fragmented;
- u32 recv_fragments;
- u32 link_congs; /* # port sends blocked by congestion */
- u32 bearer_congs;
- u32 deferred_recv;
- u32 duplicates;
-
- /* for statistical profiling of send queue size */
-
- u32 max_queue_sz;
- u32 accu_queue_sz;
- u32 queue_sz_counts;
-
- /* for statistical profiling of message lengths */
-
- u32 msg_length_counts;
- u32 msg_lengths_total;
- u32 msg_length_profile[7];
- } stats;
-
- struct print_buf print_buf;
+ struct tipc_stats stats;
};
-struct port;
+struct tipc_port;
-struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
+struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
+ struct tipc_bearer *b_ptr,
const struct tipc_media_addr *media_addr);
-void tipc_link_delete(struct link *l_ptr);
-void tipc_link_changeover(struct link *l_ptr);
-void tipc_link_send_duplicate(struct link *l_ptr, struct link *dest);
-void tipc_link_reset_fragments(struct link *l_ptr);
-int tipc_link_is_up(struct link *l_ptr);
-int tipc_link_is_active(struct link *l_ptr);
-u32 tipc_link_push_packet(struct link *l_ptr);
-void tipc_link_stop(struct link *l_ptr);
-struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space, u16 cmd);
-struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space);
-struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space);
-void tipc_link_reset(struct link *l_ptr);
-int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector);
-int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf);
-u32 tipc_link_get_max_pkt(u32 dest,u32 selector);
-int tipc_link_send_sections_fast(struct port* sender,
- struct iovec const *msg_sect,
- const u32 num_sect,
- u32 destnode);
-void tipc_link_recv_bundle(struct sk_buff *buf);
-int tipc_link_recv_fragment(struct sk_buff **pending,
- struct sk_buff **fb,
- struct tipc_msg **msg);
-void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int prob, u32 gap,
- u32 tolerance, u32 priority, u32 acked_mtu);
-void tipc_link_push_queue(struct link *l_ptr);
+void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down);
+void tipc_link_failover_send_queue(struct tipc_link *l_ptr);
+void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr, struct tipc_link *dest);
+void tipc_link_reset_fragments(struct tipc_link *l_ptr);
+int tipc_link_is_up(struct tipc_link *l_ptr);
+int tipc_link_is_active(struct tipc_link *l_ptr);
+void tipc_link_purge_queues(struct tipc_link *l_ptr);
+struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area,
+ int req_tlv_space,
+ u16 cmd);
+struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area,
+ int req_tlv_space);
+struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area,
+ int req_tlv_space);
+void tipc_link_reset_all(struct tipc_node *node);
+void tipc_link_reset(struct tipc_link *l_ptr);
+void tipc_link_reset_list(unsigned int bearer_id);
+int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector);
+void tipc_link_names_xmit(struct list_head *message_list, u32 dest);
+int __tipc_link_xmit(struct tipc_link *l_ptr, struct sk_buff *buf);
+int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf);
+u32 tipc_link_get_max_pkt(u32 dest, u32 selector);
+int tipc_link_iovec_xmit_fast(struct tipc_port *sender,
+ struct iovec const *msg_sect,
+ unsigned int len, u32 destnode);
+void tipc_link_bundle_rcv(struct sk_buff *buf);
+void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob,
+ u32 gap, u32 tolerance, u32 priority, u32 acked_mtu);
+void tipc_link_push_queue(struct tipc_link *l_ptr);
u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
- struct sk_buff *buf);
-void tipc_link_wakeup_ports(struct link *l_ptr, int all);
-void tipc_link_set_queue_limits(struct link *l_ptr, u32 window);
-void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *start, u32 retransmits);
+ struct sk_buff *buf);
+void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all);
+void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window);
+void tipc_link_retransmit(struct tipc_link *l_ptr,
+ struct sk_buff *start, u32 retransmits);
/*
* Link sequence number manipulation routines (uses modulo 2**16 arithmetic)
*/
+static inline u32 buf_seqno(struct sk_buff *buf)
+{
+ return msg_seqno(buf_msg(buf));
+}
static inline u32 mod(u32 x)
{
@@ -287,33 +286,27 @@ static inline u32 lesser(u32 left, u32 right)
/*
* Link status checking routines
*/
-
-static inline int link_working_working(struct link *l_ptr)
+static inline int link_working_working(struct tipc_link *l_ptr)
{
return l_ptr->state == WORKING_WORKING;
}
-static inline int link_working_unknown(struct link *l_ptr)
+static inline int link_working_unknown(struct tipc_link *l_ptr)
{
return l_ptr->state == WORKING_UNKNOWN;
}
-static inline int link_reset_unknown(struct link *l_ptr)
+static inline int link_reset_unknown(struct tipc_link *l_ptr)
{
return l_ptr->state == RESET_UNKNOWN;
}
-static inline int link_reset_reset(struct link *l_ptr)
+static inline int link_reset_reset(struct tipc_link *l_ptr)
{
return l_ptr->state == RESET_RESET;
}
-static inline int link_blocked(struct link *l_ptr)
-{
- return l_ptr->exp_msg_count || l_ptr->blocked;
-}
-
-static inline int link_congested(struct link *l_ptr)
+static inline int link_congested(struct tipc_link *l_ptr)
{
return l_ptr->out_queue_size >= l_ptr->queue_limit[0];
}
diff --git a/net/tipc/user_reg.h b/net/tipc/log.c
index 109eed0d6de..abef644f27d 100644
--- a/net/tipc/user_reg.h
+++ b/net/tipc/log.c
@@ -1,8 +1,8 @@
/*
- * net/tipc/user_reg.h: Include file for TIPC user registry code
+ * net/tipc/log.c: TIPC print buffer routines for debugging
*
- * Copyright (c) 2000-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 1996-2006, Ericsson AB
+ * Copyright (c) 2005-2007, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -34,18 +34,22 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef _TIPC_USER_REG_H
-#define _TIPC_USER_REG_H
+#include "core.h"
+#include "config.h"
-#include "port.h"
-
-int tipc_reg_start(void);
-void tipc_reg_stop(void);
-
-int tipc_attach(unsigned int *userref);
-void tipc_detach(unsigned int userref);
-
-int tipc_reg_add_port(struct user_port *up_ptr);
-int tipc_reg_remove_port(struct user_port *up_ptr);
+/**
+ * tipc_snprintf - append formatted output to print buffer
+ * @buf: pointer to print buffer
+ * @len: buffer length
+ * @fmt: formatted info to be printed
+ */
+int tipc_snprintf(char *buf, int len, const char *fmt, ...)
+{
+ int i;
+ va_list args;
-#endif
+ va_start(args, fmt);
+ i = vscnprintf(buf, len, fmt, args);
+ va_end(args);
+ return i;
+}
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index ee6b4c68d4a..0a37a472c29 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -1,8 +1,8 @@
/*
* net/tipc/msg.c: TIPC message header routines
*
- * Copyright (c) 2000-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2000-2006, 2014, Ericsson AB
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -35,7 +35,6 @@
*/
#include "core.h"
-#include "addr.h"
#include "msg.h"
u32 tipc_msg_tot_importance(struct tipc_msg *m)
@@ -52,8 +51,8 @@ u32 tipc_msg_tot_importance(struct tipc_msg *m)
}
-void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type,
- u32 hsize, u32 destnode)
+void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize,
+ u32 destnode)
{
memset(m, 0, hsize);
msg_set_version(m);
@@ -62,24 +61,8 @@ void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type,
msg_set_size(m, hsize);
msg_set_prevnode(m, tipc_own_addr);
msg_set_type(m, type);
- if (!msg_short(m)) {
- msg_set_orignode(m, tipc_own_addr);
- msg_set_destnode(m, destnode);
- }
-}
-
-/**
- * tipc_msg_calc_data_size - determine total data size for message
- */
-
-int tipc_msg_calc_data_size(struct iovec const *msg_sect, u32 num_sect)
-{
- int dsz = 0;
- int i;
-
- for (i = 0; i < num_sect; i++)
- dsz += msg_sect[i].iov_len;
- return dsz;
+ msg_set_orignode(m, tipc_own_addr);
+ msg_set_destnode(m, destnode);
}
/**
@@ -89,20 +72,14 @@ int tipc_msg_calc_data_size(struct iovec const *msg_sect, u32 num_sect)
*
* Returns message data size or errno
*/
-
-int tipc_msg_build(struct tipc_msg *hdr,
- struct iovec const *msg_sect, u32 num_sect,
- int max_size, int usrmem, struct sk_buff** buf)
+int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
+ unsigned int len, int max_size, struct sk_buff **buf)
{
- int dsz, sz, hsz, pos, res, cnt;
+ int dsz, sz, hsz;
+ unsigned char *to;
- dsz = tipc_msg_calc_data_size(msg_sect, num_sect);
- if (unlikely(dsz > TIPC_MAX_USER_MSG_SIZE)) {
- *buf = NULL;
- return -EINVAL;
- }
-
- pos = hsz = msg_hdr_sz(hdr);
+ dsz = len;
+ hsz = msg_hdr_sz(hdr);
sz = hsz + dsz;
msg_set_size(hdr, sz);
if (unlikely(sz > max_size)) {
@@ -114,303 +91,69 @@ int tipc_msg_build(struct tipc_msg *hdr,
if (!(*buf))
return -ENOMEM;
skb_copy_to_linear_data(*buf, hdr, hsz);
- for (res = 1, cnt = 0; res && (cnt < num_sect); cnt++) {
- if (likely(usrmem))
- res = !copy_from_user((*buf)->data + pos,
- msg_sect[cnt].iov_base,
- msg_sect[cnt].iov_len);
- else
- skb_copy_to_linear_data_offset(*buf, pos,
- msg_sect[cnt].iov_base,
- msg_sect[cnt].iov_len);
- pos += msg_sect[cnt].iov_len;
+ to = (*buf)->data + hsz;
+ if (len && memcpy_fromiovecend(to, msg_sect, 0, dsz)) {
+ kfree_skb(*buf);
+ *buf = NULL;
+ return -EFAULT;
}
- if (likely(res))
- return dsz;
-
- buf_discard(*buf);
- *buf = NULL;
- return -EFAULT;
+ return dsz;
}
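A hedged sketch of how a sender is expected to drive the slimmed-down API after this change: the total payload length is computed by the caller (the old per-section size pass and the usrmem flag are gone), tipc_msg_init() fills a header template, and tipc_msg_build() copies header plus iovec data into one linear sk_buff via memcpy_fromiovecend(). The wrapper below is illustrative, not a call site from the patch.

/* Illustrative sender; assumes the TIPC internals shown in this diff. */
static int demo_send_named(struct iovec const *iov, unsigned int len,
			   u32 destnode)
{
	struct tipc_msg hdr;
	struct sk_buff *buf;
	int res;

	if (len > TIPC_MAX_USER_MSG_SIZE)
		return -EINVAL;

	tipc_msg_init(&hdr, TIPC_MEDIUM_IMPORTANCE, TIPC_NAMED_MSG,
		      NAMED_H_SIZE, destnode);

	/* Returns the payload size on success, -ENOMEM or -EFAULT on error */
	res = tipc_msg_build(&hdr, iov, len, MAX_MSG_SIZE, &buf);
	if (res < 0)
		return res;

	return tipc_link_xmit(buf, destnode, destnode);
}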
-#ifdef CONFIG_TIPC_DEBUG
-
-void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str)
+/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
+ * @*headbuf: in: NULL for first frag, otherwise value returned from prev call
+ * out: set when successful non-complete reassembly, otherwise NULL
+ * @*buf: in: the buffer to append. Always defined
+ * out: head buf after successful complete reassembly, otherwise NULL
+ * Returns 1 when reassembly complete, otherwise 0
+ */
+int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
{
- u32 usr = msg_user(msg);
- tipc_printf(buf, str);
-
- switch (usr) {
- case MSG_BUNDLER:
- tipc_printf(buf, "BNDL::");
- tipc_printf(buf, "MSGS(%u):", msg_msgcnt(msg));
- break;
- case BCAST_PROTOCOL:
- tipc_printf(buf, "BCASTP::");
- break;
- case MSG_FRAGMENTER:
- tipc_printf(buf, "FRAGM::");
- switch (msg_type(msg)) {
- case FIRST_FRAGMENT:
- tipc_printf(buf, "FIRST:");
- break;
- case FRAGMENT:
- tipc_printf(buf, "BODY:");
- break;
- case LAST_FRAGMENT:
- tipc_printf(buf, "LAST:");
- break;
- default:
- tipc_printf(buf, "UNKNOWN:%x",msg_type(msg));
-
- }
- tipc_printf(buf, "NO(%u/%u):",msg_long_msgno(msg),
- msg_fragm_no(msg));
- break;
- case TIPC_LOW_IMPORTANCE:
- case TIPC_MEDIUM_IMPORTANCE:
- case TIPC_HIGH_IMPORTANCE:
- case TIPC_CRITICAL_IMPORTANCE:
- tipc_printf(buf, "DAT%u:", msg_user(msg));
- if (msg_short(msg)) {
- tipc_printf(buf, "CON:");
- break;
- }
- switch (msg_type(msg)) {
- case TIPC_CONN_MSG:
- tipc_printf(buf, "CON:");
- break;
- case TIPC_MCAST_MSG:
- tipc_printf(buf, "MCST:");
- break;
- case TIPC_NAMED_MSG:
- tipc_printf(buf, "NAM:");
- break;
- case TIPC_DIRECT_MSG:
- tipc_printf(buf, "DIR:");
- break;
- default:
- tipc_printf(buf, "UNKNOWN TYPE %u",msg_type(msg));
- }
- if (msg_routed(msg) && !msg_non_seq(msg))
- tipc_printf(buf, "ROUT:");
- if (msg_reroute_cnt(msg))
- tipc_printf(buf, "REROUTED(%u):",
- msg_reroute_cnt(msg));
- break;
- case NAME_DISTRIBUTOR:
- tipc_printf(buf, "NMD::");
- switch (msg_type(msg)) {
- case PUBLICATION:
- tipc_printf(buf, "PUBL(%u):", (msg_size(msg) - msg_hdr_sz(msg)) / 20); /* Items */
- break;
- case WITHDRAWAL:
- tipc_printf(buf, "WDRW:");
- break;
- default:
- tipc_printf(buf, "UNKNOWN:%x",msg_type(msg));
- }
- if (msg_routed(msg))
- tipc_printf(buf, "ROUT:");
- if (msg_reroute_cnt(msg))
- tipc_printf(buf, "REROUTED(%u):",
- msg_reroute_cnt(msg));
- break;
- case CONN_MANAGER:
- tipc_printf(buf, "CONN_MNG:");
- switch (msg_type(msg)) {
- case CONN_PROBE:
- tipc_printf(buf, "PROBE:");
- break;
- case CONN_PROBE_REPLY:
- tipc_printf(buf, "PROBE_REPLY:");
- break;
- case CONN_ACK:
- tipc_printf(buf, "CONN_ACK:");
- tipc_printf(buf, "ACK(%u):",msg_msgcnt(msg));
- break;
- default:
- tipc_printf(buf, "UNKNOWN TYPE:%x",msg_type(msg));
- }
- if (msg_routed(msg))
- tipc_printf(buf, "ROUT:");
- if (msg_reroute_cnt(msg))
- tipc_printf(buf, "REROUTED(%u):",msg_reroute_cnt(msg));
- break;
- case LINK_PROTOCOL:
- tipc_printf(buf, "PROT:TIM(%u):",msg_timestamp(msg));
- switch (msg_type(msg)) {
- case STATE_MSG:
- tipc_printf(buf, "STATE:");
- tipc_printf(buf, "%s:",msg_probe(msg) ? "PRB" :"");
- tipc_printf(buf, "NXS(%u):",msg_next_sent(msg));
- tipc_printf(buf, "GAP(%u):",msg_seq_gap(msg));
- tipc_printf(buf, "LSTBC(%u):",msg_last_bcast(msg));
- break;
- case RESET_MSG:
- tipc_printf(buf, "RESET:");
- if (msg_size(msg) != msg_hdr_sz(msg))
- tipc_printf(buf, "BEAR:%s:",msg_data(msg));
- break;
- case ACTIVATE_MSG:
- tipc_printf(buf, "ACTIVATE:");
- break;
- default:
- tipc_printf(buf, "UNKNOWN TYPE:%x",msg_type(msg));
- }
- tipc_printf(buf, "PLANE(%c):",msg_net_plane(msg));
- tipc_printf(buf, "SESS(%u):",msg_session(msg));
- break;
- case CHANGEOVER_PROTOCOL:
- tipc_printf(buf, "TUNL:");
- switch (msg_type(msg)) {
- case DUPLICATE_MSG:
- tipc_printf(buf, "DUPL:");
- break;
- case ORIGINAL_MSG:
- tipc_printf(buf, "ORIG:");
- tipc_printf(buf, "EXP(%u)",msg_msgcnt(msg));
- break;
- default:
- tipc_printf(buf, "UNKNOWN TYPE:%x",msg_type(msg));
- }
- break;
- case ROUTE_DISTRIBUTOR:
- tipc_printf(buf, "ROUTING_MNG:");
- switch (msg_type(msg)) {
- case EXT_ROUTING_TABLE:
- tipc_printf(buf, "EXT_TBL:");
- tipc_printf(buf, "TO:%x:",msg_remote_node(msg));
- break;
- case LOCAL_ROUTING_TABLE:
- tipc_printf(buf, "LOCAL_TBL:");
- tipc_printf(buf, "TO:%x:",msg_remote_node(msg));
- break;
- case SLAVE_ROUTING_TABLE:
- tipc_printf(buf, "DP_TBL:");
- tipc_printf(buf, "TO:%x:",msg_remote_node(msg));
- break;
- case ROUTE_ADDITION:
- tipc_printf(buf, "ADD:");
- tipc_printf(buf, "TO:%x:",msg_remote_node(msg));
- break;
- case ROUTE_REMOVAL:
- tipc_printf(buf, "REMOVE:");
- tipc_printf(buf, "TO:%x:",msg_remote_node(msg));
- break;
- default:
- tipc_printf(buf, "UNKNOWN TYPE:%x",msg_type(msg));
- }
- break;
- case LINK_CONFIG:
- tipc_printf(buf, "CFG:");
- switch (msg_type(msg)) {
- case DSC_REQ_MSG:
- tipc_printf(buf, "DSC_REQ:");
- break;
- case DSC_RESP_MSG:
- tipc_printf(buf, "DSC_RESP:");
- break;
- default:
- tipc_printf(buf, "UNKNOWN TYPE:%x:",msg_type(msg));
- break;
- }
- break;
- default:
- tipc_printf(buf, "UNKNOWN USER:");
- }
-
- switch (usr) {
- case CONN_MANAGER:
- case TIPC_LOW_IMPORTANCE:
- case TIPC_MEDIUM_IMPORTANCE:
- case TIPC_HIGH_IMPORTANCE:
- case TIPC_CRITICAL_IMPORTANCE:
- switch (msg_errcode(msg)) {
- case TIPC_OK:
- break;
- case TIPC_ERR_NO_NAME:
- tipc_printf(buf, "NO_NAME:");
- break;
- case TIPC_ERR_NO_PORT:
- tipc_printf(buf, "NO_PORT:");
- break;
- case TIPC_ERR_NO_NODE:
- tipc_printf(buf, "NO_PROC:");
- break;
- case TIPC_ERR_OVERLOAD:
- tipc_printf(buf, "OVERLOAD:");
- break;
- case TIPC_CONN_SHUTDOWN:
- tipc_printf(buf, "SHUTDOWN:");
- break;
- default:
- tipc_printf(buf, "UNKNOWN ERROR(%x):",
- msg_errcode(msg));
- }
- default:{}
- }
-
- tipc_printf(buf, "HZ(%u):", msg_hdr_sz(msg));
- tipc_printf(buf, "SZ(%u):", msg_size(msg));
- tipc_printf(buf, "SQNO(%u):", msg_seqno(msg));
-
- if (msg_non_seq(msg))
- tipc_printf(buf, "NOSEQ:");
- else {
- tipc_printf(buf, "ACK(%u):", msg_ack(msg));
- }
- tipc_printf(buf, "BACK(%u):", msg_bcast_ack(msg));
- tipc_printf(buf, "PRND(%x)", msg_prevnode(msg));
-
- if (msg_isdata(msg)) {
- if (msg_named(msg)) {
- tipc_printf(buf, "NTYP(%u):", msg_nametype(msg));
- tipc_printf(buf, "NINST(%u)", msg_nameinst(msg));
- }
- }
-
- if ((usr != LINK_PROTOCOL) && (usr != LINK_CONFIG) &&
- (usr != MSG_BUNDLER)) {
- if (!msg_short(msg)) {
- tipc_printf(buf, ":ORIG(%x:%u):",
- msg_orignode(msg), msg_origport(msg));
- tipc_printf(buf, ":DEST(%x:%u):",
- msg_destnode(msg), msg_destport(msg));
- } else {
- tipc_printf(buf, ":OPRT(%u):", msg_origport(msg));
- tipc_printf(buf, ":DPRT(%u):", msg_destport(msg));
- }
- if (msg_routed(msg) && !msg_non_seq(msg))
- tipc_printf(buf, ":TSEQN(%u)", msg_transp_seqno(msg));
- }
- if (msg_user(msg) == NAME_DISTRIBUTOR) {
- tipc_printf(buf, ":ONOD(%x):", msg_orignode(msg));
- tipc_printf(buf, ":DNOD(%x):", msg_destnode(msg));
- if (msg_routed(msg)) {
- tipc_printf(buf, ":CSEQN(%u)", msg_transp_seqno(msg));
- }
- }
-
- if (msg_user(msg) == LINK_CONFIG) {
- u32* raw = (u32*)msg;
- struct tipc_media_addr* orig = (struct tipc_media_addr*)&raw[5];
- tipc_printf(buf, ":REQL(%u):", msg_req_links(msg));
- tipc_printf(buf, ":DDOM(%x):", msg_dest_domain(msg));
- tipc_printf(buf, ":NETID(%u):", msg_bc_netid(msg));
- tipc_media_addr_printf(buf, orig);
- }
- if (msg_user(msg) == BCAST_PROTOCOL) {
- tipc_printf(buf, "BCNACK:AFTER(%u):", msg_bcgap_after(msg));
- tipc_printf(buf, "TO(%u):", msg_bcgap_to(msg));
+ struct sk_buff *head = *headbuf;
+ struct sk_buff *frag = *buf;
+ struct sk_buff *tail;
+ struct tipc_msg *msg = buf_msg(frag);
+ u32 fragid = msg_type(msg);
+ bool headstolen;
+ int delta;
+
+ skb_pull(frag, msg_hdr_sz(msg));
+
+ if (fragid == FIRST_FRAGMENT) {
+ if (head || skb_unclone(frag, GFP_ATOMIC))
+ goto out_free;
+ head = *headbuf = frag;
+ skb_frag_list_init(head);
+ *buf = NULL;
+ return 0;
}
- tipc_printf(buf, "\n");
- if ((usr == CHANGEOVER_PROTOCOL) && (msg_msgcnt(msg))) {
- tipc_msg_dbg(buf, msg_get_wrapped(msg), " /");
+ if (!head)
+ goto out_free;
+ tail = TIPC_SKB_CB(head)->tail;
+ if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
+ kfree_skb_partial(frag, headstolen);
+ } else {
+ if (!skb_has_frag_list(head))
+ skb_shinfo(head)->frag_list = frag;
+ else
+ tail->next = frag;
+ head->truesize += frag->truesize;
+ head->data_len += frag->len;
+ head->len += frag->len;
+ TIPC_SKB_CB(head)->tail = frag;
}
- if ((usr == MSG_FRAGMENTER) && (msg_type(msg) == FIRST_FRAGMENT)) {
- tipc_msg_dbg(buf, msg_get_wrapped(msg), " /");
+ if (fragid == LAST_FRAGMENT) {
+ *buf = head;
+ TIPC_SKB_CB(head)->tail = NULL;
+ *headbuf = NULL;
+ return 1;
}
+ *buf = NULL;
+ return 0;
+out_free:
+ pr_warn_ratelimited("Unable to build fragment list\n");
+ kfree_skb(*buf);
+ kfree_skb(*headbuf);
+ *buf = *headbuf = NULL;
+ return 0;
}
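A hedged sketch of the calling convention described in the comment above: the receiver keeps one persistent head pointer per reassembly context (the per-link reasm_buf added in link.h below), feeds every arriving fragment through tipc_buf_append(), and only when the function returns 1 does *buf point at the complete message. The receive helper and demo_deliver() below are illustrative, not the patch's actual call sites.

/* Illustrative fragment handling built on tipc_buf_append(). */
static void demo_fragment_rcv(struct tipc_link *l_ptr, struct sk_buff *frag)
{
	if (tipc_buf_append(&l_ptr->reasm_buf, &frag) == 1) {
		/* frag is now the fully reassembled message and
		 * l_ptr->reasm_buf has been reset to NULL.
		 */
		demo_deliver(frag);	/* hypothetical delivery path */
		return;
	}
	/* Return value 0 covers two cases: an intermediate fragment was
	 * absorbed (reasm_buf still set, frag consumed), or the sequence
	 * was invalid and both pointers were freed and set to NULL.
	 */
}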
-
-#endif
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index aee53864d7a..503511903d1 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -1,8 +1,8 @@
/*
* net/tipc/msg.h: Include file for TIPC message header routines
*
- * Copyright (c) 2000-2007, Ericsson AB
- * Copyright (c) 2005-2008, Wind River Systems
+ * Copyright (c) 2000-2007, 2014, Ericsson AB
+ * Copyright (c) 2005-2008, 2010-2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -39,59 +39,44 @@
#include "bearer.h"
+/*
+ * Constants and routines used to read and write TIPC payload message headers
+ *
+ * Note: Some items are also used with TIPC internal message headers
+ */
#define TIPC_VERSION 2
/*
- * TIPC user data message header format, version 2:
- *
- *
- * 1 0 9 8 7 6 5 4|3 2 1 0 9 8 7 6|5 4 3 2 1 0 9 8|7 6 5 4 3 2 1 0
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * w0:|vers | user |hdr sz |n|d|s|-| message size |
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * w1:|mstyp| error |rer cnt|lsc|opt p| broadcast ack no |
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * w2:| link level ack no | broadcast/link level seq no |
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * w3:| previous node |
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * w4:| originating port |
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * w5:| destination port |
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * w6:| originating node |
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * w7:| destination node |
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * w8:| name type / transport sequence number |
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * w9:| name instance/multicast lower bound |
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * wA:| multicast upper bound |
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * / /
- * \ options \
- * / /
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- *
+ * Payload message users are defined in TIPC's public API:
+ * - TIPC_LOW_IMPORTANCE
+ * - TIPC_MEDIUM_IMPORTANCE
+ * - TIPC_HIGH_IMPORTANCE
+ * - TIPC_CRITICAL_IMPORTANCE
*/
+/*
+ * Payload message types
+ */
#define TIPC_CONN_MSG 0
#define TIPC_MCAST_MSG 1
#define TIPC_NAMED_MSG 2
#define TIPC_DIRECT_MSG 3
-
-#define SHORT_H_SIZE 24 /* Connected, in-cluster messages */
-#define DIR_MSG_H_SIZE 32 /* Directly addressed messages */
-#define LONG_H_SIZE 40 /* Named messages */
-#define MCAST_H_SIZE 44 /* Multicast messages */
+/*
+ * Message header sizes
+ */
+#define SHORT_H_SIZE 24 /* In-cluster basic payload message */
+#define BASIC_H_SIZE 32 /* Basic payload message */
+#define NAMED_H_SIZE 40 /* Named payload message */
+#define MCAST_H_SIZE 44 /* Multicast payload message */
#define INT_H_SIZE 40 /* Internal messages */
#define MIN_H_SIZE 24 /* Smallest legal TIPC header size */
#define MAX_H_SIZE 60 /* Largest possible TIPC header size */
#define MAX_MSG_SIZE (MAX_H_SIZE + TIPC_MAX_USER_MSG_SIZE)
+#define TIPC_MEDIA_ADDR_OFFSET 5
+
struct tipc_msg {
__be32 hdr[15];
@@ -133,7 +118,6 @@ static inline void msg_swap_words(struct tipc_msg *msg, u32 a, u32 b)
/*
* Word 0
*/
-
static inline u32 msg_version(struct tipc_msg *m)
{
return msg_bits(m, 0, 29, 7);
@@ -174,7 +158,7 @@ static inline u32 msg_hdr_sz(struct tipc_msg *m)
return msg_bits(m, 0, 21, 0xf) << 2;
}
-static inline void msg_set_hdr_sz(struct tipc_msg *m,u32 n)
+static inline void msg_set_hdr_sz(struct tipc_msg *m, u32 n)
{
msg_set_bits(m, 0, 21, 0xf, n>>2);
}
@@ -228,7 +212,6 @@ static inline void msg_set_size(struct tipc_msg *m, u32 sz)
/*
* Word 1
*/
-
static inline u32 msg_type(struct tipc_msg *m)
{
return msg_bits(m, 1, 29, 0x7);
@@ -303,7 +286,6 @@ static inline void msg_set_bcast_ack(struct tipc_msg *m, u32 n)
/*
* Word 2
*/
-
static inline u32 msg_ack(struct tipc_msg *m)
{
return msg_bits(m, 2, 16, 0xffff);
@@ -325,30 +307,8 @@ static inline void msg_set_seqno(struct tipc_msg *m, u32 n)
}
/*
- * TIPC may utilize the "link ack #" and "link seq #" fields of a short
- * message header to hold the destination node for the message, since the
- * normal "dest node" field isn't present. This cache is only referenced
- * when required, so populating the cache of a longer message header is
- * harmless (as long as the header has the two link sequence fields present).
- *
- * Note: Host byte order is OK here, since the info never goes off-card.
- */
-
-static inline u32 msg_destnode_cache(struct tipc_msg *m)
-{
- return m->hdr[2];
-}
-
-static inline void msg_set_destnode_cache(struct tipc_msg *m, u32 dnode)
-{
- m->hdr[2] = dnode;
-}
-
-/*
* Words 3-10
*/
-
-
static inline u32 msg_prevnode(struct tipc_msg *m)
{
return msg_word(m, 3);
@@ -391,7 +351,7 @@ static inline void msg_set_mc_netid(struct tipc_msg *m, u32 p)
static inline int msg_short(struct tipc_msg *m)
{
- return msg_hdr_sz(m) == 24;
+ return msg_hdr_sz(m) == SHORT_H_SIZE;
}
static inline u32 msg_orignode(struct tipc_msg *m)
@@ -416,18 +376,6 @@ static inline void msg_set_destnode(struct tipc_msg *m, u32 a)
msg_set_word(m, 7, a);
}
-static inline int msg_is_dest(struct tipc_msg *m, u32 d)
-{
- return msg_short(m) || (msg_destnode(m) == d);
-}
-
-static inline u32 msg_routed(struct tipc_msg *m)
-{
- if (likely(msg_short(m)))
- return 0;
- return(msg_destnode(m) ^ msg_orignode(m)) >> 11;
-}
-
static inline u32 msg_nametype(struct tipc_msg *m)
{
return msg_word(m, 8);
@@ -438,26 +386,6 @@ static inline void msg_set_nametype(struct tipc_msg *m, u32 n)
msg_set_word(m, 8, n);
}
-static inline u32 msg_transp_seqno(struct tipc_msg *m)
-{
- return msg_word(m, 8);
-}
-
-static inline void msg_set_timestamp(struct tipc_msg *m, u32 n)
-{
- msg_set_word(m, 8, n);
-}
-
-static inline u32 msg_timestamp(struct tipc_msg *m)
-{
- return msg_word(m, 8);
-}
-
-static inline void msg_set_transp_seqno(struct tipc_msg *m, u32 n)
-{
- msg_set_word(m, 8, n);
-}
-
static inline u32 msg_nameinst(struct tipc_msg *m)
{
return msg_word(m, 9);
@@ -498,75 +426,66 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
return (struct tipc_msg *)msg_data(m);
}
-
/*
- TIPC internal message header format, version 2
-
- 1 0 9 8 7 6 5 4|3 2 1 0 9 8 7 6|5 4 3 2 1 0 9 8|7 6 5 4 3 2 1 0
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- w0:|vers |msg usr|hdr sz |n|resrv| packet size |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- w1:|m typ| sequence gap | broadcast ack no |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- w2:| link level ack no/bc_gap_from | seq no / bcast_gap_to |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- w3:| previous node |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- w4:| next sent broadcast/fragm no | next sent pkt/ fragm msg no |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- w5:| session no |rsv=0|r|berid|link prio|netpl|p|
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- w6:| originating node |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- w7:| destination node |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- w8:| transport sequence number |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- w9:| msg count / bcast tag | link tolerance |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- \ \
- / User Specific Data /
- \ \
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-
- NB: CONN_MANAGER use data message format. LINK_CONFIG has own format.
-*/
+ * Constants and routines used to read and write TIPC internal message headers
+ */
/*
- * Internal users
+ * Internal message users
*/
-
#define BCAST_PROTOCOL 5
#define MSG_BUNDLER 6
#define LINK_PROTOCOL 7
#define CONN_MANAGER 8
-#define ROUTE_DISTRIBUTOR 9
+#define ROUTE_DISTRIBUTOR 9 /* obsoleted */
#define CHANGEOVER_PROTOCOL 10
#define NAME_DISTRIBUTOR 11
#define MSG_FRAGMENTER 12
#define LINK_CONFIG 13
-#define DSC_H_SIZE 40
/*
- * Connection management protocol messages
+ * Connection management protocol message types
*/
-
#define CONN_PROBE 0
#define CONN_PROBE_REPLY 1
#define CONN_ACK 2
/*
- * Name distributor messages
+ * Name distributor message types
*/
-
#define PUBLICATION 0
#define WITHDRAWAL 1
+/*
+ * Segmentation message types
+ */
+#define FIRST_FRAGMENT 0
+#define FRAGMENT 1
+#define LAST_FRAGMENT 2
+
+/*
+ * Link management protocol message types
+ */
+#define STATE_MSG 0
+#define RESET_MSG 1
+#define ACTIVATE_MSG 2
/*
- * Word 1
+ * Changeover tunnel message types
*/
+#define DUPLICATE_MSG 0
+#define ORIGINAL_MSG 1
+/*
+ * Config protocol message types
+ */
+#define DSC_REQ_MSG 0
+#define DSC_RESP_MSG 1
+
+
+/*
+ * Word 1
+ */
static inline u32 msg_seq_gap(struct tipc_msg *m)
{
return msg_bits(m, 1, 16, 0x1fff);
@@ -577,21 +496,20 @@ static inline void msg_set_seq_gap(struct tipc_msg *m, u32 n)
msg_set_bits(m, 1, 16, 0x1fff, n);
}
-static inline u32 msg_req_links(struct tipc_msg *m)
+static inline u32 msg_node_sig(struct tipc_msg *m)
{
- return msg_bits(m, 1, 16, 0xfff);
+ return msg_bits(m, 1, 0, 0xffff);
}
-static inline void msg_set_req_links(struct tipc_msg *m, u32 n)
+static inline void msg_set_node_sig(struct tipc_msg *m, u32 n)
{
- msg_set_bits(m, 1, 16, 0xfff, n);
+ msg_set_bits(m, 1, 0, 0xffff, n);
}
/*
* Word 2
*/
-
static inline u32 msg_dest_domain(struct tipc_msg *m)
{
return msg_word(m, 2);
@@ -626,7 +544,6 @@ static inline void msg_set_bcgap_to(struct tipc_msg *m, u32 n)
/*
* Word 4
*/
-
static inline u32 msg_last_bcast(struct tipc_msg *m)
{
return msg_bits(m, 4, 16, 0xffff);
@@ -637,12 +554,6 @@ static inline void msg_set_last_bcast(struct tipc_msg *m, u32 n)
msg_set_bits(m, 4, 16, 0xffff, n);
}
-
-static inline u32 msg_fragm_no(struct tipc_msg *m)
-{
- return msg_bits(m, 4, 16, 0xffff);
-}
-
static inline void msg_set_fragm_no(struct tipc_msg *m, u32 n)
{
msg_set_bits(m, 4, 16, 0xffff, n);
@@ -659,12 +570,6 @@ static inline void msg_set_next_sent(struct tipc_msg *m, u32 n)
msg_set_bits(m, 4, 0, 0xffff, n);
}
-
-static inline u32 msg_long_msgno(struct tipc_msg *m)
-{
- return msg_bits(m, 4, 0, 0xffff);
-}
-
static inline void msg_set_long_msgno(struct tipc_msg *m, u32 n)
{
msg_set_bits(m, 4, 0, 0xffff, n);
@@ -687,13 +592,12 @@ static inline u32 msg_link_selector(struct tipc_msg *m)
static inline void msg_set_link_selector(struct tipc_msg *m, u32 n)
{
- msg_set_bits(m, 4, 0, 1, (n & 1));
+ msg_set_bits(m, 4, 0, 1, n);
}
/*
* Word 5
*/
-
static inline u32 msg_session(struct tipc_msg *m)
{
return msg_bits(m, 5, 16, 0xffff);
@@ -711,7 +615,7 @@ static inline u32 msg_probe(struct tipc_msg *m)
static inline void msg_set_probe(struct tipc_msg *m, u32 val)
{
- msg_set_bits(m, 5, 0, 1, (val & 1));
+ msg_set_bits(m, 5, 0, 1, val);
}
static inline char msg_net_plane(struct tipc_msg *m)
@@ -749,21 +653,19 @@ static inline u32 msg_redundant_link(struct tipc_msg *m)
return msg_bits(m, 5, 12, 0x1);
}
-static inline void msg_set_redundant_link(struct tipc_msg *m)
+static inline void msg_set_redundant_link(struct tipc_msg *m, u32 r)
{
- msg_set_bits(m, 5, 12, 0x1, 1);
+ msg_set_bits(m, 5, 12, 0x1, r);
}
-static inline void msg_clear_redundant_link(struct tipc_msg *m)
+static inline char *msg_media_addr(struct tipc_msg *m)
{
- msg_set_bits(m, 5, 12, 0x1, 0);
+ return (char *)&m->hdr[TIPC_MEDIA_ADDR_OFFSET];
}
-
/*
* Word 9
*/
-
static inline u32 msg_msgcnt(struct tipc_msg *m)
{
return msg_bits(m, 9, 16, 0xffff);
@@ -804,80 +706,12 @@ static inline void msg_set_link_tolerance(struct tipc_msg *m, u32 n)
msg_set_bits(m, 9, 0, 0xffff, n);
}
-/*
- * Routing table message data
- */
-
-
-static inline u32 msg_remote_node(struct tipc_msg *m)
-{
- return msg_word(m, msg_hdr_sz(m)/4);
-}
-
-static inline void msg_set_remote_node(struct tipc_msg *m, u32 a)
-{
- msg_set_word(m, msg_hdr_sz(m)/4, a);
-}
-
-static inline void msg_set_dataoctet(struct tipc_msg *m, u32 pos)
-{
- msg_data(m)[pos + 4] = 1;
-}
-
-/*
- * Segmentation message types
- */
-
-#define FIRST_FRAGMENT 0
-#define FRAGMENT 1
-#define LAST_FRAGMENT 2
-
-/*
- * Link management protocol message types
- */
-
-#define STATE_MSG 0
-#define RESET_MSG 1
-#define ACTIVATE_MSG 2
-
-/*
- * Changeover tunnel message types
- */
-#define DUPLICATE_MSG 0
-#define ORIGINAL_MSG 1
-
-/*
- * Routing table message types
- */
-#define EXT_ROUTING_TABLE 0
-#define LOCAL_ROUTING_TABLE 1
-#define SLAVE_ROUTING_TABLE 2
-#define ROUTE_ADDITION 3
-#define ROUTE_REMOVAL 4
-
-/*
- * Config protocol message types
- */
-
-#define DSC_REQ_MSG 0
-#define DSC_RESP_MSG 1
-
u32 tipc_msg_tot_importance(struct tipc_msg *m);
-void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type,
- u32 hsize, u32 destnode);
-int tipc_msg_calc_data_size(struct iovec const *msg_sect, u32 num_sect);
-int tipc_msg_build(struct tipc_msg *hdr,
- struct iovec const *msg_sect, u32 num_sect,
- int max_size, int usrmem, struct sk_buff** buf);
-
-static inline void msg_set_media_addr(struct tipc_msg *m, struct tipc_media_addr *a)
-{
- memcpy(&((int *)m)[5], a, sizeof(*a));
-}
+void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize,
+ u32 destnode);
+int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
+ unsigned int len, int max_size, struct sk_buff **buf);
-static inline void msg_get_media_addr(struct tipc_msg *m, struct tipc_media_addr *a)
-{
- memcpy(a, &((int*)m)[5], sizeof(*a));
-}
+int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf);
#endif
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 10ff48be3c0..8ce730984aa 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -2,7 +2,7 @@
* net/tipc/name_distr.c: TIPC name distribution code
*
* Copyright (c) 2000-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -35,51 +35,45 @@
*/
#include "core.h"
-#include "cluster.h"
#include "link.h"
#include "name_distr.h"
-#define ITEM_SIZE sizeof(struct distr_item)
-
/**
- * struct distr_item - publication info distributed to other nodes
- * @type: name sequence type
- * @lower: name sequence lower bound
- * @upper: name sequence upper bound
- * @ref: publishing port reference
- * @key: publication key
- *
- * ===> All fields are stored in network byte order. <===
- *
- * First 3 fields identify (name or) name sequence being published.
- * Reference field uniquely identifies port that published name sequence.
- * Key field uniquely identifies publication, in the event a port has
- * multiple publications of the same name sequence.
- *
- * Note: There is no field that identifies the publishing node because it is
- * the same for all items contained within a publication message.
+ * struct publ_list - list of publications made by this node
+ * @list: circular list of publications
+ * @size: number of entries in list
*/
+struct publ_list {
+ struct list_head list;
+ u32 size;
+};
-struct distr_item {
- __be32 type;
- __be32 lower;
- __be32 upper;
- __be32 ref;
- __be32 key;
+static struct publ_list publ_zone = {
+ .list = LIST_HEAD_INIT(publ_zone.list),
+ .size = 0,
};
-/**
- * List of externally visible publications by this node --
- * that is, all publications having scope > TIPC_NODE_SCOPE.
- */
+static struct publ_list publ_cluster = {
+ .list = LIST_HEAD_INIT(publ_cluster.list),
+ .size = 0,
+};
+
+static struct publ_list publ_node = {
+ .list = LIST_HEAD_INIT(publ_node.list),
+ .size = 0,
+};
+
+static struct publ_list *publ_lists[] = {
+ NULL,
+ &publ_zone, /* publ_lists[TIPC_ZONE_SCOPE] */
+ &publ_cluster, /* publ_lists[TIPC_CLUSTER_SCOPE] */
+ &publ_node /* publ_lists[TIPC_NODE_SCOPE] */
+};
-static LIST_HEAD(publ_root);
-static u32 publ_cnt = 0;
/**
* publ_to_item - add publication info to a publication message
*/
-
static void publ_to_item(struct distr_item *i, struct publication *p)
{
i->type = htonl(p->type);
@@ -87,100 +81,120 @@ static void publ_to_item(struct distr_item *i, struct publication *p)
i->upper = htonl(p->upper);
i->ref = htonl(p->ref);
i->key = htonl(p->key);
- dbg("publ_to_item: %u, %u, %u\n", p->type, p->lower, p->upper);
}
/**
* named_prepare_buf - allocate & initialize a publication message
*/
-
static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
{
- struct sk_buff *buf = tipc_buf_acquire(LONG_H_SIZE + size);
+ struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size);
struct tipc_msg *msg;
if (buf != NULL) {
msg = buf_msg(buf);
- tipc_msg_init(msg, NAME_DISTRIBUTOR, type, LONG_H_SIZE, dest);
- msg_set_size(msg, LONG_H_SIZE + size);
+ tipc_msg_init(msg, NAME_DISTRIBUTOR, type, INT_H_SIZE, dest);
+ msg_set_size(msg, INT_H_SIZE + size);
}
return buf;
}
+void named_cluster_distribute(struct sk_buff *buf)
+{
+ struct sk_buff *buf_copy;
+ struct tipc_node *n_ptr;
+ struct tipc_link *l_ptr;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
+ tipc_node_lock(n_ptr);
+ l_ptr = n_ptr->active_links[n_ptr->addr & 1];
+ if (l_ptr) {
+ buf_copy = skb_copy(buf, GFP_ATOMIC);
+ if (!buf_copy) {
+ tipc_node_unlock(n_ptr);
+ break;
+ }
+ msg_set_destnode(buf_msg(buf_copy), n_ptr->addr);
+ __tipc_link_xmit(l_ptr, buf_copy);
+ }
+ tipc_node_unlock(n_ptr);
+ }
+ rcu_read_unlock();
+
+ kfree_skb(buf);
+}
+
/**
* tipc_named_publish - tell other nodes about a new publication by this node
*/
-
-void tipc_named_publish(struct publication *publ)
+struct sk_buff *tipc_named_publish(struct publication *publ)
{
struct sk_buff *buf;
struct distr_item *item;
- list_add_tail(&publ->local_list, &publ_root);
- publ_cnt++;
+ list_add_tail(&publ->local_list, &publ_lists[publ->scope]->list);
+ publ_lists[publ->scope]->size++;
+
+ if (publ->scope == TIPC_NODE_SCOPE)
+ return NULL;
buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0);
if (!buf) {
- warn("Publication distribution failure\n");
- return;
+ pr_warn("Publication distribution failure\n");
+ return NULL;
}
item = (struct distr_item *)msg_data(buf_msg(buf));
publ_to_item(item, publ);
- dbg("tipc_named_withdraw: broadcasting publish msg\n");
- tipc_cltr_broadcast(buf);
+ return buf;
}
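The single publ_root list is gone: tipc_named_publish() above now files each publication on a per-scope list and only returns a distribution buffer for zone- and cluster-scope names, leaving the actual sending (named_cluster_distribute()) to the caller. A hedged sketch of just the filing decision (TIPC_ZONE_SCOPE, TIPC_CLUSTER_SCOPE and TIPC_NODE_SCOPE are the standard scope values 1-3; demo_file_publication() is illustrative):

/* Illustrative: where a publication lands and whether it is distributed. */
static bool demo_file_publication(struct publication *publ)
{
	/* publ_lists[0] is the unused NULL slot; scopes index 1..3. */
	list_add_tail(&publ->local_list, &publ_lists[publ->scope]->list);
	publ_lists[publ->scope]->size++;

	/* Node-scope names never leave this node, so no PUBLICATION
	 * message is built for them.
	 */
	return publ->scope != TIPC_NODE_SCOPE;
}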
/**
* tipc_named_withdraw - tell other nodes about a withdrawn publication by this node
*/
-
-void tipc_named_withdraw(struct publication *publ)
+struct sk_buff *tipc_named_withdraw(struct publication *publ)
{
struct sk_buff *buf;
struct distr_item *item;
list_del(&publ->local_list);
- publ_cnt--;
+ publ_lists[publ->scope]->size--;
+
+ if (publ->scope == TIPC_NODE_SCOPE)
+ return NULL;
buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0);
if (!buf) {
- warn("Withdrawl distribution failure\n");
- return;
+ pr_warn("Withdrawal distribution failure\n");
+ return NULL;
}
item = (struct distr_item *)msg_data(buf_msg(buf));
publ_to_item(item, publ);
- dbg("tipc_named_withdraw: broadcasting withdraw msg\n");
- tipc_cltr_broadcast(buf);
+ return buf;
}
-/**
- * tipc_named_node_up - tell specified node about all publications by this node
+/*
+ * named_distribute - prepare name info for bulk distribution to another node
*/
-
-void tipc_named_node_up(unsigned long node)
+static void named_distribute(struct list_head *message_list, u32 node,
+ struct publ_list *pls, u32 max_item_buf)
{
struct publication *publ;
- struct distr_item *item = NULL;
struct sk_buff *buf = NULL;
+ struct distr_item *item = NULL;
u32 left = 0;
- u32 rest;
- u32 max_item_buf;
+ u32 rest = pls->size * ITEM_SIZE;
- read_lock_bh(&tipc_nametbl_lock);
- max_item_buf = TIPC_MAX_USER_MSG_SIZE / ITEM_SIZE;
- max_item_buf *= ITEM_SIZE;
- rest = publ_cnt * ITEM_SIZE;
-
- list_for_each_entry(publ, &publ_root, local_list) {
+ list_for_each_entry(publ, &pls->list, local_list) {
if (!buf) {
left = (rest <= max_item_buf) ? rest : max_item_buf;
rest -= left;
buf = named_prepare_buf(PUBLICATION, left, node);
if (!buf) {
- warn("Bulk publication distribution failure\n");
- goto exit;
+ pr_warn("Bulk publication failure\n");
+ return;
}
item = (struct distr_item *)msg_data(buf_msg(buf));
}
@@ -188,57 +202,58 @@ void tipc_named_node_up(unsigned long node)
item++;
left -= ITEM_SIZE;
if (!left) {
- msg_set_link_selector(buf_msg(buf), node);
- dbg("tipc_named_node_up: sending publish msg to "
- "<%u.%u.%u>\n", tipc_zone(node),
- tipc_cluster(node), tipc_node(node));
- tipc_link_send(buf, node, node);
+ list_add_tail((struct list_head *)buf, message_list);
buf = NULL;
}
}
-exit:
+}
+
+/**
+ * tipc_named_node_up - tell specified node about all publications by this node
+ */
+void tipc_named_node_up(u32 max_item_buf, u32 node)
+{
+ LIST_HEAD(message_list);
+
+ read_lock_bh(&tipc_nametbl_lock);
+ named_distribute(&message_list, node, &publ_cluster, max_item_buf);
+ named_distribute(&message_list, node, &publ_zone, max_item_buf);
read_unlock_bh(&tipc_nametbl_lock);
+
+ tipc_link_names_xmit(&message_list, node);
}
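named_distribute() packs the bulk update into PUBLICATION messages of at most max_item_buf bytes (a multiple of ITEM_SIZE chosen by the caller), queues them on message_list, and tipc_named_node_up() then hands the whole list to tipc_link_names_xmit() outside the name table lock. A stand-alone check of the chunking arithmetic, with illustrative sizes (ITEM_SIZE is 20 bytes, the five 32-bit fields of struct distr_item):

#include <stdio.h>

int main(void)
{
	const unsigned int item_size = 20;	/* sizeof(struct distr_item) */
	unsigned int max_item_buf = (1476 / item_size) * item_size;	/* 1460 */
	unsigned int rest = 200 * item_size;	/* 200 publications to send */

	while (rest) {
		unsigned int left = rest <= max_item_buf ? rest : max_item_buf;

		rest -= left;
		printf("PUBLICATION message carrying %u items\n",
		       left / item_size);	/* prints 73, 73, 54 */
	}
	return 0;
}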
/**
- * node_is_down - remove publication associated with a failed node
+ * named_purge_publ - remove publication associated with a failed node
*
* Invoked for each publication issued by a newly failed node.
* Removes publication structure from name table & deletes it.
- * In rare cases the link may have come back up again when this
- * function is called, and we have two items representing the same
- * publication. Nudge this item's key to distinguish it from the other.
- * (Note: Publication's node subscription is already unsubscribed.)
*/
-
-static void node_is_down(struct publication *publ)
+static void named_purge_publ(struct publication *publ)
{
struct publication *p;
write_lock_bh(&tipc_nametbl_lock);
- dbg("node_is_down: withdrawing %u, %u, %u\n",
- publ->type, publ->lower, publ->upper);
- publ->key += 1222345;
p = tipc_nametbl_remove_publ(publ->type, publ->lower,
publ->node, publ->ref, publ->key);
+ if (p)
+ tipc_nodesub_unsubscribe(&p->subscr);
write_unlock_bh(&tipc_nametbl_lock);
if (p != publ) {
- err("Unable to remove publication from failed node\n"
- "(type=%u, lower=%u, node=0x%x, ref=%u, key=%u)\n",
- publ->type, publ->lower, publ->node, publ->ref, publ->key);
+ pr_err("Unable to remove publication from failed node\n"
+ " (type=%u, lower=%u, node=0x%x, ref=%u, key=%u)\n",
+ publ->type, publ->lower, publ->node, publ->ref,
+ publ->key);
}
- if (p) {
- kfree(p);
- }
+ kfree(p);
}
/**
- * tipc_named_recv - process name table update message sent by another node
+ * tipc_named_rcv - process name table update message sent by another node
*/
-
-void tipc_named_recv(struct sk_buff *buf)
+void tipc_named_rcv(struct sk_buff *buf)
{
struct publication *publ;
struct tipc_msg *msg = buf_msg(buf);
@@ -248,9 +263,6 @@ void tipc_named_recv(struct sk_buff *buf)
write_lock_bh(&tipc_nametbl_lock);
while (count--) {
if (msg_type(msg) == PUBLICATION) {
- dbg("tipc_named_recv: got publication for %u, %u, %u\n",
- ntohl(item->type), ntohl(item->lower),
- ntohl(item->upper));
publ = tipc_nametbl_insert_publ(ntohl(item->type),
ntohl(item->lower),
ntohl(item->upper),
@@ -262,12 +274,10 @@ void tipc_named_recv(struct sk_buff *buf)
tipc_nodesub_subscribe(&publ->subscr,
msg_orignode(msg),
publ,
- (net_ev_handler)node_is_down);
+ (net_ev_handler)
+ named_purge_publ);
}
} else if (msg_type(msg) == WITHDRAWAL) {
- dbg("tipc_named_recv: got withdrawl for %u, %u, %u\n",
- ntohl(item->type), ntohl(item->lower),
- ntohl(item->upper));
publ = tipc_nametbl_remove_publ(ntohl(item->type),
ntohl(item->lower),
msg_orignode(msg),
@@ -278,39 +288,38 @@ void tipc_named_recv(struct sk_buff *buf)
tipc_nodesub_unsubscribe(&publ->subscr);
kfree(publ);
} else {
- err("Unable to remove publication by node 0x%x\n"
- "(type=%u, lower=%u, ref=%u, key=%u)\n",
- msg_orignode(msg),
- ntohl(item->type), ntohl(item->lower),
- ntohl(item->ref), ntohl(item->key));
+ pr_err("Unable to remove publication by node 0x%x\n"
+ " (type=%u, lower=%u, ref=%u, key=%u)\n",
+ msg_orignode(msg), ntohl(item->type),
+ ntohl(item->lower), ntohl(item->ref),
+ ntohl(item->key));
}
} else {
- warn("Unrecognized name table message received\n");
+ pr_warn("Unrecognized name table message received\n");
}
item++;
}
write_unlock_bh(&tipc_nametbl_lock);
- buf_discard(buf);
+ kfree_skb(buf);
}
/**
- * tipc_named_reinit - re-initialize local publication list
+ * tipc_named_reinit - re-initialize local publications
*
- * This routine is called whenever TIPC networking is (re)enabled.
- * All existing publications by this node that have "cluster" or "zone" scope
- * are updated to reflect the node's current network address.
- * (If the node's address is unchanged, the update loop terminates immediately.)
+ * This routine is called whenever TIPC networking is enabled.
+ * All name table entries published by this node are updated to reflect
+ * the node's new network address.
*/
-
void tipc_named_reinit(void)
{
struct publication *publ;
+ int scope;
write_lock_bh(&tipc_nametbl_lock);
- list_for_each_entry(publ, &publ_root, local_list) {
- if (publ->node == tipc_own_addr)
- break;
- publ->node = tipc_own_addr;
- }
+
+ for (scope = TIPC_ZONE_SCOPE; scope <= TIPC_NODE_SCOPE; scope++)
+ list_for_each_entry(publ, &publ_lists[scope]->list, local_list)
+ publ->node = tipc_own_addr;
+
write_unlock_bh(&tipc_nametbl_lock);
}
diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h
index 1e41bdd4f25..b2eed4ec152 100644
--- a/net/tipc/name_distr.h
+++ b/net/tipc/name_distr.h
@@ -39,10 +39,39 @@
#include "name_table.h"
-void tipc_named_publish(struct publication *publ);
-void tipc_named_withdraw(struct publication *publ);
-void tipc_named_node_up(unsigned long node);
-void tipc_named_recv(struct sk_buff *buf);
+#define ITEM_SIZE sizeof(struct distr_item)
+
+/**
+ * struct distr_item - publication info distributed to other nodes
+ * @type: name sequence type
+ * @lower: name sequence lower bound
+ * @upper: name sequence upper bound
+ * @ref: publishing port reference
+ * @key: publication key
+ *
+ * ===> All fields are stored in network byte order. <===
+ *
+ * First 3 fields identify (name or) name sequence being published.
+ * Reference field uniquely identifies port that published name sequence.
+ * Key field uniquely identifies publication, in the event a port has
+ * multiple publications of the same name sequence.
+ *
+ * Note: There is no field that identifies the publishing node because it is
+ * the same for all items contained within a publication message.
+ */
+struct distr_item {
+ __be32 type;
+ __be32 lower;
+ __be32 upper;
+ __be32 ref;
+ __be32 key;
+};
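
Since every distr_item field travels in network byte order, a helper that fills an item from a publication (the patch's publ_to_item() presumably does exactly this) reduces to a few htonl() conversions. A minimal sketch, given a hypothetical name so as not to claim the real implementation:

	static void fill_distr_item(struct distr_item *i,
				    const struct publication *p)
	{
		/* all fields are carried in network byte order */
		i->type  = htonl(p->type);
		i->lower = htonl(p->lower);
		i->upper = htonl(p->upper);
		i->ref   = htonl(p->ref);
		i->key   = htonl(p->key);
	}
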
+
+struct sk_buff *tipc_named_publish(struct publication *publ);
+struct sk_buff *tipc_named_withdraw(struct publication *publ);
+void named_cluster_distribute(struct sk_buff *buf);
+void tipc_named_node_up(u32 max_item_buf, u32 node);
+void tipc_named_rcv(struct sk_buff *buf);
void tipc_named_reinit(void);
#endif
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index d5adb045674..9d7d37d9518 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -2,7 +2,7 @@
* net/tipc/name_table.c: TIPC name table code
*
* Copyright (c) 2000-2006, Ericsson AB
- * Copyright (c) 2004-2008, Wind River Systems
+ * Copyright (c) 2004-2008, 2010-2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -41,12 +41,10 @@
#include "subscr.h"
#include "port.h"
-static int tipc_nametbl_size = 1024; /* must be a power of 2 */
+#define TIPC_NAMETBL_SIZE 1024 /* must be a power of 2 */
/**
- * struct sub_seq - container for all published instances of a name sequence
- * @lower: name sequence lower bound
- * @upper: name sequence upper bound
+ * struct name_info - name sequence publication info
* @node_list: circular list of publications made by own node
* @cluster_list: circular list of publications made by own cluster
* @zone_list: circular list of publications made by own zone
@@ -58,16 +56,25 @@ static int tipc_nametbl_size = 1024; /* must be a power of 2 */
* publications of the associated name sequence belong to it.
* (The cluster and node lists may be empty.)
*/
+struct name_info {
+ struct list_head node_list;
+ struct list_head cluster_list;
+ struct list_head zone_list;
+ u32 node_list_size;
+ u32 cluster_list_size;
+ u32 zone_list_size;
+};
+/**
+ * struct sub_seq - container for all published instances of a name sequence
+ * @lower: name sequence lower bound
+ * @upper: name sequence upper bound
+ * @info: pointer to name sequence publication info
+ */
struct sub_seq {
u32 lower;
u32 upper;
- struct publication *node_list;
- struct publication *cluster_list;
- struct publication *zone_list;
- u32 node_list_size;
- u32 cluster_list_size;
- u32 zone_list_size;
+ struct name_info *info;
};
/**
@@ -81,7 +88,6 @@ struct sub_seq {
* @subscriptions: list of subscriptions for this 'type'
* @lock: spinlock controlling access to publication lists of all sub-sequences
*/
-
struct name_seq {
u32 type;
struct sub_seq *sseqs;
@@ -98,33 +104,29 @@ struct name_seq {
* accessed via hashing on 'type'; name sequence lists are *not* sorted
* @local_publ_count: number of publications issued by this node
*/
-
struct name_table {
struct hlist_head *types;
u32 local_publ_count;
};
-static struct name_table table = { NULL } ;
-static atomic_t rsv_publ_ok = ATOMIC_INIT(0);
+static struct name_table table;
DEFINE_RWLOCK(tipc_nametbl_lock);
-
static int hash(int x)
{
- return x & (tipc_nametbl_size - 1);
+ return x & (TIPC_NAMETBL_SIZE - 1);
}
/**
* publ_create - create a publication structure
*/
-
static struct publication *publ_create(u32 type, u32 lower, u32 upper,
u32 scope, u32 node, u32 port_ref,
u32 key)
{
struct publication *publ = kzalloc(sizeof(*publ), GFP_ATOMIC);
if (publ == NULL) {
- warn("Publication creation failure, no memory\n");
+ pr_warn("Publication creation failure, no memory\n");
return NULL;
}
@@ -144,11 +146,9 @@ static struct publication *publ_create(u32 type, u32 lower, u32 upper,
/**
* tipc_subseq_alloc - allocate a specified number of sub-sequence structures
*/
-
static struct sub_seq *tipc_subseq_alloc(u32 cnt)
{
- struct sub_seq *sseq = kcalloc(cnt, sizeof(struct sub_seq), GFP_ATOMIC);
- return sseq;
+ return kcalloc(cnt, sizeof(struct sub_seq), GFP_ATOMIC);
}
/**
@@ -156,14 +156,13 @@ static struct sub_seq *tipc_subseq_alloc(u32 cnt)
*
* Allocates a single sub-sequence structure and sets it to all 0's.
*/
-
static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_head)
{
struct name_seq *nseq = kzalloc(sizeof(*nseq), GFP_ATOMIC);
struct sub_seq *sseq = tipc_subseq_alloc(1);
if (!nseq || !sseq) {
- warn("Name sequence creation failed, no memory\n");
+ pr_warn("Name sequence creation failed, no memory\n");
kfree(nseq);
kfree(sseq);
return NULL;
@@ -172,8 +171,6 @@ static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_hea
spin_lock_init(&nseq->lock);
nseq->type = type;
nseq->sseqs = sseq;
- dbg("tipc_nameseq_create(): nseq = %p, type %u, ssseqs %p, ff: %u\n",
- nseq, type, nseq->sseqs, nseq->first_free);
nseq->alloc = 1;
INIT_HLIST_NODE(&nseq->ns_list);
INIT_LIST_HEAD(&nseq->subscriptions);
@@ -181,12 +178,23 @@ static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_hea
return nseq;
}
+/*
+ * nameseq_delete_empty - deletes a name sequence structure if now unused
+ */
+static void nameseq_delete_empty(struct name_seq *seq)
+{
+ if (!seq->first_free && list_empty(&seq->subscriptions)) {
+ hlist_del_init(&seq->ns_list);
+ kfree(seq->sseqs);
+ kfree(seq);
+ }
+}
+
/**
* nameseq_find_subseq - find sub-sequence (if any) matching a name instance
*
* Very time-critical, so binary searches through sub-sequence array.
*/
-
static struct sub_seq *nameseq_find_subseq(struct name_seq *nseq,
u32 instance)
{
@@ -216,7 +224,6 @@ static struct sub_seq *nameseq_find_subseq(struct name_seq *nseq,
*
* Note: Similar to binary search code for locating a sub-sequence.
*/
-
static u32 nameseq_locate_subseq(struct name_seq *nseq, u32 instance)
{
struct sub_seq *sseqs = nseq->sseqs;
@@ -237,125 +244,111 @@ static u32 nameseq_locate_subseq(struct name_seq *nseq, u32 instance)
}
/**
- * tipc_nameseq_insert_publ -
+ * tipc_nameseq_insert_publ
*/
-
static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
u32 type, u32 lower, u32 upper,
u32 scope, u32 node, u32 port, u32 key)
{
- struct subscription *s;
- struct subscription *st;
+ struct tipc_subscription *s;
+ struct tipc_subscription *st;
struct publication *publ;
struct sub_seq *sseq;
+ struct name_info *info;
int created_subseq = 0;
sseq = nameseq_find_subseq(nseq, lower);
- dbg("nameseq_ins: for seq %p, {%u,%u}, found sseq %p\n",
- nseq, type, lower, sseq);
if (sseq) {
/* Lower end overlaps existing entry => need an exact match */
-
if ((sseq->lower != lower) || (sseq->upper != upper)) {
- warn("Cannot publish {%u,%u,%u}, overlap error\n",
- type, lower, upper);
+ pr_warn("Cannot publish {%u,%u,%u}, overlap error\n",
+ type, lower, upper);
return NULL;
}
+
+ info = sseq->info;
+
+ /* Check if an identical publication already exists */
+ list_for_each_entry(publ, &info->zone_list, zone_list) {
+ if ((publ->ref == port) && (publ->key == key) &&
+ (!publ->node || (publ->node == node)))
+ return NULL;
+ }
} else {
u32 inspos;
struct sub_seq *freesseq;
/* Find where lower end should be inserted */
-
inspos = nameseq_locate_subseq(nseq, lower);
/* Fail if upper end overlaps into an existing entry */
-
if ((inspos < nseq->first_free) &&
(upper >= nseq->sseqs[inspos].lower)) {
- warn("Cannot publish {%u,%u,%u}, overlap error\n",
- type, lower, upper);
+ pr_warn("Cannot publish {%u,%u,%u}, overlap error\n",
+ type, lower, upper);
return NULL;
}
/* Ensure there is space for new sub-sequence */
-
if (nseq->first_free == nseq->alloc) {
struct sub_seq *sseqs = tipc_subseq_alloc(nseq->alloc * 2);
if (!sseqs) {
- warn("Cannot publish {%u,%u,%u}, no memory\n",
- type, lower, upper);
+ pr_warn("Cannot publish {%u,%u,%u}, no memory\n",
+ type, lower, upper);
return NULL;
}
- dbg("Allocated %u more sseqs\n", nseq->alloc);
memcpy(sseqs, nseq->sseqs,
nseq->alloc * sizeof(struct sub_seq));
kfree(nseq->sseqs);
nseq->sseqs = sseqs;
nseq->alloc *= 2;
}
- dbg("Have %u sseqs for type %u\n", nseq->alloc, type);
- /* Insert new sub-sequence */
+ info = kzalloc(sizeof(*info), GFP_ATOMIC);
+ if (!info) {
+ pr_warn("Cannot publish {%u,%u,%u}, no memory\n",
+ type, lower, upper);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&info->node_list);
+ INIT_LIST_HEAD(&info->cluster_list);
+ INIT_LIST_HEAD(&info->zone_list);
- dbg("ins in pos %u, ff = %u\n", inspos, nseq->first_free);
+ /* Insert new sub-sequence */
sseq = &nseq->sseqs[inspos];
freesseq = &nseq->sseqs[nseq->first_free];
- memmove(sseq + 1, sseq, (freesseq - sseq) * sizeof (*sseq));
- memset(sseq, 0, sizeof (*sseq));
+ memmove(sseq + 1, sseq, (freesseq - sseq) * sizeof(*sseq));
+ memset(sseq, 0, sizeof(*sseq));
nseq->first_free++;
sseq->lower = lower;
sseq->upper = upper;
+ sseq->info = info;
created_subseq = 1;
}
- dbg("inserting {%u,%u,%u} from <0x%x:%u> into sseq %p(%u,%u) of seq %p\n",
- type, lower, upper, node, port, sseq,
- sseq->lower, sseq->upper, nseq);
-
- /* Insert a publication: */
+ /* Insert a publication */
publ = publ_create(type, lower, upper, scope, node, port, key);
if (!publ)
return NULL;
- dbg("inserting publ %p, node=0x%x publ->node=0x%x, subscr->node=%p\n",
- publ, node, publ->node, publ->subscr.node);
-
- sseq->zone_list_size++;
- if (!sseq->zone_list)
- sseq->zone_list = publ->zone_list_next = publ;
- else {
- publ->zone_list_next = sseq->zone_list->zone_list_next;
- sseq->zone_list->zone_list_next = publ;
- }
+
+ list_add(&publ->zone_list, &info->zone_list);
+ info->zone_list_size++;
if (in_own_cluster(node)) {
- sseq->cluster_list_size++;
- if (!sseq->cluster_list)
- sseq->cluster_list = publ->cluster_list_next = publ;
- else {
- publ->cluster_list_next =
- sseq->cluster_list->cluster_list_next;
- sseq->cluster_list->cluster_list_next = publ;
- }
+ list_add(&publ->cluster_list, &info->cluster_list);
+ info->cluster_list_size++;
}
- if (node == tipc_own_addr) {
- sseq->node_list_size++;
- if (!sseq->node_list)
- sseq->node_list = publ->node_list_next = publ;
- else {
- publ->node_list_next = sseq->node_list->node_list_next;
- sseq->node_list->node_list_next = publ;
- }
+ if (in_own_node(node)) {
+ list_add(&publ->node_list, &info->node_list);
+ info->node_list_size++;
}
- /*
- * Any subscriptions waiting for notification?
- */
+ /* Any subscriptions waiting for notification? */
list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
- dbg("calling report_overlap()\n");
tipc_subscr_report_overlap(s,
publ->lower,
publ->upper,
@@ -368,7 +361,7 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
}
/**
- * tipc_nameseq_remove_publ -
+ * tipc_nameseq_remove_publ
*
* NOTE: There may be cases where TIPC is asked to remove a publication
* that is not in the name table. For example, if another node issues a
@@ -378,121 +371,55 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
* A failed withdraw request simply returns a failure indication and lets the
* caller issue any error or warning messages associated with such a problem.
*/
-
static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 inst,
u32 node, u32 ref, u32 key)
{
struct publication *publ;
- struct publication *curr;
- struct publication *prev;
struct sub_seq *sseq = nameseq_find_subseq(nseq, inst);
+ struct name_info *info;
struct sub_seq *free;
- struct subscription *s, *st;
+ struct tipc_subscription *s, *st;
int removed_subseq = 0;
if (!sseq)
return NULL;
- dbg("tipc_nameseq_remove_publ: seq: %p, sseq %p, {%u,%u}, key %u\n",
- nseq, sseq, nseq->type, inst, key);
-
- /* Remove publication from zone scope list */
-
- prev = sseq->zone_list;
- publ = sseq->zone_list->zone_list_next;
- while ((publ->key != key) || (publ->ref != ref) ||
- (publ->node && (publ->node != node))) {
- prev = publ;
- publ = publ->zone_list_next;
- if (prev == sseq->zone_list) {
-
- /* Prevent endless loop if publication not found */
+ info = sseq->info;
- return NULL;
- }
- }
- if (publ != sseq->zone_list)
- prev->zone_list_next = publ->zone_list_next;
- else if (publ->zone_list_next != publ) {
- prev->zone_list_next = publ->zone_list_next;
- sseq->zone_list = publ->zone_list_next;
- } else {
- sseq->zone_list = NULL;
+ /* Locate publication, if it exists */
+ list_for_each_entry(publ, &info->zone_list, zone_list) {
+ if ((publ->key == key) && (publ->ref == ref) &&
+ (!publ->node || (publ->node == node)))
+ goto found;
}
- sseq->zone_list_size--;
+ return NULL;
- /* Remove publication from cluster scope list, if present */
+found:
+ /* Remove publication from zone scope list */
+ list_del(&publ->zone_list);
+ info->zone_list_size--;
+ /* Remove publication from cluster scope list, if present */
if (in_own_cluster(node)) {
- prev = sseq->cluster_list;
- curr = sseq->cluster_list->cluster_list_next;
- while (curr != publ) {
- prev = curr;
- curr = curr->cluster_list_next;
- if (prev == sseq->cluster_list) {
-
- /* Prevent endless loop for malformed list */
-
- err("Unable to de-list cluster publication\n"
- "{%u%u}, node=0x%x, ref=%u, key=%u)\n",
- publ->type, publ->lower, publ->node,
- publ->ref, publ->key);
- goto end_cluster;
- }
- }
- if (publ != sseq->cluster_list)
- prev->cluster_list_next = publ->cluster_list_next;
- else if (publ->cluster_list_next != publ) {
- prev->cluster_list_next = publ->cluster_list_next;
- sseq->cluster_list = publ->cluster_list_next;
- } else {
- sseq->cluster_list = NULL;
- }
- sseq->cluster_list_size--;
+ list_del(&publ->cluster_list);
+ info->cluster_list_size--;
}
-end_cluster:
/* Remove publication from node scope list, if present */
-
- if (node == tipc_own_addr) {
- prev = sseq->node_list;
- curr = sseq->node_list->node_list_next;
- while (curr != publ) {
- prev = curr;
- curr = curr->node_list_next;
- if (prev == sseq->node_list) {
-
- /* Prevent endless loop for malformed list */
-
- err("Unable to de-list node publication\n"
- "{%u%u}, node=0x%x, ref=%u, key=%u)\n",
- publ->type, publ->lower, publ->node,
- publ->ref, publ->key);
- goto end_node;
- }
- }
- if (publ != sseq->node_list)
- prev->node_list_next = publ->node_list_next;
- else if (publ->node_list_next != publ) {
- prev->node_list_next = publ->node_list_next;
- sseq->node_list = publ->node_list_next;
- } else {
- sseq->node_list = NULL;
- }
- sseq->node_list_size--;
+ if (in_own_node(node)) {
+ list_del(&publ->node_list);
+ info->node_list_size--;
}
-end_node:
/* Contract subseq list if no more publications for that subseq */
-
- if (!sseq->zone_list) {
+ if (list_empty(&info->zone_list)) {
+ kfree(info);
free = &nseq->sseqs[nseq->first_free--];
- memmove(sseq, sseq + 1, (free - (sseq + 1)) * sizeof (*sseq));
+ memmove(sseq, sseq + 1, (free - (sseq + 1)) * sizeof(*sseq));
removed_subseq = 1;
}
/* Notify any waiting subscriptions */
-
list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
tipc_subscr_report_overlap(s,
publ->lower,
@@ -507,12 +434,12 @@ end_node:
}
/**
- * tipc_nameseq_subscribe: attach a subscription, and issue
+ * tipc_nameseq_subscribe - attach a subscription, and issue
* the prescribed number of events if there is any sub-
* sequence overlapping with the requested sequence
*/
-
-static void tipc_nameseq_subscribe(struct name_seq *nseq, struct subscription *s)
+static void tipc_nameseq_subscribe(struct name_seq *nseq,
+ struct tipc_subscription *s)
{
struct sub_seq *sseq = nseq->sseqs;
@@ -522,12 +449,12 @@ static void tipc_nameseq_subscribe(struct name_seq *nseq, struct subscription *s
return;
while (sseq != &nseq->sseqs[nseq->first_free]) {
- struct publication *zl = sseq->zone_list;
- if (zl && tipc_subscr_overlap(s,sseq->lower,sseq->upper)) {
- struct publication *crs = zl;
+ if (tipc_subscr_overlap(s, sseq->lower, sseq->upper)) {
+ struct publication *crs;
+ struct name_info *info = sseq->info;
int must_report = 1;
- do {
+ list_for_each_entry(crs, &info->zone_list, zone_list) {
tipc_subscr_report_overlap(s,
sseq->lower,
sseq->upper,
@@ -536,8 +463,7 @@ static void tipc_nameseq_subscribe(struct name_seq *nseq, struct subscription *s
crs->node,
must_report);
must_report = 0;
- crs = crs->zone_list_next;
- } while (crs != zl);
+ }
}
sseq++;
}
@@ -546,18 +472,12 @@ static void tipc_nameseq_subscribe(struct name_seq *nseq, struct subscription *s
static struct name_seq *nametbl_find_seq(u32 type)
{
struct hlist_head *seq_head;
- struct hlist_node *seq_node;
struct name_seq *ns;
- dbg("find_seq %u,(%u,0x%x) table = %p, hash[type] = %u\n",
- type, htonl(type), type, table.types, hash(type));
-
seq_head = &table.types[hash(type)];
- hlist_for_each_entry(ns, seq_node, seq_head, ns_list) {
- if (ns->type == type) {
- dbg("found %p\n", ns);
+ hlist_for_each_entry(ns, seq_head, ns_list) {
+ if (ns->type == type)
return ns;
- }
}
return NULL;
@@ -568,18 +488,15 @@ struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper,
{
struct name_seq *seq = nametbl_find_seq(type);
- dbg("tipc_nametbl_insert_publ: {%u,%u,%u} found %p\n", type, lower, upper, seq);
- if (lower > upper) {
- warn("Failed to publish illegal {%u,%u,%u}\n",
- type, lower, upper);
+ if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE) ||
+ (lower > upper)) {
+ pr_debug("Failed to publish illegal {%u,%u,%u} with scope %u\n",
+ type, lower, upper, scope);
return NULL;
}
- dbg("Publishing {%u,%u,%u} from 0x%x\n", type, lower, upper, node);
- if (!seq) {
+ if (!seq)
seq = tipc_nameseq_create(type, &table.types[hash(type)]);
- dbg("tipc_nametbl_insert_publ: created %p\n", seq);
- }
if (!seq)
return NULL;
@@ -596,30 +513,32 @@ struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
if (!seq)
return NULL;
- dbg("Withdrawing {%u,%u} from 0x%x\n", type, lower, node);
publ = tipc_nameseq_remove_publ(seq, lower, node, ref, key);
-
- if (!seq->first_free && list_empty(&seq->subscriptions)) {
- hlist_del_init(&seq->ns_list);
- kfree(seq->sseqs);
- kfree(seq);
- }
+ nameseq_delete_empty(seq);
return publ;
}
-/*
- * tipc_nametbl_translate - translate name to port id
+/**
+ * tipc_nametbl_translate - perform name translation
*
- * Note: on entry 'destnode' is the search domain used during translation;
- * on exit it passes back the node address of the matching port (if any)
+ * On entry, 'destnode' is the search domain used during translation.
+ *
+ * On exit:
+ * - if name translation is deferred to another node/cluster/zone,
+ * leaves 'destnode' unchanged (will be non-zero) and returns 0
+ * - if name translation is attempted and succeeds, sets 'destnode'
+ * to publishing node and returns port reference (will be non-zero)
+ * - if name translation is attempted and fails, sets 'destnode' to 0
+ * and returns 0
*/
-
u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode)
{
struct sub_seq *sseq;
- struct publication *publ = NULL;
+ struct name_info *info;
+ struct publication *publ;
struct name_seq *seq;
- u32 ref;
+ u32 ref = 0;
+ u32 node = 0;
if (!tipc_in_scope(*destnode, tipc_own_addr))
return 0;
@@ -632,55 +551,58 @@ u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode)
if (unlikely(!sseq))
goto not_found;
spin_lock_bh(&seq->lock);
+ info = sseq->info;
- /* Closest-First Algorithm: */
+ /* Closest-First Algorithm */
if (likely(!*destnode)) {
- publ = sseq->node_list;
- if (publ) {
- sseq->node_list = publ->node_list_next;
-found:
- ref = publ->ref;
- *destnode = publ->node;
- spin_unlock_bh(&seq->lock);
- read_unlock_bh(&tipc_nametbl_lock);
- return ref;
- }
- publ = sseq->cluster_list;
- if (publ) {
- sseq->cluster_list = publ->cluster_list_next;
- goto found;
- }
- publ = sseq->zone_list;
- if (publ) {
- sseq->zone_list = publ->zone_list_next;
- goto found;
+ if (!list_empty(&info->node_list)) {
+ publ = list_first_entry(&info->node_list,
+ struct publication,
+ node_list);
+ list_move_tail(&publ->node_list,
+ &info->node_list);
+ } else if (!list_empty(&info->cluster_list)) {
+ publ = list_first_entry(&info->cluster_list,
+ struct publication,
+ cluster_list);
+ list_move_tail(&publ->cluster_list,
+ &info->cluster_list);
+ } else {
+ publ = list_first_entry(&info->zone_list,
+ struct publication,
+ zone_list);
+ list_move_tail(&publ->zone_list,
+ &info->zone_list);
}
}
- /* Round-Robin Algorithm: */
+ /* Round-Robin Algorithm */
else if (*destnode == tipc_own_addr) {
- publ = sseq->node_list;
- if (publ) {
- sseq->node_list = publ->node_list_next;
- goto found;
- }
- } else if (in_own_cluster(*destnode)) {
- publ = sseq->cluster_list;
- if (publ) {
- sseq->cluster_list = publ->cluster_list_next;
- goto found;
- }
+ if (list_empty(&info->node_list))
+ goto no_match;
+ publ = list_first_entry(&info->node_list, struct publication,
+ node_list);
+ list_move_tail(&publ->node_list, &info->node_list);
+ } else if (in_own_cluster_exact(*destnode)) {
+ if (list_empty(&info->cluster_list))
+ goto no_match;
+ publ = list_first_entry(&info->cluster_list, struct publication,
+ cluster_list);
+ list_move_tail(&publ->cluster_list, &info->cluster_list);
} else {
- publ = sseq->zone_list;
- if (publ) {
- sseq->zone_list = publ->zone_list_next;
- goto found;
- }
+ publ = list_first_entry(&info->zone_list, struct publication,
+ zone_list);
+ list_move_tail(&publ->zone_list, &info->zone_list);
}
+
+ ref = publ->ref;
+ node = publ->node;
+no_match:
spin_unlock_bh(&seq->lock);
not_found:
read_unlock_bh(&tipc_nametbl_lock);
- return 0;
+ *destnode = node;
+ return ref;
}
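
To make the in/out contract documented above concrete, here is a hedged sketch of a typical caller, modeled on net_route_named_msg() later in this patch; type, instance and domain are placeholders:

	u32 dnode = domain;	/* search domain; 0 means "no constraint" */
	u32 dport = tipc_nametbl_translate(type, instance, &dnode);

	if (dport) {
		/* found: dnode is the publishing node, dport its port reference */
	} else if (dnode) {
		/* deferred: forward the lookup toward dnode */
	} else {
		/* attempted locally and failed */
	}
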
/**
@@ -695,13 +617,13 @@ not_found:
*
* Returns non-zero if any off-node ports overlap
*/
-
int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
- struct port_list *dports)
+ struct tipc_port_list *dports)
{
struct name_seq *seq;
struct sub_seq *sseq;
struct sub_seq *sseq_stop;
+ struct name_info *info;
int res = 0;
read_lock_bh(&tipc_nametbl_lock);
@@ -719,16 +641,13 @@ int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
if (sseq->lower > upper)
break;
- publ = sseq->node_list;
- if (publ) {
- do {
- if (publ->scope <= limit)
- tipc_port_list_add(dports, publ->ref);
- publ = publ->node_list_next;
- } while (publ != sseq->node_list);
+ info = sseq->info;
+ list_for_each_entry(publ, &info->node_list, node_list) {
+ if (publ->scope <= limit)
+ tipc_port_list_add(dports, publ->ref);
}
- if (sseq->cluster_list_size != sseq->node_list_size)
+ if (info->cluster_list_size != info->node_list_size)
res = 1;
}
@@ -738,102 +657,82 @@ exit:
return res;
}
-/**
- * tipc_nametbl_publish_rsv - publish port name using a reserved name type
- */
-
-int tipc_nametbl_publish_rsv(u32 ref, unsigned int scope,
- struct tipc_name_seq const *seq)
-{
- int res;
-
- atomic_inc(&rsv_publ_ok);
- res = tipc_publish(ref, scope, seq);
- atomic_dec(&rsv_publ_ok);
- return res;
-}
-
-/**
+/*
* tipc_nametbl_publish - add name publication to network name tables
*/
-
struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
- u32 scope, u32 port_ref, u32 key)
+ u32 scope, u32 port_ref, u32 key)
{
struct publication *publ;
+ struct sk_buff *buf = NULL;
- if (table.local_publ_count >= tipc_max_publications) {
- warn("Publication failed, local publication limit reached (%u)\n",
- tipc_max_publications);
- return NULL;
- }
- if ((type < TIPC_RESERVED_TYPES) && !atomic_read(&rsv_publ_ok)) {
- warn("Publication failed, reserved name {%u,%u,%u}\n",
- type, lower, upper);
+ if (table.local_publ_count >= TIPC_MAX_PUBLICATIONS) {
+ pr_warn("Publication failed, local publication limit reached (%u)\n",
+ TIPC_MAX_PUBLICATIONS);
return NULL;
}
write_lock_bh(&tipc_nametbl_lock);
- table.local_publ_count++;
publ = tipc_nametbl_insert_publ(type, lower, upper, scope,
tipc_own_addr, port_ref, key);
- if (publ && (scope != TIPC_NODE_SCOPE)) {
- tipc_named_publish(publ);
+ if (likely(publ)) {
+ table.local_publ_count++;
+ buf = tipc_named_publish(publ);
}
write_unlock_bh(&tipc_nametbl_lock);
+
+ if (buf)
+ named_cluster_distribute(buf);
return publ;
}
/**
* tipc_nametbl_withdraw - withdraw name publication from network name tables
*/
-
int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
{
struct publication *publ;
+ struct sk_buff *buf;
- dbg("tipc_nametbl_withdraw: {%u,%u}, key=%u\n", type, lower, key);
write_lock_bh(&tipc_nametbl_lock);
publ = tipc_nametbl_remove_publ(type, lower, tipc_own_addr, ref, key);
if (likely(publ)) {
table.local_publ_count--;
- if (publ->scope != TIPC_NODE_SCOPE)
- tipc_named_withdraw(publ);
+ buf = tipc_named_withdraw(publ);
write_unlock_bh(&tipc_nametbl_lock);
list_del_init(&publ->pport_list);
kfree(publ);
+
+ if (buf)
+ named_cluster_distribute(buf);
return 1;
}
write_unlock_bh(&tipc_nametbl_lock);
- err("Unable to remove local publication\n"
- "(type=%u, lower=%u, ref=%u, key=%u)\n",
- type, lower, ref, key);
+ pr_err("Unable to remove local publication\n"
+ "(type=%u, lower=%u, ref=%u, key=%u)\n",
+ type, lower, ref, key);
return 0;
}
/**
* tipc_nametbl_subscribe - add a subscription object to the name table
*/
-
-void tipc_nametbl_subscribe(struct subscription *s)
+void tipc_nametbl_subscribe(struct tipc_subscription *s)
{
u32 type = s->seq.type;
struct name_seq *seq;
write_lock_bh(&tipc_nametbl_lock);
seq = nametbl_find_seq(type);
- if (!seq) {
+ if (!seq)
seq = tipc_nameseq_create(type, &table.types[hash(type)]);
- }
- if (seq){
+ if (seq) {
spin_lock_bh(&seq->lock);
- dbg("tipc_nametbl_subscribe:found %p for {%u,%u,%u}\n",
- seq, type, s->seq.lower, s->seq.upper);
tipc_nameseq_subscribe(seq, s);
spin_unlock_bh(&seq->lock);
} else {
- warn("Failed to create subscription for {%u,%u,%u}\n",
- s->seq.type, s->seq.lower, s->seq.upper);
+ pr_warn("Failed to create subscription for {%u,%u,%u}\n",
+ s->seq.type, s->seq.lower, s->seq.upper);
}
write_unlock_bh(&tipc_nametbl_lock);
}
@@ -841,101 +740,99 @@ void tipc_nametbl_subscribe(struct subscription *s)
/**
* tipc_nametbl_unsubscribe - remove a subscription object from name table
*/
-
-void tipc_nametbl_unsubscribe(struct subscription *s)
+void tipc_nametbl_unsubscribe(struct tipc_subscription *s)
{
struct name_seq *seq;
write_lock_bh(&tipc_nametbl_lock);
seq = nametbl_find_seq(s->seq.type);
- if (seq != NULL){
+ if (seq != NULL) {
spin_lock_bh(&seq->lock);
list_del_init(&s->nameseq_list);
spin_unlock_bh(&seq->lock);
- if ((seq->first_free == 0) && list_empty(&seq->subscriptions)) {
- hlist_del_init(&seq->ns_list);
- kfree(seq->sseqs);
- kfree(seq);
- }
+ nameseq_delete_empty(seq);
}
write_unlock_bh(&tipc_nametbl_lock);
}
/**
- * subseq_list: print specified sub-sequence contents into the given buffer
+ * subseq_list - print specified sub-sequence contents into the given buffer
*/
-
-static void subseq_list(struct sub_seq *sseq, struct print_buf *buf, u32 depth,
- u32 index)
+static int subseq_list(struct sub_seq *sseq, char *buf, int len, u32 depth,
+ u32 index)
{
char portIdStr[27];
const char *scope_str[] = {"", " zone", " cluster", " node"};
- struct publication *publ = sseq->zone_list;
+ struct publication *publ;
+ struct name_info *info;
+ int ret;
- tipc_printf(buf, "%-10u %-10u ", sseq->lower, sseq->upper);
+ ret = tipc_snprintf(buf, len, "%-10u %-10u ", sseq->lower, sseq->upper);
- if (depth == 2 || !publ) {
- tipc_printf(buf, "\n");
- return;
+ if (depth == 2) {
+ ret += tipc_snprintf(buf - ret, len + ret, "\n");
+ return ret;
}
- do {
- sprintf (portIdStr, "<%u.%u.%u:%u>",
+ info = sseq->info;
+
+ list_for_each_entry(publ, &info->zone_list, zone_list) {
+ sprintf(portIdStr, "<%u.%u.%u:%u>",
tipc_zone(publ->node), tipc_cluster(publ->node),
tipc_node(publ->node), publ->ref);
- tipc_printf(buf, "%-26s ", portIdStr);
+ ret += tipc_snprintf(buf + ret, len - ret, "%-26s ", portIdStr);
if (depth > 3) {
- tipc_printf(buf, "%-10u %s", publ->key,
- scope_str[publ->scope]);
+ ret += tipc_snprintf(buf + ret, len - ret, "%-10u %s",
+ publ->key, scope_str[publ->scope]);
}
+ if (!list_is_last(&publ->zone_list, &info->zone_list))
+ ret += tipc_snprintf(buf + ret, len - ret,
+ "\n%33s", " ");
+ }
- publ = publ->zone_list_next;
- if (publ == sseq->zone_list)
- break;
-
- tipc_printf(buf, "\n%33s", " ");
- } while (1);
-
- tipc_printf(buf, "\n");
+ ret += tipc_snprintf(buf + ret, len - ret, "\n");
+ return ret;
}
/**
- * nameseq_list: print specified name sequence contents into the given buffer
+ * nameseq_list - print specified name sequence contents into the given buffer
*/
-
-static void nameseq_list(struct name_seq *seq, struct print_buf *buf, u32 depth,
- u32 type, u32 lowbound, u32 upbound, u32 index)
+static int nameseq_list(struct name_seq *seq, char *buf, int len, u32 depth,
+ u32 type, u32 lowbound, u32 upbound, u32 index)
{
struct sub_seq *sseq;
char typearea[11];
+ int ret = 0;
if (seq->first_free == 0)
- return;
+ return 0;
sprintf(typearea, "%-10u", seq->type);
if (depth == 1) {
- tipc_printf(buf, "%s\n", typearea);
- return;
+ ret += tipc_snprintf(buf, len, "%s\n", typearea);
+ return ret;
}
for (sseq = seq->sseqs; sseq != &seq->sseqs[seq->first_free]; sseq++) {
if ((lowbound <= sseq->upper) && (upbound >= sseq->lower)) {
- tipc_printf(buf, "%s ", typearea);
+ ret += tipc_snprintf(buf + ret, len - ret, "%s ",
+ typearea);
spin_lock_bh(&seq->lock);
- subseq_list(sseq, buf, depth, index);
+ ret += subseq_list(sseq, buf + ret, len - ret,
+ depth, index);
spin_unlock_bh(&seq->lock);
sprintf(typearea, "%10s", " ");
}
}
+ return ret;
}
/**
* nametbl_header - print name table header into the given buffer
*/
-
-static void nametbl_header(struct print_buf *buf, u32 depth)
+static int nametbl_header(char *buf, int len, u32 depth)
{
const char *header[] = {
"Type ",
@@ -945,25 +842,26 @@ static void nametbl_header(struct print_buf *buf, u32 depth)
};
int i;
+ int ret = 0;
if (depth > 4)
depth = 4;
for (i = 0; i < depth; i++)
- tipc_printf(buf, header[i]);
- tipc_printf(buf, "\n");
+ ret += tipc_snprintf(buf + ret, len - ret, header[i]);
+ ret += tipc_snprintf(buf + ret, len - ret, "\n");
+ return ret;
}
/**
* nametbl_list - print specified name table contents into the given buffer
*/
-
-static void nametbl_list(struct print_buf *buf, u32 depth_info,
- u32 type, u32 lowbound, u32 upbound)
+static int nametbl_list(char *buf, int len, u32 depth_info,
+ u32 type, u32 lowbound, u32 upbound)
{
struct hlist_head *seq_head;
- struct hlist_node *seq_node;
struct name_seq *seq;
int all_types;
+ int ret = 0;
u32 depth;
u32 i;
@@ -971,65 +869,69 @@ static void nametbl_list(struct print_buf *buf, u32 depth_info,
depth = (depth_info & ~TIPC_NTQ_ALLTYPES);
if (depth == 0)
- return;
+ return 0;
if (all_types) {
/* display all entries in name table to specified depth */
- nametbl_header(buf, depth);
+ ret += nametbl_header(buf, len, depth);
lowbound = 0;
upbound = ~0;
- for (i = 0; i < tipc_nametbl_size; i++) {
+ for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
seq_head = &table.types[i];
- hlist_for_each_entry(seq, seq_node, seq_head, ns_list) {
- nameseq_list(seq, buf, depth, seq->type,
- lowbound, upbound, i);
+ hlist_for_each_entry(seq, seq_head, ns_list) {
+ ret += nameseq_list(seq, buf + ret, len - ret,
+ depth, seq->type,
+ lowbound, upbound, i);
}
}
} else {
/* display only the sequence that matches the specified type */
if (upbound < lowbound) {
- tipc_printf(buf, "invalid name sequence specified\n");
- return;
+ ret += tipc_snprintf(buf + ret, len - ret,
+ "invalid name sequence specified\n");
+ return ret;
}
- nametbl_header(buf, depth);
+ ret += nametbl_header(buf + ret, len - ret, depth);
i = hash(type);
seq_head = &table.types[i];
- hlist_for_each_entry(seq, seq_node, seq_head, ns_list) {
+ hlist_for_each_entry(seq, seq_head, ns_list) {
if (seq->type == type) {
- nameseq_list(seq, buf, depth, type,
- lowbound, upbound, i);
+ ret += nameseq_list(seq, buf + ret, len - ret,
+ depth, type,
+ lowbound, upbound, i);
break;
}
}
}
+ return ret;
}
-#define MAX_NAME_TBL_QUERY 32768
-
struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space)
{
struct sk_buff *buf;
struct tipc_name_table_query *argv;
struct tlv_desc *rep_tlv;
- struct print_buf b;
+ char *pb;
+ int pb_len;
int str_len;
if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NAME_TBL_QUERY))
return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
- buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_NAME_TBL_QUERY));
+ buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
if (!buf)
return NULL;
rep_tlv = (struct tlv_desc *)buf->data;
- tipc_printbuf_init(&b, TLV_DATA(rep_tlv), MAX_NAME_TBL_QUERY);
+ pb = TLV_DATA(rep_tlv);
+ pb_len = ULTRA_STRING_MAX_LEN;
argv = (struct tipc_name_table_query *)TLV_DATA(req_tlv_area);
read_lock_bh(&tipc_nametbl_lock);
- nametbl_list(&b, ntohl(argv->depth), ntohl(argv->type),
- ntohl(argv->lowbound), ntohl(argv->upbound));
+ str_len = nametbl_list(pb, pb_len, ntohl(argv->depth),
+ ntohl(argv->type),
+ ntohl(argv->lowbound), ntohl(argv->upbound));
read_unlock_bh(&tipc_nametbl_lock);
- str_len = tipc_printbuf_validate(&b);
-
+ str_len += 1; /* for "\0" */
skb_put(buf, TLV_SPACE(str_len));
TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
@@ -1038,7 +940,7 @@ struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space)
int tipc_nametbl_init(void)
{
- table.types = kcalloc(tipc_nametbl_size, sizeof(struct hlist_head),
+ table.types = kcalloc(TIPC_NAMETBL_SIZE, sizeof(struct hlist_head),
GFP_ATOMIC);
if (!table.types)
return -ENOMEM;
@@ -1047,22 +949,50 @@ int tipc_nametbl_init(void)
return 0;
}
-void tipc_nametbl_stop(void)
+/**
+ * tipc_purge_publications - remove all publications for a given type
+ *
+ * tipc_nametbl_lock must be held when calling this function
+ */
+static void tipc_purge_publications(struct name_seq *seq)
{
- u32 i;
+ struct publication *publ, *safe;
+ struct sub_seq *sseq;
+ struct name_info *info;
- if (!table.types)
+ if (!seq->sseqs) {
+ nameseq_delete_empty(seq);
return;
+ }
+ sseq = seq->sseqs;
+ info = sseq->info;
+ list_for_each_entry_safe(publ, safe, &info->zone_list, zone_list) {
+ tipc_nametbl_remove_publ(publ->type, publ->lower, publ->node,
+ publ->ref, publ->key);
+ kfree(publ);
+ }
+}
- /* Verify name table is empty, then release it */
+void tipc_nametbl_stop(void)
+{
+ u32 i;
+ struct name_seq *seq;
+ struct hlist_head *seq_head;
+ struct hlist_node *safe;
+ /* Verify name table is empty and purge any lingering
+ * publications, then release the name table
+ */
write_lock_bh(&tipc_nametbl_lock);
- for (i = 0; i < tipc_nametbl_size; i++) {
- if (!hlist_empty(&table.types[i]))
- err("tipc_nametbl_stop(): hash chain %u is non-null\n", i);
+ for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
+ if (hlist_empty(&table.types[i]))
+ continue;
+ seq_head = &table.types[i];
+ hlist_for_each_entry_safe(seq, safe, seq_head, ns_list) {
+ tipc_purge_publications(seq);
+ }
}
kfree(table.types);
table.types = NULL;
write_unlock_bh(&tipc_nametbl_lock);
}
-
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
index 139882d4ed0..f02f48b9a21 100644
--- a/net/tipc/name_table.h
+++ b/net/tipc/name_table.h
@@ -2,7 +2,7 @@
* net/tipc/name_table.h: Include file for TIPC name table code
*
* Copyright (c) 2000-2006, Ericsson AB
- * Copyright (c) 2004-2005, Wind River Systems
+ * Copyright (c) 2004-2005, 2010-2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -39,15 +39,13 @@
#include "node_subscr.h"
-struct subscription;
-struct port_list;
+struct tipc_subscription;
+struct tipc_port_list;
/*
* TIPC name types reserved for internal TIPC use (both current and planned)
*/
-
-#define TIPC_ZM_SRV 3 /* zone master service name type */
-
+#define TIPC_ZM_SRV 3 /* zone master service name type */
/**
* struct publication - info about a published (name or) name sequence
@@ -61,13 +59,12 @@ struct port_list;
* @subscr: subscription to "node down" event (for off-node publications only)
* @local_list: adjacent entries in list of publications made by this node
* @pport_list: adjacent entries in list of publications made by this port
- * @node_list: next matching name seq publication with >= node scope
- * @cluster_list: next matching name seq publication with >= cluster scope
- * @zone_list: next matching name seq publication with >= zone scope
+ * @node_list: adjacent matching name seq publications with >= node scope
+ * @cluster_list: adjacent matching name seq publications with >= cluster scope
+ * @zone_list: adjacent matching name seq publications with >= zone scope
*
* Note that the node list, cluster list, and zone list are circular lists.
*/
-
struct publication {
u32 type;
u32 lower;
@@ -79,9 +76,9 @@ struct publication {
struct tipc_node_subscr subscr;
struct list_head local_list;
struct list_head pport_list;
- struct publication *node_list_next;
- struct publication *cluster_list_next;
- struct publication *zone_list_next;
+ struct list_head node_list;
+ struct list_head cluster_list;
+ struct list_head zone_list;
};
@@ -90,18 +87,17 @@ extern rwlock_t tipc_nametbl_lock;
struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space);
u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *node);
int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
- struct port_list *dports);
-int tipc_nametbl_publish_rsv(u32 ref, unsigned int scope,
- struct tipc_name_seq const *seq);
+ struct tipc_port_list *dports);
struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
- u32 scope, u32 port_ref, u32 key);
+ u32 scope, u32 port_ref, u32 key);
int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key);
struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper,
- u32 scope, u32 node, u32 ref, u32 key);
-struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
- u32 node, u32 ref, u32 key);
-void tipc_nametbl_subscribe(struct subscription *s);
-void tipc_nametbl_unsubscribe(struct subscription *s);
+ u32 scope, u32 node, u32 ref,
+ u32 key);
+struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower, u32 node,
+ u32 ref, u32 key);
+void tipc_nametbl_subscribe(struct tipc_subscription *s);
+void tipc_nametbl_unsubscribe(struct tipc_subscription *s);
int tipc_nametbl_init(void);
void tipc_nametbl_stop(void);
diff --git a/net/tipc/net.c b/net/tipc/net.c
index c2b4b86c2e6..f64375e7f99 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -2,7 +2,7 @@
* net/tipc/net.c: TIPC network routing code
*
* Copyright (c) 1995-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -36,50 +36,44 @@
#include "core.h"
#include "net.h"
-#include "zone.h"
-#include "name_table.h"
#include "name_distr.h"
#include "subscr.h"
-#include "link.h"
#include "port.h"
+#include "socket.h"
+#include "node.h"
#include "config.h"
/*
* The TIPC locking policy is designed to ensure a very fine locking
* granularity, permitting complete parallel access to individual
- * port and node/link instances. The code consists of three major
+ * port and node/link instances. The code consists of four major
* locking domains, each protected with their own disjunct set of locks.
*
- * 1: The routing hierarchy.
- * Comprises the structures 'zone', 'cluster', 'node', 'link'
- * and 'bearer'. The whole hierarchy is protected by a big
- * read/write lock, tipc_net_lock, to enssure that nothing is added
- * or removed while code is accessing any of these structures.
- * This layer must not be called from the two others while they
- * hold any of their own locks.
- * Neither must it itself do any upcalls to the other two before
- * it has released tipc_net_lock and other protective locks.
+ * 1: The bearer level.
+ *     The RTNL lock serializes bearer configuration on the update side,
+ *     while RCU protects bearer instances on the read side, keeping them
+ *     valid on both the transmit and receive paths.
*
- * Within the tipc_net_lock domain there are two sub-domains;'node' and
- * 'bearer', where local write operations are permitted,
- * provided that those are protected by individual spin_locks
- * per instance. Code holding tipc_net_lock(read) and a node spin_lock
- * is permitted to poke around in both the node itself and its
- * subordinate links. I.e, it can update link counters and queues,
- * change link state, send protocol messages, and alter the
- * "active_links" array in the node; but it can _not_ remove a link
- * or a node from the overall structure.
- * Correspondingly, individual bearers may change status within a
- * tipc_net_lock(read), protected by an individual spin_lock ber bearer
- * instance, but it needs tipc_net_lock(write) to remove/add any bearers.
+ * 2: The node and link level.
+ *    All node instances are saved into two lists, tipc_node_list and
+ *    node_htable. Both lists are protected by node_list_lock on the write
+ *    side and guarded by RCU on the read side. A node instance is destroyed
+ *    only when the TIPC module is removed, at which point it is certain
+ *    that no one can still be accessing it. Therefore, apart from iterating
+ *    the two lists under RCU protection, there is no need to hold RCU when
+ *    accessing a node instance elsewhere.
*
+ *    In addition, all members of the node structure, including its link
+ *    instances, are protected by the node's spin lock.
*
- * 2: The transport level of the protocol.
- * This consists of the structures port, (and its user level
- * representations, such as user_port and tipc_sock), reference and
- * tipc_user (port.c, reg.c, socket.c).
+ * 3: The transport level of the protocol.
+ * This consists of the structures port, (and its user level
+ * representations, such as user_port and tipc_sock), reference and
+ * tipc_user (port.c, reg.c, socket.c).
*
- * This layer has four different locks:
+ * This layer has four different locks:
* - The tipc_port spin_lock. This is protecting each port instance
* from parallel data access and removal. Since we can not place
* this lock in the port itself, it has been placed in the
@@ -98,7 +92,7 @@
* There are two such lists; 'port_list', which is used for management,
* and 'wait_list', which is used to queue ports during congestion.
*
- * 3: The name table (name_table.c, name_distr.c, subscription.c)
+ * 4: The name table (name_table.c, name_distr.c, subscription.c)
* - There is one big read/write-lock (tipc_nametbl_lock) protecting the
* overall name table structure. Nothing must be added/removed to
* this structure without holding write access to it.
@@ -110,49 +104,6 @@
* - A local spin_lock protecting the queue of subscriber events.
*/
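
A short sketch of the read-side pattern described in point 2, mirroring how tipc_node_find() (in node.c, further down in this patch) walks node_htable under RCU; addr is a placeholder:

	struct tipc_node *node;

	rcu_read_lock();
	hlist_for_each_entry_rcu(node, &node_htable[tipc_hashfn(addr)], hash) {
		if (node->addr == addr)
			break;
	}
	rcu_read_unlock();
	/* per the comment above, node instances are freed only at module
	 * removal, so a found pointer stays usable after rcu_read_unlock()
	 */
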
-DEFINE_RWLOCK(tipc_net_lock);
-static struct _zone *tipc_zones[256] = { NULL, };
-struct network tipc_net = { tipc_zones };
-
-struct tipc_node *tipc_net_select_remote_node(u32 addr, u32 ref)
-{
- return tipc_zone_select_remote_node(tipc_net.zones[tipc_zone(addr)], addr, ref);
-}
-
-u32 tipc_net_select_router(u32 addr, u32 ref)
-{
- return tipc_zone_select_router(tipc_net.zones[tipc_zone(addr)], addr, ref);
-}
-
-void tipc_net_remove_as_router(u32 router)
-{
- u32 z_num;
-
- for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
- if (!tipc_net.zones[z_num])
- continue;
- tipc_zone_remove_as_router(tipc_net.zones[z_num], router);
- }
-}
-
-void tipc_net_send_external_routes(u32 dest)
-{
- u32 z_num;
-
- for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
- if (tipc_net.zones[z_num])
- tipc_zone_send_external_routes(tipc_net.zones[z_num], dest);
- }
-}
-
-static void net_stop(void)
-{
- u32 z_num;
-
- for (z_num = 1; z_num <= tipc_max_zones; z_num++)
- tipc_zone_delete(tipc_net.zones[z_num]);
-}
-
static void net_route_named_msg(struct sk_buff *buf)
{
struct tipc_msg *msg = buf_msg(buf);
@@ -160,22 +111,18 @@ static void net_route_named_msg(struct sk_buff *buf)
u32 dport;
if (!msg_named(msg)) {
- msg_dbg(msg, "tipc_net->drop_nam:");
- buf_discard(buf);
+ kfree_skb(buf);
return;
}
dnode = addr_domain(msg_lookup_scope(msg));
dport = tipc_nametbl_translate(msg_nametype(msg), msg_nameinst(msg), &dnode);
- dbg("tipc_net->lookup<%u,%u>-><%u,%x>\n",
- msg_nametype(msg), msg_nameinst(msg), dport, dnode);
if (dport) {
msg_set_destnode(msg, dnode);
msg_set_destport(msg, dport);
tipc_net_route_msg(buf);
return;
}
- msg_dbg(msg, "tipc_net->rej:NO NAME: ");
tipc_reject_msg(buf, TIPC_ERR_NO_NAME);
}
@@ -188,54 +135,34 @@ void tipc_net_route_msg(struct sk_buff *buf)
return;
msg = buf_msg(buf);
- msg_incr_reroute_cnt(msg);
- if (msg_reroute_cnt(msg) > 6) {
- if (msg_errcode(msg)) {
- msg_dbg(msg, "NET>DISC>:");
- buf_discard(buf);
- } else {
- msg_dbg(msg, "NET>REJ>:");
- tipc_reject_msg(buf, msg_destport(msg) ?
- TIPC_ERR_NO_PORT : TIPC_ERR_NO_NAME);
- }
- return;
- }
-
- msg_dbg(msg, "tipc_net->rout: ");
-
/* Handle message for this node */
dnode = msg_short(msg) ? tipc_own_addr : msg_destnode(msg);
if (tipc_in_scope(dnode, tipc_own_addr)) {
if (msg_isdata(msg)) {
if (msg_mcast(msg))
- tipc_port_recv_mcast(buf, NULL);
+ tipc_port_mcast_rcv(buf, NULL);
else if (msg_destport(msg))
- tipc_port_recv_msg(buf);
+ tipc_sk_rcv(buf);
else
net_route_named_msg(buf);
return;
}
switch (msg_user(msg)) {
- case ROUTE_DISTRIBUTOR:
- tipc_cltr_recv_routing_table(buf);
- break;
case NAME_DISTRIBUTOR:
- tipc_named_recv(buf);
+ tipc_named_rcv(buf);
break;
case CONN_MANAGER:
- tipc_port_recv_proto_msg(buf);
+ tipc_port_proto_rcv(buf);
break;
default:
- msg_dbg(msg,"DROP/NET/<REC<");
- buf_discard(buf);
+ kfree_skb(buf);
}
return;
}
/* Handle message for another node */
- msg_dbg(msg, "NET>SEND>: ");
skb_trim(buf, msg_size(msg));
- tipc_link_send(buf, dnode, msg_link_selector(msg));
+ tipc_link_xmit(buf, dnode, msg_link_selector(msg));
}
int tipc_net_start(u32 addr)
@@ -243,41 +170,33 @@ int tipc_net_start(u32 addr)
char addr_string[16];
int res;
- if (tipc_mode != TIPC_NODE_MODE)
- return -ENOPROTOOPT;
-
- tipc_subscr_stop();
- tipc_cfg_stop();
-
tipc_own_addr = addr;
- tipc_mode = TIPC_NET_MODE;
tipc_named_reinit();
tipc_port_reinit();
-
- if ((res = tipc_cltr_init()) ||
- (res = tipc_bclink_init())) {
+ res = tipc_bclink_init();
+ if (res)
return res;
- }
- tipc_k_signal((Handler)tipc_subscr_start, 0);
- tipc_k_signal((Handler)tipc_cfg_init, 0);
+ tipc_nametbl_publish(TIPC_CFG_SRV, tipc_own_addr, tipc_own_addr,
+ TIPC_ZONE_SCOPE, 0, tipc_own_addr);
- info("Started in network mode\n");
- info("Own node address %s, network identity %u\n",
- tipc_addr_string_fill(addr_string, tipc_own_addr), tipc_net_id);
+ pr_info("Started in network mode\n");
+ pr_info("Own node address %s, network identity %u\n",
+ tipc_addr_string_fill(addr_string, tipc_own_addr), tipc_net_id);
return 0;
}
void tipc_net_stop(void)
{
- if (tipc_mode != TIPC_NET_MODE)
+ if (!tipc_own_addr)
return;
- write_lock_bh(&tipc_net_lock);
+
+ tipc_nametbl_withdraw(TIPC_CFG_SRV, tipc_own_addr, 0, tipc_own_addr);
+ rtnl_lock();
tipc_bearer_stop();
- tipc_mode = TIPC_NODE_MODE;
tipc_bclink_stop();
- net_stop();
- write_unlock_bh(&tipc_net_lock);
- info("Left network mode\n");
-}
+ tipc_node_stop();
+ rtnl_unlock();
+ pr_info("Left network mode\n");
+}
diff --git a/net/tipc/net.h b/net/tipc/net.h
index de2b9ad8f64..c6c2b46f7c2 100644
--- a/net/tipc/net.h
+++ b/net/tipc/net.h
@@ -2,7 +2,7 @@
* net/tipc/net.h: Include file for TIPC network routing code
*
* Copyright (c) 1995-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -37,26 +37,7 @@
#ifndef _TIPC_NET_H
#define _TIPC_NET_H
-struct _zone;
-
-/**
- * struct network - TIPC network structure
- * @zones: array of pointers to all zones within network
- */
-
-struct network {
- struct _zone **zones;
-};
-
-
-extern struct network tipc_net;
-extern rwlock_t tipc_net_lock;
-
-void tipc_net_remove_as_router(u32 router);
-void tipc_net_send_external_routes(u32 dest);
void tipc_net_route_msg(struct sk_buff *buf);
-struct tipc_node *tipc_net_select_remote_node(u32 addr, u32 ref);
-u32 tipc_net_select_router(u32 addr, u32 ref);
int tipc_net_start(u32 addr);
void tipc_net_stop(void);
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index 7bda8e3d139..ad844d36534 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -44,17 +44,17 @@ static int handle_cmd(struct sk_buff *skb, struct genl_info *info)
struct nlmsghdr *rep_nlh;
struct nlmsghdr *req_nlh = info->nlhdr;
struct tipc_genlmsghdr *req_userhdr = info->userhdr;
- int hdr_space = NLMSG_SPACE(GENL_HDRLEN + TIPC_GENL_HDRLEN);
+ int hdr_space = nlmsg_total_size(GENL_HDRLEN + TIPC_GENL_HDRLEN);
u16 cmd;
- if ((req_userhdr->cmd & 0xC000) && (!capable(CAP_NET_ADMIN)))
+ if ((req_userhdr->cmd & 0xC000) && (!netlink_capable(skb, CAP_NET_ADMIN)))
cmd = TIPC_CMD_NOT_NET_ADMIN;
else
cmd = req_userhdr->cmd;
rep_buf = tipc_cfg_do_cmd(req_userhdr->dest, cmd,
- NLMSG_DATA(req_nlh) + GENL_HDRLEN + TIPC_GENL_HDRLEN,
- NLMSG_PAYLOAD(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN),
+ nlmsg_data(req_nlh) + GENL_HDRLEN + TIPC_GENL_HDRLEN,
+ nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN),
hdr_space);
if (rep_buf) {
@@ -62,7 +62,7 @@ static int handle_cmd(struct sk_buff *skb, struct genl_info *info)
rep_nlh = nlmsg_hdr(rep_buf);
memcpy(rep_nlh, req_nlh, hdr_space);
rep_nlh->nlmsg_len = rep_buf->len;
- genlmsg_unicast(&init_net, rep_buf, NETLINK_CB(skb).pid);
+ genlmsg_unicast(&init_net, rep_buf, NETLINK_CB(skb).portid);
}
return 0;
@@ -76,33 +76,26 @@ static struct genl_family tipc_genl_family = {
.maxattr = 0,
};
-static struct genl_ops tipc_genl_ops = {
- .cmd = TIPC_GENL_CMD,
- .doit = handle_cmd,
+static struct genl_ops tipc_genl_ops[] = {
+ {
+ .cmd = TIPC_GENL_CMD,
+ .doit = handle_cmd,
+ },
};
-static int tipc_genl_family_registered;
-
int tipc_netlink_start(void)
{
int res;
- res = genl_register_family_with_ops(&tipc_genl_family,
- &tipc_genl_ops, 1);
+ res = genl_register_family_with_ops(&tipc_genl_family, tipc_genl_ops);
if (res) {
- err("Failed to register netlink interface\n");
+ pr_err("Failed to register netlink interface\n");
return res;
}
-
- tipc_genl_family_registered = 1;
return 0;
}
void tipc_netlink_stop(void)
{
- if (!tipc_genl_family_registered)
- return;
-
genl_unregister_family(&tipc_genl_family);
- tipc_genl_family_registered = 0;
}
diff --git a/net/tipc/node.c b/net/tipc/node.c
index df71dfc3a9a..5b44c3041be 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -1,8 +1,8 @@
/*
* net/tipc/node.c: TIPC node management routines
*
- * Copyright (c) 2000-2006, Ericsson AB
- * Copyright (c) 2005-2006, Wind River Systems
+ * Copyright (c) 2000-2006, 2012 Ericsson AB
+ * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -37,143 +37,154 @@
#include "core.h"
#include "config.h"
#include "node.h"
-#include "port.h"
#include "name_distr.h"
+#define NODE_HTABLE_SIZE 512
+
static void node_lost_contact(struct tipc_node *n_ptr);
static void node_established_contact(struct tipc_node *n_ptr);
-/* sorted list of nodes within cluster */
-static struct tipc_node *tipc_nodes = NULL;
-
-static DEFINE_SPINLOCK(node_create_lock);
-
-u32 tipc_own_tag = 0;
+static struct hlist_head node_htable[NODE_HTABLE_SIZE];
+LIST_HEAD(tipc_node_list);
+static u32 tipc_num_nodes;
+static u32 tipc_num_links;
+static DEFINE_SPINLOCK(node_list_lock);
-/**
- * tipc_node_create - create neighboring node
- *
- * Currently, this routine is called by neighbor discovery code, which holds
- * net_lock for reading only. We must take node_create_lock to ensure a node
- * isn't created twice if two different bearers discover the node at the same
- * time. (It would be preferable to switch to holding net_lock in write mode,
- * but this is a non-trivial change.)
+/*
+ * A trivial power-of-two bitmask technique is used for speed, since this
+ * operation is done for every incoming TIPC packet. The number of hash table
+ * entries has been chosen so that no hash chain exceeds 8 nodes and will
+ * usually be much smaller (typically only a single node).
*/
+static unsigned int tipc_hashfn(u32 addr)
+{
+ return addr & (NODE_HTABLE_SIZE - 1);
+}
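
A quick worked check of the mask arithmetic, with NODE_HTABLE_SIZE at the 512 defined above: the mask is 0x1ff, so only the low nine bits of the node address select a bucket. For example:

	tipc_hashfn(0x01001001);	/* <1.1.1>:   0x01001001 & 0x1ff -> bucket 1   */
	tipc_hashfn(0x010011ff);	/* <1.1.511>: 0x010011ff & 0x1ff -> bucket 511 */
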
-struct tipc_node *tipc_node_create(u32 addr)
+/*
+ * tipc_node_find - locate specified node object, if it exists
+ */
+struct tipc_node *tipc_node_find(u32 addr)
{
- struct cluster *c_ptr;
- struct tipc_node *n_ptr;
- struct tipc_node **curr_node;
+ struct tipc_node *node;
- spin_lock_bh(&node_create_lock);
+ if (unlikely(!in_own_cluster_exact(addr)))
+ return NULL;
- for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
- if (addr < n_ptr->addr)
- break;
- if (addr == n_ptr->addr) {
- spin_unlock_bh(&node_create_lock);
- return n_ptr;
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(node, &node_htable[tipc_hashfn(addr)], hash) {
+ if (node->addr == addr) {
+ rcu_read_unlock();
+ return node;
}
}
+ rcu_read_unlock();
+ return NULL;
+}
- n_ptr = kzalloc(sizeof(*n_ptr),GFP_ATOMIC);
- if (!n_ptr) {
- spin_unlock_bh(&node_create_lock);
- warn("Node creation failed, no memory\n");
- return NULL;
- }
+struct tipc_node *tipc_node_create(u32 addr)
+{
+ struct tipc_node *n_ptr, *temp_node;
- c_ptr = tipc_cltr_find(addr);
- if (!c_ptr) {
- c_ptr = tipc_cltr_create(addr);
- }
- if (!c_ptr) {
- spin_unlock_bh(&node_create_lock);
- kfree(n_ptr);
+ spin_lock_bh(&node_list_lock);
+
+ n_ptr = kzalloc(sizeof(*n_ptr), GFP_ATOMIC);
+ if (!n_ptr) {
+ spin_unlock_bh(&node_list_lock);
+ pr_warn("Node creation failed, no memory\n");
return NULL;
}
n_ptr->addr = addr;
- spin_lock_init(&n_ptr->lock);
+ spin_lock_init(&n_ptr->lock);
+ INIT_HLIST_NODE(&n_ptr->hash);
+ INIT_LIST_HEAD(&n_ptr->list);
INIT_LIST_HEAD(&n_ptr->nsub);
- n_ptr->owner = c_ptr;
- tipc_cltr_attach_node(c_ptr, n_ptr);
- n_ptr->last_router = -1;
-
- /* Insert node into ordered list */
- for (curr_node = &tipc_nodes; *curr_node;
- curr_node = &(*curr_node)->next) {
- if (addr < (*curr_node)->addr) {
- n_ptr->next = *curr_node;
+
+ hlist_add_head_rcu(&n_ptr->hash, &node_htable[tipc_hashfn(addr)]);
+
+ list_for_each_entry_rcu(temp_node, &tipc_node_list, list) {
+ if (n_ptr->addr < temp_node->addr)
break;
- }
}
- (*curr_node) = n_ptr;
- spin_unlock_bh(&node_create_lock);
+ list_add_tail_rcu(&n_ptr->list, &temp_node->list);
+ n_ptr->action_flags = TIPC_WAIT_PEER_LINKS_DOWN;
+ n_ptr->signature = INVALID_NODE_SIG;
+
+ tipc_num_nodes++;
+
+ spin_unlock_bh(&node_list_lock);
return n_ptr;
}
-void tipc_node_delete(struct tipc_node *n_ptr)
+static void tipc_node_delete(struct tipc_node *n_ptr)
{
- if (!n_ptr)
- return;
+ list_del_rcu(&n_ptr->list);
+ hlist_del_rcu(&n_ptr->hash);
+ kfree_rcu(n_ptr, rcu);
- dbg("node %x deleted\n", n_ptr->addr);
- kfree(n_ptr);
+ tipc_num_nodes--;
}
+void tipc_node_stop(void)
+{
+ struct tipc_node *node, *t_node;
+
+ spin_lock_bh(&node_list_lock);
+ list_for_each_entry_safe(node, t_node, &tipc_node_list, list)
+ tipc_node_delete(node);
+ spin_unlock_bh(&node_list_lock);
+}
/**
* tipc_node_link_up - handle addition of link
*
* Link becomes active (alone or shared) or standby, depending on its priority.
*/
-
-void tipc_node_link_up(struct tipc_node *n_ptr, struct link *l_ptr)
+void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
{
- struct link **active = &n_ptr->active_links[0];
+ struct tipc_link **active = &n_ptr->active_links[0];
+ u32 addr = n_ptr->addr;
n_ptr->working_links++;
-
- info("Established link <%s> on network plane %c\n",
- l_ptr->name, l_ptr->b_ptr->net_plane);
+ tipc_nametbl_publish(TIPC_LINK_STATE, addr, addr, TIPC_NODE_SCOPE,
+ l_ptr->bearer_id, addr);
+ pr_info("Established link <%s> on network plane %c\n",
+ l_ptr->name, l_ptr->net_plane);
if (!active[0]) {
- dbg(" link %x into %x/%x\n", l_ptr, &active[0], &active[1]);
active[0] = active[1] = l_ptr;
node_established_contact(n_ptr);
return;
}
if (l_ptr->priority < active[0]->priority) {
- info("New link <%s> becomes standby\n", l_ptr->name);
+ pr_info("New link <%s> becomes standby\n", l_ptr->name);
return;
}
- tipc_link_send_duplicate(active[0], l_ptr);
+ tipc_link_dup_queue_xmit(active[0], l_ptr);
if (l_ptr->priority == active[0]->priority) {
active[0] = l_ptr;
return;
}
- info("Old link <%s> becomes standby\n", active[0]->name);
+ pr_info("Old link <%s> becomes standby\n", active[0]->name);
if (active[1] != active[0])
- info("Old link <%s> becomes standby\n", active[1]->name);
+ pr_info("Old link <%s> becomes standby\n", active[1]->name);
active[0] = active[1] = l_ptr;
}
/**
* node_select_active_links - select active link
*/
-
static void node_select_active_links(struct tipc_node *n_ptr)
{
- struct link **active = &n_ptr->active_links[0];
+ struct tipc_link **active = &n_ptr->active_links[0];
u32 i;
u32 highest_prio = 0;
active[0] = active[1] = NULL;
for (i = 0; i < MAX_BEARERS; i++) {
- struct link *l_ptr = n_ptr->links[i];
+ struct tipc_link *l_ptr = n_ptr->links[i];
if (!l_ptr || !tipc_link_is_up(l_ptr) ||
(l_ptr->priority < highest_prio))
@@ -191,20 +202,21 @@ static void node_select_active_links(struct tipc_node *n_ptr)
/**
* tipc_node_link_down - handle loss of link
*/
-
-void tipc_node_link_down(struct tipc_node *n_ptr, struct link *l_ptr)
+void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
{
- struct link **active;
+ struct tipc_link **active;
+ u32 addr = n_ptr->addr;
n_ptr->working_links--;
+ tipc_nametbl_withdraw(TIPC_LINK_STATE, addr, l_ptr->bearer_id, addr);
if (!tipc_link_is_active(l_ptr)) {
- info("Lost standby link <%s> on network plane %c\n",
- l_ptr->name, l_ptr->b_ptr->net_plane);
+ pr_info("Lost standby link <%s> on network plane %c\n",
+ l_ptr->name, l_ptr->net_plane);
return;
}
- info("Lost link <%s> on network plane %c\n",
- l_ptr->name, l_ptr->b_ptr->net_plane);
+ pr_info("Lost link <%s> on network plane %c\n",
+ l_ptr->name, l_ptr->net_plane);
active = &n_ptr->active_links[0];
if (active[0] == l_ptr)
@@ -214,233 +226,80 @@ void tipc_node_link_down(struct tipc_node *n_ptr, struct link *l_ptr)
if (active[0] == l_ptr)
node_select_active_links(n_ptr);
if (tipc_node_is_up(n_ptr))
- tipc_link_changeover(l_ptr);
+ tipc_link_failover_send_queue(l_ptr);
else
node_lost_contact(n_ptr);
}
-int tipc_node_has_active_links(struct tipc_node *n_ptr)
+int tipc_node_active_links(struct tipc_node *n_ptr)
{
return n_ptr->active_links[0] != NULL;
}
-int tipc_node_has_redundant_links(struct tipc_node *n_ptr)
-{
- return n_ptr->working_links > 1;
-}
-
-static int tipc_node_has_active_routes(struct tipc_node *n_ptr)
+int tipc_node_is_up(struct tipc_node *n_ptr)
{
- return n_ptr && (n_ptr->last_router >= 0);
+ return tipc_node_active_links(n_ptr);
}
-int tipc_node_is_up(struct tipc_node *n_ptr)
+void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
{
- return tipc_node_has_active_links(n_ptr) || tipc_node_has_active_routes(n_ptr);
+ n_ptr->links[l_ptr->bearer_id] = l_ptr;
+ spin_lock_bh(&node_list_lock);
+ tipc_num_links++;
+ spin_unlock_bh(&node_list_lock);
+ n_ptr->link_cnt++;
}
-struct tipc_node *tipc_node_attach_link(struct link *l_ptr)
+void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
{
- struct tipc_node *n_ptr = tipc_node_find(l_ptr->addr);
-
- if (!n_ptr)
- n_ptr = tipc_node_create(l_ptr->addr);
- if (n_ptr) {
- u32 bearer_id = l_ptr->b_ptr->identity;
- char addr_string[16];
-
- if (n_ptr->link_cnt >= 2) {
- err("Attempt to create third link to %s\n",
- tipc_addr_string_fill(addr_string, n_ptr->addr));
- return NULL;
- }
+ int i;
- if (!n_ptr->links[bearer_id]) {
- n_ptr->links[bearer_id] = l_ptr;
- tipc_net.zones[tipc_zone(l_ptr->addr)]->links++;
- n_ptr->link_cnt++;
- return n_ptr;
- }
- err("Attempt to establish second link on <%s> to %s\n",
- l_ptr->b_ptr->publ.name,
- tipc_addr_string_fill(addr_string, l_ptr->addr));
+ for (i = 0; i < MAX_BEARERS; i++) {
+ if (l_ptr != n_ptr->links[i])
+ continue;
+ n_ptr->links[i] = NULL;
+ spin_lock_bh(&node_list_lock);
+ tipc_num_links--;
+ spin_unlock_bh(&node_list_lock);
+ n_ptr->link_cnt--;
}
- return NULL;
}
-void tipc_node_detach_link(struct tipc_node *n_ptr, struct link *l_ptr)
-{
- n_ptr->links[l_ptr->b_ptr->identity] = NULL;
- tipc_net.zones[tipc_zone(l_ptr->addr)]->links--;
- n_ptr->link_cnt--;
-}
-
-/*
- * Routing table management - five cases to handle:
- *
- * 1: A link towards a zone/cluster external node comes up.
- * => Send a multicast message updating routing tables of all
- * system nodes within own cluster that the new destination
- * can be reached via this node.
- * (node.establishedContact()=>cluster.multicastNewRoute())
- *
- * 2: A link towards a slave node comes up.
- * => Send a multicast message updating routing tables of all
- * system nodes within own cluster that the new destination
- * can be reached via this node.
- * (node.establishedContact()=>cluster.multicastNewRoute())
- * => Send a message to the slave node about existence
- * of all system nodes within cluster:
- * (node.establishedContact()=>cluster.sendLocalRoutes())
- *
- * 3: A new cluster local system node becomes available.
- * => Send message(s) to this particular node containing
- * information about all cluster external and slave
- * nodes which can be reached via this node.
- * (node.establishedContact()==>network.sendExternalRoutes())
- * (node.establishedContact()==>network.sendSlaveRoutes())
- * => Send messages to all directly connected slave nodes
- * containing information about the existence of the new node
- * (node.establishedContact()=>cluster.multicastNewRoute())
- *
- * 4: The link towards a zone/cluster external node or slave
- * node goes down.
- * => Send a multcast message updating routing tables of all
- * nodes within cluster that the new destination can not any
- * longer be reached via this node.
- * (node.lostAllLinks()=>cluster.bcastLostRoute())
- *
- * 5: A cluster local system node becomes unavailable.
- * => Remove all references to this node from the local
- * routing tables. Note: This is a completely node
- * local operation.
- * (node.lostAllLinks()=>network.removeAsRouter())
- * => Send messages to all directly connected slave nodes
- * containing information about loss of the node
- * (node.establishedContact()=>cluster.multicastLostRoute())
- *
- */
-
static void node_established_contact(struct tipc_node *n_ptr)
{
- struct cluster *c_ptr;
-
- dbg("node_established_contact:-> %x\n", n_ptr->addr);
- if (!tipc_node_has_active_routes(n_ptr) && in_own_cluster(n_ptr->addr)) {
- tipc_k_signal((Handler)tipc_named_node_up, n_ptr->addr);
- }
-
- /* Syncronize broadcast acks */
+ n_ptr->action_flags |= TIPC_NOTIFY_NODE_UP;
+ n_ptr->bclink.oos_state = 0;
n_ptr->bclink.acked = tipc_bclink_get_last_sent();
-
- if (is_slave(tipc_own_addr))
- return;
- if (!in_own_cluster(n_ptr->addr)) {
- /* Usage case 1 (see above) */
- c_ptr = tipc_cltr_find(tipc_own_addr);
- if (!c_ptr)
- c_ptr = tipc_cltr_create(tipc_own_addr);
- if (c_ptr)
- tipc_cltr_bcast_new_route(c_ptr, n_ptr->addr, 1,
- tipc_max_nodes);
- return;
- }
-
- c_ptr = n_ptr->owner;
- if (is_slave(n_ptr->addr)) {
- /* Usage case 2 (see above) */
- tipc_cltr_bcast_new_route(c_ptr, n_ptr->addr, 1, tipc_max_nodes);
- tipc_cltr_send_local_routes(c_ptr, n_ptr->addr);
- return;
- }
-
- if (n_ptr->bclink.supported) {
- tipc_nmap_add(&tipc_cltr_bcast_nodes, n_ptr->addr);
- if (n_ptr->addr < tipc_own_addr)
- tipc_own_tag++;
- }
-
- /* Case 3 (see above) */
- tipc_net_send_external_routes(n_ptr->addr);
- tipc_cltr_send_slave_routes(c_ptr, n_ptr->addr);
- tipc_cltr_bcast_new_route(c_ptr, n_ptr->addr, LOWEST_SLAVE,
- tipc_highest_allowed_slave);
-}
-
-static void node_cleanup_finished(unsigned long node_addr)
-{
- struct tipc_node *n_ptr;
-
- read_lock_bh(&tipc_net_lock);
- n_ptr = tipc_node_find(node_addr);
- if (n_ptr) {
- tipc_node_lock(n_ptr);
- n_ptr->cleanup_required = 0;
- tipc_node_unlock(n_ptr);
- }
- read_unlock_bh(&tipc_net_lock);
+ tipc_bclink_add_node(n_ptr->addr);
}
static void node_lost_contact(struct tipc_node *n_ptr)
{
- struct cluster *c_ptr;
- struct tipc_node_subscr *ns, *tns;
char addr_string[16];
u32 i;
- /* Clean up broadcast reception remains */
- n_ptr->bclink.gap_after = n_ptr->bclink.gap_to = 0;
- while (n_ptr->bclink.deferred_head) {
- struct sk_buff* buf = n_ptr->bclink.deferred_head;
- n_ptr->bclink.deferred_head = buf->next;
- buf_discard(buf);
- }
- if (n_ptr->bclink.defragm) {
- buf_discard(n_ptr->bclink.defragm);
- n_ptr->bclink.defragm = NULL;
- }
- if (in_own_cluster(n_ptr->addr) && n_ptr->bclink.supported) {
- tipc_bclink_acknowledge(n_ptr, mod(n_ptr->bclink.acked + 10000));
- }
+ pr_info("Lost contact with %s\n",
+ tipc_addr_string_fill(addr_string, n_ptr->addr));
- /* Update routing tables */
- if (is_slave(tipc_own_addr)) {
- tipc_net_remove_as_router(n_ptr->addr);
- } else {
- if (!in_own_cluster(n_ptr->addr)) {
- /* Case 4 (see above) */
- c_ptr = tipc_cltr_find(tipc_own_addr);
- tipc_cltr_bcast_lost_route(c_ptr, n_ptr->addr, 1,
- tipc_max_nodes);
- } else {
- /* Case 5 (see above) */
- c_ptr = tipc_cltr_find(n_ptr->addr);
- if (is_slave(n_ptr->addr)) {
- tipc_cltr_bcast_lost_route(c_ptr, n_ptr->addr, 1,
- tipc_max_nodes);
- } else {
- if (n_ptr->bclink.supported) {
- tipc_nmap_remove(&tipc_cltr_bcast_nodes,
- n_ptr->addr);
- if (n_ptr->addr < tipc_own_addr)
- tipc_own_tag--;
- }
- tipc_net_remove_as_router(n_ptr->addr);
- tipc_cltr_bcast_lost_route(c_ptr, n_ptr->addr,
- LOWEST_SLAVE,
- tipc_highest_allowed_slave);
- }
+ /* Flush broadcast link info associated with lost node */
+ if (n_ptr->bclink.recv_permitted) {
+ kfree_skb_list(n_ptr->bclink.deferred_head);
+ n_ptr->bclink.deferred_size = 0;
+
+ if (n_ptr->bclink.reasm_buf) {
+ kfree_skb(n_ptr->bclink.reasm_buf);
+ n_ptr->bclink.reasm_buf = NULL;
}
- }
- if (tipc_node_has_active_routes(n_ptr))
- return;
- info("Lost contact with %s\n",
- tipc_addr_string_fill(addr_string, n_ptr->addr));
+ tipc_bclink_remove_node(n_ptr->addr);
+ tipc_bclink_acknowledge(n_ptr, INVALID_LINK_SEQ);
+
+ n_ptr->bclink.recv_permitted = false;
+ }
/* Abort link changeover */
for (i = 0; i < MAX_BEARERS; i++) {
- struct link *l_ptr = n_ptr->links[i];
+ struct tipc_link *l_ptr = n_ptr->links[i];
if (!l_ptr)
continue;
l_ptr->reset_checkpoint = l_ptr->next_in_no;
@@ -448,137 +307,13 @@ static void node_lost_contact(struct tipc_node *n_ptr)
tipc_link_reset_fragments(l_ptr);
}
- /* Notify subscribers */
- list_for_each_entry_safe(ns, tns, &n_ptr->nsub, nodesub_list) {
- ns->node = NULL;
- list_del_init(&ns->nodesub_list);
- tipc_k_signal((Handler)ns->handle_node_down,
- (unsigned long)ns->usr_handle);
- }
-
- /* Prevent re-contact with node until all cleanup is done */
-
- n_ptr->cleanup_required = 1;
- tipc_k_signal((Handler)node_cleanup_finished, n_ptr->addr);
-}
-
-/**
- * tipc_node_select_next_hop - find the next-hop node for a message
- *
- * Called by when cluster local lookup has failed.
- */
-
-struct tipc_node *tipc_node_select_next_hop(u32 addr, u32 selector)
-{
- struct tipc_node *n_ptr;
- u32 router_addr;
-
- if (!tipc_addr_domain_valid(addr))
- return NULL;
-
- /* Look for direct link to destination processsor */
- n_ptr = tipc_node_find(addr);
- if (n_ptr && tipc_node_has_active_links(n_ptr))
- return n_ptr;
-
- /* Cluster local system nodes *must* have direct links */
- if (!is_slave(addr) && in_own_cluster(addr))
- return NULL;
-
- /* Look for cluster local router with direct link to node */
- router_addr = tipc_node_select_router(n_ptr, selector);
- if (router_addr)
- return tipc_node_select(router_addr, selector);
-
- /* Slave nodes can only be accessed within own cluster via a
- known router with direct link -- if no router was found,give up */
- if (is_slave(addr))
- return NULL;
-
- /* Inter zone/cluster -- find any direct link to remote cluster */
- addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0);
- n_ptr = tipc_net_select_remote_node(addr, selector);
- if (n_ptr && tipc_node_has_active_links(n_ptr))
- return n_ptr;
-
- /* Last resort -- look for any router to anywhere in remote zone */
- router_addr = tipc_net_select_router(addr, selector);
- if (router_addr)
- return tipc_node_select(router_addr, selector);
+ n_ptr->action_flags &= ~TIPC_WAIT_OWN_LINKS_DOWN;
- return NULL;
-}
-
-/**
- * tipc_node_select_router - select router to reach specified node
- *
- * Uses a deterministic and fair algorithm for selecting router node.
- */
-
-u32 tipc_node_select_router(struct tipc_node *n_ptr, u32 ref)
-{
- u32 ulim;
- u32 mask;
- u32 start;
- u32 r;
-
- if (!n_ptr)
- return 0;
-
- if (n_ptr->last_router < 0)
- return 0;
- ulim = ((n_ptr->last_router + 1) * 32) - 1;
-
- /* Start entry must be random */
- mask = tipc_max_nodes;
- while (mask > ulim)
- mask >>= 1;
- start = ref & mask;
- r = start;
-
- /* Lookup upwards with wrap-around */
- do {
- if (((n_ptr->routers[r / 32]) >> (r % 32)) & 1)
- break;
- } while (++r <= ulim);
- if (r > ulim) {
- r = 1;
- do {
- if (((n_ptr->routers[r / 32]) >> (r % 32)) & 1)
- break;
- } while (++r < start);
- assert(r != start);
- }
- assert(r && (r <= ulim));
- return tipc_addr(own_zone(), own_cluster(), r);
-}
-
-void tipc_node_add_router(struct tipc_node *n_ptr, u32 router)
-{
- u32 r_num = tipc_node(router);
-
- n_ptr->routers[r_num / 32] =
- ((1 << (r_num % 32)) | n_ptr->routers[r_num / 32]);
- n_ptr->last_router = tipc_max_nodes / 32;
- while ((--n_ptr->last_router >= 0) &&
- !n_ptr->routers[n_ptr->last_router]);
-}
-
-void tipc_node_remove_router(struct tipc_node *n_ptr, u32 router)
-{
- u32 r_num = tipc_node(router);
-
- if (n_ptr->last_router < 0)
- return; /* No routes */
-
- n_ptr->routers[r_num / 32] =
- ((~(1 << (r_num % 32))) & (n_ptr->routers[r_num / 32]));
- n_ptr->last_router = tipc_max_nodes / 32;
- while ((--n_ptr->last_router >= 0) &&
- !n_ptr->routers[n_ptr->last_router]);
-
- if (!tipc_node_is_up(n_ptr))
- node_lost_contact(n_ptr);
+ /* Notify subscribers and prevent re-contact with node until
+ * cleanup is done.
+ */
+ n_ptr->action_flags |= TIPC_WAIT_PEER_LINKS_DOWN |
+ TIPC_NOTIFY_NODE_DOWN;
}
struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
@@ -597,30 +332,28 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
" (network address)");
- read_lock_bh(&tipc_net_lock);
- if (!tipc_nodes) {
- read_unlock_bh(&tipc_net_lock);
+ spin_lock_bh(&node_list_lock);
+ if (!tipc_num_nodes) {
+ spin_unlock_bh(&node_list_lock);
return tipc_cfg_reply_none();
}
- /* For now, get space for all other nodes
- (will need to modify this when slave nodes are supported */
-
- payload_size = TLV_SPACE(sizeof(node_info)) * (tipc_max_nodes - 1);
+ /* For now, get space for all other nodes */
+ payload_size = TLV_SPACE(sizeof(node_info)) * tipc_num_nodes;
if (payload_size > 32768u) {
- read_unlock_bh(&tipc_net_lock);
+ spin_unlock_bh(&node_list_lock);
return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
" (too many nodes)");
}
+ spin_unlock_bh(&node_list_lock);
+
buf = tipc_cfg_reply_alloc(payload_size);
- if (!buf) {
- read_unlock_bh(&tipc_net_lock);
+ if (!buf)
return NULL;
- }
/* Add TLVs for all nodes in scope */
-
- for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
if (!tipc_in_scope(domain, n_ptr->addr))
continue;
node_info.addr = htonl(n_ptr->addr);
@@ -628,8 +361,7 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
tipc_cfg_append_tlv(buf, TIPC_TLV_NODE_INFO,
&node_info, sizeof(node_info));
}
-
- read_unlock_bh(&tipc_net_lock);
+ rcu_read_unlock();
return buf;
}
@@ -649,36 +381,32 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
" (network address)");
- if (tipc_mode != TIPC_NET_MODE)
+ if (!tipc_own_addr)
return tipc_cfg_reply_none();
- read_lock_bh(&tipc_net_lock);
-
- /* Get space for all unicast links + multicast link */
-
- payload_size = TLV_SPACE(sizeof(link_info)) *
- (tipc_net.zones[tipc_zone(tipc_own_addr)]->links + 1);
+ spin_lock_bh(&node_list_lock);
+ /* Get space for all unicast links + broadcast link */
+ payload_size = TLV_SPACE((sizeof(link_info)) * (tipc_num_links + 1));
if (payload_size > 32768u) {
- read_unlock_bh(&tipc_net_lock);
+ spin_unlock_bh(&node_list_lock);
return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
" (too many links)");
}
+ spin_unlock_bh(&node_list_lock);
+
buf = tipc_cfg_reply_alloc(payload_size);
- if (!buf) {
- read_unlock_bh(&tipc_net_lock);
+ if (!buf)
return NULL;
- }
/* Add TLV for broadcast link */
-
- link_info.dest = htonl(tipc_own_addr & 0xfffff00);
+ link_info.dest = htonl(tipc_cluster_mask(tipc_own_addr));
link_info.up = htonl(1);
strlcpy(link_info.str, tipc_bclink_name, TIPC_MAX_LINK_NAME);
tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info));
/* Add TLVs for any other links in scope */
-
- for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
u32 i;
if (!tipc_in_scope(domain, n_ptr->addr))
@@ -695,7 +423,66 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
}
tipc_node_unlock(n_ptr);
}
-
- read_unlock_bh(&tipc_net_lock);
+ rcu_read_unlock();
return buf;
}
+
+/**
+ * tipc_node_get_linkname - get the name of a link
+ *
+ * @bearer_id: id of the bearer
+ * @addr: peer node address
+ * @linkname: link name output buffer
+ * @len: size of the link name output buffer
+ *
+ * Returns 0 on success
+ */
+int tipc_node_get_linkname(u32 bearer_id, u32 addr, char *linkname, size_t len)
+{
+ struct tipc_link *link;
+ struct tipc_node *node = tipc_node_find(addr);
+
+ if ((bearer_id >= MAX_BEARERS) || !node)
+ return -EINVAL;
+ tipc_node_lock(node);
+ link = node->links[bearer_id];
+ if (link) {
+ strncpy(linkname, link->name, len);
+ tipc_node_unlock(node);
+ return 0;
+ }
+ tipc_node_unlock(node);
+ return -EINVAL;
+}
+
+void tipc_node_unlock(struct tipc_node *node)
+{
+ LIST_HEAD(nsub_list);
+ struct tipc_link *link;
+ int pkt_sz = 0;
+ u32 addr = 0;
+
+ if (likely(!node->action_flags)) {
+ spin_unlock_bh(&node->lock);
+ return;
+ }
+
+ if (node->action_flags & TIPC_NOTIFY_NODE_DOWN) {
+ list_replace_init(&node->nsub, &nsub_list);
+ node->action_flags &= ~TIPC_NOTIFY_NODE_DOWN;
+ }
+ if (node->action_flags & TIPC_NOTIFY_NODE_UP) {
+ link = node->active_links[0];
+ node->action_flags &= ~TIPC_NOTIFY_NODE_UP;
+ if (link) {
+ pkt_sz = ((link->max_pkt - INT_H_SIZE) / ITEM_SIZE) *
+ ITEM_SIZE;
+ addr = node->addr;
+ }
+ }
+ spin_unlock_bh(&node->lock);
+
+ if (!list_empty(&nsub_list))
+ tipc_nodesub_notify(&nsub_list);
+ if (pkt_sz)
+ tipc_named_node_up(pkt_sz, addr);
+}
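
Editorial note: the node.c rework above replaces the sorted-list lookup with a hash table indexed by the bitmask hash described in the tipc_hashfn() comment. A small standalone sketch of that technique (ordinary userspace C, not TIPC code) shows why the table size must be a power of two for the mask to behave like a modulo:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define HTABLE_SIZE 512			/* must be a power of two */

/* Same idea as tipc_hashfn(): the low bits of the address pick the bucket. */
static unsigned int hashfn(uint32_t addr)
{
	return addr & (HTABLE_SIZE - 1);	/* equivalent to addr % HTABLE_SIZE */
}

int main(void)
{
	uint32_t addrs[] = { 0x1001001, 0x1001002, 0x1001201 };
	size_t i;

	for (i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++)
		printf("addr 0x%" PRIx32 " -> bucket %u\n",
		       addrs[i], hashfn(addrs[i]));
	return 0;
}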
diff --git a/net/tipc/node.h b/net/tipc/node.h
index fff331b2d26..9087063793f 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -2,7 +2,7 @@
* net/tipc/node.h: Include file for TIPC node management routines
*
* Copyright (c) 2000-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005, 2010-2014, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -39,109 +39,108 @@
#include "node_subscr.h"
#include "addr.h"
-#include "cluster.h"
+#include "net.h"
#include "bearer.h"
+/*
+ * Out-of-range value for node signature
+ */
+#define INVALID_NODE_SIG 0x10000
+
+/* Flags used to take different actions according to flag type
+ * TIPC_WAIT_PEER_LINKS_DOWN: wait to see that peer's links are down
+ * TIPC_WAIT_OWN_LINKS_DOWN: wait until peer node is declared down
+ * TIPC_NOTIFY_NODE_DOWN: notify node is down
+ * TIPC_NOTIFY_NODE_UP: notify node is up
+ */
+enum {
+ TIPC_WAIT_PEER_LINKS_DOWN = (1 << 1),
+ TIPC_WAIT_OWN_LINKS_DOWN = (1 << 2),
+ TIPC_NOTIFY_NODE_DOWN = (1 << 3),
+ TIPC_NOTIFY_NODE_UP = (1 << 4)
+};
+
+/**
+ * struct tipc_node_bclink - TIPC node bclink structure
+ * @acked: sequence # of last outbound b'cast message acknowledged by node
+ * @last_in: sequence # of last in-sequence b'cast message received from node
+ * @last_sent: sequence # of last b'cast message sent by node
+ * @oos_state: state tracker for handling OOS b'cast messages
+ * @deferred_size: number of OOS b'cast messages in deferred queue
+ * @deferred_head: oldest OOS b'cast message received from node
+ * @deferred_tail: newest OOS b'cast message received from node
+ * @reasm_buf: broadcast reassembly queue head from node
+ * @recv_permitted: true if node is allowed to receive b'cast messages
+ */
+struct tipc_node_bclink {
+ u32 acked;
+ u32 last_in;
+ u32 last_sent;
+ u32 oos_state;
+ u32 deferred_size;
+ struct sk_buff *deferred_head;
+ struct sk_buff *deferred_tail;
+ struct sk_buff *reasm_buf;
+ bool recv_permitted;
+};
+
/**
* struct tipc_node - TIPC node structure
* @addr: network address of node
* @lock: spinlock governing access to structure
- * @owner: pointer to cluster that node belongs to
- * @next: pointer to next node in sorted list of cluster's nodes
- * @nsub: list of "node down" subscriptions monitoring node
+ * @hash: links to adjacent nodes in unsorted hash chain
* @active_links: pointers to active links to node
* @links: pointers to all links to node
+ * @action_flags: bit mask of different types of node actions
+ * @bclink: broadcast-related info
+ * @list: links to adjacent nodes in sorted list of cluster's nodes
* @working_links: number of working links to node (both active and standby)
- * @cleanup_required: non-zero if cleaning up after a prior loss of contact
* @link_cnt: number of links to node
- * @permit_changeover: non-zero if node has redundant links to this system
- * @routers: bitmap (used for multicluster communication)
- * @last_router: (used for multicluster communication)
- * @bclink: broadcast-related info
- * @supported: non-zero if node supports TIPC b'cast capability
- * @acked: sequence # of last outbound b'cast message acknowledged by node
- * @last_in: sequence # of last in-sequence b'cast message received from node
- * @gap_after: sequence # of last message not requiring a NAK request
- * @gap_to: sequence # of last message requiring a NAK request
- * @nack_sync: counter that determines when NAK requests should be sent
- * @deferred_head: oldest OOS b'cast message received from node
- * @deferred_tail: newest OOS b'cast message received from node
- * @defragm: list of partially reassembled b'cast message fragments from node
+ * @signature: node instance identifier
+ * @nsub: list of "node down" subscriptions monitoring node
+ * @rcu: rcu struct for tipc_node
*/
-
struct tipc_node {
u32 addr;
spinlock_t lock;
- struct cluster *owner;
- struct tipc_node *next;
- struct list_head nsub;
- struct link *active_links[2];
- struct link *links[MAX_BEARERS];
+ struct hlist_node hash;
+ struct tipc_link *active_links[2];
+ struct tipc_link *links[MAX_BEARERS];
+ unsigned int action_flags;
+ struct tipc_node_bclink bclink;
+ struct list_head list;
int link_cnt;
int working_links;
- int cleanup_required;
- int permit_changeover;
- u32 routers[512/32];
- int last_router;
- struct {
- int supported;
- u32 acked;
- u32 last_in;
- u32 gap_after;
- u32 gap_to;
- u32 nack_sync;
- struct sk_buff *deferred_head;
- struct sk_buff *deferred_tail;
- struct sk_buff *defragm;
- } bclink;
+ u32 signature;
+ struct list_head nsub;
+ struct rcu_head rcu;
};
-extern u32 tipc_own_tag;
+extern struct list_head tipc_node_list;
+struct tipc_node *tipc_node_find(u32 addr);
struct tipc_node *tipc_node_create(u32 addr);
-void tipc_node_delete(struct tipc_node *n_ptr);
-struct tipc_node *tipc_node_attach_link(struct link *l_ptr);
-void tipc_node_detach_link(struct tipc_node *n_ptr, struct link *l_ptr);
-void tipc_node_link_down(struct tipc_node *n_ptr, struct link *l_ptr);
-void tipc_node_link_up(struct tipc_node *n_ptr, struct link *l_ptr);
-int tipc_node_has_active_links(struct tipc_node *n_ptr);
-int tipc_node_has_redundant_links(struct tipc_node *n_ptr);
-u32 tipc_node_select_router(struct tipc_node *n_ptr, u32 ref);
-struct tipc_node *tipc_node_select_next_hop(u32 addr, u32 selector);
+void tipc_node_stop(void);
+void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
+void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
+void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
+void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
+int tipc_node_active_links(struct tipc_node *n_ptr);
int tipc_node_is_up(struct tipc_node *n_ptr);
-void tipc_node_add_router(struct tipc_node *n_ptr, u32 router);
-void tipc_node_remove_router(struct tipc_node *n_ptr, u32 router);
struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space);
struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space);
+int tipc_node_get_linkname(u32 bearer_id, u32 node, char *linkname, size_t len);
+void tipc_node_unlock(struct tipc_node *node);
-static inline struct tipc_node *tipc_node_find(u32 addr)
-{
- if (likely(in_own_cluster(addr)))
- return tipc_local_nodes[tipc_node(addr)];
- else if (tipc_addr_domain_valid(addr)) {
- struct cluster *c_ptr = tipc_cltr_find(addr);
-
- if (c_ptr)
- return c_ptr->nodes[tipc_node(addr)];
- }
- return NULL;
-}
-
-static inline struct tipc_node *tipc_node_select(u32 addr, u32 selector)
-{
- if (likely(in_own_cluster(addr)))
- return tipc_local_nodes[tipc_node(addr)];
- return tipc_node_select_next_hop(addr, selector);
-}
-
-static inline void tipc_node_lock(struct tipc_node *n_ptr)
+static inline void tipc_node_lock(struct tipc_node *node)
{
- spin_lock_bh(&n_ptr->lock);
+ spin_lock_bh(&node->lock);
}
-static inline void tipc_node_unlock(struct tipc_node *n_ptr)
+static inline bool tipc_node_blocked(struct tipc_node *node)
{
- spin_unlock_bh(&n_ptr->lock);
+ return (node->action_flags & (TIPC_WAIT_PEER_LINKS_DOWN |
+ TIPC_NOTIFY_NODE_DOWN | TIPC_WAIT_OWN_LINKS_DOWN));
}
#endif
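
Editorial note: the action_flags bit mask introduced in node.h above lets several pending actions (up/down notification, waiting for peer or own links to go down) be recorded while the node lock is held and acted on later. A tiny standalone illustration of the same flag handling (simplified names, not the kernel structures) follows:

#include <stdio.h>

/* Mirrors the shape of the node action flags: one bit per pending action. */
enum {
	WAIT_PEER_LINKS_DOWN	= (1 << 1),
	WAIT_OWN_LINKS_DOWN	= (1 << 2),
	NOTIFY_NODE_DOWN	= (1 << 3),
	NOTIFY_NODE_UP		= (1 << 4),
};

/* Analogous to tipc_node_blocked(): any of these bits blocks new contact. */
static int node_blocked(unsigned int flags)
{
	return flags & (WAIT_PEER_LINKS_DOWN | WAIT_OWN_LINKS_DOWN |
			NOTIFY_NODE_DOWN);
}

int main(void)
{
	unsigned int flags = 0;

	flags |= NOTIFY_NODE_DOWN | WAIT_PEER_LINKS_DOWN;	/* contact lost */
	printf("blocked: %d\n", node_blocked(flags));

	flags &= ~NOTIFY_NODE_DOWN;		/* notification delivered */
	flags &= ~WAIT_PEER_LINKS_DOWN;		/* peer links confirmed down */
	printf("blocked: %d\n", node_blocked(flags));
	return 0;
}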
diff --git a/net/tipc/node_subscr.c b/net/tipc/node_subscr.c
index 018a55332d9..7c59ab1d6ec 100644
--- a/net/tipc/node_subscr.c
+++ b/net/tipc/node_subscr.c
@@ -2,7 +2,7 @@
* net/tipc/node_subscr.c: TIPC "node down" subscription handling
*
* Copyright (c) 1995-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -41,18 +41,18 @@
/**
* tipc_nodesub_subscribe - create "node down" subscription for specified node
*/
-
void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr,
- void *usr_handle, net_ev_handler handle_down)
+ void *usr_handle, net_ev_handler handle_down)
{
- if (addr == tipc_own_addr) {
+ if (in_own_node(addr)) {
node_sub->node = NULL;
return;
}
node_sub->node = tipc_node_find(addr);
if (!node_sub->node) {
- warn("Node subscription rejected, unknown node 0x%x\n", addr);
+ pr_warn("Node subscription rejected, unknown node 0x%x\n",
+ addr);
return;
}
node_sub->handle_node_down = handle_down;
@@ -66,7 +66,6 @@ void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr,
/**
* tipc_nodesub_unsubscribe - cancel "node down" subscription (if any)
*/
-
void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub)
{
if (!node_sub->node)
@@ -76,3 +75,20 @@ void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub)
list_del_init(&node_sub->nodesub_list);
tipc_node_unlock(node_sub->node);
}
+
+/**
+ * tipc_nodesub_notify - notify subscribers that a node is unreachable
+ *
+ * Note: node is locked by caller
+ */
+void tipc_nodesub_notify(struct list_head *nsub_list)
+{
+ struct tipc_node_subscr *ns, *safe;
+
+ list_for_each_entry_safe(ns, safe, nsub_list, nodesub_list) {
+ if (ns->handle_node_down) {
+ ns->handle_node_down(ns->usr_handle);
+ ns->handle_node_down = NULL;
+ }
+ }
+}
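
Editorial note: tipc_node_unlock() (in node.c above) and the new tipc_nodesub_notify() cooperate: the subscription list is detached while the node lock is held, and the "node down" handlers run only after the lock is released, so a callback can never deadlock on the node lock. Below is a minimal standalone sketch of that "detach under lock, notify after unlock" pattern, using a plain array in place of the kernel list and a pthread mutex as the lock:

#include <stdio.h>
#include <pthread.h>

typedef void (*down_handler)(void *arg);

struct subscr {
	down_handler	handle_down;
	void		*arg;
};

static pthread_mutex_t node_lock = PTHREAD_MUTEX_INITIALIZER;
static struct subscr subs[2];

static void report_down(void *arg)
{
	printf("node down seen by %s\n", (const char *)arg);
}

static void node_lost_contact(void)
{
	struct subscr pending[2];
	int i;

	/* Detach the subscriptions while holding the lock... */
	pthread_mutex_lock(&node_lock);
	for (i = 0; i < 2; i++) {
		pending[i] = subs[i];
		subs[i].handle_down = NULL;	/* each handler fires once */
	}
	pthread_mutex_unlock(&node_lock);

	/* ...and invoke the handlers only after the lock is dropped. */
	for (i = 0; i < 2; i++)
		if (pending[i].handle_down)
			pending[i].handle_down(pending[i].arg);
}

int main(void)
{
	subs[0] = (struct subscr){ report_down, "port A" };
	subs[1] = (struct subscr){ report_down, "port B" };
	node_lost_contact();
	return 0;
}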
diff --git a/net/tipc/node_subscr.h b/net/tipc/node_subscr.h
index 006ed739f51..d91b8cc81e3 100644
--- a/net/tipc/node_subscr.h
+++ b/net/tipc/node_subscr.h
@@ -2,7 +2,7 @@
* net/tipc/node_subscr.h: Include file for TIPC "node down" subscription handling
*
* Copyright (c) 1995-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -48,7 +48,6 @@ typedef void (*net_ev_handler) (void *usr_handle);
* @usr_handle: argument to pass to routine when node fails
* @nodesub_list: adjacent entries in list of subscriptions for the node
*/
-
struct tipc_node_subscr {
struct tipc_node *node;
net_ev_handler handle_node_down;
@@ -59,5 +58,6 @@ struct tipc_node_subscr {
void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr,
void *usr_handle, net_ev_handler handle_down);
void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub);
+void tipc_nodesub_notify(struct list_head *nsub_list);
#endif
diff --git a/net/tipc/port.c b/net/tipc/port.c
index 7873283f496..5fd7acce01e 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -1,8 +1,8 @@
/*
* net/tipc/port.c: TIPC port code
*
- * Copyright (c) 1992-2007, Ericsson AB
- * Copyright (c) 2004-2008, Wind River Systems
+ * Copyright (c) 1992-2007, 2014, Ericsson AB
+ * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -38,7 +38,7 @@
#include "config.h"
#include "port.h"
#include "name_table.h"
-#include "user_reg.h"
+#include "socket.h"
/* Connection management: */
#define PROBING_INTERVAL 3600000 /* [ms] => 1 h */
@@ -47,101 +47,89 @@
#define MAX_REJECT_SIZE 1024
-static struct sk_buff *msg_queue_head = NULL;
-static struct sk_buff *msg_queue_tail = NULL;
-
DEFINE_SPINLOCK(tipc_port_list_lock);
-static DEFINE_SPINLOCK(queue_lock);
static LIST_HEAD(ports);
static void port_handle_node_down(unsigned long ref);
-static struct sk_buff* port_build_self_abort_msg(struct port *,u32 err);
-static struct sk_buff* port_build_peer_abort_msg(struct port *,u32 err);
+static struct sk_buff *port_build_self_abort_msg(struct tipc_port *, u32 err);
+static struct sk_buff *port_build_peer_abort_msg(struct tipc_port *, u32 err);
static void port_timeout(unsigned long ref);
-
-static u32 port_peernode(struct port *p_ptr)
-{
- return msg_destnode(&p_ptr->publ.phdr);
-}
-
-static u32 port_peerport(struct port *p_ptr)
-{
- return msg_destport(&p_ptr->publ.phdr);
-}
-
-static u32 port_out_seqno(struct port *p_ptr)
+/**
+ * tipc_port_peer_msg - verify message was sent by connected port's peer
+ *
+ * Handles cases where the node's network address has changed from
+ * the default of <0.0.0> to its configured setting.
+ */
+int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg)
{
- return msg_transp_seqno(&p_ptr->publ.phdr);
-}
+ u32 peernode;
+ u32 orignode;
-static void port_incr_out_seqno(struct port *p_ptr)
-{
- struct tipc_msg *m = &p_ptr->publ.phdr;
+ if (msg_origport(msg) != tipc_port_peerport(p_ptr))
+ return 0;
- if (likely(!msg_routed(m)))
- return;
- msg_set_transp_seqno(m, (msg_transp_seqno(m) + 1));
+ orignode = msg_orignode(msg);
+ peernode = tipc_port_peernode(p_ptr);
+ return (orignode == peernode) ||
+ (!orignode && (peernode == tipc_own_addr)) ||
+ (!peernode && (orignode == tipc_own_addr));
}
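
Editorial note: tipc_port_peer_msg() above accepts a message as coming from the connected peer even when one side still carries the pre-configuration address <0.0.0> (encoded as 0). A standalone restatement of that predicate (ordinary C, with an assumed own-address constant used only for the demo) makes the three accepted combinations explicit:

#include <stdio.h>
#include <stdint.h>

/* Assumed local node address for the demo; 0 stands for the unset <0.0.0>. */
static const uint32_t own_addr = 0x1001001;

/*
 * Accept when the addresses match exactly, or when exactly one side is
 * still 0 and the other side is this node's own address.
 */
static int peer_node_ok(uint32_t orignode, uint32_t peernode)
{
	return (orignode == peernode) ||
	       (!orignode && (peernode == own_addr)) ||
	       (!peernode && (orignode == own_addr));
}

int main(void)
{
	printf("%d\n", peer_node_ok(0x1001002, 0x1001002));	/* 1: exact match */
	printf("%d\n", peer_node_ok(0, own_addr));		/* 1: origin unset */
	printf("%d\n", peer_node_ok(own_addr, 0));		/* 1: peer unset */
	printf("%d\n", peer_node_ok(0x1001002, 0x1001003));	/* 0: mismatch */
	return 0;
}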
/**
- * tipc_multicast - send a multicast message to local and remote destinations
+ * tipc_port_mcast_xmit - send a multicast message to local and remote
+ * destinations
*/
-
-int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
- u32 num_sect, struct iovec const *msg_sect)
+int tipc_port_mcast_xmit(struct tipc_port *oport,
+ struct tipc_name_seq const *seq,
+ struct iovec const *msg_sect,
+ unsigned int len)
{
struct tipc_msg *hdr;
struct sk_buff *buf;
struct sk_buff *ibuf = NULL;
- struct port_list dports = {0, NULL, };
- struct port *oport = tipc_port_deref(ref);
+ struct tipc_port_list dports = {0, NULL, };
int ext_targets;
int res;
- if (unlikely(!oport))
- return -EINVAL;
-
/* Create multicast message */
-
- hdr = &oport->publ.phdr;
+ hdr = &oport->phdr;
msg_set_type(hdr, TIPC_MCAST_MSG);
+ msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
+ msg_set_destport(hdr, 0);
+ msg_set_destnode(hdr, 0);
msg_set_nametype(hdr, seq->type);
msg_set_namelower(hdr, seq->lower);
msg_set_nameupper(hdr, seq->upper);
msg_set_hdr_sz(hdr, MCAST_H_SIZE);
- res = tipc_msg_build(hdr, msg_sect, num_sect, MAX_MSG_SIZE,
- !oport->user_port, &buf);
+ res = tipc_msg_build(hdr, msg_sect, len, MAX_MSG_SIZE, &buf);
if (unlikely(!buf))
return res;
/* Figure out where to send multicast message */
-
ext_targets = tipc_nametbl_mc_translate(seq->type, seq->lower, seq->upper,
TIPC_NODE_SCOPE, &dports);
/* Send message to destinations (duplicate it only if necessary) */
-
if (ext_targets) {
if (dports.count != 0) {
ibuf = skb_copy(buf, GFP_ATOMIC);
if (ibuf == NULL) {
tipc_port_list_free(&dports);
- buf_discard(buf);
+ kfree_skb(buf);
return -ENOMEM;
}
}
- res = tipc_bclink_send_msg(buf);
- if ((res < 0) && (dports.count != 0)) {
- buf_discard(ibuf);
- }
+ res = tipc_bclink_xmit(buf);
+ if ((res < 0) && (dports.count != 0))
+ kfree_skb(ibuf);
} else {
ibuf = buf;
}
if (res >= 0) {
if (ibuf)
- tipc_port_recv_mcast(ibuf, &dports);
+ tipc_port_mcast_rcv(ibuf, &dports);
} else {
tipc_port_list_free(&dports);
}
@@ -149,22 +137,20 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
}
/**
- * tipc_port_recv_mcast - deliver multicast message to all destination ports
+ * tipc_port_mcast_rcv - deliver multicast message to all destination ports
*
* If there is no port list, perform a lookup to create one
*/
-
-void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp)
+void tipc_port_mcast_rcv(struct sk_buff *buf, struct tipc_port_list *dp)
{
- struct tipc_msg* msg;
- struct port_list dports = {0, NULL, };
- struct port_list *item = dp;
+ struct tipc_msg *msg;
+ struct tipc_port_list dports = {0, NULL, };
+ struct tipc_port_list *item = dp;
int cnt = 0;
msg = buf_msg(buf);
/* Create destination port list, if one wasn't supplied */
-
if (dp == NULL) {
tipc_nametbl_mc_translate(msg_nametype(msg),
msg_namelower(msg),
@@ -175,11 +161,11 @@ void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp)
}
/* Deliver a copy of message to each destination port */
-
if (dp->count != 0) {
+ msg_set_destnode(msg, tipc_own_addr);
if (dp->count == 1) {
msg_set_destport(msg, dp->ports[0]);
- tipc_port_recv_msg(buf);
+ tipc_sk_rcv(buf);
tipc_port_list_free(dp);
return;
}
@@ -188,187 +174,107 @@ void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp)
struct sk_buff *b = skb_clone(buf, GFP_ATOMIC);
if (b == NULL) {
- warn("Unable to deliver multicast message(s)\n");
- msg_dbg(msg, "LOST:");
+ pr_warn("Unable to deliver multicast message(s)\n");
goto exit;
}
- if ((index == 0) && (cnt != 0)) {
+ if ((index == 0) && (cnt != 0))
item = item->next;
- }
- msg_set_destport(buf_msg(b),item->ports[index]);
- tipc_port_recv_msg(b);
+ msg_set_destport(buf_msg(b), item->ports[index]);
+ tipc_sk_rcv(b);
}
}
exit:
- buf_discard(buf);
+ kfree_skb(buf);
tipc_port_list_free(dp);
}
-/**
- * tipc_createport_raw - create a generic TIPC port
+
+void tipc_port_wakeup(struct tipc_port *port)
+{
+ tipc_sock_wakeup(tipc_port_to_sock(port));
+}
+
+/* tipc_port_init - initiate TIPC port and lock it
*
- * Returns pointer to (locked) TIPC port, or NULL if unable to create it
+ * Returns obtained reference if initialization is successful, zero otherwise
*/
-
-struct tipc_port *tipc_createport_raw(void *usr_handle,
- u32 (*dispatcher)(struct tipc_port *, struct sk_buff *),
- void (*wakeup)(struct tipc_port *),
- const u32 importance)
+u32 tipc_port_init(struct tipc_port *p_ptr,
+ const unsigned int importance)
{
- struct port *p_ptr;
struct tipc_msg *msg;
u32 ref;
- p_ptr = kzalloc(sizeof(*p_ptr), GFP_ATOMIC);
- if (!p_ptr) {
- warn("Port creation failed, no memory\n");
- return NULL;
- }
- ref = tipc_ref_acquire(p_ptr, &p_ptr->publ.lock);
+ ref = tipc_ref_acquire(p_ptr, &p_ptr->lock);
if (!ref) {
- warn("Port creation failed, reference table exhausted\n");
- kfree(p_ptr);
- return NULL;
+ pr_warn("Port registration failed, ref. table exhausted\n");
+ return 0;
}
- p_ptr->publ.usr_handle = usr_handle;
- p_ptr->publ.max_pkt = MAX_PKT_DEFAULT;
- p_ptr->publ.ref = ref;
- msg = &p_ptr->publ.phdr;
- tipc_msg_init(msg, importance, TIPC_NAMED_MSG, LONG_H_SIZE, 0);
- msg_set_origport(msg, ref);
- p_ptr->last_in_seqno = 41;
- p_ptr->sent = 1;
+ p_ptr->max_pkt = MAX_PKT_DEFAULT;
+ p_ptr->ref = ref;
INIT_LIST_HEAD(&p_ptr->wait_list);
INIT_LIST_HEAD(&p_ptr->subscription.nodesub_list);
- p_ptr->dispatcher = dispatcher;
- p_ptr->wakeup = wakeup;
- p_ptr->user_port = NULL;
k_init_timer(&p_ptr->timer, (Handler)port_timeout, ref);
- spin_lock_bh(&tipc_port_list_lock);
INIT_LIST_HEAD(&p_ptr->publications);
INIT_LIST_HEAD(&p_ptr->port_list);
+
+ /*
+ * Must hold port list lock while initializing message header template
+ * to ensure a change to node's own network address doesn't result
+ * in template containing out-dated network address information
+ */
+ spin_lock_bh(&tipc_port_list_lock);
+ msg = &p_ptr->phdr;
+ tipc_msg_init(msg, importance, TIPC_NAMED_MSG, NAMED_H_SIZE, 0);
+ msg_set_origport(msg, ref);
list_add_tail(&p_ptr->port_list, &ports);
spin_unlock_bh(&tipc_port_list_lock);
- return &(p_ptr->publ);
+ return ref;
}
-int tipc_deleteport(u32 ref)
+void tipc_port_destroy(struct tipc_port *p_ptr)
{
- struct port *p_ptr;
struct sk_buff *buf = NULL;
- tipc_withdraw(ref, 0, NULL);
- p_ptr = tipc_port_lock(ref);
- if (!p_ptr)
- return -EINVAL;
+ tipc_withdraw(p_ptr, 0, NULL);
- tipc_ref_discard(ref);
- tipc_port_unlock(p_ptr);
+ spin_lock_bh(p_ptr->lock);
+ tipc_ref_discard(p_ptr->ref);
+ spin_unlock_bh(p_ptr->lock);
k_cancel_timer(&p_ptr->timer);
- if (p_ptr->publ.connected) {
+ if (p_ptr->connected) {
buf = port_build_peer_abort_msg(p_ptr, TIPC_ERR_NO_PORT);
tipc_nodesub_unsubscribe(&p_ptr->subscription);
}
- if (p_ptr->user_port) {
- tipc_reg_remove_port(p_ptr->user_port);
- kfree(p_ptr->user_port);
- }
spin_lock_bh(&tipc_port_list_lock);
list_del(&p_ptr->port_list);
list_del(&p_ptr->wait_list);
spin_unlock_bh(&tipc_port_list_lock);
k_term_timer(&p_ptr->timer);
- kfree(p_ptr);
- dbg("Deleted port %u\n", ref);
tipc_net_route_msg(buf);
- return 0;
-}
-
-static int port_unreliable(struct port *p_ptr)
-{
- return msg_src_droppable(&p_ptr->publ.phdr);
-}
-
-int tipc_portunreliable(u32 ref, unsigned int *isunreliable)
-{
- struct port *p_ptr;
-
- p_ptr = tipc_port_lock(ref);
- if (!p_ptr)
- return -EINVAL;
- *isunreliable = port_unreliable(p_ptr);
- tipc_port_unlock(p_ptr);
- return 0;
-}
-
-int tipc_set_portunreliable(u32 ref, unsigned int isunreliable)
-{
- struct port *p_ptr;
-
- p_ptr = tipc_port_lock(ref);
- if (!p_ptr)
- return -EINVAL;
- msg_set_src_droppable(&p_ptr->publ.phdr, (isunreliable != 0));
- tipc_port_unlock(p_ptr);
- return 0;
-}
-
-static int port_unreturnable(struct port *p_ptr)
-{
- return msg_dest_droppable(&p_ptr->publ.phdr);
-}
-
-int tipc_portunreturnable(u32 ref, unsigned int *isunrejectable)
-{
- struct port *p_ptr;
-
- p_ptr = tipc_port_lock(ref);
- if (!p_ptr)
- return -EINVAL;
- *isunrejectable = port_unreturnable(p_ptr);
- tipc_port_unlock(p_ptr);
- return 0;
-}
-
-int tipc_set_portunreturnable(u32 ref, unsigned int isunrejectable)
-{
- struct port *p_ptr;
-
- p_ptr = tipc_port_lock(ref);
- if (!p_ptr)
- return -EINVAL;
- msg_set_dest_droppable(&p_ptr->publ.phdr, (isunrejectable != 0));
- tipc_port_unlock(p_ptr);
- return 0;
}
/*
- * port_build_proto_msg(): build a port level protocol
- * or a connection abortion message. Called with
- * tipc_port lock on.
+ * port_build_proto_msg(): create connection protocol message for port
+ *
+ * On entry the port must be locked and connected.
*/
-static struct sk_buff *port_build_proto_msg(u32 destport, u32 destnode,
- u32 origport, u32 orignode,
- u32 usr, u32 type, u32 err,
- u32 seqno, u32 ack)
+static struct sk_buff *port_build_proto_msg(struct tipc_port *p_ptr,
+ u32 type, u32 ack)
{
struct sk_buff *buf;
struct tipc_msg *msg;
- buf = tipc_buf_acquire(LONG_H_SIZE);
+ buf = tipc_buf_acquire(INT_H_SIZE);
if (buf) {
msg = buf_msg(buf);
- tipc_msg_init(msg, usr, type, LONG_H_SIZE, destnode);
- msg_set_errcode(msg, err);
- msg_set_destport(msg, destport);
- msg_set_origport(msg, origport);
- msg_set_orignode(msg, orignode);
- msg_set_transp_seqno(msg, seqno);
+ tipc_msg_init(msg, CONN_MANAGER, type, INT_H_SIZE,
+ tipc_port_peernode(p_ptr));
+ msg_set_destport(msg, tipc_port_peerport(p_ptr));
+ msg_set_origport(msg, p_ptr->ref);
msg_set_msgcnt(msg, ack);
- msg_dbg(msg, "PORT>SEND>:");
}
return buf;
}
@@ -379,75 +285,80 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
struct sk_buff *rbuf;
struct tipc_msg *rmsg;
int hdr_sz;
- u32 imp = msg_importance(msg);
+ u32 imp;
u32 data_sz = msg_data_sz(msg);
-
- if (data_sz > MAX_REJECT_SIZE)
- data_sz = MAX_REJECT_SIZE;
- if (msg_connected(msg) && (imp < TIPC_CRITICAL_IMPORTANCE))
- imp++;
- msg_dbg(msg, "port->rej: ");
+ u32 src_node;
+ u32 rmsg_sz;
/* discard rejected message if it shouldn't be returned to sender */
- if (msg_errcode(msg) || msg_dest_droppable(msg)) {
- buf_discard(buf);
- return data_sz;
+ if (WARN(!msg_isdata(msg),
+ "attempt to reject message with user=%u", msg_user(msg))) {
+ dump_stack();
+ goto exit;
}
+ if (msg_errcode(msg) || msg_dest_droppable(msg))
+ goto exit;
+
+ /*
+ * construct returned message by copying rejected message header and
+ * data (or subset), then updating header fields that need adjusting
+ */
+ hdr_sz = msg_hdr_sz(msg);
+ rmsg_sz = hdr_sz + min_t(u32, data_sz, MAX_REJECT_SIZE);
+
+ rbuf = tipc_buf_acquire(rmsg_sz);
+ if (rbuf == NULL)
+ goto exit;
- /* construct rejected message */
- if (msg_mcast(msg))
- hdr_sz = MCAST_H_SIZE;
- else
- hdr_sz = LONG_H_SIZE;
- rbuf = tipc_buf_acquire(data_sz + hdr_sz);
- if (rbuf == NULL) {
- buf_discard(buf);
- return data_sz;
- }
rmsg = buf_msg(rbuf);
- tipc_msg_init(rmsg, imp, msg_type(msg), hdr_sz, msg_orignode(msg));
- msg_set_errcode(rmsg, err);
- msg_set_destport(rmsg, msg_origport(msg));
- msg_set_origport(rmsg, msg_destport(msg));
- if (msg_short(msg)) {
- msg_set_orignode(rmsg, tipc_own_addr);
- /* leave name type & instance as zeroes */
- } else {
- msg_set_orignode(rmsg, msg_destnode(msg));
- msg_set_nametype(rmsg, msg_nametype(msg));
- msg_set_nameinst(rmsg, msg_nameinst(msg));
+ skb_copy_to_linear_data(rbuf, msg, rmsg_sz);
+
+ if (msg_connected(rmsg)) {
+ imp = msg_importance(rmsg);
+ if (imp < TIPC_CRITICAL_IMPORTANCE)
+ msg_set_importance(rmsg, ++imp);
}
- msg_set_size(rmsg, data_sz + hdr_sz);
- skb_copy_to_linear_data_offset(rbuf, hdr_sz, msg_data(msg), data_sz);
+ msg_set_non_seq(rmsg, 0);
+ msg_set_size(rmsg, rmsg_sz);
+ msg_set_errcode(rmsg, err);
+ msg_set_prevnode(rmsg, tipc_own_addr);
+ msg_swap_words(rmsg, 4, 5);
+ if (!msg_short(rmsg))
+ msg_swap_words(rmsg, 6, 7);
/* send self-abort message when rejecting on a connected port */
if (msg_connected(msg)) {
- struct sk_buff *abuf = NULL;
- struct port *p_ptr = tipc_port_lock(msg_destport(msg));
+ struct tipc_port *p_ptr = tipc_port_lock(msg_destport(msg));
if (p_ptr) {
- if (p_ptr->publ.connected)
+ struct sk_buff *abuf = NULL;
+
+ if (p_ptr->connected)
abuf = port_build_self_abort_msg(p_ptr, err);
tipc_port_unlock(p_ptr);
+ tipc_net_route_msg(abuf);
}
- tipc_net_route_msg(abuf);
}
- /* send rejected message */
- buf_discard(buf);
- tipc_net_route_msg(rbuf);
+ /* send returned message & dispose of rejected message */
+ src_node = msg_prevnode(msg);
+ if (in_own_node(src_node))
+ tipc_sk_rcv(rbuf);
+ else
+ tipc_link_xmit(rbuf, src_node, msg_link_selector(rmsg));
+exit:
+ kfree_skb(buf);
return data_sz;
}
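
Editorial note: the reworked tipc_reject_msg() above turns a message around by copying its header and (truncated) payload, then swapping header words 4/5 and 6/7, the word pairs carrying the origin/destination port and node fields. A standalone sketch of that word-swap idea, using a simplified four-word toy header rather than the real TIPC layout:

#include <stdio.h>
#include <stdint.h>

/* Toy header: word 0 = origin port, 1 = dest port, 2 = origin node, 3 = dest node. */
struct toy_hdr {
	uint32_t w[4];
};

static void swap_words(struct toy_hdr *h, int a, int b)
{
	uint32_t tmp = h->w[a];

	h->w[a] = h->w[b];
	h->w[b] = tmp;
}

/* Turn a message around: what was the origin becomes the destination. */
static void turn_around(struct toy_hdr *h)
{
	swap_words(h, 0, 1);	/* ports */
	swap_words(h, 2, 3);	/* nodes */
}

int main(void)
{
	struct toy_hdr h = { { 100, 200, 0x1001001, 0x1001002 } };

	turn_around(&h);
	printf("dest port %u on node 0x%x\n",
	       (unsigned int)h.w[1], (unsigned int)h.w[3]);
	return 0;
}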
-int tipc_port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
- struct iovec const *msg_sect, u32 num_sect,
- int err)
+int tipc_port_iovec_reject(struct tipc_port *p_ptr, struct tipc_msg *hdr,
+ struct iovec const *msg_sect, unsigned int len,
+ int err)
{
struct sk_buff *buf;
int res;
- res = tipc_msg_build(hdr, msg_sect, num_sect, MAX_MSG_SIZE,
- !p_ptr->user_port, &buf);
+ res = tipc_msg_build(hdr, msg_sect, len, MAX_MSG_SIZE, &buf);
if (!buf)
return res;
@@ -456,13 +367,13 @@ int tipc_port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
static void port_timeout(unsigned long ref)
{
- struct port *p_ptr = tipc_port_lock(ref);
+ struct tipc_port *p_ptr = tipc_port_lock(ref);
struct sk_buff *buf = NULL;
if (!p_ptr)
return;
- if (!p_ptr->publ.connected) {
+ if (!p_ptr->connected) {
tipc_port_unlock(p_ptr);
return;
}
@@ -471,16 +382,7 @@ static void port_timeout(unsigned long ref)
if (p_ptr->probing_state == PROBING) {
buf = port_build_self_abort_msg(p_ptr, TIPC_ERR_NO_PORT);
} else {
- buf = port_build_proto_msg(port_peerport(p_ptr),
- port_peernode(p_ptr),
- p_ptr->publ.ref,
- tipc_own_addr,
- CONN_MANAGER,
- CONN_PROBE,
- TIPC_OK,
- port_out_seqno(p_ptr),
- 0);
- port_incr_out_seqno(p_ptr);
+ buf = port_build_proto_msg(p_ptr, CONN_PROBE, 0);
p_ptr->probing_state = PROBING;
k_start_timer(&p_ptr->timer, p_ptr->probing_interval);
}
@@ -491,8 +393,8 @@ static void port_timeout(unsigned long ref)
static void port_handle_node_down(unsigned long ref)
{
- struct port *p_ptr = tipc_port_lock(ref);
- struct sk_buff* buf = NULL;
+ struct tipc_port *p_ptr = tipc_port_lock(ref);
+ struct sk_buff *buf = NULL;
if (!p_ptr)
return;
@@ -502,174 +404,159 @@ static void port_handle_node_down(unsigned long ref)
}
-static struct sk_buff *port_build_self_abort_msg(struct port *p_ptr, u32 err)
+static struct sk_buff *port_build_self_abort_msg(struct tipc_port *p_ptr, u32 err)
{
- u32 imp = msg_importance(&p_ptr->publ.phdr);
+ struct sk_buff *buf = port_build_peer_abort_msg(p_ptr, err);
- if (!p_ptr->publ.connected)
- return NULL;
- if (imp < TIPC_CRITICAL_IMPORTANCE)
- imp++;
- return port_build_proto_msg(p_ptr->publ.ref,
- tipc_own_addr,
- port_peerport(p_ptr),
- port_peernode(p_ptr),
- imp,
- TIPC_CONN_MSG,
- err,
- p_ptr->last_in_seqno + 1,
- 0);
+ if (buf) {
+ struct tipc_msg *msg = buf_msg(buf);
+ msg_swap_words(msg, 4, 5);
+ msg_swap_words(msg, 6, 7);
+ }
+ return buf;
}
-static struct sk_buff *port_build_peer_abort_msg(struct port *p_ptr, u32 err)
+static struct sk_buff *port_build_peer_abort_msg(struct tipc_port *p_ptr, u32 err)
{
- u32 imp = msg_importance(&p_ptr->publ.phdr);
+ struct sk_buff *buf;
+ struct tipc_msg *msg;
+ u32 imp;
- if (!p_ptr->publ.connected)
+ if (!p_ptr->connected)
return NULL;
- if (imp < TIPC_CRITICAL_IMPORTANCE)
- imp++;
- return port_build_proto_msg(port_peerport(p_ptr),
- port_peernode(p_ptr),
- p_ptr->publ.ref,
- tipc_own_addr,
- imp,
- TIPC_CONN_MSG,
- err,
- port_out_seqno(p_ptr),
- 0);
+
+ buf = tipc_buf_acquire(BASIC_H_SIZE);
+ if (buf) {
+ msg = buf_msg(buf);
+ memcpy(msg, &p_ptr->phdr, BASIC_H_SIZE);
+ msg_set_hdr_sz(msg, BASIC_H_SIZE);
+ msg_set_size(msg, BASIC_H_SIZE);
+ imp = msg_importance(msg);
+ if (imp < TIPC_CRITICAL_IMPORTANCE)
+ msg_set_importance(msg, ++imp);
+ msg_set_errcode(msg, err);
+ }
+ return buf;
}
-void tipc_port_recv_proto_msg(struct sk_buff *buf)
+void tipc_port_proto_rcv(struct sk_buff *buf)
{
struct tipc_msg *msg = buf_msg(buf);
- struct port *p_ptr = tipc_port_lock(msg_destport(msg));
- u32 err = TIPC_OK;
+ struct tipc_port *p_ptr;
struct sk_buff *r_buf = NULL;
- struct sk_buff *abort_buf = NULL;
-
- msg_dbg(msg, "PORT<RECV<:");
-
- if (!p_ptr) {
- err = TIPC_ERR_NO_PORT;
- } else if (p_ptr->publ.connected) {
- if ((port_peernode(p_ptr) != msg_orignode(msg)) ||
- (port_peerport(p_ptr) != msg_origport(msg))) {
- err = TIPC_ERR_NO_PORT;
- } else if (msg_type(msg) == CONN_ACK) {
- int wakeup = tipc_port_congested(p_ptr) &&
- p_ptr->publ.congested &&
- p_ptr->wakeup;
- p_ptr->acked += msg_msgcnt(msg);
- if (tipc_port_congested(p_ptr))
- goto exit;
- p_ptr->publ.congested = 0;
- if (!wakeup)
- goto exit;
- p_ptr->wakeup(&p_ptr->publ);
- goto exit;
+ u32 destport = msg_destport(msg);
+ int wakeable;
+
+ /* Validate connection */
+ p_ptr = tipc_port_lock(destport);
+ if (!p_ptr || !p_ptr->connected || !tipc_port_peer_msg(p_ptr, msg)) {
+ r_buf = tipc_buf_acquire(BASIC_H_SIZE);
+ if (r_buf) {
+ msg = buf_msg(r_buf);
+ tipc_msg_init(msg, TIPC_HIGH_IMPORTANCE, TIPC_CONN_MSG,
+ BASIC_H_SIZE, msg_orignode(msg));
+ msg_set_errcode(msg, TIPC_ERR_NO_PORT);
+ msg_set_origport(msg, destport);
+ msg_set_destport(msg, msg_origport(msg));
}
- } else if (p_ptr->publ.published) {
- err = TIPC_ERR_NO_PORT;
- }
- if (err) {
- r_buf = port_build_proto_msg(msg_origport(msg),
- msg_orignode(msg),
- msg_destport(msg),
- tipc_own_addr,
- TIPC_HIGH_IMPORTANCE,
- TIPC_CONN_MSG,
- err,
- 0,
- 0);
+ if (p_ptr)
+ tipc_port_unlock(p_ptr);
goto exit;
}
- /* All is fine */
- if (msg_type(msg) == CONN_PROBE) {
- r_buf = port_build_proto_msg(msg_origport(msg),
- msg_orignode(msg),
- msg_destport(msg),
- tipc_own_addr,
- CONN_MANAGER,
- CONN_PROBE_REPLY,
- TIPC_OK,
- port_out_seqno(p_ptr),
- 0);
+ /* Process protocol message sent by peer */
+ switch (msg_type(msg)) {
+ case CONN_ACK:
+ wakeable = tipc_port_congested(p_ptr) && p_ptr->congested;
+ p_ptr->acked += msg_msgcnt(msg);
+ if (!tipc_port_congested(p_ptr)) {
+ p_ptr->congested = 0;
+ if (wakeable)
+ tipc_port_wakeup(p_ptr);
+ }
+ break;
+ case CONN_PROBE:
+ r_buf = port_build_proto_msg(p_ptr, CONN_PROBE_REPLY, 0);
+ break;
+ default:
+ /* CONN_PROBE_REPLY or unrecognized - no action required */
+ break;
}
p_ptr->probing_state = CONFIRMED;
- port_incr_out_seqno(p_ptr);
+ tipc_port_unlock(p_ptr);
exit:
- if (p_ptr)
- tipc_port_unlock(p_ptr);
tipc_net_route_msg(r_buf);
- tipc_net_route_msg(abort_buf);
- buf_discard(buf);
+ kfree_skb(buf);
}
-static void port_print(struct port *p_ptr, struct print_buf *buf, int full_id)
+static int port_print(struct tipc_port *p_ptr, char *buf, int len, int full_id)
{
struct publication *publ;
+ int ret;
if (full_id)
- tipc_printf(buf, "<%u.%u.%u:%u>:",
- tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
- tipc_node(tipc_own_addr), p_ptr->publ.ref);
+ ret = tipc_snprintf(buf, len, "<%u.%u.%u:%u>:",
+ tipc_zone(tipc_own_addr),
+ tipc_cluster(tipc_own_addr),
+ tipc_node(tipc_own_addr), p_ptr->ref);
else
- tipc_printf(buf, "%-10u:", p_ptr->publ.ref);
-
- if (p_ptr->publ.connected) {
- u32 dport = port_peerport(p_ptr);
- u32 destnode = port_peernode(p_ptr);
-
- tipc_printf(buf, " connected to <%u.%u.%u:%u>",
- tipc_zone(destnode), tipc_cluster(destnode),
- tipc_node(destnode), dport);
- if (p_ptr->publ.conn_type != 0)
- tipc_printf(buf, " via {%u,%u}",
- p_ptr->publ.conn_type,
- p_ptr->publ.conn_instance);
- }
- else if (p_ptr->publ.published) {
- tipc_printf(buf, " bound to");
+ ret = tipc_snprintf(buf, len, "%-10u:", p_ptr->ref);
+
+ if (p_ptr->connected) {
+ u32 dport = tipc_port_peerport(p_ptr);
+ u32 destnode = tipc_port_peernode(p_ptr);
+
+ ret += tipc_snprintf(buf + ret, len - ret,
+ " connected to <%u.%u.%u:%u>",
+ tipc_zone(destnode),
+ tipc_cluster(destnode),
+ tipc_node(destnode), dport);
+ if (p_ptr->conn_type != 0)
+ ret += tipc_snprintf(buf + ret, len - ret,
+ " via {%u,%u}", p_ptr->conn_type,
+ p_ptr->conn_instance);
+ } else if (p_ptr->published) {
+ ret += tipc_snprintf(buf + ret, len - ret, " bound to");
list_for_each_entry(publ, &p_ptr->publications, pport_list) {
if (publ->lower == publ->upper)
- tipc_printf(buf, " {%u,%u}", publ->type,
- publ->lower);
+ ret += tipc_snprintf(buf + ret, len - ret,
+ " {%u,%u}", publ->type,
+ publ->lower);
else
- tipc_printf(buf, " {%u,%u,%u}", publ->type,
- publ->lower, publ->upper);
+ ret += tipc_snprintf(buf + ret, len - ret,
+ " {%u,%u,%u}", publ->type,
+ publ->lower, publ->upper);
}
}
- tipc_printf(buf, "\n");
+ ret += tipc_snprintf(buf + ret, len - ret, "\n");
+ return ret;
}
-#define MAX_PORT_QUERY 32768
-
struct sk_buff *tipc_port_get_ports(void)
{
struct sk_buff *buf;
struct tlv_desc *rep_tlv;
- struct print_buf pb;
- struct port *p_ptr;
- int str_len;
+ char *pb;
+ int pb_len;
+ struct tipc_port *p_ptr;
+ int str_len = 0;
- buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_PORT_QUERY));
+ buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
if (!buf)
return NULL;
rep_tlv = (struct tlv_desc *)buf->data;
+ pb = TLV_DATA(rep_tlv);
+ pb_len = ULTRA_STRING_MAX_LEN;
- tipc_printbuf_init(&pb, TLV_DATA(rep_tlv), MAX_PORT_QUERY);
spin_lock_bh(&tipc_port_list_lock);
list_for_each_entry(p_ptr, &ports, port_list) {
- spin_lock_bh(p_ptr->publ.lock);
- port_print(p_ptr, &pb, 0);
- spin_unlock_bh(p_ptr->publ.lock);
+ spin_lock_bh(p_ptr->lock);
+ str_len += port_print(p_ptr, pb, pb_len, 0);
+ spin_unlock_bh(p_ptr->lock);
}
spin_unlock_bh(&tipc_port_list_lock);
- str_len = tipc_printbuf_validate(&pb);
-
+ str_len += 1; /* for "\0" */
skb_put(buf, TLV_SPACE(str_len));
TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
@@ -678,387 +565,64 @@ struct sk_buff *tipc_port_get_ports(void)
void tipc_port_reinit(void)
{
- struct port *p_ptr;
+ struct tipc_port *p_ptr;
struct tipc_msg *msg;
spin_lock_bh(&tipc_port_list_lock);
list_for_each_entry(p_ptr, &ports, port_list) {
- msg = &p_ptr->publ.phdr;
- if (msg_orignode(msg) == tipc_own_addr)
- break;
+ msg = &p_ptr->phdr;
msg_set_prevnode(msg, tipc_own_addr);
msg_set_orignode(msg, tipc_own_addr);
}
spin_unlock_bh(&tipc_port_list_lock);
}
-
-/*
- * port_dispatcher_sigh(): Signal handler for messages destinated
- * to the tipc_port interface.
- */
-
-static void port_dispatcher_sigh(void *dummy)
-{
- struct sk_buff *buf;
-
- spin_lock_bh(&queue_lock);
- buf = msg_queue_head;
- msg_queue_head = NULL;
- spin_unlock_bh(&queue_lock);
-
- while (buf) {
- struct port *p_ptr;
- struct user_port *up_ptr;
- struct tipc_portid orig;
- struct tipc_name_seq dseq;
- void *usr_handle;
- int connected;
- int published;
- u32 message_type;
-
- struct sk_buff *next = buf->next;
- struct tipc_msg *msg = buf_msg(buf);
- u32 dref = msg_destport(msg);
-
- message_type = msg_type(msg);
- if (message_type > TIPC_DIRECT_MSG)
- goto reject; /* Unsupported message type */
-
- p_ptr = tipc_port_lock(dref);
- if (!p_ptr)
- goto reject; /* Port deleted while msg in queue */
-
- orig.ref = msg_origport(msg);
- orig.node = msg_orignode(msg);
- up_ptr = p_ptr->user_port;
- usr_handle = up_ptr->usr_handle;
- connected = p_ptr->publ.connected;
- published = p_ptr->publ.published;
-
- if (unlikely(msg_errcode(msg)))
- goto err;
-
- switch (message_type) {
-
- case TIPC_CONN_MSG:{
- tipc_conn_msg_event cb = up_ptr->conn_msg_cb;
- u32 peer_port = port_peerport(p_ptr);
- u32 peer_node = port_peernode(p_ptr);
-
- tipc_port_unlock(p_ptr);
- if (unlikely(!cb))
- goto reject;
- if (unlikely(!connected)) {
- if (tipc_connect2port(dref, &orig))
- goto reject;
- } else if ((msg_origport(msg) != peer_port) ||
- (msg_orignode(msg) != peer_node))
- goto reject;
- if (unlikely(++p_ptr->publ.conn_unacked >=
- TIPC_FLOW_CONTROL_WIN))
- tipc_acknowledge(dref,
- p_ptr->publ.conn_unacked);
- skb_pull(buf, msg_hdr_sz(msg));
- cb(usr_handle, dref, &buf, msg_data(msg),
- msg_data_sz(msg));
- break;
- }
- case TIPC_DIRECT_MSG:{
- tipc_msg_event cb = up_ptr->msg_cb;
-
- tipc_port_unlock(p_ptr);
- if (unlikely(!cb || connected))
- goto reject;
- skb_pull(buf, msg_hdr_sz(msg));
- cb(usr_handle, dref, &buf, msg_data(msg),
- msg_data_sz(msg), msg_importance(msg),
- &orig);
- break;
- }
- case TIPC_MCAST_MSG:
- case TIPC_NAMED_MSG:{
- tipc_named_msg_event cb = up_ptr->named_msg_cb;
-
- tipc_port_unlock(p_ptr);
- if (unlikely(!cb || connected || !published))
- goto reject;
- dseq.type = msg_nametype(msg);
- dseq.lower = msg_nameinst(msg);
- dseq.upper = (message_type == TIPC_NAMED_MSG)
- ? dseq.lower : msg_nameupper(msg);
- skb_pull(buf, msg_hdr_sz(msg));
- cb(usr_handle, dref, &buf, msg_data(msg),
- msg_data_sz(msg), msg_importance(msg),
- &orig, &dseq);
- break;
- }
- }
- if (buf)
- buf_discard(buf);
- buf = next;
- continue;
-err:
- switch (message_type) {
-
- case TIPC_CONN_MSG:{
- tipc_conn_shutdown_event cb =
- up_ptr->conn_err_cb;
- u32 peer_port = port_peerport(p_ptr);
- u32 peer_node = port_peernode(p_ptr);
-
- tipc_port_unlock(p_ptr);
- if (!cb || !connected)
- break;
- if ((msg_origport(msg) != peer_port) ||
- (msg_orignode(msg) != peer_node))
- break;
- tipc_disconnect(dref);
- skb_pull(buf, msg_hdr_sz(msg));
- cb(usr_handle, dref, &buf, msg_data(msg),
- msg_data_sz(msg), msg_errcode(msg));
- break;
- }
- case TIPC_DIRECT_MSG:{
- tipc_msg_err_event cb = up_ptr->err_cb;
-
- tipc_port_unlock(p_ptr);
- if (!cb || connected)
- break;
- skb_pull(buf, msg_hdr_sz(msg));
- cb(usr_handle, dref, &buf, msg_data(msg),
- msg_data_sz(msg), msg_errcode(msg), &orig);
- break;
- }
- case TIPC_MCAST_MSG:
- case TIPC_NAMED_MSG:{
- tipc_named_msg_err_event cb =
- up_ptr->named_err_cb;
-
- tipc_port_unlock(p_ptr);
- if (!cb || connected)
- break;
- dseq.type = msg_nametype(msg);
- dseq.lower = msg_nameinst(msg);
- dseq.upper = (message_type == TIPC_NAMED_MSG)
- ? dseq.lower : msg_nameupper(msg);
- skb_pull(buf, msg_hdr_sz(msg));
- cb(usr_handle, dref, &buf, msg_data(msg),
- msg_data_sz(msg), msg_errcode(msg), &dseq);
- break;
- }
- }
- if (buf)
- buf_discard(buf);
- buf = next;
- continue;
-reject:
- tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
- buf = next;
- }
-}
-
-/*
- * port_dispatcher(): Dispatcher for messages destinated
- * to the tipc_port interface. Called with port locked.
- */
-
-static u32 port_dispatcher(struct tipc_port *dummy, struct sk_buff *buf)
-{
- buf->next = NULL;
- spin_lock_bh(&queue_lock);
- if (msg_queue_head) {
- msg_queue_tail->next = buf;
- msg_queue_tail = buf;
- } else {
- msg_queue_tail = msg_queue_head = buf;
- tipc_k_signal((Handler)port_dispatcher_sigh, 0);
- }
- spin_unlock_bh(&queue_lock);
- return 0;
-}
-
-/*
- * Wake up port after congestion: Called with port locked,
- *
- */
-
-static void port_wakeup_sh(unsigned long ref)
-{
- struct port *p_ptr;
- struct user_port *up_ptr;
- tipc_continue_event cb = NULL;
- void *uh = NULL;
-
- p_ptr = tipc_port_lock(ref);
- if (p_ptr) {
- up_ptr = p_ptr->user_port;
- if (up_ptr) {
- cb = up_ptr->continue_event_cb;
- uh = up_ptr->usr_handle;
- }
- tipc_port_unlock(p_ptr);
- }
- if (cb)
- cb(uh, ref);
-}
-
-
-static void port_wakeup(struct tipc_port *p_ptr)
-{
- tipc_k_signal((Handler)port_wakeup_sh, p_ptr->ref);
-}
-
void tipc_acknowledge(u32 ref, u32 ack)
{
- struct port *p_ptr;
+ struct tipc_port *p_ptr;
struct sk_buff *buf = NULL;
p_ptr = tipc_port_lock(ref);
if (!p_ptr)
return;
- if (p_ptr->publ.connected) {
- p_ptr->publ.conn_unacked -= ack;
- buf = port_build_proto_msg(port_peerport(p_ptr),
- port_peernode(p_ptr),
- ref,
- tipc_own_addr,
- CONN_MANAGER,
- CONN_ACK,
- TIPC_OK,
- port_out_seqno(p_ptr),
- ack);
+ if (p_ptr->connected) {
+ p_ptr->conn_unacked -= ack;
+ buf = port_build_proto_msg(p_ptr, CONN_ACK, ack);
}
tipc_port_unlock(p_ptr);
tipc_net_route_msg(buf);
}
-/*
- * tipc_createport(): user level call. Will add port to
- * registry if non-zero user_ref.
- */
-
-int tipc_createport(u32 user_ref,
- void *usr_handle,
- unsigned int importance,
- tipc_msg_err_event error_cb,
- tipc_named_msg_err_event named_error_cb,
- tipc_conn_shutdown_event conn_error_cb,
- tipc_msg_event msg_cb,
- tipc_named_msg_event named_msg_cb,
- tipc_conn_msg_event conn_msg_cb,
- tipc_continue_event continue_event_cb,/* May be zero */
- u32 *portref)
-{
- struct user_port *up_ptr;
- struct port *p_ptr;
-
- up_ptr = kmalloc(sizeof(*up_ptr), GFP_ATOMIC);
- if (!up_ptr) {
- warn("Port creation failed, no memory\n");
- return -ENOMEM;
- }
- p_ptr = (struct port *)tipc_createport_raw(NULL, port_dispatcher,
- port_wakeup, importance);
- if (!p_ptr) {
- kfree(up_ptr);
- return -ENOMEM;
- }
-
- p_ptr->user_port = up_ptr;
- up_ptr->user_ref = user_ref;
- up_ptr->usr_handle = usr_handle;
- up_ptr->ref = p_ptr->publ.ref;
- up_ptr->err_cb = error_cb;
- up_ptr->named_err_cb = named_error_cb;
- up_ptr->conn_err_cb = conn_error_cb;
- up_ptr->msg_cb = msg_cb;
- up_ptr->named_msg_cb = named_msg_cb;
- up_ptr->conn_msg_cb = conn_msg_cb;
- up_ptr->continue_event_cb = continue_event_cb;
- INIT_LIST_HEAD(&up_ptr->uport_list);
- tipc_reg_add_port(up_ptr);
- *portref = p_ptr->publ.ref;
- tipc_port_unlock(p_ptr);
- return 0;
-}
-
-int tipc_portimportance(u32 ref, unsigned int *importance)
+int tipc_publish(struct tipc_port *p_ptr, unsigned int scope,
+ struct tipc_name_seq const *seq)
{
- struct port *p_ptr;
-
- p_ptr = tipc_port_lock(ref);
- if (!p_ptr)
- return -EINVAL;
- *importance = (unsigned int)msg_importance(&p_ptr->publ.phdr);
- tipc_port_unlock(p_ptr);
- return 0;
-}
-
-int tipc_set_portimportance(u32 ref, unsigned int imp)
-{
- struct port *p_ptr;
-
- if (imp > TIPC_CRITICAL_IMPORTANCE)
- return -EINVAL;
-
- p_ptr = tipc_port_lock(ref);
- if (!p_ptr)
- return -EINVAL;
- msg_set_importance(&p_ptr->publ.phdr, (u32)imp);
- tipc_port_unlock(p_ptr);
- return 0;
-}
-
-
-int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
-{
- struct port *p_ptr;
struct publication *publ;
u32 key;
- int res = -EINVAL;
- p_ptr = tipc_port_lock(ref);
- if (!p_ptr)
+ if (p_ptr->connected)
return -EINVAL;
+ key = p_ptr->ref + p_ptr->pub_count + 1;
+ if (key == p_ptr->ref)
+ return -EADDRINUSE;
- dbg("tipc_publ %u, p_ptr = %x, conn = %x, scope = %x, "
- "lower = %u, upper = %u\n",
- ref, p_ptr, p_ptr->publ.connected, scope, seq->lower, seq->upper);
- if (p_ptr->publ.connected)
- goto exit;
- if (seq->lower > seq->upper)
- goto exit;
- if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE))
- goto exit;
- key = ref + p_ptr->pub_count + 1;
- if (key == ref) {
- res = -EADDRINUSE;
- goto exit;
- }
publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper,
- scope, p_ptr->publ.ref, key);
+ scope, p_ptr->ref, key);
if (publ) {
list_add(&publ->pport_list, &p_ptr->publications);
p_ptr->pub_count++;
- p_ptr->publ.published = 1;
- res = 0;
+ p_ptr->published = 1;
+ return 0;
}
-exit:
- tipc_port_unlock(p_ptr);
- return res;
+ return -EINVAL;
}
-int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
+int tipc_withdraw(struct tipc_port *p_ptr, unsigned int scope,
+ struct tipc_name_seq const *seq)
{
- struct port *p_ptr;
struct publication *publ;
struct publication *tpubl;
int res = -EINVAL;
- p_ptr = tipc_port_lock(ref);
- if (!p_ptr)
- return -EINVAL;
if (!seq) {
list_for_each_entry_safe(publ, tpubl,
&p_ptr->publications, pport_list) {
@@ -1084,175 +648,161 @@ int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
}
}
if (list_empty(&p_ptr->publications))
- p_ptr->publ.published = 0;
- tipc_port_unlock(p_ptr);
+ p_ptr->published = 0;
return res;
}
-int tipc_connect2port(u32 ref, struct tipc_portid const *peer)
+int tipc_port_connect(u32 ref, struct tipc_portid const *peer)
{
- struct port *p_ptr;
- struct tipc_msg *msg;
- int res = -EINVAL;
+ struct tipc_port *p_ptr;
+ int res;
p_ptr = tipc_port_lock(ref);
if (!p_ptr)
return -EINVAL;
- if (p_ptr->publ.published || p_ptr->publ.connected)
+ res = __tipc_port_connect(ref, p_ptr, peer);
+ tipc_port_unlock(p_ptr);
+ return res;
+}
+
+/*
+ * __tipc_port_connect - connect to a remote peer
+ *
+ * Port must be locked.
+ */
+int __tipc_port_connect(u32 ref, struct tipc_port *p_ptr,
+ struct tipc_portid const *peer)
+{
+ struct tipc_msg *msg;
+ int res = -EINVAL;
+
+ if (p_ptr->published || p_ptr->connected)
goto exit;
if (!peer->ref)
goto exit;
- msg = &p_ptr->publ.phdr;
+ msg = &p_ptr->phdr;
msg_set_destnode(msg, peer->node);
msg_set_destport(msg, peer->ref);
- msg_set_orignode(msg, tipc_own_addr);
- msg_set_origport(msg, p_ptr->publ.ref);
- msg_set_transp_seqno(msg, 42);
msg_set_type(msg, TIPC_CONN_MSG);
- if (!may_route(peer->node))
- msg_set_hdr_sz(msg, SHORT_H_SIZE);
- else
- msg_set_hdr_sz(msg, LONG_H_SIZE);
+ msg_set_lookup_scope(msg, 0);
+ msg_set_hdr_sz(msg, SHORT_H_SIZE);
p_ptr->probing_interval = PROBING_INTERVAL;
p_ptr->probing_state = CONFIRMED;
- p_ptr->publ.connected = 1;
+ p_ptr->connected = 1;
k_start_timer(&p_ptr->timer, p_ptr->probing_interval);
- tipc_nodesub_subscribe(&p_ptr->subscription,peer->node,
+ tipc_nodesub_subscribe(&p_ptr->subscription, peer->node,
(void *)(unsigned long)ref,
(net_ev_handler)port_handle_node_down);
res = 0;
exit:
- tipc_port_unlock(p_ptr);
- p_ptr->publ.max_pkt = tipc_link_get_max_pkt(peer->node, ref);
+ p_ptr->max_pkt = tipc_link_get_max_pkt(peer->node, ref);
return res;
}
-/**
- * tipc_disconnect_port - disconnect port from peer
+/*
+ * __tipc_disconnect - disconnect port from peer
*
* Port must be locked.
*/
-
-int tipc_disconnect_port(struct tipc_port *tp_ptr)
+int __tipc_port_disconnect(struct tipc_port *tp_ptr)
{
- int res;
-
if (tp_ptr->connected) {
tp_ptr->connected = 0;
/* let timer expire on it's own to avoid deadlock! */
- tipc_nodesub_unsubscribe(
- &((struct port *)tp_ptr)->subscription);
- res = 0;
- } else {
- res = -ENOTCONN;
+ tipc_nodesub_unsubscribe(&tp_ptr->subscription);
+ return 0;
}
- return res;
+
+ return -ENOTCONN;
}
/*
- * tipc_disconnect(): Disconnect port form peer.
+ * tipc_port_disconnect(): Disconnect port form peer.
* This is a node local operation.
*/
-
-int tipc_disconnect(u32 ref)
+int tipc_port_disconnect(u32 ref)
{
- struct port *p_ptr;
+ struct tipc_port *p_ptr;
int res;
p_ptr = tipc_port_lock(ref);
if (!p_ptr)
return -EINVAL;
- res = tipc_disconnect_port((struct tipc_port *)p_ptr);
+ res = __tipc_port_disconnect(p_ptr);
tipc_port_unlock(p_ptr);
return res;
}
/*
- * tipc_shutdown(): Send a SHUTDOWN msg to peer and disconnect
+ * tipc_port_shutdown(): Send a SHUTDOWN msg to peer and disconnect
*/
-int tipc_shutdown(u32 ref)
+int tipc_port_shutdown(u32 ref)
{
- struct port *p_ptr;
+ struct tipc_port *p_ptr;
struct sk_buff *buf = NULL;
p_ptr = tipc_port_lock(ref);
if (!p_ptr)
return -EINVAL;
- if (p_ptr->publ.connected) {
- u32 imp = msg_importance(&p_ptr->publ.phdr);
- if (imp < TIPC_CRITICAL_IMPORTANCE)
- imp++;
- buf = port_build_proto_msg(port_peerport(p_ptr),
- port_peernode(p_ptr),
- ref,
- tipc_own_addr,
- imp,
- TIPC_CONN_MSG,
- TIPC_CONN_SHUTDOWN,
- port_out_seqno(p_ptr),
- 0);
- }
+ buf = port_build_peer_abort_msg(p_ptr, TIPC_CONN_SHUTDOWN);
tipc_port_unlock(p_ptr);
tipc_net_route_msg(buf);
- return tipc_disconnect(ref);
+ return tipc_port_disconnect(ref);
}
/*
- * tipc_port_recv_sections(): Concatenate and deliver sectioned
- * message for this node.
+ * tipc_port_iovec_rcv: Concatenate and deliver sectioned
+ * message for this node.
*/
-
-static int tipc_port_recv_sections(struct port *sender, unsigned int num_sect,
- struct iovec const *msg_sect)
+static int tipc_port_iovec_rcv(struct tipc_port *sender,
+ struct iovec const *msg_sect,
+ unsigned int len)
{
struct sk_buff *buf;
int res;
- res = tipc_msg_build(&sender->publ.phdr, msg_sect, num_sect,
- MAX_MSG_SIZE, !sender->user_port, &buf);
+ res = tipc_msg_build(&sender->phdr, msg_sect, len, MAX_MSG_SIZE, &buf);
if (likely(buf))
- tipc_port_recv_msg(buf);
+ tipc_sk_rcv(buf);
return res;
}
/**
* tipc_send - send message sections on connection
*/
-
-int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect)
+int tipc_send(struct tipc_port *p_ptr,
+ struct iovec const *msg_sect,
+ unsigned int len)
{
- struct port *p_ptr;
u32 destnode;
int res;
- p_ptr = tipc_port_deref(ref);
- if (!p_ptr || !p_ptr->publ.connected)
+ if (!p_ptr->connected)
return -EINVAL;
- p_ptr->publ.congested = 1;
+ p_ptr->congested = 1;
if (!tipc_port_congested(p_ptr)) {
- destnode = port_peernode(p_ptr);
- if (likely(destnode != tipc_own_addr))
- res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
- destnode);
+ destnode = tipc_port_peernode(p_ptr);
+ if (likely(!in_own_node(destnode)))
+ res = tipc_link_iovec_xmit_fast(p_ptr, msg_sect, len,
+ destnode);
else
- res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
+ res = tipc_port_iovec_rcv(p_ptr, msg_sect, len);
if (likely(res != -ELINKCONG)) {
- port_incr_out_seqno(p_ptr);
- p_ptr->publ.congested = 0;
- p_ptr->sent++;
+ p_ptr->congested = 0;
+ if (res > 0)
+ p_ptr->sent++;
return res;
}
}
- if (port_unreliable(p_ptr)) {
- p_ptr->publ.congested = 0;
- /* Just calculate msg length and return */
- return tipc_msg_calc_data_size(msg_sect, num_sect);
+ if (tipc_port_unreliable(p_ptr)) {
+ p_ptr->congested = 0;
+ return len;
}
return -ELINKCONG;
}
@@ -1260,25 +810,23 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect)
/**
* tipc_send2name - send message sections to port name
*/
-
-int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
- unsigned int num_sect, struct iovec const *msg_sect)
+int tipc_send2name(struct tipc_port *p_ptr,
+ struct tipc_name const *name,
+ unsigned int domain,
+ struct iovec const *msg_sect,
+ unsigned int len)
{
- struct port *p_ptr;
struct tipc_msg *msg;
u32 destnode = domain;
u32 destport;
int res;
- p_ptr = tipc_port_deref(ref);
- if (!p_ptr || p_ptr->publ.connected)
+ if (p_ptr->connected)
return -EINVAL;
- msg = &p_ptr->publ.phdr;
+ msg = &p_ptr->phdr;
msg_set_type(msg, TIPC_NAMED_MSG);
- msg_set_orignode(msg, tipc_own_addr);
- msg_set_origport(msg, ref);
- msg_set_hdr_sz(msg, LONG_H_SIZE);
+ msg_set_hdr_sz(msg, NAMED_H_SIZE);
msg_set_nametype(msg, name->type);
msg_set_nameinst(msg, name->instance);
msg_set_lookup_scope(msg, tipc_addr_scope(domain));
@@ -1286,96 +834,65 @@ int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
msg_set_destnode(msg, destnode);
msg_set_destport(msg, destport);
- if (likely(destport)) {
- p_ptr->sent++;
- if (likely(destnode == tipc_own_addr))
- return tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
- res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
- destnode);
- if (likely(res != -ELINKCONG))
+ if (likely(destport || destnode)) {
+ if (likely(in_own_node(destnode)))
+ res = tipc_port_iovec_rcv(p_ptr, msg_sect, len);
+ else if (tipc_own_addr)
+ res = tipc_link_iovec_xmit_fast(p_ptr, msg_sect, len,
+ destnode);
+ else
+ res = tipc_port_iovec_reject(p_ptr, msg, msg_sect,
+ len, TIPC_ERR_NO_NODE);
+ if (likely(res != -ELINKCONG)) {
+ if (res > 0)
+ p_ptr->sent++;
return res;
- if (port_unreliable(p_ptr)) {
- /* Just calculate msg length and return */
- return tipc_msg_calc_data_size(msg_sect, num_sect);
}
+ if (tipc_port_unreliable(p_ptr))
+ return len;
+
return -ELINKCONG;
}
- return tipc_port_reject_sections(p_ptr, msg, msg_sect, num_sect,
- TIPC_ERR_NO_NAME);
+ return tipc_port_iovec_reject(p_ptr, msg, msg_sect, len,
+ TIPC_ERR_NO_NAME);
}
/**
* tipc_send2port - send message sections to port identity
*/
-
-int tipc_send2port(u32 ref, struct tipc_portid const *dest,
- unsigned int num_sect, struct iovec const *msg_sect)
+int tipc_send2port(struct tipc_port *p_ptr,
+ struct tipc_portid const *dest,
+ struct iovec const *msg_sect,
+ unsigned int len)
{
- struct port *p_ptr;
struct tipc_msg *msg;
int res;
- p_ptr = tipc_port_deref(ref);
- if (!p_ptr || p_ptr->publ.connected)
+ if (p_ptr->connected)
return -EINVAL;
- msg = &p_ptr->publ.phdr;
+ msg = &p_ptr->phdr;
msg_set_type(msg, TIPC_DIRECT_MSG);
- msg_set_orignode(msg, tipc_own_addr);
- msg_set_origport(msg, ref);
+ msg_set_lookup_scope(msg, 0);
msg_set_destnode(msg, dest->node);
msg_set_destport(msg, dest->ref);
- msg_set_hdr_sz(msg, DIR_MSG_H_SIZE);
- p_ptr->sent++;
- if (dest->node == tipc_own_addr)
- return tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
- res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect, dest->node);
- if (likely(res != -ELINKCONG))
+ msg_set_hdr_sz(msg, BASIC_H_SIZE);
+
+ if (in_own_node(dest->node))
+ res = tipc_port_iovec_rcv(p_ptr, msg_sect, len);
+ else if (tipc_own_addr)
+ res = tipc_link_iovec_xmit_fast(p_ptr, msg_sect, len,
+ dest->node);
+ else
+ res = tipc_port_iovec_reject(p_ptr, msg, msg_sect, len,
+ TIPC_ERR_NO_NODE);
+ if (likely(res != -ELINKCONG)) {
+ if (res > 0)
+ p_ptr->sent++;
return res;
- if (port_unreliable(p_ptr)) {
- /* Just calculate msg length and return */
- return tipc_msg_calc_data_size(msg_sect, num_sect);
}
- return -ELINKCONG;
-}
-
-/**
- * tipc_send_buf2port - send message buffer to port identity
- */
-
-int tipc_send_buf2port(u32 ref, struct tipc_portid const *dest,
- struct sk_buff *buf, unsigned int dsz)
-{
- struct port *p_ptr;
- struct tipc_msg *msg;
- int res;
+ if (tipc_port_unreliable(p_ptr))
+ return len;
- p_ptr = (struct port *)tipc_ref_deref(ref);
- if (!p_ptr || p_ptr->publ.connected)
- return -EINVAL;
-
- msg = &p_ptr->publ.phdr;
- msg_set_type(msg, TIPC_DIRECT_MSG);
- msg_set_orignode(msg, tipc_own_addr);
- msg_set_origport(msg, ref);
- msg_set_destnode(msg, dest->node);
- msg_set_destport(msg, dest->ref);
- msg_set_hdr_sz(msg, DIR_MSG_H_SIZE);
- msg_set_size(msg, DIR_MSG_H_SIZE + dsz);
- if (skb_cow(buf, DIR_MSG_H_SIZE))
- return -ENOMEM;
-
- skb_push(buf, DIR_MSG_H_SIZE);
- skb_copy_to_linear_data(buf, msg, DIR_MSG_H_SIZE);
- msg_dbg(msg, "buf2port: ");
- p_ptr->sent++;
- if (dest->node == tipc_own_addr)
- return tipc_port_recv_msg(buf);
- res = tipc_send_buf_fast(buf, dest->node);
- if (likely(res != -ELINKCONG))
- return res;
- if (port_unreliable(p_ptr))
- return dsz;
return -ELINKCONG;
}
-
diff --git a/net/tipc/port.h b/net/tipc/port.h
index 3a807fcec2b..cf4ca5b1d9a 100644
--- a/net/tipc/port.h
+++ b/net/tipc/port.h
@@ -1,8 +1,8 @@
/*
* net/tipc/port.h: Include file for TIPC port code
*
- * Copyright (c) 1994-2007, Ericsson AB
- * Copyright (c) 2004-2007, Wind River Systems
+ * Copyright (c) 1994-2007, 2014, Ericsson AB
+ * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -42,65 +42,13 @@
#include "msg.h"
#include "node_subscr.h"
-#define TIPC_FLOW_CONTROL_WIN 512
-
-typedef void (*tipc_msg_err_event) (void *usr_handle, u32 portref,
- struct sk_buff **buf, unsigned char const *data,
- unsigned int size, int reason,
- struct tipc_portid const *attmpt_destid);
-
-typedef void (*tipc_named_msg_err_event) (void *usr_handle, u32 portref,
- struct sk_buff **buf, unsigned char const *data,
- unsigned int size, int reason,
- struct tipc_name_seq const *attmpt_dest);
-
-typedef void (*tipc_conn_shutdown_event) (void *usr_handle, u32 portref,
- struct sk_buff **buf, unsigned char const *data,
- unsigned int size, int reason);
-
-typedef void (*tipc_msg_event) (void *usr_handle, u32 portref,
- struct sk_buff **buf, unsigned char const *data,
- unsigned int size, unsigned int importance,
- struct tipc_portid const *origin);
-
-typedef void (*tipc_named_msg_event) (void *usr_handle, u32 portref,
- struct sk_buff **buf, unsigned char const *data,
- unsigned int size, unsigned int importance,
- struct tipc_portid const *orig,
- struct tipc_name_seq const *dest);
-
-typedef void (*tipc_conn_msg_event) (void *usr_handle, u32 portref,
- struct sk_buff **buf, unsigned char const *data,
- unsigned int size);
-
-typedef void (*tipc_continue_event) (void *usr_handle, u32 portref);
+#define TIPC_CONNACK_INTV 256
+#define TIPC_FLOWCTRL_WIN (TIPC_CONNACK_INTV * 2)
+#define TIPC_CONN_OVERLOAD_LIMIT ((TIPC_FLOWCTRL_WIN * 2 + 1) * \
+ SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE))
/**
- * struct user_port - TIPC user port (used with native API)
- * @user_ref: id of user who created user port
- * @usr_handle: user-specified field
- * @ref: object reference to associated TIPC port
- * <various callback routines>
- * @uport_list: adjacent user ports in list of ports held by user
- */
-
-struct user_port {
- u32 user_ref;
- void *usr_handle;
- u32 ref;
- tipc_msg_err_event err_cb;
- tipc_named_msg_err_event named_err_cb;
- tipc_conn_shutdown_event conn_err_cb;
- tipc_msg_event msg_cb;
- tipc_named_msg_event named_msg_cb;
- tipc_conn_msg_event conn_msg_cb;
- tipc_continue_event continue_event_cb;
- struct list_head uport_list;
-};
-
-/**
- * struct tipc_port - TIPC port info available to socket API
- * @usr_handle: pointer to additional user-defined information about port
+ * struct tipc_port - TIPC port structure
* @lock: pointer to spinlock for controlling access to port
* @connected: non-zero if port is currently connected to a peer port
* @conn_type: TIPC type used when connection was established
@@ -111,9 +59,19 @@ struct user_port {
* @max_pkt: maximum packet size "hint" used when building messages sent by port
* @ref: unique reference to port in TIPC object registry
* @phdr: preformatted message header used when sending messages
+ * @port_list: adjacent ports in TIPC's global list of ports
+ * @wait_list: adjacent ports in list of ports waiting on link congestion
+ * @waiting_pkts:
+ * @sent: # of non-empty messages sent by port
+ * @acked: # of non-empty message acknowledgements from connected port's peer
+ * @publications: list of publications for port
+ * @pub_count: total # of publications port has made during its lifetime
+ * @probing_state:
+ * @probing_interval:
+ * @timer_ref:
+ * @subscription: "node down" subscription used to terminate failed connections
*/
struct tipc_port {
- void *usr_handle;
spinlock_t *lock;
int connected;
u32 conn_type;
@@ -124,34 +82,7 @@ struct tipc_port {
u32 max_pkt;
u32 ref;
struct tipc_msg phdr;
-};
-
-/**
- * struct port - TIPC port structure
- * @publ: TIPC port info available to privileged users
- * @port_list: adjacent ports in TIPC's global list of ports
- * @dispatcher: ptr to routine which handles received messages
- * @wakeup: ptr to routine to call when port is no longer congested
- * @user_port: ptr to user port associated with port (if any)
- * @wait_list: adjacent ports in list of ports waiting on link congestion
- * @waiting_pkts:
- * @sent:
- * @acked:
- * @publications: list of publications for port
- * @pub_count: total # of publications port has made during its lifetime
- * @probing_state:
- * @probing_interval:
- * @last_in_seqno:
- * @timer_ref:
- * @subscription: "node down" subscription used to terminate failed connections
- */
-
-struct port {
- struct tipc_port publ;
struct list_head port_list;
- u32 (*dispatcher)(struct tipc_port *, struct sk_buff *);
- void (*wakeup)(struct tipc_port *);
- struct user_port *user_port;
struct list_head wait_list;
u32 waiting_pkts;
u32 sent;
@@ -160,95 +91,88 @@ struct port {
u32 pub_count;
u32 probing_state;
u32 probing_interval;
- u32 last_in_seqno;
struct timer_list timer;
struct tipc_node_subscr subscription;
};
extern spinlock_t tipc_port_list_lock;
-struct port_list;
+struct tipc_port_list;
/*
* TIPC port manipulation routines
*/
-struct tipc_port *tipc_createport_raw(void *usr_handle,
- u32 (*dispatcher)(struct tipc_port *, struct sk_buff *),
- void (*wakeup)(struct tipc_port *), const u32 importance);
+u32 tipc_port_init(struct tipc_port *p_ptr,
+ const unsigned int importance);
int tipc_reject_msg(struct sk_buff *buf, u32 err);
-int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode);
-
void tipc_acknowledge(u32 port_ref, u32 ack);
-int tipc_createport(unsigned int tipc_user, void *usr_handle,
- unsigned int importance, tipc_msg_err_event error_cb,
- tipc_named_msg_err_event named_error_cb,
- tipc_conn_shutdown_event conn_error_cb, tipc_msg_event msg_cb,
- tipc_named_msg_event named_msg_cb,
- tipc_conn_msg_event conn_msg_cb,
- tipc_continue_event continue_event_cb, u32 *portref);
-
-int tipc_deleteport(u32 portref);
-
-int tipc_portimportance(u32 portref, unsigned int *importance);
-int tipc_set_portimportance(u32 portref, unsigned int importance);
+void tipc_port_destroy(struct tipc_port *p_ptr);
-int tipc_portunreliable(u32 portref, unsigned int *isunreliable);
-int tipc_set_portunreliable(u32 portref, unsigned int isunreliable);
+int tipc_publish(struct tipc_port *p_ptr, unsigned int scope,
+ struct tipc_name_seq const *name_seq);
-int tipc_portunreturnable(u32 portref, unsigned int *isunreturnable);
-int tipc_set_portunreturnable(u32 portref, unsigned int isunreturnable);
+int tipc_withdraw(struct tipc_port *p_ptr, unsigned int scope,
+ struct tipc_name_seq const *name_seq);
-int tipc_publish(u32 portref, unsigned int scope,
- struct tipc_name_seq const *name_seq);
-int tipc_withdraw(u32 portref, unsigned int scope,
- struct tipc_name_seq const *name_seq);
+int tipc_port_connect(u32 portref, struct tipc_portid const *port);
-int tipc_connect2port(u32 portref, struct tipc_portid const *port);
+int tipc_port_disconnect(u32 portref);
-int tipc_disconnect(u32 portref);
-
-int tipc_shutdown(u32 ref);
+int tipc_port_shutdown(u32 ref);
+void tipc_port_wakeup(struct tipc_port *port);
/*
* The following routines require that the port be locked on entry
*/
-int tipc_disconnect_port(struct tipc_port *tp_ptr);
+int __tipc_port_disconnect(struct tipc_port *tp_ptr);
+int __tipc_port_connect(u32 ref, struct tipc_port *p_ptr,
+ struct tipc_portid const *peer);
+int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg);
/*
* TIPC messaging routines
*/
-int tipc_send(u32 portref, unsigned int num_sect, struct iovec const *msg_sect);
-int tipc_send2name(u32 portref, struct tipc_name const *name, u32 domain,
- unsigned int num_sect, struct iovec const *msg_sect);
+int tipc_send(struct tipc_port *port,
+ struct iovec const *msg_sect,
+ unsigned int len);
+
+int tipc_send2name(struct tipc_port *port,
+ struct tipc_name const *name,
+ u32 domain,
+ struct iovec const *msg_sect,
+ unsigned int len);
-int tipc_send2port(u32 portref, struct tipc_portid const *dest,
- unsigned int num_sect, struct iovec const *msg_sect);
+int tipc_send2port(struct tipc_port *port,
+ struct tipc_portid const *dest,
+ struct iovec const *msg_sect,
+ unsigned int len);
-int tipc_send_buf2port(u32 portref, struct tipc_portid const *dest,
- struct sk_buff *buf, unsigned int dsz);
+int tipc_port_mcast_xmit(struct tipc_port *port,
+ struct tipc_name_seq const *seq,
+ struct iovec const *msg,
+ unsigned int len);
-int tipc_multicast(u32 portref, struct tipc_name_seq const *seq,
- unsigned int section_count, struct iovec const *msg);
+int tipc_port_iovec_reject(struct tipc_port *p_ptr,
+ struct tipc_msg *hdr,
+ struct iovec const *msg_sect,
+ unsigned int len,
+ int err);
-int tipc_port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
- struct iovec const *msg_sect, u32 num_sect,
- int err);
struct sk_buff *tipc_port_get_ports(void);
-void tipc_port_recv_proto_msg(struct sk_buff *buf);
-void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp);
+void tipc_port_proto_rcv(struct sk_buff *buf);
+void tipc_port_mcast_rcv(struct sk_buff *buf, struct tipc_port_list *dp);
void tipc_port_reinit(void);
/**
* tipc_port_lock - lock port instance referred to and return its pointer
*/
-
-static inline struct port *tipc_port_lock(u32 ref)
+static inline struct tipc_port *tipc_port_lock(u32 ref)
{
- return (struct port *)tipc_ref_lock(ref);
+ return (struct tipc_port *)tipc_ref_lock(ref);
}
/**
@@ -256,72 +180,58 @@ static inline struct port *tipc_port_lock(u32 ref)
*
* Can use pointer instead of tipc_ref_unlock() since port is already locked.
*/
+static inline void tipc_port_unlock(struct tipc_port *p_ptr)
+{
+ spin_unlock_bh(p_ptr->lock);
+}
-static inline void tipc_port_unlock(struct port *p_ptr)
+static inline int tipc_port_congested(struct tipc_port *p_ptr)
{
- spin_unlock_bh(p_ptr->publ.lock);
+ return ((p_ptr->sent - p_ptr->acked) >= TIPC_FLOWCTRL_WIN);
}
-static inline struct port* tipc_port_deref(u32 ref)
+
+static inline u32 tipc_port_peernode(struct tipc_port *p_ptr)
{
- return (struct port *)tipc_ref_deref(ref);
+ return msg_destnode(&p_ptr->phdr);
}
-static inline u32 tipc_peer_port(struct port *p_ptr)
+static inline u32 tipc_port_peerport(struct tipc_port *p_ptr)
{
- return msg_destport(&p_ptr->publ.phdr);
+ return msg_destport(&p_ptr->phdr);
}
-static inline u32 tipc_peer_node(struct port *p_ptr)
+static inline bool tipc_port_unreliable(struct tipc_port *port)
{
- return msg_destnode(&p_ptr->publ.phdr);
+ return msg_src_droppable(&port->phdr) != 0;
}
-static inline int tipc_port_congested(struct port *p_ptr)
+static inline void tipc_port_set_unreliable(struct tipc_port *port,
+ bool unreliable)
{
- return (p_ptr->sent - p_ptr->acked) >= (TIPC_FLOW_CONTROL_WIN * 2);
+ msg_set_src_droppable(&port->phdr, unreliable ? 1 : 0);
}
-/**
- * tipc_port_recv_msg - receive message from lower layer and deliver to port user
- */
+static inline bool tipc_port_unreturnable(struct tipc_port *port)
+{
+ return msg_dest_droppable(&port->phdr) != 0;
+}
+
+static inline void tipc_port_set_unreturnable(struct tipc_port *port,
+ bool unreturnable)
+{
+ msg_set_dest_droppable(&port->phdr, unreturnable ? 1 : 0);
+}
+
+
+static inline int tipc_port_importance(struct tipc_port *port)
+{
+ return msg_importance(&port->phdr);
+}
-static inline int tipc_port_recv_msg(struct sk_buff *buf)
+static inline void tipc_port_set_importance(struct tipc_port *port, int imp)
{
- struct port *p_ptr;
- struct tipc_msg *msg = buf_msg(buf);
- u32 destport = msg_destport(msg);
- u32 dsz = msg_data_sz(msg);
- u32 err;
-
- /* forward unresolved named message */
- if (unlikely(!destport)) {
- tipc_net_route_msg(buf);
- return dsz;
- }
-
- /* validate destination & pass to port, otherwise reject message */
- p_ptr = tipc_port_lock(destport);
- if (likely(p_ptr)) {
- if (likely(p_ptr->publ.connected)) {
- if ((unlikely(msg_origport(msg) != tipc_peer_port(p_ptr))) ||
- (unlikely(msg_orignode(msg) != tipc_peer_node(p_ptr))) ||
- (unlikely(!msg_connected(msg)))) {
- err = TIPC_ERR_NO_PORT;
- tipc_port_unlock(p_ptr);
- goto reject;
- }
- }
- err = p_ptr->dispatcher(&p_ptr->publ, buf);
- tipc_port_unlock(p_ptr);
- if (likely(!err))
- return dsz;
- } else {
- err = TIPC_ERR_NO_PORT;
- }
-reject:
- dbg("port->rejecting, err = %x..\n",err);
- return tipc_reject_msg(buf, err);
+ msg_set_importance(&port->phdr, (u32)imp);
}
#endif
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
index ab8ad32d8c2..3d4ecd754ee 100644
--- a/net/tipc/ref.c
+++ b/net/tipc/ref.c
@@ -43,7 +43,6 @@
* @lock: spinlock controlling access to object
* @ref: reference value for object (combines instance & array index info)
*/
-
struct reference {
void *object;
spinlock_t lock;
@@ -60,7 +59,6 @@ struct reference {
* @index_mask: bitmask for array index portion of reference values
* @start_mask: initial value for instance value portion of reference values
*/
-
struct ref_table {
struct reference *entries;
u32 capacity;
@@ -89,14 +87,13 @@ struct ref_table {
* have a reference value of 0 (although this is unlikely).
*/
-static struct ref_table tipc_ref_table = { NULL };
+static struct ref_table tipc_ref_table;
-static DEFINE_RWLOCK(ref_table_lock);
+static DEFINE_SPINLOCK(ref_table_lock);
/**
* tipc_ref_table_init - create reference table for objects
*/
-
int tipc_ref_table_init(u32 requested_size, u32 start)
{
struct reference *table;
@@ -109,9 +106,7 @@ int tipc_ref_table_init(u32 requested_size, u32 start)
/* do nothing */ ;
/* allocate table & mark all entries as uninitialized */
-
- table = __vmalloc(actual_size * sizeof(struct reference),
- GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
+ table = vzalloc(actual_size * sizeof(struct reference));
if (table == NULL)
return -ENOMEM;
@@ -129,12 +124,8 @@ int tipc_ref_table_init(u32 requested_size, u32 start)
/**
* tipc_ref_table_stop - destroy reference table for objects
*/
-
void tipc_ref_table_stop(void)
{
- if (!tipc_ref_table.entries)
- return;
-
vfree(tipc_ref_table.entries);
tipc_ref_table.entries = NULL;
}
@@ -150,7 +141,6 @@ void tipc_ref_table_stop(void)
* register a partially initialized object, without running the risk that
* the object will be accessed before initialization is complete.
*/
-
u32 tipc_ref_acquire(void *object, spinlock_t **lock)
{
u32 index;
@@ -160,17 +150,16 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
struct reference *entry = NULL;
if (!object) {
- err("Attempt to acquire reference to non-existent object\n");
+ pr_err("Attempt to acquire ref. to non-existent obj\n");
return 0;
}
if (!tipc_ref_table.entries) {
- err("Reference table not found during acquisition attempt\n");
+ pr_err("Ref. table not found in acquisition attempt\n");
return 0;
}
/* take a free entry, if available; otherwise initialize a new entry */
-
- write_lock_bh(&ref_table_lock);
+ spin_lock_bh(&ref_table_lock);
if (tipc_ref_table.first_free) {
index = tipc_ref_table.first_free;
entry = &(tipc_ref_table.entries[index]);
@@ -178,17 +167,15 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
next_plus_upper = entry->ref;
tipc_ref_table.first_free = next_plus_upper & index_mask;
ref = (next_plus_upper & ~index_mask) + index;
- }
- else if (tipc_ref_table.init_point < tipc_ref_table.capacity) {
+ } else if (tipc_ref_table.init_point < tipc_ref_table.capacity) {
index = tipc_ref_table.init_point++;
entry = &(tipc_ref_table.entries[index]);
spin_lock_init(&entry->lock);
ref = tipc_ref_table.start_mask + index;
- }
- else {
+ } else {
ref = 0;
}
- write_unlock_bh(&ref_table_lock);
+ spin_unlock_bh(&ref_table_lock);
/*
* Grab the lock so no one else can modify this entry
@@ -214,7 +201,6 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
* Disallow future references to an object and free up the entry for re-use.
* Note: The entry's spin_lock may still be busy after discard
*/
-
void tipc_ref_discard(u32 ref)
{
struct reference *entry;
@@ -222,7 +208,7 @@ void tipc_ref_discard(u32 ref)
u32 index_mask;
if (!tipc_ref_table.entries) {
- err("Reference table not found during discard attempt\n");
+ pr_err("Ref. table not found during discard attempt\n");
return;
}
@@ -230,14 +216,14 @@ void tipc_ref_discard(u32 ref)
index = ref & index_mask;
entry = &(tipc_ref_table.entries[index]);
- write_lock_bh(&ref_table_lock);
+ spin_lock_bh(&ref_table_lock);
if (!entry->object) {
- err("Attempt to discard reference to non-existent object\n");
+ pr_err("Attempt to discard ref. to non-existent obj\n");
goto exit;
}
if (entry->ref != ref) {
- err("Attempt to discard non-existent reference\n");
+ pr_err("Attempt to discard non-existent reference\n");
goto exit;
}
@@ -245,12 +231,10 @@ void tipc_ref_discard(u32 ref)
* mark entry as unused; increment instance part of entry's reference
* to invalidate any subsequent references
*/
-
entry->object = NULL;
entry->ref = (ref & ~index_mask) + (index_mask + 1);
/* append entry to free entry list */
-
if (tipc_ref_table.first_free == 0)
tipc_ref_table.first_free = index;
else
@@ -258,13 +242,12 @@ void tipc_ref_discard(u32 ref)
tipc_ref_table.last_free = index;
exit:
- write_unlock_bh(&ref_table_lock);
+ spin_unlock_bh(&ref_table_lock);
}
/**
* tipc_ref_lock - lock referenced object and return pointer to it
*/
-
void *tipc_ref_lock(u32 ref)
{
if (likely(tipc_ref_table.entries)) {
@@ -281,22 +264,3 @@ void *tipc_ref_lock(u32 ref)
}
return NULL;
}
-
-
-/**
- * tipc_ref_deref - return pointer referenced object (without locking it)
- */
-
-void *tipc_ref_deref(u32 ref)
-{
- if (likely(tipc_ref_table.entries)) {
- struct reference *entry;
-
- entry = &tipc_ref_table.entries[ref &
- tipc_ref_table.index_mask];
- if (likely(entry->ref == ref))
- return entry->object;
- }
- return NULL;
-}
-
diff --git a/net/tipc/ref.h b/net/tipc/ref.h
index 5bc8e7ab84d..d01aa1df63b 100644
--- a/net/tipc/ref.h
+++ b/net/tipc/ref.h
@@ -44,6 +44,5 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock);
void tipc_ref_discard(u32 ref);
void *tipc_ref_lock(u32 ref);
-void *tipc_ref_deref(u32 ref);
#endif
diff --git a/net/tipc/server.c b/net/tipc/server.c
new file mode 100644
index 00000000000..a538a02f869
--- /dev/null
+++ b/net/tipc/server.c
@@ -0,0 +1,600 @@
+/*
+ * net/tipc/server.c: TIPC server infrastructure
+ *
+ * Copyright (c) 2012-2013, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "server.h"
+#include "core.h"
+#include <net/sock.h>
+
+/* Number of messages to send before rescheduling */
+#define MAX_SEND_MSG_COUNT 25
+#define MAX_RECV_MSG_COUNT 25
+#define CF_CONNECTED 1
+
+#define sock2con(x) ((struct tipc_conn *)(x)->sk_user_data)
+
+/**
+ * struct tipc_conn - TIPC connection structure
+ * @kref: reference counter to connection object
+ * @conid: connection identifier
+ * @sock: socket handler associated with connection
+ * @flags: indicates connection state
+ * @server: pointer to connected server
+ * @rwork: receive work item
+ * @usr_data: user-specified field
+ * @rx_action: what to do when connection socket is active
+ * @outqueue: pointer to first outbound message in queue
+ * @outqueue_lock: control access to the outqueue
+ * @outqueue: list of connection objects for its server
+ * @swork: send work item
+ */
+struct tipc_conn {
+ struct kref kref;
+ int conid;
+ struct socket *sock;
+ unsigned long flags;
+ struct tipc_server *server;
+ struct work_struct rwork;
+ int (*rx_action) (struct tipc_conn *con);
+ void *usr_data;
+ struct list_head outqueue;
+ spinlock_t outqueue_lock;
+ struct work_struct swork;
+};
+
+/* An entry waiting to be sent */
+struct outqueue_entry {
+ struct list_head list;
+ struct kvec iov;
+ struct sockaddr_tipc dest;
+};
+
+static void tipc_recv_work(struct work_struct *work);
+static void tipc_send_work(struct work_struct *work);
+static void tipc_clean_outqueues(struct tipc_conn *con);
+
+static void tipc_conn_kref_release(struct kref *kref)
+{
+ struct tipc_conn *con = container_of(kref, struct tipc_conn, kref);
+
+ if (con->sock) {
+ tipc_sock_release_local(con->sock);
+ con->sock = NULL;
+ }
+
+ tipc_clean_outqueues(con);
+ kfree(con);
+}
+
+static void conn_put(struct tipc_conn *con)
+{
+ kref_put(&con->kref, tipc_conn_kref_release);
+}
+
+static void conn_get(struct tipc_conn *con)
+{
+ kref_get(&con->kref);
+}
+
+static struct tipc_conn *tipc_conn_lookup(struct tipc_server *s, int conid)
+{
+ struct tipc_conn *con;
+
+ spin_lock_bh(&s->idr_lock);
+ con = idr_find(&s->conn_idr, conid);
+ if (con)
+ conn_get(con);
+ spin_unlock_bh(&s->idr_lock);
+ return con;
+}
+
+static void sock_data_ready(struct sock *sk)
+{
+ struct tipc_conn *con;
+
+ read_lock(&sk->sk_callback_lock);
+ con = sock2con(sk);
+ if (con && test_bit(CF_CONNECTED, &con->flags)) {
+ conn_get(con);
+ if (!queue_work(con->server->rcv_wq, &con->rwork))
+ conn_put(con);
+ }
+ read_unlock(&sk->sk_callback_lock);
+}
+
+static void sock_write_space(struct sock *sk)
+{
+ struct tipc_conn *con;
+
+ read_lock(&sk->sk_callback_lock);
+ con = sock2con(sk);
+ if (con && test_bit(CF_CONNECTED, &con->flags)) {
+ conn_get(con);
+ if (!queue_work(con->server->send_wq, &con->swork))
+ conn_put(con);
+ }
+ read_unlock(&sk->sk_callback_lock);
+}
+
+static void tipc_register_callbacks(struct socket *sock, struct tipc_conn *con)
+{
+ struct sock *sk = sock->sk;
+
+ write_lock_bh(&sk->sk_callback_lock);
+
+ sk->sk_data_ready = sock_data_ready;
+ sk->sk_write_space = sock_write_space;
+ sk->sk_user_data = con;
+
+ con->sock = sock;
+
+ write_unlock_bh(&sk->sk_callback_lock);
+}
+
+static void tipc_unregister_callbacks(struct tipc_conn *con)
+{
+ struct sock *sk = con->sock->sk;
+
+ write_lock_bh(&sk->sk_callback_lock);
+ sk->sk_user_data = NULL;
+ write_unlock_bh(&sk->sk_callback_lock);
+}
+
+static void tipc_close_conn(struct tipc_conn *con)
+{
+ struct tipc_server *s = con->server;
+
+ if (test_and_clear_bit(CF_CONNECTED, &con->flags)) {
+ if (con->conid)
+ s->tipc_conn_shutdown(con->conid, con->usr_data);
+
+ spin_lock_bh(&s->idr_lock);
+ idr_remove(&s->conn_idr, con->conid);
+ s->idr_in_use--;
+ spin_unlock_bh(&s->idr_lock);
+
+ tipc_unregister_callbacks(con);
+
+ /* We shouldn't flush pending works as we may be in the
+ * thread. In fact the races with pending rx/tx work structs
+ * are harmless for us here as we have already deleted this
+ * connection from server connection list and set
+ * sk->sk_user_data to 0 before releasing connection object.
+ */
+ kernel_sock_shutdown(con->sock, SHUT_RDWR);
+
+ conn_put(con);
+ }
+}
+
+static struct tipc_conn *tipc_alloc_conn(struct tipc_server *s)
+{
+ struct tipc_conn *con;
+ int ret;
+
+ con = kzalloc(sizeof(struct tipc_conn), GFP_ATOMIC);
+ if (!con)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&con->kref);
+ INIT_LIST_HEAD(&con->outqueue);
+ spin_lock_init(&con->outqueue_lock);
+ INIT_WORK(&con->swork, tipc_send_work);
+ INIT_WORK(&con->rwork, tipc_recv_work);
+
+ spin_lock_bh(&s->idr_lock);
+ ret = idr_alloc(&s->conn_idr, con, 0, 0, GFP_ATOMIC);
+ if (ret < 0) {
+ kfree(con);
+ spin_unlock_bh(&s->idr_lock);
+ return ERR_PTR(-ENOMEM);
+ }
+ con->conid = ret;
+ s->idr_in_use++;
+ spin_unlock_bh(&s->idr_lock);
+
+ set_bit(CF_CONNECTED, &con->flags);
+ con->server = s;
+
+ return con;
+}
+
+static int tipc_receive_from_sock(struct tipc_conn *con)
+{
+ struct msghdr msg = {};
+ struct tipc_server *s = con->server;
+ struct sockaddr_tipc addr;
+ struct kvec iov;
+ void *buf;
+ int ret;
+
+ buf = kmem_cache_alloc(s->rcvbuf_cache, GFP_ATOMIC);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto out_close;
+ }
+
+ iov.iov_base = buf;
+ iov.iov_len = s->max_rcvbuf_size;
+ msg.msg_name = &addr;
+ ret = kernel_recvmsg(con->sock, &msg, &iov, 1, iov.iov_len,
+ MSG_DONTWAIT);
+ if (ret <= 0) {
+ kmem_cache_free(s->rcvbuf_cache, buf);
+ goto out_close;
+ }
+
+ s->tipc_conn_recvmsg(con->conid, &addr, con->usr_data, buf, ret);
+
+ kmem_cache_free(s->rcvbuf_cache, buf);
+
+ return 0;
+
+out_close:
+ if (ret != -EWOULDBLOCK)
+ tipc_close_conn(con);
+ else if (ret == 0)
+ /* Don't return success if we really got EOF */
+ ret = -EAGAIN;
+
+ return ret;
+}
+
+static int tipc_accept_from_sock(struct tipc_conn *con)
+{
+ struct tipc_server *s = con->server;
+ struct socket *sock = con->sock;
+ struct socket *newsock;
+ struct tipc_conn *newcon;
+ int ret;
+
+ ret = tipc_sock_accept_local(sock, &newsock, O_NONBLOCK);
+ if (ret < 0)
+ return ret;
+
+ newcon = tipc_alloc_conn(con->server);
+ if (IS_ERR(newcon)) {
+ ret = PTR_ERR(newcon);
+ sock_release(newsock);
+ return ret;
+ }
+
+ newcon->rx_action = tipc_receive_from_sock;
+ tipc_register_callbacks(newsock, newcon);
+
+ /* Notify that new connection is incoming */
+ newcon->usr_data = s->tipc_conn_new(newcon->conid);
+
+ /* Wake up receive process in case of 'SYN+' message */
+ newsock->sk->sk_data_ready(newsock->sk);
+ return ret;
+}
+
+static struct socket *tipc_create_listen_sock(struct tipc_conn *con)
+{
+ struct tipc_server *s = con->server;
+ struct socket *sock = NULL;
+ int ret;
+
+ ret = tipc_sock_create_local(s->type, &sock);
+ if (ret < 0)
+ return NULL;
+ ret = kernel_setsockopt(sock, SOL_TIPC, TIPC_IMPORTANCE,
+ (char *)&s->imp, sizeof(s->imp));
+ if (ret < 0)
+ goto create_err;
+ ret = kernel_bind(sock, (struct sockaddr *)s->saddr, sizeof(*s->saddr));
+ if (ret < 0)
+ goto create_err;
+
+ switch (s->type) {
+ case SOCK_STREAM:
+ case SOCK_SEQPACKET:
+ con->rx_action = tipc_accept_from_sock;
+
+ ret = kernel_listen(sock, 0);
+ if (ret < 0)
+ goto create_err;
+ break;
+ case SOCK_DGRAM:
+ case SOCK_RDM:
+ con->rx_action = tipc_receive_from_sock;
+ break;
+ default:
+ pr_err("Unknown socket type %d\n", s->type);
+ goto create_err;
+ }
+ return sock;
+
+create_err:
+ sock_release(sock);
+ con->sock = NULL;
+ return NULL;
+}
+
+static int tipc_open_listening_sock(struct tipc_server *s)
+{
+ struct socket *sock;
+ struct tipc_conn *con;
+
+ con = tipc_alloc_conn(s);
+ if (IS_ERR(con))
+ return PTR_ERR(con);
+
+ sock = tipc_create_listen_sock(con);
+ if (!sock) {
+ idr_remove(&s->conn_idr, con->conid);
+ s->idr_in_use--;
+ kfree(con);
+ return -EINVAL;
+ }
+
+ tipc_register_callbacks(sock, con);
+ return 0;
+}
+
+static struct outqueue_entry *tipc_alloc_entry(void *data, int len)
+{
+ struct outqueue_entry *entry;
+ void *buf;
+
+ entry = kmalloc(sizeof(struct outqueue_entry), GFP_ATOMIC);
+ if (!entry)
+ return NULL;
+
+ buf = kmalloc(len, GFP_ATOMIC);
+ if (!buf) {
+ kfree(entry);
+ return NULL;
+ }
+
+ memcpy(buf, data, len);
+ entry->iov.iov_base = buf;
+ entry->iov.iov_len = len;
+
+ return entry;
+}
+
+static void tipc_free_entry(struct outqueue_entry *e)
+{
+ kfree(e->iov.iov_base);
+ kfree(e);
+}
+
+static void tipc_clean_outqueues(struct tipc_conn *con)
+{
+ struct outqueue_entry *e, *safe;
+
+ spin_lock_bh(&con->outqueue_lock);
+ list_for_each_entry_safe(e, safe, &con->outqueue, list) {
+ list_del(&e->list);
+ tipc_free_entry(e);
+ }
+ spin_unlock_bh(&con->outqueue_lock);
+}
+
+int tipc_conn_sendmsg(struct tipc_server *s, int conid,
+ struct sockaddr_tipc *addr, void *data, size_t len)
+{
+ struct outqueue_entry *e;
+ struct tipc_conn *con;
+
+ con = tipc_conn_lookup(s, conid);
+ if (!con)
+ return -EINVAL;
+
+ e = tipc_alloc_entry(data, len);
+ if (!e) {
+ conn_put(con);
+ return -ENOMEM;
+ }
+
+ if (addr)
+ memcpy(&e->dest, addr, sizeof(struct sockaddr_tipc));
+
+ spin_lock_bh(&con->outqueue_lock);
+ list_add_tail(&e->list, &con->outqueue);
+ spin_unlock_bh(&con->outqueue_lock);
+
+ if (test_bit(CF_CONNECTED, &con->flags)) {
+ if (!queue_work(s->send_wq, &con->swork))
+ conn_put(con);
+ } else {
+ conn_put(con);
+ }
+ return 0;
+}
+
+void tipc_conn_terminate(struct tipc_server *s, int conid)
+{
+ struct tipc_conn *con;
+
+ con = tipc_conn_lookup(s, conid);
+ if (con) {
+ tipc_close_conn(con);
+ conn_put(con);
+ }
+}
+
+static void tipc_send_to_sock(struct tipc_conn *con)
+{
+ int count = 0;
+ struct tipc_server *s = con->server;
+ struct outqueue_entry *e;
+ struct msghdr msg;
+ int ret;
+
+ spin_lock_bh(&con->outqueue_lock);
+ while (1) {
+ e = list_entry(con->outqueue.next, struct outqueue_entry,
+ list);
+ if ((struct list_head *) e == &con->outqueue)
+ break;
+ spin_unlock_bh(&con->outqueue_lock);
+
+ memset(&msg, 0, sizeof(msg));
+ msg.msg_flags = MSG_DONTWAIT;
+
+ if (s->type == SOCK_DGRAM || s->type == SOCK_RDM) {
+ msg.msg_name = &e->dest;
+ msg.msg_namelen = sizeof(struct sockaddr_tipc);
+ }
+ ret = kernel_sendmsg(con->sock, &msg, &e->iov, 1,
+ e->iov.iov_len);
+ if (ret == -EWOULDBLOCK || ret == 0) {
+ cond_resched();
+ goto out;
+ } else if (ret < 0) {
+ goto send_err;
+ }
+
+ /* Don't starve users filling buffers */
+ if (++count >= MAX_SEND_MSG_COUNT) {
+ cond_resched();
+ count = 0;
+ }
+
+ spin_lock_bh(&con->outqueue_lock);
+ list_del(&e->list);
+ tipc_free_entry(e);
+ }
+ spin_unlock_bh(&con->outqueue_lock);
+out:
+ return;
+
+send_err:
+ tipc_close_conn(con);
+}
+
+static void tipc_recv_work(struct work_struct *work)
+{
+ struct tipc_conn *con = container_of(work, struct tipc_conn, rwork);
+ int count = 0;
+
+ while (test_bit(CF_CONNECTED, &con->flags)) {
+ if (con->rx_action(con))
+ break;
+
+ /* Don't flood Rx machine */
+ if (++count >= MAX_RECV_MSG_COUNT) {
+ cond_resched();
+ count = 0;
+ }
+ }
+ conn_put(con);
+}
+
+static void tipc_send_work(struct work_struct *work)
+{
+ struct tipc_conn *con = container_of(work, struct tipc_conn, swork);
+
+ if (test_bit(CF_CONNECTED, &con->flags))
+ tipc_send_to_sock(con);
+
+ conn_put(con);
+}
+
+static void tipc_work_stop(struct tipc_server *s)
+{
+ destroy_workqueue(s->rcv_wq);
+ destroy_workqueue(s->send_wq);
+}
+
+static int tipc_work_start(struct tipc_server *s)
+{
+ s->rcv_wq = alloc_workqueue("tipc_rcv", WQ_UNBOUND, 1);
+ if (!s->rcv_wq) {
+ pr_err("can't start tipc receive workqueue\n");
+ return -ENOMEM;
+ }
+
+ s->send_wq = alloc_workqueue("tipc_send", WQ_UNBOUND, 1);
+ if (!s->send_wq) {
+ pr_err("can't start tipc send workqueue\n");
+ destroy_workqueue(s->rcv_wq);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+int tipc_server_start(struct tipc_server *s)
+{
+ int ret;
+
+ spin_lock_init(&s->idr_lock);
+ idr_init(&s->conn_idr);
+ s->idr_in_use = 0;
+
+ s->rcvbuf_cache = kmem_cache_create(s->name, s->max_rcvbuf_size,
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!s->rcvbuf_cache)
+ return -ENOMEM;
+
+ ret = tipc_work_start(s);
+ if (ret < 0) {
+ kmem_cache_destroy(s->rcvbuf_cache);
+ return ret;
+ }
+ ret = tipc_open_listening_sock(s);
+ if (ret < 0) {
+ tipc_work_stop(s);
+ kmem_cache_destroy(s->rcvbuf_cache);
+ return ret;
+ }
+ return ret;
+}
+
+void tipc_server_stop(struct tipc_server *s)
+{
+ struct tipc_conn *con;
+ int total = 0;
+ int id;
+
+ spin_lock_bh(&s->idr_lock);
+ for (id = 0; total < s->idr_in_use; id++) {
+ con = idr_find(&s->conn_idr, id);
+ if (con) {
+ total++;
+ spin_unlock_bh(&s->idr_lock);
+ tipc_close_conn(con);
+ spin_lock_bh(&s->idr_lock);
+ }
+ }
+ spin_unlock_bh(&s->idr_lock);
+
+ tipc_work_stop(s);
+ kmem_cache_destroy(s->rcvbuf_cache);
+ idr_destroy(&s->conn_idr);
+}
diff --git a/net/tipc/server.h b/net/tipc/server.h
new file mode 100644
index 00000000000..be817b0b547
--- /dev/null
+++ b/net/tipc/server.h
@@ -0,0 +1,92 @@
+/*
+ * net/tipc/server.h: Include file for TIPC server code
+ *
+ * Copyright (c) 2012-2013, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_SERVER_H
+#define _TIPC_SERVER_H
+
+#include "core.h"
+
+#define TIPC_SERVER_NAME_LEN 32
+
+/**
+ * struct tipc_server - TIPC server structure
+ * @conn_idr: identifier set of connection
+ * @idr_lock: protect the connection identifier set
+ * @idr_in_use: amount of allocated identifier entry
+ * @rcvbuf_cache: memory cache of server receive buffer
+ * @rcv_wq: receive workqueue
+ * @send_wq: send workqueue
+ * @max_rcvbuf_size: maximum permitted receive message length
+ * @tipc_conn_new: callback will be called when new connection is incoming
+ * @tipc_conn_shutdown: callback will be called when connection is shut down
+ * @tipc_conn_recvmsg: callback will be called when message arrives
+ * @saddr: TIPC server address
+ * @name: server name
+ * @imp: message importance
+ * @type: socket type
+ */
+struct tipc_server {
+ struct idr conn_idr;
+ spinlock_t idr_lock;
+ int idr_in_use;
+ struct kmem_cache *rcvbuf_cache;
+ struct workqueue_struct *rcv_wq;
+ struct workqueue_struct *send_wq;
+ int max_rcvbuf_size;
+ void *(*tipc_conn_new) (int conid);
+ void (*tipc_conn_shutdown) (int conid, void *usr_data);
+ void (*tipc_conn_recvmsg) (int conid, struct sockaddr_tipc *addr,
+ void *usr_data, void *buf, size_t len);
+ struct sockaddr_tipc *saddr;
+ const char name[TIPC_SERVER_NAME_LEN];
+ int imp;
+ int type;
+};
+
+int tipc_conn_sendmsg(struct tipc_server *s, int conid,
+ struct sockaddr_tipc *addr, void *data, size_t len);
+
+/**
+ * tipc_conn_terminate - terminate connection with server
+ *
+ * Note: Must call it in process context since it might sleep
+ */
+void tipc_conn_terminate(struct tipc_server *s, int conid);
+
+int tipc_server_start(struct tipc_server *s);
+
+void tipc_server_stop(struct tipc_server *s);
+
+#endif
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index cd0bb77f267..ef0475568f9 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1,8 +1,8 @@
/*
* net/tipc/socket.c: TIPC socket API
*
- * Copyright (c) 2001-2007, Ericsson AB
- * Copyright (c) 2004-2008, Wind River Systems
+ * Copyright (c) 2001-2007, 2012-2014, Ericsson AB
+ * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -34,54 +34,29 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/net.h>
-#include <linux/socket.h>
-#include <linux/errno.h>
-#include <linux/mm.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/gfp.h>
-#include <asm/string.h>
-#include <asm/atomic.h>
-#include <net/sock.h>
-
-#include <linux/tipc.h>
-#include <linux/tipc_config.h>
-
#include "core.h"
#include "port.h"
+#include "node.h"
+
+#include <linux/export.h>
#define SS_LISTENING -1 /* socket is listening */
#define SS_READY -2 /* socket is connectionless */
-#define OVERLOAD_LIMIT_BASE 5000
#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
-struct tipc_sock {
- struct sock sk;
- struct tipc_port *p;
- struct tipc_portid peer_name;
- long conn_timeout;
-};
-
-#define tipc_sk(sk) ((struct tipc_sock *)(sk))
-#define tipc_sk_port(sk) ((struct tipc_port *)(tipc_sk(sk)->p))
-
-static int backlog_rcv(struct sock *sk, struct sk_buff *skb);
-static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf);
-static void wakeupdispatch(struct tipc_port *tport);
+static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb);
+static void tipc_data_ready(struct sock *sk);
+static void tipc_write_space(struct sock *sk);
+static int tipc_release(struct socket *sock);
+static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
static const struct proto_ops packet_ops;
static const struct proto_ops stream_ops;
static const struct proto_ops msg_ops;
static struct proto tipc_proto;
-
-static int sockets_enabled = 0;
-
-static atomic_t tipc_queue_size = ATOMIC_INIT(0);
+static struct proto tipc_proto_kern;
/*
* Revised TIPC socket locking policy:
@@ -128,32 +103,16 @@ static atomic_t tipc_queue_size = ATOMIC_INIT(0);
* - port reference
*/
+#include "socket.h"
+
/**
* advance_rx_queue - discard first buffer in socket receive queue
*
* Caller must hold socket lock
*/
-
static void advance_rx_queue(struct sock *sk)
{
- buf_discard(__skb_dequeue(&sk->sk_receive_queue));
- atomic_dec(&tipc_queue_size);
-}
-
-/**
- * discard_rx_queue - discard all buffers in socket receive queue
- *
- * Caller must hold socket lock
- */
-
-static void discard_rx_queue(struct sock *sk)
-{
- struct sk_buff *buf;
-
- while ((buf = __skb_dequeue(&sk->sk_receive_queue))) {
- atomic_dec(&tipc_queue_size);
- buf_discard(buf);
- }
+ kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
}
/**
@@ -161,19 +120,16 @@ static void discard_rx_queue(struct sock *sk)
*
* Caller must hold socket lock
*/
-
static void reject_rx_queue(struct sock *sk)
{
struct sk_buff *buf;
- while ((buf = __skb_dequeue(&sk->sk_receive_queue))) {
+ while ((buf = __skb_dequeue(&sk->sk_receive_queue)))
tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
- atomic_dec(&tipc_queue_size);
- }
}
/**
- * tipc_create - create a TIPC socket
+ * tipc_sk_create - create a TIPC socket
* @net: network namespace (must be default network)
* @sock: pre-allocated socket structure
* @protocol: protocol indicator (must be 0)
@@ -184,20 +140,17 @@ static void reject_rx_queue(struct sock *sk)
*
* Returns 0 on success, errno otherwise
*/
-
-static int tipc_create(struct net *net, struct socket *sock, int protocol,
- int kern)
+static int tipc_sk_create(struct net *net, struct socket *sock,
+ int protocol, int kern)
{
const struct proto_ops *ops;
socket_state state;
struct sock *sk;
- struct tipc_port *tp_ptr;
+ struct tipc_sock *tsk;
+ struct tipc_port *port;
+ u32 ref;
/* Validate arguments */
-
- if (!net_eq(net, &init_net))
- return -EAFNOSUPPORT;
-
if (unlikely(protocol != 0))
return -EPROTONOSUPPORT;
@@ -220,44 +173,116 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol,
}
/* Allocate socket's protocol area */
+ if (!kern)
+ sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto);
+ else
+ sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto_kern);
- sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto);
if (sk == NULL)
return -ENOMEM;
- /* Allocate TIPC port for socket to use */
+ tsk = tipc_sk(sk);
+ port = &tsk->port;
- tp_ptr = tipc_createport_raw(sk, &dispatch, &wakeupdispatch,
- TIPC_LOW_IMPORTANCE);
- if (unlikely(!tp_ptr)) {
+ ref = tipc_port_init(port, TIPC_LOW_IMPORTANCE);
+ if (!ref) {
+ pr_warn("Socket registration failed, ref. table exhausted\n");
sk_free(sk);
return -ENOMEM;
}
/* Finish initializing socket data structures */
-
sock->ops = ops;
sock->state = state;
sock_init_data(sock, sk);
- sk->sk_backlog_rcv = backlog_rcv;
- tipc_sk(sk)->p = tp_ptr;
- tipc_sk(sk)->conn_timeout = msecs_to_jiffies(CONN_TIMEOUT_DEFAULT);
-
- spin_unlock_bh(tp_ptr->lock);
+ sk->sk_backlog_rcv = tipc_backlog_rcv;
+ sk->sk_rcvbuf = sysctl_tipc_rmem[1];
+ sk->sk_data_ready = tipc_data_ready;
+ sk->sk_write_space = tipc_write_space;
+ tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
+ atomic_set(&tsk->dupl_rcvcnt, 0);
+ tipc_port_unlock(port);
if (sock->state == SS_READY) {
- tipc_set_portunreturnable(tp_ptr->ref, 1);
+ tipc_port_set_unreturnable(port, true);
if (sock->type == SOCK_DGRAM)
- tipc_set_portunreliable(tp_ptr->ref, 1);
+ tipc_port_set_unreliable(port, true);
+ }
+ return 0;
+}
+
+/**
+ * tipc_sock_create_local - create TIPC socket from inside TIPC module
+ * @type: socket type - SOCK_RDM or SOCK_SEQPACKET
+ *
+ * We cannot use sock_create_kern() here because it bumps the module user
+ * count. Since the socket owner and creator are the same module, we must
+ * make sure the module count remains zero for module-local sockets;
+ * otherwise rmmod becomes impossible.
+ *
+ * Returns 0 on success, errno otherwise
+ */
+int tipc_sock_create_local(int type, struct socket **res)
+{
+ int rc;
+
+ rc = sock_create_lite(AF_TIPC, type, 0, res);
+ if (rc < 0) {
+ pr_err("Failed to create kernel socket\n");
+ return rc;
}
+ tipc_sk_create(&init_net, *res, 0, 1);
- atomic_inc(&tipc_user_count);
return 0;
}
/**
- * release - destroy a TIPC socket
+ * tipc_sock_release_local - release socket created by tipc_sock_create_local
+ * @sock: the socket to be released.
+ *
+ * Module reference count is not incremented when such sockets are created,
+ * so we must keep it from being decremented when they are released.
+ */
+void tipc_sock_release_local(struct socket *sock)
+{
+ tipc_release(sock);
+ sock->ops = NULL;
+ sock_release(sock);
+}
+
+/**
+ * tipc_sock_accept_local - accept a connection on a socket created
+ * with tipc_sock_create_local. Use this function to avoid inadvertently
+ * incrementing the module reference count.
+ *
+ * @sock: the accepting socket
+ * @newsock: reference to the new socket to be created
+ * @flags: socket flags
+ */
+
+int tipc_sock_accept_local(struct socket *sock, struct socket **newsock,
+ int flags)
+{
+ struct sock *sk = sock->sk;
+ int ret;
+
+ ret = sock_create_lite(sk->sk_family, sk->sk_type,
+ sk->sk_protocol, newsock);
+ if (ret < 0)
+ return ret;
+
+ ret = tipc_accept(sock, *newsock, flags);
+ if (ret < 0) {
+ sock_release(*newsock);
+ return ret;
+ }
+ (*newsock)->ops = sock->ops;
+ return ret;
+}
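For illustration only (not part of this patch): a sketch of how an in-kernel caller, such as the internal topology or configuration service, might use the three local-socket helpers above without touching the module reference count. The my_* names are assumptions.

static struct socket *my_listener;

static int my_listener_init(void)
{
	int rc;

	rc = tipc_sock_create_local(SOCK_SEQPACKET, &my_listener);
	if (rc < 0)
		return rc;
	/* kernel_bind()/kernel_listen() on my_listener would follow here */
	return 0;
}

static int my_listener_accept_one(void)
{
	struct socket *child;
	int rc;

	rc = tipc_sock_accept_local(my_listener, &child, O_NONBLOCK);
	if (rc < 0)
		return rc;
	/* ... service the new connection ... */
	tipc_sock_release_local(child);
	return 0;
}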
+
+/**
+ * tipc_release - destroy a TIPC socket
* @sock: socket to destroy
*
* This routine cleans up any messages that are still queued on the socket.
@@ -272,72 +297,64 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol,
*
* Returns 0 on success, errno otherwise
*/
-
-static int release(struct socket *sock)
+static int tipc_release(struct socket *sock)
{
struct sock *sk = sock->sk;
- struct tipc_port *tport;
+ struct tipc_sock *tsk;
+ struct tipc_port *port;
struct sk_buff *buf;
- int res;
/*
* Exit if socket isn't fully initialized (occurs when a failed accept()
* releases a pre-allocated child socket that was never used)
*/
-
if (sk == NULL)
return 0;
- tport = tipc_sk_port(sk);
+ tsk = tipc_sk(sk);
+ port = &tsk->port;
lock_sock(sk);
/*
* Reject all unreceived messages, except on an active connection
* (which disconnects locally & sends a 'FIN+' to peer)
*/
-
while (sock->state != SS_DISCONNECTING) {
buf = __skb_dequeue(&sk->sk_receive_queue);
if (buf == NULL)
break;
- atomic_dec(&tipc_queue_size);
- if (TIPC_SKB_CB(buf)->handle != msg_data(buf_msg(buf)))
- buf_discard(buf);
+ if (TIPC_SKB_CB(buf)->handle != NULL)
+ kfree_skb(buf);
else {
if ((sock->state == SS_CONNECTING) ||
(sock->state == SS_CONNECTED)) {
sock->state = SS_DISCONNECTING;
- tipc_disconnect(tport->ref);
+ tipc_port_disconnect(port->ref);
}
tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
}
}
- /*
- * Delete TIPC port; this ensures no more messages are queued
- * (also disconnects an active connection & sends a 'FIN-' to peer)
+ /* Destroy TIPC port; also disconnects an active connection and
+ * sends a 'FIN-' to peer.
*/
-
- res = tipc_deleteport(tport->ref);
+ tipc_port_destroy(port);
/* Discard any remaining (connection-based) messages in receive queue */
-
- discard_rx_queue(sk);
+ __skb_queue_purge(&sk->sk_receive_queue);
/* Reject any messages that accumulated in backlog queue */
-
sock->state = SS_DISCONNECTING;
release_sock(sk);
sock_put(sk);
sock->sk = NULL;
- atomic_dec(&tipc_user_count);
- return res;
+ return 0;
}
/**
- * bind - associate or disassocate TIPC name(s) with a socket
+ * tipc_bind - associate or disassociate TIPC name(s) with a socket
* @sock: socket structure
* @uaddr: socket address describing name(s) and desired operation
* @uaddr_len: size of socket address data structure
@@ -351,32 +368,53 @@ static int release(struct socket *sock)
* NOTE: This routine doesn't need to take the socket lock since it doesn't
* access any non-constant socket information.
*/
-
-static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
+static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
+ int uaddr_len)
{
+ struct sock *sk = sock->sk;
struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
- u32 portref = tipc_sk_port(sock->sk)->ref;
+ struct tipc_sock *tsk = tipc_sk(sk);
+ int res = -EINVAL;
- if (unlikely(!uaddr_len))
- return tipc_withdraw(portref, 0, NULL);
+ lock_sock(sk);
+ if (unlikely(!uaddr_len)) {
+ res = tipc_withdraw(&tsk->port, 0, NULL);
+ goto exit;
+ }
- if (uaddr_len < sizeof(struct sockaddr_tipc))
- return -EINVAL;
- if (addr->family != AF_TIPC)
- return -EAFNOSUPPORT;
+ if (uaddr_len < sizeof(struct sockaddr_tipc)) {
+ res = -EINVAL;
+ goto exit;
+ }
+ if (addr->family != AF_TIPC) {
+ res = -EAFNOSUPPORT;
+ goto exit;
+ }
if (addr->addrtype == TIPC_ADDR_NAME)
addr->addr.nameseq.upper = addr->addr.nameseq.lower;
- else if (addr->addrtype != TIPC_ADDR_NAMESEQ)
- return -EAFNOSUPPORT;
+ else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
+ res = -EAFNOSUPPORT;
+ goto exit;
+ }
+
+ if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
+ (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
+ (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
+ res = -EACCES;
+ goto exit;
+ }
- return (addr->scope > 0) ?
- tipc_publish(portref, addr->scope, &addr->addr.nameseq) :
- tipc_withdraw(portref, -addr->scope, &addr->addr.nameseq);
+ res = (addr->scope > 0) ?
+ tipc_publish(&tsk->port, addr->scope, &addr->addr.nameseq) :
+ tipc_withdraw(&tsk->port, -addr->scope, &addr->addr.nameseq);
+exit:
+ release_sock(sk);
+ return res;
}
/**
- * get_name - get port ID of socket or peer socket
+ * tipc_getname - get port ID of socket or peer socket
* @sock: socket structure
* @uaddr: area for returned socket address
* @uaddr_len: area for returned length of socket address
@@ -386,24 +424,23 @@ static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
*
* NOTE: This routine doesn't need to take the socket lock since it only
* accesses socket information that is unchanging (or which changes in
- * a completely predictable manner).
+ * a completely predictable manner).
*/
-
-static int get_name(struct socket *sock, struct sockaddr *uaddr,
- int *uaddr_len, int peer)
+static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
+ int *uaddr_len, int peer)
{
struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
- struct tipc_sock *tsock = tipc_sk(sock->sk);
+ struct tipc_sock *tsk = tipc_sk(sock->sk);
memset(addr, 0, sizeof(*addr));
if (peer) {
if ((sock->state != SS_CONNECTED) &&
((peer != 2) || (sock->state != SS_DISCONNECTING)))
return -ENOTCONN;
- addr->addr.id.ref = tsock->peer_name.ref;
- addr->addr.id.node = tsock->peer_name.node;
+ addr->addr.id.ref = tipc_port_peerport(&tsk->port);
+ addr->addr.id.node = tipc_port_peernode(&tsk->port);
} else {
- addr->addr.id.ref = tsock->p->ref;
+ addr->addr.id.ref = tsk->port.ref;
addr->addr.id.node = tipc_own_addr;
}
@@ -417,7 +454,7 @@ static int get_name(struct socket *sock, struct sockaddr *uaddr,
}
/**
- * poll - read and possibly block on pollmask
+ * tipc_poll - read and possibly block on pollmask
* @file: file structure associated with the socket
* @sock: socket for which to calculate the poll bits
* @wait: ???
@@ -435,7 +472,7 @@ static int get_name(struct socket *sock, struct sockaddr *uaddr,
* socket state flags set
* ------------ ---------
* unconnected no read flags
- * no write flags
+ * POLLOUT if port is not congested
*
* connecting POLLIN/POLLRDNORM if ACK/NACK in rx queue
* no write flags
@@ -456,19 +493,23 @@ static int get_name(struct socket *sock, struct sockaddr *uaddr,
* imply that the operation will succeed, merely that it should be performed
* and will not block.
*/
-
-static unsigned int poll(struct file *file, struct socket *sock,
- poll_table *wait)
+static unsigned int tipc_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
{
struct sock *sk = sock->sk;
+ struct tipc_sock *tsk = tipc_sk(sk);
u32 mask = 0;
- poll_wait(file, sk_sleep(sk), wait);
+ sock_poll_wait(file, sk_sleep(sk), wait);
switch ((int)sock->state) {
+ case SS_UNCONNECTED:
+ if (!tsk->port.congested)
+ mask |= POLLOUT;
+ break;
case SS_READY:
case SS_CONNECTED:
- if (!tipc_sk_port(sk)->congested)
+ if (!tsk->port.congested)
mask |= POLLOUT;
/* fall thru' */
case SS_CONNECTING:
@@ -494,7 +535,6 @@ static unsigned int poll(struct file *file, struct socket *sock,
*
* Returns 0 if permission is granted, otherwise errno
*/
-
static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
{
struct tipc_cfg_msg_hdr hdr;
@@ -506,6 +546,8 @@ static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
if (likely(dest->addr.name.name.type != TIPC_CFG_SRV))
return -EACCES;
+ if (!m->msg_iovlen || (m->msg_iov[0].iov_len < sizeof(hdr)))
+ return -EMSGSIZE;
if (copy_from_user(&hdr, m->msg_iov[0].iov_base, sizeof(hdr)))
return -EFAULT;
if ((ntohs(hdr.tcm_type) & 0xC000) && (!capable(CAP_NET_ADMIN)))
@@ -514,8 +556,34 @@ static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
return 0;
}
+static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
+{
+ struct sock *sk = sock->sk;
+ struct tipc_sock *tsk = tipc_sk(sk);
+ DEFINE_WAIT(wait);
+ int done;
+
+ do {
+ int err = sock_error(sk);
+ if (err)
+ return err;
+ if (sock->state == SS_DISCONNECTING)
+ return -EPIPE;
+ if (!*timeo_p)
+ return -EAGAIN;
+ if (signal_pending(current))
+ return sock_intr_errno(*timeo_p);
+
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+ done = sk_wait_event(sk, timeo_p, !tsk->port.congested);
+ finish_wait(sk_sleep(sk), &wait);
+ } while (!done);
+ return 0;
+}
+
+
/**
- * send_msg - send message in connectionless manner
+ * tipc_sendmsg - send message in connectionless manner
* @iocb: if NULL, indicates that socket lock is already held
* @sock: socket structure
* @m: message to send
@@ -528,14 +596,15 @@ static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
*
* Returns the number of bytes sent on success, or errno otherwise
*/
-
-static int send_msg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *m, size_t total_len)
+static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t total_len)
{
struct sock *sk = sock->sk;
- struct tipc_port *tport = tipc_sk_port(sk);
- struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name;
+ struct tipc_sock *tsk = tipc_sk(sk);
+ struct tipc_port *port = &tsk->port;
+ DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
int needs_conn;
+ long timeo;
int res = -EINVAL;
if (unlikely(!dest))
@@ -543,6 +612,8 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
if (unlikely((m->msg_namelen < sizeof(*dest)) ||
(dest->family != AF_TIPC)))
return -EINVAL;
+ if (total_len > TIPC_MAX_USER_MSG_SIZE)
+ return -EMSGSIZE;
if (iocb)
lock_sock(sk);
@@ -557,63 +628,54 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
res = -EISCONN;
goto exit;
}
- if ((tport->published) ||
- ((sock->type == SOCK_STREAM) && (total_len != 0))) {
+ if (tsk->port.published) {
res = -EOPNOTSUPP;
goto exit;
}
if (dest->addrtype == TIPC_ADDR_NAME) {
- tport->conn_type = dest->addr.name.name.type;
- tport->conn_instance = dest->addr.name.name.instance;
+ tsk->port.conn_type = dest->addr.name.name.type;
+ tsk->port.conn_instance = dest->addr.name.name.instance;
}
/* Abort any pending connection attempts (very unlikely) */
-
reject_rx_queue(sk);
}
+ timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
do {
if (dest->addrtype == TIPC_ADDR_NAME) {
- if ((res = dest_name_check(dest, m)))
+ res = dest_name_check(dest, m);
+ if (res)
break;
- res = tipc_send2name(tport->ref,
+ res = tipc_send2name(port,
&dest->addr.name.name,
dest->addr.name.domain,
- m->msg_iovlen,
- m->msg_iov);
- }
- else if (dest->addrtype == TIPC_ADDR_ID) {
- res = tipc_send2port(tport->ref,
+ m->msg_iov,
+ total_len);
+ } else if (dest->addrtype == TIPC_ADDR_ID) {
+ res = tipc_send2port(port,
&dest->addr.id,
- m->msg_iovlen,
- m->msg_iov);
- }
- else if (dest->addrtype == TIPC_ADDR_MCAST) {
+ m->msg_iov,
+ total_len);
+ } else if (dest->addrtype == TIPC_ADDR_MCAST) {
if (needs_conn) {
res = -EOPNOTSUPP;
break;
}
- if ((res = dest_name_check(dest, m)))
+ res = dest_name_check(dest, m);
+ if (res)
break;
- res = tipc_multicast(tport->ref,
- &dest->addr.nameseq,
- m->msg_iovlen,
- m->msg_iov);
+ res = tipc_port_mcast_xmit(port,
+ &dest->addr.nameseq,
+ m->msg_iov,
+ total_len);
}
if (likely(res != -ELINKCONG)) {
- if (needs_conn && (res >= 0)) {
+ if (needs_conn && (res >= 0))
sock->state = SS_CONNECTING;
- }
- break;
- }
- if (m->msg_flags & MSG_DONTWAIT) {
- res = -EWOULDBLOCK;
break;
}
- release_sock(sk);
- res = wait_event_interruptible(*sk_sleep(sk),
- !tport->congested);
- lock_sock(sk);
+ res = tipc_wait_for_sndmsg(sock, &timeo);
if (res)
break;
} while (1);
@@ -624,8 +686,37 @@ exit:
return res;
}
+static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p)
+{
+ struct sock *sk = sock->sk;
+ struct tipc_sock *tsk = tipc_sk(sk);
+ struct tipc_port *port = &tsk->port;
+ DEFINE_WAIT(wait);
+ int done;
+
+ do {
+ int err = sock_error(sk);
+ if (err)
+ return err;
+ if (sock->state == SS_DISCONNECTING)
+ return -EPIPE;
+ else if (sock->state != SS_CONNECTED)
+ return -ENOTCONN;
+ if (!*timeo_p)
+ return -EAGAIN;
+ if (signal_pending(current))
+ return sock_intr_errno(*timeo_p);
+
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+ done = sk_wait_event(sk, timeo_p,
+ (!port->congested || !port->connected));
+ finish_wait(sk_sleep(sk), &wait);
+ } while (!done);
+ return 0;
+}
+
/**
- * send_packet - send a connection-oriented message
+ * tipc_send_packet - send a connection-oriented message
* @iocb: if NULL, indicates that socket lock is already held
* @sock: socket structure
* @m: message to send
@@ -635,55 +726,50 @@ exit:
*
* Returns the number of bytes sent on success, or errno otherwise
*/
-
-static int send_packet(struct kiocb *iocb, struct socket *sock,
- struct msghdr *m, size_t total_len)
+static int tipc_send_packet(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t total_len)
{
struct sock *sk = sock->sk;
- struct tipc_port *tport = tipc_sk_port(sk);
- struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name;
- int res;
+ struct tipc_sock *tsk = tipc_sk(sk);
+ DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
+ int res = -EINVAL;
+ long timeo;
/* Handle implied connection establishment */
-
if (unlikely(dest))
- return send_msg(iocb, sock, m, total_len);
+ return tipc_sendmsg(iocb, sock, m, total_len);
+
+ if (total_len > TIPC_MAX_USER_MSG_SIZE)
+ return -EMSGSIZE;
if (iocb)
lock_sock(sk);
- do {
- if (unlikely(sock->state != SS_CONNECTED)) {
- if (sock->state == SS_DISCONNECTING)
- res = -EPIPE;
- else
- res = -ENOTCONN;
- break;
- }
+ if (unlikely(sock->state != SS_CONNECTED)) {
+ if (sock->state == SS_DISCONNECTING)
+ res = -EPIPE;
+ else
+ res = -ENOTCONN;
+ goto exit;
+ }
- res = tipc_send(tport->ref, m->msg_iovlen, m->msg_iov);
- if (likely(res != -ELINKCONG)) {
- break;
- }
- if (m->msg_flags & MSG_DONTWAIT) {
- res = -EWOULDBLOCK;
+ timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
+ do {
+ res = tipc_send(&tsk->port, m->msg_iov, total_len);
+ if (likely(res != -ELINKCONG))
break;
- }
- release_sock(sk);
- res = wait_event_interruptible(*sk_sleep(sk),
- (!tport->congested || !tport->connected));
- lock_sock(sk);
+ res = tipc_wait_for_sndpkt(sock, &timeo);
if (res)
break;
} while (1);
-
+exit:
if (iocb)
release_sock(sk);
return res;
}
/**
- * send_stream - send stream-oriented data
+ * tipc_send_stream - send stream-oriented data
* @iocb: (unused)
* @sock: socket structure
* @m: data to send
@@ -694,12 +780,11 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
* Returns the number of bytes sent on success (or partial success),
* or errno if no data sent
*/
-
-static int send_stream(struct kiocb *iocb, struct socket *sock,
- struct msghdr *m, size_t total_len)
+static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t total_len)
{
struct sock *sk = sock->sk;
- struct tipc_port *tport = tipc_sk_port(sk);
+ struct tipc_sock *tsk = tipc_sk(sk);
struct msghdr my_msg;
struct iovec my_iov;
struct iovec *curr_iov;
@@ -714,18 +799,12 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
lock_sock(sk);
/* Handle special cases where there is no connection */
-
if (unlikely(sock->state != SS_CONNECTED)) {
- if (sock->state == SS_UNCONNECTED) {
- res = send_packet(NULL, sock, m, total_len);
- goto exit;
- } else if (sock->state == SS_DISCONNECTING) {
- res = -EPIPE;
- goto exit;
- } else {
- res = -ENOTCONN;
- goto exit;
- }
+ if (sock->state == SS_UNCONNECTED)
+ res = tipc_send_packet(NULL, sock, m, total_len);
+ else
+ res = sock->state == SS_DISCONNECTING ? -EPIPE : -ENOTCONN;
+ goto exit;
}
if (unlikely(m->msg_name)) {
@@ -733,6 +812,11 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
goto exit;
}
+ if (total_len > (unsigned int)INT_MAX) {
+ res = -EMSGSIZE;
+ goto exit;
+ }
+
/*
* Send each iovec entry using one or more messages
*
@@ -740,7 +824,6 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
* (i.e. one large iovec entry), but could be improved to pass sets
* of small iovec entries into send_packet().
*/
-
curr_iov = m->msg_iov;
curr_iovlen = m->msg_iovlen;
my_msg.msg_iov = &my_iov;
@@ -749,21 +832,23 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
my_msg.msg_name = NULL;
bytes_sent = 0;
- hdr_size = msg_hdr_sz(&tport->phdr);
+ hdr_size = msg_hdr_sz(&tsk->port.phdr);
while (curr_iovlen--) {
curr_start = curr_iov->iov_base;
curr_left = curr_iov->iov_len;
while (curr_left) {
- bytes_to_send = tport->max_pkt - hdr_size;
+ bytes_to_send = tsk->port.max_pkt - hdr_size;
if (bytes_to_send > TIPC_MAX_USER_MSG_SIZE)
bytes_to_send = TIPC_MAX_USER_MSG_SIZE;
if (curr_left < bytes_to_send)
bytes_to_send = curr_left;
my_iov.iov_base = curr_start;
my_iov.iov_len = bytes_to_send;
- if ((res = send_packet(NULL, sock, &my_msg, 0)) < 0) {
+ res = tipc_send_packet(NULL, sock, &my_msg,
+ bytes_to_send);
+ if (res < 0) {
if (bytes_sent)
res = bytes_sent;
goto exit;
@@ -783,25 +868,25 @@ exit:
/**
* auto_connect - complete connection setup to a remote port
- * @sock: socket structure
+ * @tsk: tipc socket structure
* @msg: peer's response message
*
* Returns 0 on success, errno otherwise
*/
-
-static int auto_connect(struct socket *sock, struct tipc_msg *msg)
+static int auto_connect(struct tipc_sock *tsk, struct tipc_msg *msg)
{
- struct tipc_sock *tsock = tipc_sk(sock->sk);
+ struct tipc_port *port = &tsk->port;
+ struct socket *sock = tsk->sk.sk_socket;
+ struct tipc_portid peer;
- if (msg_errcode(msg)) {
- sock->state = SS_DISCONNECTING;
- return -ECONNREFUSED;
- }
+ peer.ref = msg_origport(msg);
+ peer.node = msg_orignode(msg);
+
+ __tipc_port_connect(port->ref, port, &peer);
- tsock->peer_name.ref = msg_origport(msg);
- tsock->peer_name.node = msg_orignode(msg);
- tipc_connect2port(tsock->p->ref, &tsock->peer_name);
- tipc_set_portimportance(tsock->p->ref, msg_importance(msg));
+ if (msg_importance(msg) > TIPC_CRITICAL_IMPORTANCE)
+ return -EINVAL;
+ msg_set_importance(&port->phdr, (u32)msg_importance(msg));
sock->state = SS_CONNECTED;
return 0;
}
@@ -813,18 +898,18 @@ static int auto_connect(struct socket *sock, struct tipc_msg *msg)
*
* Note: Address is not captured if not requested by receiver.
*/
-
static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
{
- struct sockaddr_tipc *addr = (struct sockaddr_tipc *)m->msg_name;
+ DECLARE_SOCKADDR(struct sockaddr_tipc *, addr, m->msg_name);
if (addr) {
addr->family = AF_TIPC;
addr->addrtype = TIPC_ADDR_ID;
+ memset(&addr->addr, 0, sizeof(addr->addr));
addr->addr.id.ref = msg_origport(msg);
addr->addr.id.node = msg_orignode(msg);
- addr->addr.name.domain = 0; /* could leave uninitialized */
- addr->scope = 0; /* could leave uninitialized */
+ addr->addr.name.domain = 0; /* could leave uninitialized */
+ addr->scope = 0; /* could leave uninitialized */
m->msg_namelen = sizeof(struct sockaddr_tipc);
}
}
@@ -839,9 +924,8 @@ static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
*
* Returns 0 if successful, otherwise errno
*/
-
static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
- struct tipc_port *tport)
+ struct tipc_port *tport)
{
u32 anc_data[3];
u32 err;
@@ -853,21 +937,22 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
return 0;
/* Optionally capture errored message object(s) */
-
err = msg ? msg_errcode(msg) : 0;
if (unlikely(err)) {
anc_data[0] = err;
anc_data[1] = msg_data_sz(msg);
- if ((res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data)))
- return res;
- if (anc_data[1] &&
- (res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
- msg_data(msg))))
+ res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
+ if (res)
return res;
+ if (anc_data[1]) {
+ res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
+ msg_data(msg));
+ if (res)
+ return res;
+ }
}
/* Optionally capture message destination object */
-
dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
switch (dest_type) {
case TIPC_NAMED_MSG:
@@ -891,15 +976,50 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
default:
has_name = 0;
}
- if (has_name &&
- (res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data)))
- return res;
+ if (has_name) {
+ res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
+ if (res)
+ return res;
+ }
return 0;
}
+static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
+{
+ struct sock *sk = sock->sk;
+ DEFINE_WAIT(wait);
+ long timeo = *timeop;
+ int err;
+
+ for (;;) {
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+ if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
+ if (sock->state == SS_DISCONNECTING) {
+ err = -ENOTCONN;
+ break;
+ }
+ release_sock(sk);
+ timeo = schedule_timeout(timeo);
+ lock_sock(sk);
+ }
+ err = 0;
+ if (!skb_queue_empty(&sk->sk_receive_queue))
+ break;
+ err = sock_intr_errno(timeo);
+ if (signal_pending(current))
+ break;
+ err = -EAGAIN;
+ if (!timeo)
+ break;
+ }
+ finish_wait(sk_sleep(sk), &wait);
+ *timeop = timeo;
+ return err;
+}
+
/**
- * recv_msg - receive packet-oriented message
+ * tipc_recvmsg - receive packet-oriented message
* @iocb: (unused)
* @m: descriptor for message info
* @buf_len: total size of user buffer area
@@ -910,23 +1030,20 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
*
* Returns size of returned message data, errno otherwise
*/
-
-static int recv_msg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *m, size_t buf_len, int flags)
+static int tipc_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t buf_len, int flags)
{
struct sock *sk = sock->sk;
- struct tipc_port *tport = tipc_sk_port(sk);
+ struct tipc_sock *tsk = tipc_sk(sk);
+ struct tipc_port *port = &tsk->port;
struct sk_buff *buf;
struct tipc_msg *msg;
+ long timeo;
unsigned int sz;
u32 err;
int res;
/* Catch invalid receive requests */
-
- if (m->msg_iovlen != 1)
- return -EOPNOTSUPP; /* Don't do multiple iovec entries yet */
-
if (unlikely(!buf_len))
return -EINVAL;
@@ -937,72 +1054,44 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock,
goto exit;
}
+ timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
restart:
/* Look for a message in receive queue; wait if necessary */
-
- while (skb_queue_empty(&sk->sk_receive_queue)) {
- if (sock->state == SS_DISCONNECTING) {
- res = -ENOTCONN;
- goto exit;
- }
- if (flags & MSG_DONTWAIT) {
- res = -EWOULDBLOCK;
- goto exit;
- }
- release_sock(sk);
- res = wait_event_interruptible(*sk_sleep(sk),
- (!skb_queue_empty(&sk->sk_receive_queue) ||
- (sock->state == SS_DISCONNECTING)));
- lock_sock(sk);
- if (res)
- goto exit;
- }
+ res = tipc_wait_for_rcvmsg(sock, &timeo);
+ if (res)
+ goto exit;
/* Look at first message in receive queue */
-
buf = skb_peek(&sk->sk_receive_queue);
msg = buf_msg(buf);
sz = msg_data_sz(msg);
err = msg_errcode(msg);
- /* Complete connection setup for an implied connect */
-
- if (unlikely(sock->state == SS_CONNECTING)) {
- res = auto_connect(sock, msg);
- if (res)
- goto exit;
- }
-
/* Discard an empty non-errored message & try again */
-
if ((!sz) && (!err)) {
advance_rx_queue(sk);
goto restart;
}
/* Capture sender's address (optional) */
-
set_orig_addr(m, msg);
/* Capture ancillary data (optional) */
-
- res = anc_data_recv(m, msg, tport);
+ res = anc_data_recv(m, msg, port);
if (res)
goto exit;
/* Capture message data (if valid) & compute return value (always) */
-
if (!err) {
if (unlikely(buf_len < sz)) {
sz = buf_len;
m->msg_flags |= MSG_TRUNC;
}
- if (unlikely(copy_to_user(m->msg_iov->iov_base, msg_data(msg),
- sz))) {
- res = -EFAULT;
+ res = skb_copy_datagram_iovec(buf, msg_hdr_sz(msg),
+ m->msg_iov, sz);
+ if (res)
goto exit;
- }
res = sz;
} else {
if ((sock->state == SS_READY) ||
@@ -1013,11 +1102,10 @@ restart:
}
/* Consume received message (optional) */
-
if (likely(!(flags & MSG_PEEK))) {
if ((sock->state != SS_READY) &&
- (++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
- tipc_acknowledge(tport->ref, tport->conn_unacked);
+ (++port->conn_unacked >= TIPC_CONNACK_INTV))
+ tipc_acknowledge(port->ref, port->conn_unacked);
advance_rx_queue(sk);
}
exit:
@@ -1026,7 +1114,7 @@ exit:
}
/**
- * recv_stream - receive stream-oriented data
+ * tipc_recv_stream - receive stream-oriented data
* @iocb: (unused)
* @m: descriptor for message info
* @buf_len: total size of user buffer area
@@ -1037,106 +1125,82 @@ exit:
*
* Returns size of returned message data, errno otherwise
*/
-
-static int recv_stream(struct kiocb *iocb, struct socket *sock,
- struct msghdr *m, size_t buf_len, int flags)
+static int tipc_recv_stream(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t buf_len, int flags)
{
struct sock *sk = sock->sk;
- struct tipc_port *tport = tipc_sk_port(sk);
+ struct tipc_sock *tsk = tipc_sk(sk);
+ struct tipc_port *port = &tsk->port;
struct sk_buff *buf;
struct tipc_msg *msg;
+ long timeo;
unsigned int sz;
int sz_to_copy, target, needed;
int sz_copied = 0;
- char __user *crs = m->msg_iov->iov_base;
- unsigned char *buf_crs;
u32 err;
int res = 0;
/* Catch invalid receive attempts */
-
- if (m->msg_iovlen != 1)
- return -EOPNOTSUPP; /* Don't do multiple iovec entries yet */
-
if (unlikely(!buf_len))
return -EINVAL;
lock_sock(sk);
- if (unlikely((sock->state == SS_UNCONNECTED) ||
- (sock->state == SS_CONNECTING))) {
+ if (unlikely(sock->state == SS_UNCONNECTED)) {
res = -ENOTCONN;
goto exit;
}
target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
+ timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
restart:
-
/* Look for a message in receive queue; wait if necessary */
-
- while (skb_queue_empty(&sk->sk_receive_queue)) {
- if (sock->state == SS_DISCONNECTING) {
- res = -ENOTCONN;
- goto exit;
- }
- if (flags & MSG_DONTWAIT) {
- res = -EWOULDBLOCK;
- goto exit;
- }
- release_sock(sk);
- res = wait_event_interruptible(*sk_sleep(sk),
- (!skb_queue_empty(&sk->sk_receive_queue) ||
- (sock->state == SS_DISCONNECTING)));
- lock_sock(sk);
- if (res)
- goto exit;
- }
+ res = tipc_wait_for_rcvmsg(sock, &timeo);
+ if (res)
+ goto exit;
/* Look at first message in receive queue */
-
buf = skb_peek(&sk->sk_receive_queue);
msg = buf_msg(buf);
sz = msg_data_sz(msg);
err = msg_errcode(msg);
/* Discard an empty non-errored message & try again */
-
if ((!sz) && (!err)) {
advance_rx_queue(sk);
goto restart;
}
/* Optionally capture sender's address & ancillary data of first msg */
-
if (sz_copied == 0) {
set_orig_addr(m, msg);
- res = anc_data_recv(m, msg, tport);
+ res = anc_data_recv(m, msg, port);
if (res)
goto exit;
}
/* Capture message data (if valid) & compute return value (always) */
-
if (!err) {
- buf_crs = (unsigned char *)(TIPC_SKB_CB(buf)->handle);
- sz = (unsigned char *)msg + msg_size(msg) - buf_crs;
+ u32 offset = (u32)(unsigned long)(TIPC_SKB_CB(buf)->handle);
+ sz -= offset;
needed = (buf_len - sz_copied);
sz_to_copy = (sz <= needed) ? sz : needed;
- if (unlikely(copy_to_user(crs, buf_crs, sz_to_copy))) {
- res = -EFAULT;
+
+ res = skb_copy_datagram_iovec(buf, msg_hdr_sz(msg) + offset,
+ m->msg_iov, sz_to_copy);
+ if (res)
goto exit;
- }
+
sz_copied += sz_to_copy;
if (sz_to_copy < sz) {
if (!(flags & MSG_PEEK))
- TIPC_SKB_CB(buf)->handle = buf_crs + sz_to_copy;
+ TIPC_SKB_CB(buf)->handle =
+ (void *)(unsigned long)(offset + sz_to_copy);
goto exit;
}
-
- crs += sz_to_copy;
} else {
if (sz_copied != 0)
goto exit; /* can't add error msg to valid data */
@@ -1148,15 +1212,13 @@ restart:
}
/* Consume received message (optional) */
-
if (likely(!(flags & MSG_PEEK))) {
- if (unlikely(++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
- tipc_acknowledge(tport->ref, tport->conn_unacked);
+ if (unlikely(++port->conn_unacked >= TIPC_CONNACK_INTV))
+ tipc_acknowledge(port->ref, port->conn_unacked);
advance_rx_queue(sk);
}
/* Loop around if more data is required */
-
if ((sz_copied < buf_len) && /* didn't get all requested data */
(!skb_queue_empty(&sk->sk_receive_queue) ||
(sz_copied < target)) && /* and more is ready or required */
@@ -1170,32 +1232,144 @@ exit:
}
/**
- * rx_queue_full - determine if receive queue can accept another message
- * @msg: message to be added to queue
- * @queue_size: current size of queue
- * @base: nominal maximum size of queue
+ * tipc_write_space - wake up thread if port congestion is released
+ * @sk: socket
+ */
+static void tipc_write_space(struct sock *sk)
+{
+ struct socket_wq *wq;
+
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
+ POLLWRNORM | POLLWRBAND);
+ rcu_read_unlock();
+}
+
+/**
+ * tipc_data_ready - wake up threads to indicate messages have been received
+ * @sk: socket
+ */
+static void tipc_data_ready(struct sock *sk)
+{
+ struct socket_wq *wq;
+
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
+ POLLRDNORM | POLLRDBAND);
+ rcu_read_unlock();
+}
+
+/**
+ * filter_connect - Handle all incoming messages for a connection-based socket
+ * @tsk: TIPC socket
+ * @msg: message
*
- * Returns 1 if queue is unable to accept message, 0 otherwise
+ * Returns a TIPC error status code, and sets the socket error status
+ * code when an error is encountered
*/
+static u32 filter_connect(struct tipc_sock *tsk, struct sk_buff **buf)
+{
+ struct sock *sk = &tsk->sk;
+ struct tipc_port *port = &tsk->port;
+ struct socket *sock = sk->sk_socket;
+ struct tipc_msg *msg = buf_msg(*buf);
+
+ u32 retval = TIPC_ERR_NO_PORT;
+ int res;
+
+ if (msg_mcast(msg))
+ return retval;
+
+ switch ((int)sock->state) {
+ case SS_CONNECTED:
+ /* Accept only connection-based messages sent by peer */
+ if (msg_connected(msg) && tipc_port_peer_msg(port, msg)) {
+ if (unlikely(msg_errcode(msg))) {
+ sock->state = SS_DISCONNECTING;
+ __tipc_port_disconnect(port);
+ }
+ retval = TIPC_OK;
+ }
+ break;
+ case SS_CONNECTING:
+ /* Accept only ACK or NACK message */
+ if (unlikely(msg_errcode(msg))) {
+ sock->state = SS_DISCONNECTING;
+ sk->sk_err = ECONNREFUSED;
+ retval = TIPC_OK;
+ break;
+ }
+
+ if (unlikely(!msg_connected(msg)))
+ break;
+
+ res = auto_connect(tsk, msg);
+ if (res) {
+ sock->state = SS_DISCONNECTING;
+ sk->sk_err = -res;
+ retval = TIPC_OK;
+ break;
+ }
+
+ /* If an incoming message is an 'ACK-', it should be
+ * discarded here because it doesn't contain useful
+ * data. In addition, we should try to wake up the
+ * connect() routine if it is sleeping.
+ */
+ if (msg_data_sz(msg) == 0) {
+ kfree_skb(*buf);
+ *buf = NULL;
+ if (waitqueue_active(sk_sleep(sk)))
+ wake_up_interruptible(sk_sleep(sk));
+ }
+ retval = TIPC_OK;
+ break;
+ case SS_LISTENING:
+ case SS_UNCONNECTED:
+ /* Accept only SYN message */
+ if (!msg_connected(msg) && !(msg_errcode(msg)))
+ retval = TIPC_OK;
+ break;
+ case SS_DISCONNECTING:
+ break;
+ default:
+ pr_err("Unknown socket state %u\n", sock->state);
+ }
+ return retval;
+}
-static int rx_queue_full(struct tipc_msg *msg, u32 queue_size, u32 base)
+/**
+ * rcvbuf_limit - get proper overload limit of socket receive queue
+ * @sk: socket
+ * @buf: message
+ *
+ * For all connection-oriented messages, irrespective of importance,
+ * the default overload value (i.e. 67MB) is used as the limit.
+ *
+ * For all connectionless messages, the default queue limits are
+ * as follows:
+ *
+ * TIPC_LOW_IMPORTANCE (4 MB)
+ * TIPC_MEDIUM_IMPORTANCE (8 MB)
+ * TIPC_HIGH_IMPORTANCE (16 MB)
+ * TIPC_CRITICAL_IMPORTANCE (32 MB)
+ *
+ * Returns the overload limit corresponding to the message importance
+ */
+static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf)
{
- u32 threshold;
- u32 imp = msg_importance(msg);
-
- if (imp == TIPC_LOW_IMPORTANCE)
- threshold = base;
- else if (imp == TIPC_MEDIUM_IMPORTANCE)
- threshold = base * 2;
- else if (imp == TIPC_HIGH_IMPORTANCE)
- threshold = base * 100;
- else
- return 0;
+ struct tipc_msg *msg = buf_msg(buf);
if (msg_connected(msg))
- threshold *= 4;
+ return sysctl_tipc_rmem[2];
- return queue_size >= threshold;
+ return sk->sk_rcvbuf >> TIPC_CRITICAL_IMPORTANCE <<
+ msg_importance(msg);
}
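For illustration only (not part of this patch): the shift expression above scales the per-socket limit by message importance. A small user-space sketch, assuming the importance levels TIPC_LOW..TIPC_CRITICAL are 0..3 and an illustrative sk_rcvbuf of 32 MB, reproduces the 4/8/16/32 MB figures from the kernel-doc table:

#include <stdio.h>

int main(void)
{
	unsigned int sk_rcvbuf = 32u << 20;	/* assumed: 32 MB */
	unsigned int imp;

	/* mirrors: sk->sk_rcvbuf >> TIPC_CRITICAL_IMPORTANCE << msg_importance(msg) */
	for (imp = 0; imp <= 3; imp++)
		printf("importance %u -> limit %u MB\n",
		       imp, (sk_rcvbuf >> 3 << imp) >> 20);
	return 0;	/* prints 4, 8, 16 and 32 MB */
}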
/**
@@ -1210,95 +1384,42 @@ static int rx_queue_full(struct tipc_msg *msg, u32 queue_size, u32 base)
*
* Returns TIPC error status code (TIPC_OK if message is not to be rejected)
*/
-
static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
{
struct socket *sock = sk->sk_socket;
+ struct tipc_sock *tsk = tipc_sk(sk);
struct tipc_msg *msg = buf_msg(buf);
- u32 recv_q_len;
+ unsigned int limit = rcvbuf_limit(sk, buf);
+ u32 res = TIPC_OK;
/* Reject message if it is wrong sort of message for socket */
-
- /*
- * WOULD IT BE BETTER TO JUST DISCARD THESE MESSAGES INSTEAD?
- * "NO PORT" ISN'T REALLY THE RIGHT ERROR CODE, AND THERE MAY
- * BE SECURITY IMPLICATIONS INHERENT IN REJECTING INVALID TRAFFIC
- */
+ if (msg_type(msg) > TIPC_DIRECT_MSG)
+ return TIPC_ERR_NO_PORT;
if (sock->state == SS_READY) {
- if (msg_connected(msg)) {
- msg_dbg(msg, "dispatch filter 1\n");
+ if (msg_connected(msg))
return TIPC_ERR_NO_PORT;
- }
} else {
- if (msg_mcast(msg)) {
- msg_dbg(msg, "dispatch filter 2\n");
- return TIPC_ERR_NO_PORT;
- }
- if (sock->state == SS_CONNECTED) {
- if (!msg_connected(msg)) {
- msg_dbg(msg, "dispatch filter 3\n");
- return TIPC_ERR_NO_PORT;
- }
- }
- else if (sock->state == SS_CONNECTING) {
- if (!msg_connected(msg) && (msg_errcode(msg) == 0)) {
- msg_dbg(msg, "dispatch filter 4\n");
- return TIPC_ERR_NO_PORT;
- }
- }
- else if (sock->state == SS_LISTENING) {
- if (msg_connected(msg) || msg_errcode(msg)) {
- msg_dbg(msg, "dispatch filter 5\n");
- return TIPC_ERR_NO_PORT;
- }
- }
- else if (sock->state == SS_DISCONNECTING) {
- msg_dbg(msg, "dispatch filter 6\n");
- return TIPC_ERR_NO_PORT;
- }
- else /* (sock->state == SS_UNCONNECTED) */ {
- if (msg_connected(msg) || msg_errcode(msg)) {
- msg_dbg(msg, "dispatch filter 7\n");
- return TIPC_ERR_NO_PORT;
- }
- }
+ res = filter_connect(tsk, &buf);
+ if (res != TIPC_OK || buf == NULL)
+ return res;
}
/* Reject message if there isn't room to queue it */
+ if (sk_rmem_alloc_get(sk) + buf->truesize >= limit)
+ return TIPC_ERR_OVERLOAD;
- recv_q_len = (u32)atomic_read(&tipc_queue_size);
- if (unlikely(recv_q_len >= OVERLOAD_LIMIT_BASE)) {
- if (rx_queue_full(msg, recv_q_len, OVERLOAD_LIMIT_BASE))
- return TIPC_ERR_OVERLOAD;
- }
- recv_q_len = skb_queue_len(&sk->sk_receive_queue);
- if (unlikely(recv_q_len >= (OVERLOAD_LIMIT_BASE / 2))) {
- if (rx_queue_full(msg, recv_q_len, OVERLOAD_LIMIT_BASE / 2))
- return TIPC_ERR_OVERLOAD;
- }
-
- /* Enqueue message (finally!) */
-
- msg_dbg(msg, "<DISP<: ");
- TIPC_SKB_CB(buf)->handle = msg_data(msg);
- atomic_inc(&tipc_queue_size);
+ /* Enqueue message */
+ TIPC_SKB_CB(buf)->handle = NULL;
__skb_queue_tail(&sk->sk_receive_queue, buf);
+ skb_set_owner_r(buf, sk);
- /* Initiate connection termination for an incoming 'FIN' */
-
- if (unlikely(msg_errcode(msg) && (sock->state == SS_CONNECTED))) {
- sock->state = SS_DISCONNECTING;
- tipc_disconnect_port(tipc_sk_port(sk));
- }
-
- if (waitqueue_active(sk_sleep(sk)))
- wake_up_interruptible(sk_sleep(sk));
+ sk->sk_data_ready(sk);
return TIPC_OK;
}
/**
- * backlog_rcv - handle incoming message from backlog queue
+ * tipc_backlog_rcv - handle incoming message from backlog queue
* @sk: socket
* @buf: message
*
@@ -1306,70 +1427,100 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
*
* Returns 0
*/
-
-static int backlog_rcv(struct sock *sk, struct sk_buff *buf)
+static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *buf)
{
u32 res;
+ struct tipc_sock *tsk = tipc_sk(sk);
+ uint truesize = buf->truesize;
res = filter_rcv(sk, buf);
- if (res)
+ if (unlikely(res))
tipc_reject_msg(buf, res);
+
+ if (atomic_read(&tsk->dupl_rcvcnt) < TIPC_CONN_OVERLOAD_LIMIT)
+ atomic_add(truesize, &tsk->dupl_rcvcnt);
+
return 0;
}
/**
- * dispatch - handle incoming message
- * @tport: TIPC port that received message
- * @buf: message
- *
- * Called with port lock already taken.
- *
- * Returns TIPC error status code (TIPC_OK if message is not to be rejected)
+ * tipc_sk_rcv - handle incoming message
+ * @buf: buffer containing arriving message
+ * Consumes buffer
+ * Returns 0 if success, or errno: -EHOSTUNREACH
*/
-
-static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
+int tipc_sk_rcv(struct sk_buff *buf)
{
- struct sock *sk = (struct sock *)tport->usr_handle;
- u32 res;
+ struct tipc_sock *tsk;
+ struct tipc_port *port;
+ struct sock *sk;
+ u32 dport = msg_destport(buf_msg(buf));
+ int err = TIPC_OK;
+ uint limit;
- /*
- * Process message if socket is unlocked; otherwise add to backlog queue
- *
- * This code is based on sk_receive_skb(), but must be distinct from it
- * since a TIPC-specific filter/reject mechanism is utilized
- */
+ /* Forward unresolved named message */
+ if (unlikely(!dport)) {
+ tipc_net_route_msg(buf);
+ return 0;
+ }
+ /* Validate destination */
+ port = tipc_port_lock(dport);
+ if (unlikely(!port)) {
+ err = TIPC_ERR_NO_PORT;
+ goto exit;
+ }
+
+ tsk = tipc_port_to_sock(port);
+ sk = &tsk->sk;
+
+ /* Queue message */
bh_lock_sock(sk);
+
if (!sock_owned_by_user(sk)) {
- res = filter_rcv(sk, buf);
+ err = filter_rcv(sk, buf);
} else {
- if (sk_add_backlog(sk, buf))
- res = TIPC_ERR_OVERLOAD;
- else
- res = TIPC_OK;
+ if (sk->sk_backlog.len == 0)
+ atomic_set(&tsk->dupl_rcvcnt, 0);
+ limit = rcvbuf_limit(sk, buf) + atomic_read(&tsk->dupl_rcvcnt);
+ if (sk_add_backlog(sk, buf, limit))
+ err = TIPC_ERR_OVERLOAD;
}
+
bh_unlock_sock(sk);
+ tipc_port_unlock(port);
- return res;
+ if (likely(!err))
+ return 0;
+exit:
+ tipc_reject_msg(buf, err);
+ return -EHOSTUNREACH;
}
-/**
- * wakeupdispatch - wake up port after congestion
- * @tport: port to wakeup
- *
- * Called with port lock already taken.
- */
-
-static void wakeupdispatch(struct tipc_port *tport)
+static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
{
- struct sock *sk = (struct sock *)tport->usr_handle;
+ struct sock *sk = sock->sk;
+ DEFINE_WAIT(wait);
+ int done;
- if (waitqueue_active(sk_sleep(sk)))
- wake_up_interruptible(sk_sleep(sk));
+ do {
+ int err = sock_error(sk);
+ if (err)
+ return err;
+ if (!*timeo_p)
+ return -ETIMEDOUT;
+ if (signal_pending(current))
+ return sock_intr_errno(*timeo_p);
+
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+ done = sk_wait_event(sk, timeo_p, sock->state != SS_CONNECTING);
+ finish_wait(sk_sleep(sk), &wait);
+ } while (!done);
+ return 0;
}
/**
- * connect - establish a connection to another TIPC port
+ * tipc_connect - establish a connection to another TIPC port
* @sock: socket structure
* @dest: socket address for destination port
* @destlen: size of socket address data structure
@@ -1377,131 +1528,93 @@ static void wakeupdispatch(struct tipc_port *tport)
*
* Returns 0 on success, errno otherwise
*/
-
-static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
- int flags)
+static int tipc_connect(struct socket *sock, struct sockaddr *dest,
+ int destlen, int flags)
{
struct sock *sk = sock->sk;
struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
struct msghdr m = {NULL,};
- struct sk_buff *buf;
- struct tipc_msg *msg;
- long timeout;
+ long timeout = (flags & O_NONBLOCK) ? 0 : tipc_sk(sk)->conn_timeout;
+ socket_state previous;
int res;
lock_sock(sk);
/* For now, TIPC does not allow use of connect() with DGRAM/RDM types */
-
if (sock->state == SS_READY) {
res = -EOPNOTSUPP;
goto exit;
}
- /* For now, TIPC does not support the non-blocking form of connect() */
-
- if (flags & O_NONBLOCK) {
- res = -EOPNOTSUPP;
- goto exit;
- }
-
- /* Issue Posix-compliant error code if socket is in the wrong state */
-
- if (sock->state == SS_LISTENING) {
- res = -EOPNOTSUPP;
- goto exit;
- }
- if (sock->state == SS_CONNECTING) {
- res = -EALREADY;
- goto exit;
- }
- if (sock->state != SS_UNCONNECTED) {
- res = -EISCONN;
- goto exit;
- }
-
/*
* Reject connection attempt using multicast address
*
* Note: send_msg() validates the rest of the address fields,
* so there's no need to do it here
*/
-
if (dst->addrtype == TIPC_ADDR_MCAST) {
res = -EINVAL;
goto exit;
}
- /* Reject any messages already in receive queue (very unlikely) */
-
- reject_rx_queue(sk);
-
- /* Send a 'SYN-' to destination */
-
- m.msg_name = dest;
- m.msg_namelen = destlen;
- res = send_msg(NULL, sock, &m, 0);
- if (res < 0) {
- goto exit;
- }
+ previous = sock->state;
+ switch (sock->state) {
+ case SS_UNCONNECTED:
+ /* Send a 'SYN-' to destination */
+ m.msg_name = dest;
+ m.msg_namelen = destlen;
- /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
+ /* If connect() is non-blocking, set MSG_DONTWAIT so that
+ * send_msg() never blocks.
+ */
+ if (!timeout)
+ m.msg_flags = MSG_DONTWAIT;
- timeout = tipc_sk(sk)->conn_timeout;
- release_sock(sk);
- res = wait_event_interruptible_timeout(*sk_sleep(sk),
- (!skb_queue_empty(&sk->sk_receive_queue) ||
- (sock->state != SS_CONNECTING)),
- timeout ? timeout : MAX_SCHEDULE_TIMEOUT);
- lock_sock(sk);
+ res = tipc_sendmsg(NULL, sock, &m, 0);
+ if ((res < 0) && (res != -EWOULDBLOCK))
+ goto exit;
- if (res > 0) {
- buf = skb_peek(&sk->sk_receive_queue);
- if (buf != NULL) {
- msg = buf_msg(buf);
- res = auto_connect(sock, msg);
- if (!res) {
- if (!msg_data_sz(msg))
- advance_rx_queue(sk);
- }
- } else {
- if (sock->state == SS_CONNECTED) {
- res = -EISCONN;
- } else {
- res = -ECONNREFUSED;
- }
- }
- } else {
- if (res == 0)
- res = -ETIMEDOUT;
- else
- ; /* leave "res" unchanged */
- sock->state = SS_DISCONNECTING;
+ /* Just entered SS_CONNECTING state; the only
+ * difference is that the return value in the
+ * non-blocking case is EINPROGRESS rather than EALREADY.
+ */
+ res = -EINPROGRESS;
+ case SS_CONNECTING:
+ if (previous == SS_CONNECTING)
+ res = -EALREADY;
+ if (!timeout)
+ goto exit;
+ timeout = msecs_to_jiffies(timeout);
+ /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
+ res = tipc_wait_for_connect(sock, &timeout);
+ break;
+ case SS_CONNECTED:
+ res = -EISCONN;
+ break;
+ default:
+ res = -EINVAL;
+ break;
}
-
exit:
release_sock(sk);
return res;
}
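For illustration only (not part of this patch): with the rework above, a non-blocking connect() on an AF_TIPC socket now returns -EINPROGRESS and can be completed with poll(), like other address families. A minimal user-space sketch, where the service name {18888, 17} and the 8-second timeout are assumptions:

#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/tipc.h>

int main(void)
{
	struct sockaddr_tipc peer = {
		.family = AF_TIPC,
		.addrtype = TIPC_ADDR_NAME,
		.addr.name.name.type = 18888,		/* assumed service type */
		.addr.name.name.instance = 17,		/* assumed service instance */
	};
	struct pollfd pfd;
	int sd = socket(AF_TIPC, SOCK_SEQPACKET, 0);

	if (sd < 0)
		return 1;
	fcntl(sd, F_SETFL, O_NONBLOCK);
	if (connect(sd, (struct sockaddr *)&peer, sizeof(peer)) < 0 &&
	    errno != EINPROGRESS) {
		perror("connect");
		return 1;
	}
	pfd.fd = sd;
	pfd.events = POLLOUT;		/* becomes writable once the 'ACK' arrives */
	if (poll(&pfd, 1, 8000) <= 0)
		fprintf(stderr, "connect timed out\n");
	close(sd);
	return 0;
}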
/**
- * listen - allow socket to listen for incoming connections
+ * tipc_listen - allow socket to listen for incoming connections
* @sock: socket structure
* @len: (unused)
*
* Returns 0 on success, errno otherwise
*/
-
-static int listen(struct socket *sock, int len)
+static int tipc_listen(struct socket *sock, int len)
{
struct sock *sk = sock->sk;
int res;
lock_sock(sk);
- if (sock->state == SS_READY)
- res = -EOPNOTSUPP;
- else if (sock->state != SS_UNCONNECTED)
+ if (sock->state != SS_UNCONNECTED)
res = -EINVAL;
else {
sock->state = SS_LISTENING;
@@ -1512,101 +1625,126 @@ static int listen(struct socket *sock, int len)
return res;
}
+static int tipc_wait_for_accept(struct socket *sock, long timeo)
+{
+ struct sock *sk = sock->sk;
+ DEFINE_WAIT(wait);
+ int err;
+
+ /* True wake-one mechanism for incoming connections: only
+ * one process gets woken up, not the 'whole herd'.
+ * Since we do not 'race & poll' for established sockets
+ * anymore, the common case will execute the loop only once.
+ */
+ for (;;) {
+ prepare_to_wait_exclusive(sk_sleep(sk), &wait,
+ TASK_INTERRUPTIBLE);
+ if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
+ release_sock(sk);
+ timeo = schedule_timeout(timeo);
+ lock_sock(sk);
+ }
+ err = 0;
+ if (!skb_queue_empty(&sk->sk_receive_queue))
+ break;
+ err = -EINVAL;
+ if (sock->state != SS_LISTENING)
+ break;
+ err = sock_intr_errno(timeo);
+ if (signal_pending(current))
+ break;
+ err = -EAGAIN;
+ if (!timeo)
+ break;
+ }
+ finish_wait(sk_sleep(sk), &wait);
+ return err;
+}
+
/**
- * accept - wait for connection request
+ * tipc_accept - wait for connection request
* @sock: listening socket
* @newsock: new socket that is to be connected
* @flags: file-related flags associated with socket
*
* Returns 0 on success, errno otherwise
*/
-
-static int accept(struct socket *sock, struct socket *new_sock, int flags)
+static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
{
- struct sock *sk = sock->sk;
+ struct sock *new_sk, *sk = sock->sk;
struct sk_buff *buf;
+ struct tipc_port *new_port;
+ struct tipc_msg *msg;
+ struct tipc_portid peer;
+ u32 new_ref;
+ long timeo;
int res;
lock_sock(sk);
- if (sock->state == SS_READY) {
- res = -EOPNOTSUPP;
- goto exit;
- }
if (sock->state != SS_LISTENING) {
res = -EINVAL;
goto exit;
}
-
- while (skb_queue_empty(&sk->sk_receive_queue)) {
- if (flags & O_NONBLOCK) {
- res = -EWOULDBLOCK;
- goto exit;
- }
- release_sock(sk);
- res = wait_event_interruptible(*sk_sleep(sk),
- (!skb_queue_empty(&sk->sk_receive_queue)));
- lock_sock(sk);
- if (res)
- goto exit;
- }
+ timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
+ res = tipc_wait_for_accept(sock, timeo);
+ if (res)
+ goto exit;
buf = skb_peek(&sk->sk_receive_queue);
- res = tipc_create(sock_net(sock->sk), new_sock, 0, 0);
- if (!res) {
- struct sock *new_sk = new_sock->sk;
- struct tipc_sock *new_tsock = tipc_sk(new_sk);
- struct tipc_port *new_tport = new_tsock->p;
- u32 new_ref = new_tport->ref;
- struct tipc_msg *msg = buf_msg(buf);
-
- lock_sock(new_sk);
-
- /*
- * Reject any stray messages received by new socket
- * before the socket lock was taken (very, very unlikely)
- */
-
- reject_rx_queue(new_sk);
-
- /* Connect new socket to it's peer */
+ res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 1);
+ if (res)
+ goto exit;
- new_tsock->peer_name.ref = msg_origport(msg);
- new_tsock->peer_name.node = msg_orignode(msg);
- tipc_connect2port(new_ref, &new_tsock->peer_name);
- new_sock->state = SS_CONNECTED;
+ new_sk = new_sock->sk;
+ new_port = &tipc_sk(new_sk)->port;
+ new_ref = new_port->ref;
+ msg = buf_msg(buf);
- tipc_set_portimportance(new_ref, msg_importance(msg));
- if (msg_named(msg)) {
- new_tport->conn_type = msg_nametype(msg);
- new_tport->conn_instance = msg_nameinst(msg);
- }
+ /* we lock on new_sk; but lockdep sees the lock on sk */
+ lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);
- /*
- * Respond to 'SYN-' by discarding it & returning 'ACK'-.
- * Respond to 'SYN+' by queuing it on new socket.
- */
+ /*
+ * Reject any stray messages received by new socket
+ * before the socket lock was taken (very, very unlikely)
+ */
+ reject_rx_queue(new_sk);
+
+ /* Connect new socket to it's peer */
+ peer.ref = msg_origport(msg);
+ peer.node = msg_orignode(msg);
+ tipc_port_connect(new_ref, &peer);
+ new_sock->state = SS_CONNECTED;
+
+ tipc_port_set_importance(new_port, msg_importance(msg));
+ if (msg_named(msg)) {
+ new_port->conn_type = msg_nametype(msg);
+ new_port->conn_instance = msg_nameinst(msg);
+ }
- msg_dbg(msg,"<ACC<: ");
- if (!msg_data_sz(msg)) {
- struct msghdr m = {NULL,};
+ /*
+ * Respond to 'SYN-' by discarding it & returning 'ACK'-.
+ * Respond to 'SYN+' by queuing it on new socket.
+ */
+ if (!msg_data_sz(msg)) {
+ struct msghdr m = {NULL,};
- advance_rx_queue(sk);
- send_packet(NULL, new_sock, &m, 0);
- } else {
- __skb_dequeue(&sk->sk_receive_queue);
- __skb_queue_head(&new_sk->sk_receive_queue, buf);
- }
- release_sock(new_sk);
+ advance_rx_queue(sk);
+ tipc_send_packet(NULL, new_sock, &m, 0);
+ } else {
+ __skb_dequeue(&sk->sk_receive_queue);
+ __skb_queue_head(&new_sk->sk_receive_queue, buf);
+ skb_set_owner_r(buf, new_sk);
}
+ release_sock(new_sk);
exit:
release_sock(sk);
return res;
}
/**
- * shutdown - shutdown socket connection
+ * tipc_shutdown - shutdown socket connection
* @sock: socket structure
* @how: direction to close (must be SHUT_RDWR)
*
@@ -1614,11 +1752,11 @@ exit:
*
* Returns 0 on success, errno otherwise
*/
-
-static int shutdown(struct socket *sock, int how)
+static int tipc_shutdown(struct socket *sock, int how)
{
struct sock *sk = sock->sk;
- struct tipc_port *tport = tipc_sk_port(sk);
+ struct tipc_sock *tsk = tipc_sk(sk);
+ struct tipc_port *port = &tsk->port;
struct sk_buff *buf;
int res;
@@ -1631,19 +1769,18 @@ static int shutdown(struct socket *sock, int how)
case SS_CONNECTING:
case SS_CONNECTED:
- /* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
restart:
+ /* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
buf = __skb_dequeue(&sk->sk_receive_queue);
if (buf) {
- atomic_dec(&tipc_queue_size);
- if (TIPC_SKB_CB(buf)->handle != msg_data(buf_msg(buf))) {
- buf_discard(buf);
+ if (TIPC_SKB_CB(buf)->handle != NULL) {
+ kfree_skb(buf);
goto restart;
}
- tipc_disconnect(tport->ref);
+ tipc_port_disconnect(port->ref);
tipc_reject_msg(buf, TIPC_CONN_SHUTDOWN);
} else {
- tipc_shutdown(tport->ref);
+ tipc_port_shutdown(port->ref);
}
sock->state = SS_DISCONNECTING;
@@ -1652,11 +1789,11 @@ restart:
case SS_DISCONNECTING:
- /* Discard any unreceived messages; wake up sleeping tasks */
+ /* Discard any unreceived messages */
+ __skb_queue_purge(&sk->sk_receive_queue);
- discard_rx_queue(sk);
- if (waitqueue_active(sk_sleep(sk)))
- wake_up_interruptible(sk_sleep(sk));
+ /* Wake up anyone sleeping in poll */
+ sk->sk_state_change(sk);
res = 0;
break;
@@ -1669,7 +1806,7 @@ restart:
}
/**
- * setsockopt - set socket option
+ * tipc_setsockopt - set socket option
* @sock: socket structure
* @lvl: option level
* @opt: option identifier
@@ -1681,12 +1818,12 @@ restart:
*
* Returns 0 on success, errno otherwise
*/
-
-static int setsockopt(struct socket *sock,
- int lvl, int opt, char __user *ov, unsigned int ol)
+static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
+ char __user *ov, unsigned int ol)
{
struct sock *sk = sock->sk;
- struct tipc_port *tport = tipc_sk_port(sk);
+ struct tipc_sock *tsk = tipc_sk(sk);
+ struct tipc_port *port = &tsk->port;
u32 value;
int res;
@@ -1696,26 +1833,27 @@ static int setsockopt(struct socket *sock,
return -ENOPROTOOPT;
if (ol < sizeof(value))
return -EINVAL;
- if ((res = get_user(value, (u32 __user *)ov)))
+ res = get_user(value, (u32 __user *)ov);
+ if (res)
return res;
lock_sock(sk);
switch (opt) {
case TIPC_IMPORTANCE:
- res = tipc_set_portimportance(tport->ref, value);
+ tipc_port_set_importance(port, value);
break;
case TIPC_SRC_DROPPABLE:
if (sock->type != SOCK_STREAM)
- res = tipc_set_portunreliable(tport->ref, value);
+ tipc_port_set_unreliable(port, value);
else
res = -ENOPROTOOPT;
break;
case TIPC_DEST_DROPPABLE:
- res = tipc_set_portunreturnable(tport->ref, value);
+ tipc_port_set_unreturnable(port, value);
break;
case TIPC_CONN_TIMEOUT:
- tipc_sk(sk)->conn_timeout = msecs_to_jiffies(value);
+ tipc_sk(sk)->conn_timeout = value;
/* no need to set "res", since already 0 at this point */
break;
default:
@@ -1728,7 +1866,7 @@ static int setsockopt(struct socket *sock,
}
/**
- * getsockopt - get socket option
+ * tipc_getsockopt - get socket option
* @sock: socket structure
* @lvl: option level
* @opt: option identifier
@@ -1740,12 +1878,12 @@ static int setsockopt(struct socket *sock,
*
* Returns 0 on success, errno otherwise
*/
-
-static int getsockopt(struct socket *sock,
- int lvl, int opt, char __user *ov, int __user *ol)
+static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
+ char __user *ov, int __user *ol)
{
struct sock *sk = sock->sk;
- struct tipc_port *tport = tipc_sk_port(sk);
+ struct tipc_sock *tsk = tipc_sk(sk);
+ struct tipc_port *port = &tsk->port;
int len;
u32 value;
int res;
@@ -1754,29 +1892,30 @@ static int getsockopt(struct socket *sock,
return put_user(0, ol);
if (lvl != SOL_TIPC)
return -ENOPROTOOPT;
- if ((res = get_user(len, ol)))
+ res = get_user(len, ol);
+ if (res)
return res;
lock_sock(sk);
switch (opt) {
case TIPC_IMPORTANCE:
- res = tipc_portimportance(tport->ref, &value);
+ value = tipc_port_importance(port);
break;
case TIPC_SRC_DROPPABLE:
- res = tipc_portunreliable(tport->ref, &value);
+ value = tipc_port_unreliable(port);
break;
case TIPC_DEST_DROPPABLE:
- res = tipc_portunreturnable(tport->ref, &value);
+ value = tipc_port_unreturnable(port);
break;
case TIPC_CONN_TIMEOUT:
- value = jiffies_to_msecs(tipc_sk(sk)->conn_timeout);
+ value = tipc_sk(sk)->conn_timeout;
/* no need to set "res", since already 0 at this point */
break;
- case TIPC_NODE_RECVQ_DEPTH:
- value = (u32)atomic_read(&tipc_queue_size);
+ case TIPC_NODE_RECVQ_DEPTH:
+ value = 0; /* was tipc_queue_size, now obsolete */
break;
- case TIPC_SOCK_RECVQ_DEPTH:
+ case TIPC_SOCK_RECVQ_DEPTH:
value = skb_queue_len(&sk->sk_receive_queue);
break;
default:
@@ -1785,99 +1924,122 @@ static int getsockopt(struct socket *sock,
release_sock(sk);
- if (res) {
- /* "get" failed */
- }
- else if (len < sizeof(value)) {
- res = -EINVAL;
- }
- else if (copy_to_user(ov, &value, sizeof(value))) {
- res = -EFAULT;
- }
- else {
- res = put_user(sizeof(value), ol);
- }
+ if (res)
+ return res; /* "get" failed */
- return res;
+ if (len < sizeof(value))
+ return -EINVAL;
+
+ if (copy_to_user(ov, &value, sizeof(value)))
+ return -EFAULT;
+
+ return put_user(sizeof(value), ol);
}
-/**
- * Protocol switches for the various types of TIPC sockets
- */
+int tipc_ioctl(struct socket *sk, unsigned int cmd, unsigned long arg)
+{
+ struct tipc_sioc_ln_req lnr;
+ void __user *argp = (void __user *)arg;
+
+ switch (cmd) {
+ case SIOCGETLINKNAME:
+ if (copy_from_user(&lnr, argp, sizeof(lnr)))
+ return -EFAULT;
+ if (!tipc_node_get_linkname(lnr.bearer_id, lnr.peer,
+ lnr.linkname, TIPC_MAX_LINK_NAME)) {
+ if (copy_to_user(argp, &lnr, sizeof(lnr)))
+ return -EFAULT;
+ return 0;
+ }
+ return -EADDRNOTAVAIL;
+ break;
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+/* Protocol switches for the various types of TIPC sockets */
static const struct proto_ops msg_ops = {
- .owner = THIS_MODULE,
+ .owner = THIS_MODULE,
.family = AF_TIPC,
- .release = release,
- .bind = bind,
- .connect = connect,
+ .release = tipc_release,
+ .bind = tipc_bind,
+ .connect = tipc_connect,
.socketpair = sock_no_socketpair,
- .accept = accept,
- .getname = get_name,
- .poll = poll,
- .ioctl = sock_no_ioctl,
- .listen = listen,
- .shutdown = shutdown,
- .setsockopt = setsockopt,
- .getsockopt = getsockopt,
- .sendmsg = send_msg,
- .recvmsg = recv_msg,
+ .accept = sock_no_accept,
+ .getname = tipc_getname,
+ .poll = tipc_poll,
+ .ioctl = tipc_ioctl,
+ .listen = sock_no_listen,
+ .shutdown = tipc_shutdown,
+ .setsockopt = tipc_setsockopt,
+ .getsockopt = tipc_getsockopt,
+ .sendmsg = tipc_sendmsg,
+ .recvmsg = tipc_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage
};
static const struct proto_ops packet_ops = {
- .owner = THIS_MODULE,
+ .owner = THIS_MODULE,
.family = AF_TIPC,
- .release = release,
- .bind = bind,
- .connect = connect,
+ .release = tipc_release,
+ .bind = tipc_bind,
+ .connect = tipc_connect,
.socketpair = sock_no_socketpair,
- .accept = accept,
- .getname = get_name,
- .poll = poll,
- .ioctl = sock_no_ioctl,
- .listen = listen,
- .shutdown = shutdown,
- .setsockopt = setsockopt,
- .getsockopt = getsockopt,
- .sendmsg = send_packet,
- .recvmsg = recv_msg,
+ .accept = tipc_accept,
+ .getname = tipc_getname,
+ .poll = tipc_poll,
+ .ioctl = tipc_ioctl,
+ .listen = tipc_listen,
+ .shutdown = tipc_shutdown,
+ .setsockopt = tipc_setsockopt,
+ .getsockopt = tipc_getsockopt,
+ .sendmsg = tipc_send_packet,
+ .recvmsg = tipc_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage
};
static const struct proto_ops stream_ops = {
- .owner = THIS_MODULE,
+ .owner = THIS_MODULE,
.family = AF_TIPC,
- .release = release,
- .bind = bind,
- .connect = connect,
+ .release = tipc_release,
+ .bind = tipc_bind,
+ .connect = tipc_connect,
.socketpair = sock_no_socketpair,
- .accept = accept,
- .getname = get_name,
- .poll = poll,
- .ioctl = sock_no_ioctl,
- .listen = listen,
- .shutdown = shutdown,
- .setsockopt = setsockopt,
- .getsockopt = getsockopt,
- .sendmsg = send_stream,
- .recvmsg = recv_stream,
+ .accept = tipc_accept,
+ .getname = tipc_getname,
+ .poll = tipc_poll,
+ .ioctl = tipc_ioctl,
+ .listen = tipc_listen,
+ .shutdown = tipc_shutdown,
+ .setsockopt = tipc_setsockopt,
+ .getsockopt = tipc_getsockopt,
+ .sendmsg = tipc_send_stream,
+ .recvmsg = tipc_recv_stream,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage
};
static const struct net_proto_family tipc_family_ops = {
- .owner = THIS_MODULE,
+ .owner = THIS_MODULE,
.family = AF_TIPC,
- .create = tipc_create
+ .create = tipc_sk_create
};
static struct proto tipc_proto = {
.name = "TIPC",
.owner = THIS_MODULE,
- .obj_size = sizeof(struct tipc_sock)
+ .obj_size = sizeof(struct tipc_sock),
+ .sysctl_rmem = sysctl_tipc_rmem
+};
+
+static struct proto tipc_proto_kern = {
+ .name = "TIPC",
+ .obj_size = sizeof(struct tipc_sock),
+ .sysctl_rmem = sysctl_tipc_rmem
};
/**
@@ -1891,18 +2053,16 @@ int tipc_socket_init(void)
res = proto_register(&tipc_proto, 1);
if (res) {
- err("Failed to register TIPC protocol type\n");
+ pr_err("Failed to register TIPC protocol type\n");
goto out;
}
res = sock_register(&tipc_family_ops);
if (res) {
- err("Failed to register TIPC socket type\n");
+ pr_err("Failed to register TIPC socket type\n");
proto_unregister(&tipc_proto);
goto out;
}
-
- sockets_enabled = 1;
out:
return res;
}
@@ -1910,14 +2070,8 @@ int tipc_socket_init(void)
/**
* tipc_socket_stop - stop TIPC socket interface
*/
-
void tipc_socket_stop(void)
{
- if (!sockets_enabled)
- return;
-
- sockets_enabled = 0;
sock_unregister(tipc_family_ops.family);
proto_unregister(&tipc_proto);
}
-
diff --git a/net/tipc/zone.h b/net/tipc/socket.h
index bd1c20ce9d0..3afcd2a70b3 100644
--- a/net/tipc/zone.h
+++ b/net/tipc/socket.h
@@ -1,8 +1,6 @@
-/*
- * net/tipc/zone.h: Include file for TIPC zone management routines
+/* net/tipc/socket.h: Include file for TIPC socket code
*
- * Copyright (c) 2000-2006, Ericsson AB
- * Copyright (c) 2005-2006, Wind River Systems
+ * Copyright (c) 2014, Ericsson AB
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -34,37 +32,43 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef _TIPC_ZONE_H
-#define _TIPC_ZONE_H
-
-#include "node_subscr.h"
-#include "net.h"
+#ifndef _TIPC_SOCK_H
+#define _TIPC_SOCK_H
+#include "port.h"
+#include <net/sock.h>
/**
- * struct _zone - TIPC zone structure
- * @addr: network address of zone
- * @clusters: array of pointers to all clusters within zone
- * @links: number of (unicast) links to zone
+ * struct tipc_sock - TIPC socket structure
+ * @sk: socket - interacts with 'port' and with user via the socket API
+ * @port: port - interacts with 'sk' and with the rest of the TIPC stack
+ * @peer_name: the peer of the connection, if any
+ * @conn_timeout: the time we can wait for an unresponded setup request
+ * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
*/
-struct _zone {
- u32 addr;
- struct cluster *clusters[2]; /* currently limited to just 1 cluster */
- u32 links;
+struct tipc_sock {
+ struct sock sk;
+ struct tipc_port port;
+ unsigned int conn_timeout;
+ atomic_t dupl_rcvcnt;
};
-struct tipc_node *tipc_zone_select_remote_node(struct _zone *z_ptr, u32 addr, u32 ref);
-u32 tipc_zone_select_router(struct _zone *z_ptr, u32 addr, u32 ref);
-void tipc_zone_remove_as_router(struct _zone *z_ptr, u32 router);
-void tipc_zone_send_external_routes(struct _zone *z_ptr, u32 dest);
-struct _zone *tipc_zone_create(u32 addr);
-void tipc_zone_delete(struct _zone *z_ptr);
-void tipc_zone_attach_cluster(struct _zone *z_ptr, struct cluster *c_ptr);
+static inline struct tipc_sock *tipc_sk(const struct sock *sk)
+{
+ return container_of(sk, struct tipc_sock, sk);
+}
-static inline struct _zone *tipc_zone_find(u32 addr)
+static inline struct tipc_sock *tipc_port_to_sock(const struct tipc_port *port)
{
- return tipc_net.zones[tipc_zone(addr)];
+ return container_of(port, struct tipc_sock, port);
}
+static inline void tipc_sock_wakeup(struct tipc_sock *tsk)
+{
+ tsk->sk.sk_write_space(&tsk->sk);
+}
+
+int tipc_sk_rcv(struct sk_buff *buf);
+
#endif
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 23f43d03980..642437231ad 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -2,7 +2,7 @@
* net/tipc/subscr.c: TIPC network topology service
*
* Copyright (c) 2000-2006, Ericsson AB
- * Copyright (c) 2005-2007, Wind River Systems
+ * Copyright (c) 2005-2007, 2010-2013, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -36,42 +36,47 @@
#include "core.h"
#include "name_table.h"
-#include "user_reg.h"
+#include "port.h"
#include "subscr.h"
/**
- * struct subscriber - TIPC network topology subscriber
- * @port_ref: object reference to server port connecting to subscriber
- * @lock: pointer to spinlock controlling access to subscriber's server port
- * @subscriber_list: adjacent subscribers in top. server's list of subscribers
+ * struct tipc_subscriber - TIPC network topology subscriber
+ * @conid: connection identifier to server connecting to subscriber
+ * @lock: control access to subscriber
* @subscription_list: list of subscription objects for this subscriber
*/
-
-struct subscriber {
- u32 port_ref;
- spinlock_t *lock;
- struct list_head subscriber_list;
+struct tipc_subscriber {
+ int conid;
+ spinlock_t lock;
struct list_head subscription_list;
};
-/**
- * struct top_srv - TIPC network topology subscription service
- * @user_ref: TIPC userid of subscription service
- * @setup_port: reference to TIPC port that handles subscription requests
- * @subscription_count: number of active subscriptions (not subscribers!)
- * @subscriber_list: list of ports subscribing to service
- * @lock: spinlock govering access to subscriber list
- */
+static void subscr_conn_msg_event(int conid, struct sockaddr_tipc *addr,
+ void *usr_data, void *buf, size_t len);
+static void *subscr_named_msg_event(int conid);
+static void subscr_conn_shutdown_event(int conid, void *usr_data);
-struct top_srv {
- u32 user_ref;
- u32 setup_port;
- atomic_t subscription_count;
- struct list_head subscriber_list;
- spinlock_t lock;
+static atomic_t subscription_count = ATOMIC_INIT(0);
+
+static struct sockaddr_tipc topsrv_addr __read_mostly = {
+ .family = AF_TIPC,
+ .addrtype = TIPC_ADDR_NAMESEQ,
+ .addr.nameseq.type = TIPC_TOP_SRV,
+ .addr.nameseq.lower = TIPC_TOP_SRV,
+ .addr.nameseq.upper = TIPC_TOP_SRV,
+ .scope = TIPC_NODE_SCOPE
};
-static struct top_srv topsrv = { 0 };
+static struct tipc_server topsrv __read_mostly = {
+ .saddr = &topsrv_addr,
+ .imp = TIPC_CRITICAL_IMPORTANCE,
+ .type = SOCK_SEQPACKET,
+ .max_rcvbuf_size = sizeof(struct tipc_subscr),
+ .name = "topology_server",
+ .tipc_conn_recvmsg = subscr_conn_msg_event,
+ .tipc_conn_new = subscr_named_msg_event,
+ .tipc_conn_shutdown = subscr_conn_shutdown_event,
+};
/**
* htohl - convert value to endianness used by destination
@@ -80,37 +85,27 @@ static struct top_srv topsrv = { 0 };
*
* Returns converted value
*/
-
static u32 htohl(u32 in, int swap)
{
return swap ? swab32(in) : in;
}
-/**
- * subscr_send_event - send a message containing a tipc_event to the subscriber
- *
- * Note: Must not hold subscriber's server port lock, since tipc_send() will
- * try to take the lock if the message is rejected and returned!
- */
-
-static void subscr_send_event(struct subscription *sub,
- u32 found_lower,
- u32 found_upper,
- u32 event,
- u32 port_ref,
+static void subscr_send_event(struct tipc_subscription *sub, u32 found_lower,
+ u32 found_upper, u32 event, u32 port_ref,
u32 node)
{
- struct iovec msg_sect;
+ struct tipc_subscriber *subscriber = sub->subscriber;
+ struct kvec msg_sect;
msg_sect.iov_base = (void *)&sub->evt;
msg_sect.iov_len = sizeof(struct tipc_event);
-
sub->evt.event = htohl(event, sub->swap);
sub->evt.found_lower = htohl(found_lower, sub->swap);
sub->evt.found_upper = htohl(found_upper, sub->swap);
sub->evt.port.ref = htohl(port_ref, sub->swap);
sub->evt.port.node = htohl(node, sub->swap);
- tipc_send(sub->server_ref, 1, &msg_sect);
+ tipc_conn_sendmsg(&topsrv, subscriber->conid, NULL, msg_sect.iov_base,
+ msg_sect.iov_len);
}
/**
@@ -118,11 +113,8 @@ static void subscr_send_event(struct subscription *sub,
*
* Returns 1 if there is overlap, otherwise 0.
*/
-
-int tipc_subscr_overlap(struct subscription *sub,
- u32 found_lower,
+int tipc_subscr_overlap(struct tipc_subscription *sub, u32 found_lower,
u32 found_upper)
-
{
if (found_lower < sub->seq.lower)
found_lower = sub->seq.lower;
@@ -138,157 +130,113 @@ int tipc_subscr_overlap(struct subscription *sub,
*
* Protected by nameseq.lock in name_table.c
*/
-
-void tipc_subscr_report_overlap(struct subscription *sub,
- u32 found_lower,
- u32 found_upper,
- u32 event,
- u32 port_ref,
- u32 node,
- int must)
+void tipc_subscr_report_overlap(struct tipc_subscription *sub, u32 found_lower,
+ u32 found_upper, u32 event, u32 port_ref,
+ u32 node, int must)
{
if (!tipc_subscr_overlap(sub, found_lower, found_upper))
return;
if (!must && !(sub->filter & TIPC_SUB_PORTS))
return;
- sub->event_cb(sub, found_lower, found_upper, event, port_ref, node);
+ subscr_send_event(sub, found_lower, found_upper, event, port_ref, node);
}
-/**
- * subscr_timeout - subscription timeout has occurred
- */
-
-static void subscr_timeout(struct subscription *sub)
+static void subscr_timeout(struct tipc_subscription *sub)
{
- struct port *server_port;
+ struct tipc_subscriber *subscriber = sub->subscriber;
- /* Validate server port reference (in case subscriber is terminating) */
-
- server_port = tipc_port_lock(sub->server_ref);
- if (server_port == NULL)
- return;
+ /* The spin lock per subscriber is used to protect its members */
+ spin_lock_bh(&subscriber->lock);
/* Validate timeout (in case subscription is being cancelled) */
-
if (sub->timeout == TIPC_WAIT_FOREVER) {
- tipc_port_unlock(server_port);
+ spin_unlock_bh(&subscriber->lock);
return;
}
/* Unlink subscription from name table */
-
tipc_nametbl_unsubscribe(sub);
/* Unlink subscription from subscriber */
-
list_del(&sub->subscription_list);
- /* Release subscriber's server port */
-
- tipc_port_unlock(server_port);
+ spin_unlock_bh(&subscriber->lock);
/* Notify subscriber of timeout */
-
subscr_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper,
TIPC_SUBSCR_TIMEOUT, 0, 0);
/* Now destroy subscription */
-
k_term_timer(&sub->timer);
kfree(sub);
- atomic_dec(&topsrv.subscription_count);
+ atomic_dec(&subscription_count);
}
/**
* subscr_del - delete a subscription within a subscription list
*
- * Called with subscriber port locked.
+ * Called with subscriber lock held.
*/
-
-static void subscr_del(struct subscription *sub)
+static void subscr_del(struct tipc_subscription *sub)
{
tipc_nametbl_unsubscribe(sub);
list_del(&sub->subscription_list);
kfree(sub);
- atomic_dec(&topsrv.subscription_count);
+ atomic_dec(&subscription_count);
}
/**
* subscr_terminate - terminate communication with a subscriber
*
- * Called with subscriber port locked. Routine must temporarily release lock
- * to enable subscription timeout routine(s) to finish without deadlocking;
- * the lock is then reclaimed to allow caller to release it upon return.
- * (This should work even in the unlikely event some other thread creates
- * a new object reference in the interim that uses this lock; this routine will
- * simply wait for it to be released, then claim it.)
+ * Note: Must call it in process context since it might sleep.
*/
-
-static void subscr_terminate(struct subscriber *subscriber)
+static void subscr_terminate(struct tipc_subscriber *subscriber)
{
- u32 port_ref;
- struct subscription *sub;
- struct subscription *sub_temp;
-
- /* Invalidate subscriber reference */
-
- port_ref = subscriber->port_ref;
- subscriber->port_ref = 0;
- spin_unlock_bh(subscriber->lock);
+ tipc_conn_terminate(&topsrv, subscriber->conid);
+}
- /* Sever connection to subscriber */
+static void subscr_release(struct tipc_subscriber *subscriber)
+{
+ struct tipc_subscription *sub;
+ struct tipc_subscription *sub_temp;
- tipc_shutdown(port_ref);
- tipc_deleteport(port_ref);
+ spin_lock_bh(&subscriber->lock);
/* Destroy any existing subscriptions for subscriber */
-
list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
subscription_list) {
if (sub->timeout != TIPC_WAIT_FOREVER) {
+ spin_unlock_bh(&subscriber->lock);
k_cancel_timer(&sub->timer);
k_term_timer(&sub->timer);
+ spin_lock_bh(&subscriber->lock);
}
- dbg("Term: Removing sub %u,%u,%u from subscriber %x list\n",
- sub->seq.type, sub->seq.lower, sub->seq.upper, subscriber);
subscr_del(sub);
}
-
- /* Remove subscriber from topology server's subscriber list */
-
- spin_lock_bh(&topsrv.lock);
- list_del(&subscriber->subscriber_list);
- spin_unlock_bh(&topsrv.lock);
-
- /* Reclaim subscriber lock */
-
- spin_lock_bh(subscriber->lock);
+ spin_unlock_bh(&subscriber->lock);
/* Now destroy subscriber */
-
kfree(subscriber);
}
/**
* subscr_cancel - handle subscription cancellation request
*
- * Called with subscriber port locked. Routine must temporarily release lock
+ * Called with subscriber lock held. Routine must temporarily release lock
* to enable the subscription timeout routine to finish without deadlocking;
* the lock is then reclaimed to allow caller to release it upon return.
*
* Note that fields of 's' use subscriber's endianness!
*/
-
static void subscr_cancel(struct tipc_subscr *s,
- struct subscriber *subscriber)
+ struct tipc_subscriber *subscriber)
{
- struct subscription *sub;
- struct subscription *sub_temp;
+ struct tipc_subscription *sub;
+ struct tipc_subscription *sub_temp;
int found = 0;
/* Find first matching subscription, exit if not found */
-
list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
subscription_list) {
if (!memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) {
@@ -300,63 +248,52 @@ static void subscr_cancel(struct tipc_subscr *s,
return;
/* Cancel subscription timer (if used), then delete subscription */
-
if (sub->timeout != TIPC_WAIT_FOREVER) {
sub->timeout = TIPC_WAIT_FOREVER;
- spin_unlock_bh(subscriber->lock);
+ spin_unlock_bh(&subscriber->lock);
k_cancel_timer(&sub->timer);
k_term_timer(&sub->timer);
- spin_lock_bh(subscriber->lock);
+ spin_lock_bh(&subscriber->lock);
}
- dbg("Cancel: removing sub %u,%u,%u from subscriber %x list\n",
- sub->seq.type, sub->seq.lower, sub->seq.upper, subscriber);
subscr_del(sub);
}
/**
* subscr_subscribe - create subscription for subscriber
*
- * Called with subscriber port locked.
+ * Called with subscriber lock held.
*/
-
-static struct subscription *subscr_subscribe(struct tipc_subscr *s,
- struct subscriber *subscriber)
-{
- struct subscription *sub;
+static int subscr_subscribe(struct tipc_subscr *s,
+ struct tipc_subscriber *subscriber,
+ struct tipc_subscription **sub_p) {
+ struct tipc_subscription *sub;
int swap;
/* Determine subscriber's endianness */
-
swap = !(s->filter & (TIPC_SUB_PORTS | TIPC_SUB_SERVICE));
/* Detect & process a subscription cancellation request */
-
if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) {
s->filter &= ~htohl(TIPC_SUB_CANCEL, swap);
subscr_cancel(s, subscriber);
- return NULL;
+ return 0;
}
/* Refuse subscription if global limit exceeded */
-
- if (atomic_read(&topsrv.subscription_count) >= tipc_max_subscriptions) {
- warn("Subscription rejected, subscription limit reached (%u)\n",
- tipc_max_subscriptions);
- subscr_terminate(subscriber);
- return NULL;
+ if (atomic_read(&subscription_count) >= TIPC_MAX_SUBSCRIPTIONS) {
+ pr_warn("Subscription rejected, limit reached (%u)\n",
+ TIPC_MAX_SUBSCRIPTIONS);
+ return -EINVAL;
}
/* Allocate subscription object */
-
sub = kmalloc(sizeof(*sub), GFP_ATOMIC);
if (!sub) {
- warn("Subscription rejected, no memory\n");
- subscr_terminate(subscriber);
- return NULL;
+ pr_warn("Subscription rejected, no memory\n");
+ return -ENOMEM;
}
/* Initialize subscription object */
-
sub->seq.type = htohl(s->seq.type, swap);
sub->seq.lower = htohl(s->seq.lower, swap);
sub->seq.upper = htohl(s->seq.upper, swap);
@@ -365,242 +302,74 @@ static struct subscription *subscr_subscribe(struct tipc_subscr *s,
if ((!(sub->filter & TIPC_SUB_PORTS) ==
!(sub->filter & TIPC_SUB_SERVICE)) ||
(sub->seq.lower > sub->seq.upper)) {
- warn("Subscription rejected, illegal request\n");
+ pr_warn("Subscription rejected, illegal request\n");
kfree(sub);
- subscr_terminate(subscriber);
- return NULL;
+ return -EINVAL;
}
- sub->event_cb = subscr_send_event;
INIT_LIST_HEAD(&sub->nameseq_list);
list_add(&sub->subscription_list, &subscriber->subscription_list);
- sub->server_ref = subscriber->port_ref;
+ sub->subscriber = subscriber;
sub->swap = swap;
memcpy(&sub->evt.s, s, sizeof(struct tipc_subscr));
- atomic_inc(&topsrv.subscription_count);
+ atomic_inc(&subscription_count);
if (sub->timeout != TIPC_WAIT_FOREVER) {
k_init_timer(&sub->timer,
(Handler)subscr_timeout, (unsigned long)sub);
k_start_timer(&sub->timer, sub->timeout);
}
-
- return sub;
+ *sub_p = sub;
+ return 0;
}
-/**
- * subscr_conn_shutdown_event - handle termination request from subscriber
- *
- * Called with subscriber's server port unlocked.
- */
-
-static void subscr_conn_shutdown_event(void *usr_handle,
- u32 port_ref,
- struct sk_buff **buf,
- unsigned char const *data,
- unsigned int size,
- int reason)
+/* Handle one termination request for the subscriber */
+static void subscr_conn_shutdown_event(int conid, void *usr_data)
{
- struct subscriber *subscriber = usr_handle;
- spinlock_t *subscriber_lock;
-
- if (tipc_port_lock(port_ref) == NULL)
- return;
-
- subscriber_lock = subscriber->lock;
- subscr_terminate(subscriber);
- spin_unlock_bh(subscriber_lock);
+ subscr_release((struct tipc_subscriber *)usr_data);
}
-/**
- * subscr_conn_msg_event - handle new subscription request from subscriber
- *
- * Called with subscriber's server port unlocked.
- */
-
-static void subscr_conn_msg_event(void *usr_handle,
- u32 port_ref,
- struct sk_buff **buf,
- const unchar *data,
- u32 size)
+/* Handle one request to create a new subscription for the subscriber */
+static void subscr_conn_msg_event(int conid, struct sockaddr_tipc *addr,
+ void *usr_data, void *buf, size_t len)
{
- struct subscriber *subscriber = usr_handle;
- spinlock_t *subscriber_lock;
- struct subscription *sub;
-
- /*
- * Lock subscriber's server port (& make a local copy of lock pointer,
- * in case subscriber is deleted while processing subscription request)
- */
-
- if (tipc_port_lock(port_ref) == NULL)
- return;
-
- subscriber_lock = subscriber->lock;
+ struct tipc_subscriber *subscriber = usr_data;
+ struct tipc_subscription *sub = NULL;
- if (size != sizeof(struct tipc_subscr)) {
+ spin_lock_bh(&subscriber->lock);
+ if (subscr_subscribe((struct tipc_subscr *)buf, subscriber, &sub) < 0) {
+ spin_unlock_bh(&subscriber->lock);
subscr_terminate(subscriber);
- spin_unlock_bh(subscriber_lock);
- } else {
- sub = subscr_subscribe((struct tipc_subscr *)data, subscriber);
- spin_unlock_bh(subscriber_lock);
- if (sub != NULL) {
-
- /*
- * We must release the server port lock before adding a
- * subscription to the name table since TIPC needs to be
- * able to (re)acquire the port lock if an event message
- * issued by the subscription process is rejected and
- * returned. The subscription cannot be deleted while
- * it is being added to the name table because:
- * a) the single-threading of the native API port code
- * ensures the subscription cannot be cancelled and
- * the subscriber connection cannot be broken, and
- * b) the name table lock ensures the subscription
- * timeout code cannot delete the subscription,
- * so the subscription object is still protected.
- */
-
- tipc_nametbl_subscribe(sub);
- }
+ return;
}
+ if (sub)
+ tipc_nametbl_subscribe(sub);
+ spin_unlock_bh(&subscriber->lock);
}
-/**
- * subscr_named_msg_event - handle request to establish a new subscriber
- */
-static void subscr_named_msg_event(void *usr_handle,
- u32 port_ref,
- struct sk_buff **buf,
- const unchar *data,
- u32 size,
- u32 importance,
- struct tipc_portid const *orig,
- struct tipc_name_seq const *dest)
+/* Handle one request to establish a new subscriber */
+static void *subscr_named_msg_event(int conid)
{
- static struct iovec msg_sect = {NULL, 0};
-
- struct subscriber *subscriber;
- u32 server_port_ref;
+ struct tipc_subscriber *subscriber;
/* Create subscriber object */
-
- subscriber = kzalloc(sizeof(struct subscriber), GFP_ATOMIC);
+ subscriber = kzalloc(sizeof(struct tipc_subscriber), GFP_ATOMIC);
if (subscriber == NULL) {
- warn("Subscriber rejected, no memory\n");
- return;
+ pr_warn("Subscriber rejected, no memory\n");
+ return NULL;
}
INIT_LIST_HEAD(&subscriber->subscription_list);
- INIT_LIST_HEAD(&subscriber->subscriber_list);
-
- /* Create server port & establish connection to subscriber */
-
- tipc_createport(topsrv.user_ref,
- subscriber,
- importance,
- NULL,
- NULL,
- subscr_conn_shutdown_event,
- NULL,
- NULL,
- subscr_conn_msg_event,
- NULL,
- &subscriber->port_ref);
- if (subscriber->port_ref == 0) {
- warn("Subscriber rejected, unable to create port\n");
- kfree(subscriber);
- return;
- }
- tipc_connect2port(subscriber->port_ref, orig);
-
- /* Lock server port (& save lock address for future use) */
-
- subscriber->lock = tipc_port_lock(subscriber->port_ref)->publ.lock;
-
- /* Add subscriber to topology server's subscriber list */
-
- spin_lock_bh(&topsrv.lock);
- list_add(&subscriber->subscriber_list, &topsrv.subscriber_list);
- spin_unlock_bh(&topsrv.lock);
+ subscriber->conid = conid;
+ spin_lock_init(&subscriber->lock);
- /* Unlock server port */
-
- server_port_ref = subscriber->port_ref;
- spin_unlock_bh(subscriber->lock);
-
- /* Send an ACK- to complete connection handshaking */
-
- tipc_send(server_port_ref, 1, &msg_sect);
-
- /* Handle optional subscription request */
-
- if (size != 0) {
- subscr_conn_msg_event(subscriber, server_port_ref,
- buf, data, size);
- }
+ return (void *)subscriber;
}
int tipc_subscr_start(void)
{
- struct tipc_name_seq seq = {TIPC_TOP_SRV, TIPC_TOP_SRV, TIPC_TOP_SRV};
- int res;
-
- memset(&topsrv, 0, sizeof (topsrv));
- spin_lock_init(&topsrv.lock);
- INIT_LIST_HEAD(&topsrv.subscriber_list);
-
- spin_lock_bh(&topsrv.lock);
- res = tipc_attach(&topsrv.user_ref);
- if (res) {
- spin_unlock_bh(&topsrv.lock);
- return res;
- }
-
- res = tipc_createport(topsrv.user_ref,
- NULL,
- TIPC_CRITICAL_IMPORTANCE,
- NULL,
- NULL,
- NULL,
- NULL,
- subscr_named_msg_event,
- NULL,
- NULL,
- &topsrv.setup_port);
- if (res)
- goto failed;
-
- res = tipc_nametbl_publish_rsv(topsrv.setup_port, TIPC_NODE_SCOPE, &seq);
- if (res)
- goto failed;
-
- spin_unlock_bh(&topsrv.lock);
- return 0;
-
-failed:
- err("Failed to create subscription service\n");
- tipc_detach(topsrv.user_ref);
- topsrv.user_ref = 0;
- spin_unlock_bh(&topsrv.lock);
- return res;
+ return tipc_server_start(&topsrv);
}
void tipc_subscr_stop(void)
{
- struct subscriber *subscriber;
- struct subscriber *subscriber_temp;
- spinlock_t *subscriber_lock;
-
- if (topsrv.user_ref) {
- tipc_deleteport(topsrv.setup_port);
- list_for_each_entry_safe(subscriber, subscriber_temp,
- &topsrv.subscriber_list,
- subscriber_list) {
- subscriber_lock = subscriber->lock;
- spin_lock_bh(subscriber_lock);
- subscr_terminate(subscriber);
- spin_unlock_bh(subscriber_lock);
- }
- tipc_detach(topsrv.user_ref);
- topsrv.user_ref = 0;
- }
+ tipc_server_stop(&topsrv);
}
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
index 45d89bf4d20..393e417bee3 100644
--- a/net/tipc/subscr.h
+++ b/net/tipc/subscr.h
@@ -2,7 +2,7 @@
* net/tipc/subscr.h: Include file for TIPC network topology service
*
* Copyright (c) 2003-2006, Ericsson AB
- * Copyright (c) 2005-2007, Wind River Systems
+ * Copyright (c) 2005-2007, 2012-2013, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -37,18 +37,17 @@
#ifndef _TIPC_SUBSCR_H
#define _TIPC_SUBSCR_H
-struct subscription;
+#include "server.h"
-typedef void (*tipc_subscr_event) (struct subscription *sub,
- u32 found_lower, u32 found_upper,
- u32 event, u32 port_ref, u32 node);
+struct tipc_subscription;
+struct tipc_subscriber;
/**
- * struct subscription - TIPC network topology subscription object
+ * struct tipc_subscription - TIPC network topology subscription object
+ * @subscriber: pointer to its subscriber
* @seq: name sequence associated with subscription
* @timeout: duration of subscription (in ms)
* @filter: event filtering to be done for subscription
- * @event_cb: routine invoked when a subscription event is detected
* @timer: timer governing subscription duration (optional)
* @nameseq_list: adjacent subscriptions in name sequence's subscription list
* @subscription_list: adjacent subscriptions in subscriber's subscription list
@@ -56,35 +55,27 @@ typedef void (*tipc_subscr_event) (struct subscription *sub,
* @swap: indicates if subscriber uses opposite endianness in its messages
* @evt: template for events generated by subscription
*/
-
-struct subscription {
+struct tipc_subscription {
+ struct tipc_subscriber *subscriber;
struct tipc_name_seq seq;
u32 timeout;
u32 filter;
- tipc_subscr_event event_cb;
struct timer_list timer;
struct list_head nameseq_list;
struct list_head subscription_list;
- u32 server_ref;
int swap;
struct tipc_event evt;
};
-int tipc_subscr_overlap(struct subscription *sub,
- u32 found_lower,
+int tipc_subscr_overlap(struct tipc_subscription *sub, u32 found_lower,
u32 found_upper);
-void tipc_subscr_report_overlap(struct subscription *sub,
- u32 found_lower,
- u32 found_upper,
- u32 event,
- u32 port_ref,
- u32 node,
- int must_report);
+void tipc_subscr_report_overlap(struct tipc_subscription *sub, u32 found_lower,
+ u32 found_upper, u32 event, u32 port_ref,
+ u32 node, int must);
int tipc_subscr_start(void);
void tipc_subscr_stop(void);
-
#endif
diff --git a/net/tipc/dbg.h b/net/tipc/sysctl.c
index 3ba6ba8b434..f3fef93325a 100644
--- a/net/tipc/dbg.h
+++ b/net/tipc/sysctl.c
@@ -1,8 +1,7 @@
/*
- * net/tipc/dbg.h: Include file for TIPC print buffer routines
+ * net/tipc/sysctl.c: sysctl interface to TIPC subsystem
*
- * Copyright (c) 1997-2006, Ericsson AB
- * Copyright (c) 2005-2007, Wind River Systems
+ * Copyright (c) 2013, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -34,34 +33,32 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef _TIPC_DBG_H
-#define _TIPC_DBG_H
+#include "core.h"
-/**
- * struct print_buf - TIPC print buffer structure
- * @buf: pointer to character array containing print buffer contents
- * @size: size of character array
- * @crs: pointer to first unused space in character array (i.e. final NUL)
- * @echo: echo output to system console if non-zero
- */
-
-struct print_buf {
- char *buf;
- u32 size;
- char *crs;
- int echo;
-};
+#include <linux/sysctl.h>
-#define TIPC_PB_MIN_SIZE 64 /* minimum size for a print buffer's array */
-#define TIPC_PB_MAX_STR 512 /* max printable string (with trailing NUL) */
+static struct ctl_table_header *tipc_ctl_hdr;
-void tipc_printbuf_init(struct print_buf *pb, char *buf, u32 size);
-int tipc_printbuf_validate(struct print_buf *pb);
-
-int tipc_log_resize(int log_size);
+static struct ctl_table tipc_table[] = {
+ {
+ .procname = "tipc_rmem",
+ .data = &sysctl_tipc_rmem,
+ .maxlen = sizeof(sysctl_tipc_rmem),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {}
+};
-struct sk_buff *tipc_log_resize_cmd(const void *req_tlv_area,
- int req_tlv_space);
-struct sk_buff *tipc_log_dump(void);
+int tipc_register_sysctl(void)
+{
+ tipc_ctl_hdr = register_net_sysctl(&init_net, "net/tipc", tipc_table);
+ if (tipc_ctl_hdr == NULL)
+ return -ENOMEM;
+ return 0;
+}
-#endif
+void tipc_unregister_sysctl(void)
+{
+ unregister_net_sysctl_table(tipc_ctl_hdr);
+}
diff --git a/net/tipc/user_reg.c b/net/tipc/user_reg.c
deleted file mode 100644
index 2e2702e2049..00000000000
--- a/net/tipc/user_reg.c
+++ /dev/null
@@ -1,218 +0,0 @@
-/*
- * net/tipc/user_reg.c: TIPC user registry code
- *
- * Copyright (c) 2000-2006, Ericsson AB
- * Copyright (c) 2004-2005, Wind River Systems
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "core.h"
-#include "user_reg.h"
-
-/*
- * TIPC user registry keeps track of users of the tipc_port interface.
- *
- * The registry utilizes an array of "TIPC user" entries;
- * a user's ID is the index of their associated array entry.
- * Array entry 0 is not used, so userid 0 is not valid;
- * TIPC sometimes uses this value to denote an anonymous user.
- * The list of free entries is initially chained from last entry to entry 1.
- */
-
-/**
- * struct tipc_user - registered TIPC user info
- * @next: index of next free registry entry (or -1 for an allocated entry)
- * @ports: list of user ports owned by the user
- */
-
-struct tipc_user {
- int next;
- struct list_head ports;
-};
-
-#define MAX_USERID 64
-#define USER_LIST_SIZE ((MAX_USERID + 1) * sizeof(struct tipc_user))
-
-static struct tipc_user *users = NULL;
-static u32 next_free_user = MAX_USERID + 1;
-static DEFINE_SPINLOCK(reg_lock);
-
-/**
- * reg_init - create TIPC user registry (but don't activate it)
- *
- * If registry has been pre-initialized it is left "as is".
- * NOTE: This routine may be called when TIPC is inactive.
- */
-
-static int reg_init(void)
-{
- u32 i;
-
- spin_lock_bh(&reg_lock);
- if (!users) {
- users = kzalloc(USER_LIST_SIZE, GFP_ATOMIC);
- if (users) {
- for (i = 1; i <= MAX_USERID; i++) {
- users[i].next = i - 1;
- }
- next_free_user = MAX_USERID;
- }
- }
- spin_unlock_bh(&reg_lock);
- return users ? 0 : -ENOMEM;
-}
-
-/**
- * tipc_reg_start - activate TIPC user registry
- */
-
-int tipc_reg_start(void)
-{
- return reg_init();
-}
-
-/**
- * tipc_reg_stop - shut down & delete TIPC user registry
- */
-
-void tipc_reg_stop(void)
-{
- if (!users)
- return;
-
- kfree(users);
- users = NULL;
-}
-
-/**
- * tipc_attach - register a TIPC user
- *
- * NOTE: This routine may be called when TIPC is inactive.
- */
-
-int tipc_attach(u32 *userid)
-{
- struct tipc_user *user_ptr;
-
- if (!users)
- reg_init();
-
- spin_lock_bh(&reg_lock);
- if (!next_free_user) {
- spin_unlock_bh(&reg_lock);
- return -EBUSY;
- }
- user_ptr = &users[next_free_user];
- *userid = next_free_user;
- next_free_user = user_ptr->next;
- user_ptr->next = -1;
- spin_unlock_bh(&reg_lock);
-
- INIT_LIST_HEAD(&user_ptr->ports);
- atomic_inc(&tipc_user_count);
-
- return 0;
-}
-
-/**
- * tipc_detach - deregister a TIPC user
- */
-
-void tipc_detach(u32 userid)
-{
- struct tipc_user *user_ptr;
- struct list_head ports_temp;
- struct user_port *up_ptr, *temp_up_ptr;
-
- if ((userid == 0) || (userid > MAX_USERID))
- return;
-
- spin_lock_bh(&reg_lock);
- if ((!users) || (users[userid].next >= 0)) {
- spin_unlock_bh(&reg_lock);
- return;
- }
-
- user_ptr = &users[userid];
- INIT_LIST_HEAD(&ports_temp);
- list_splice(&user_ptr->ports, &ports_temp);
- user_ptr->next = next_free_user;
- next_free_user = userid;
- spin_unlock_bh(&reg_lock);
-
- atomic_dec(&tipc_user_count);
-
- list_for_each_entry_safe(up_ptr, temp_up_ptr, &ports_temp, uport_list) {
- tipc_deleteport(up_ptr->ref);
- }
-}
-
-/**
- * tipc_reg_add_port - register a user's driver port
- */
-
-int tipc_reg_add_port(struct user_port *up_ptr)
-{
- struct tipc_user *user_ptr;
-
- if (up_ptr->user_ref == 0)
- return 0;
- if (up_ptr->user_ref > MAX_USERID)
- return -EINVAL;
- if ((tipc_mode == TIPC_NOT_RUNNING) || !users )
- return -ENOPROTOOPT;
-
- spin_lock_bh(&reg_lock);
- user_ptr = &users[up_ptr->user_ref];
- list_add(&up_ptr->uport_list, &user_ptr->ports);
- spin_unlock_bh(&reg_lock);
- return 0;
-}
-
-/**
- * tipc_reg_remove_port - deregister a user's driver port
- */
-
-int tipc_reg_remove_port(struct user_port *up_ptr)
-{
- if (up_ptr->user_ref == 0)
- return 0;
- if (up_ptr->user_ref > MAX_USERID)
- return -EINVAL;
- if (!users )
- return -ENOPROTOOPT;
-
- spin_lock_bh(&reg_lock);
- list_del_init(&up_ptr->uport_list);
- spin_unlock_bh(&reg_lock);
- return 0;
-}
-
diff --git a/net/tipc/zone.c b/net/tipc/zone.c
deleted file mode 100644
index 1b61ca8c48e..00000000000
--- a/net/tipc/zone.c
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * net/tipc/zone.c: TIPC zone management routines
- *
- * Copyright (c) 2000-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "core.h"
-#include "zone.h"
-#include "cluster.h"
-#include "node.h"
-
-struct _zone *tipc_zone_create(u32 addr)
-{
- struct _zone *z_ptr;
- u32 z_num;
-
- if (!tipc_addr_domain_valid(addr)) {
- err("Zone creation failed, invalid domain 0x%x\n", addr);
- return NULL;
- }
-
- z_ptr = kzalloc(sizeof(*z_ptr), GFP_ATOMIC);
- if (!z_ptr) {
- warn("Zone creation failed, insufficient memory\n");
- return NULL;
- }
-
- z_num = tipc_zone(addr);
- z_ptr->addr = tipc_addr(z_num, 0, 0);
- tipc_net.zones[z_num] = z_ptr;
- return z_ptr;
-}
-
-void tipc_zone_delete(struct _zone *z_ptr)
-{
- u32 c_num;
-
- if (!z_ptr)
- return;
- for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
- tipc_cltr_delete(z_ptr->clusters[c_num]);
- }
- kfree(z_ptr);
-}
-
-void tipc_zone_attach_cluster(struct _zone *z_ptr, struct cluster *c_ptr)
-{
- u32 c_num = tipc_cluster(c_ptr->addr);
-
- assert(c_ptr->addr);
- assert(c_num <= tipc_max_clusters);
- assert(z_ptr->clusters[c_num] == NULL);
- z_ptr->clusters[c_num] = c_ptr;
-}
-
-void tipc_zone_remove_as_router(struct _zone *z_ptr, u32 router)
-{
- u32 c_num;
-
- for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
- if (z_ptr->clusters[c_num]) {
- tipc_cltr_remove_as_router(z_ptr->clusters[c_num],
- router);
- }
- }
-}
-
-void tipc_zone_send_external_routes(struct _zone *z_ptr, u32 dest)
-{
- u32 c_num;
-
- for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
- if (z_ptr->clusters[c_num]) {
- if (in_own_cluster(z_ptr->addr))
- continue;
- tipc_cltr_send_ext_routes(z_ptr->clusters[c_num], dest);
- }
- }
-}
-
-struct tipc_node *tipc_zone_select_remote_node(struct _zone *z_ptr, u32 addr, u32 ref)
-{
- struct cluster *c_ptr;
- struct tipc_node *n_ptr;
- u32 c_num;
-
- if (!z_ptr)
- return NULL;
- c_ptr = z_ptr->clusters[tipc_cluster(addr)];
- if (!c_ptr)
- return NULL;
- n_ptr = tipc_cltr_select_node(c_ptr, ref);
- if (n_ptr)
- return n_ptr;
-
- /* Links to any other clusters within this zone ? */
- for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
- c_ptr = z_ptr->clusters[c_num];
- if (!c_ptr)
- return NULL;
- n_ptr = tipc_cltr_select_node(c_ptr, ref);
- if (n_ptr)
- return n_ptr;
- }
- return NULL;
-}
-
-u32 tipc_zone_select_router(struct _zone *z_ptr, u32 addr, u32 ref)
-{
- struct cluster *c_ptr;
- u32 c_num;
- u32 router;
-
- if (!z_ptr)
- return 0;
- c_ptr = z_ptr->clusters[tipc_cluster(addr)];
- router = c_ptr ? tipc_cltr_select_router(c_ptr, ref) : 0;
- if (router)
- return router;
-
- /* Links to any other clusters within the zone? */
- for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
- c_ptr = z_ptr->clusters[c_num];
- router = c_ptr ? tipc_cltr_select_router(c_ptr, ref) : 0;
- if (router)
- return router;
- }
- return 0;
-}