Diffstat (limited to 'drivers/infiniband/core/cma.c')
-rw-r--r--  drivers/infiniband/core/cma.c | 1822
1 file changed, 1115 insertions(+), 707 deletions(-)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 6884da24fde..d570030d899 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -41,12 +41,16 @@
#include <linux/idr.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>
+#include <linux/module.h>
+#include <net/route.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
+#include <rdma/rdma_netlink.h>
+#include <rdma/ib.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
@@ -76,10 +80,10 @@ static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct workqueue_struct *cma_wq;
-static DEFINE_IDR(sdp_ps);
static DEFINE_IDR(tcp_ps);
static DEFINE_IDR(udp_ps);
static DEFINE_IDR(ipoib_ps);
+static DEFINE_IDR(ib_ps);
struct cma_device {
struct list_head list;
@@ -89,26 +93,16 @@ struct cma_device {
struct list_head id_list;
};
-enum cma_state {
- CMA_IDLE,
- CMA_ADDR_QUERY,
- CMA_ADDR_RESOLVED,
- CMA_ROUTE_QUERY,
- CMA_ROUTE_RESOLVED,
- CMA_CONNECT,
- CMA_DISCONNECT,
- CMA_ADDR_BOUND,
- CMA_LISTEN,
- CMA_DEVICE_REMOVAL,
- CMA_DESTROYING
-};
-
struct rdma_bind_list {
struct idr *ps;
struct hlist_head owners;
unsigned short port;
};
+enum {
+ CMA_OPTION_AFONLY,
+};
+
/*
* Device removal can occur at anytime, so we need extra handling to
* serialize notifying the user of device removal with other callbacks.
@@ -126,7 +120,7 @@ struct rdma_id_private {
struct list_head mc_list;
int internal_id;
- enum cma_state state;
+ enum rdma_cm_state state;
spinlock_t lock;
struct mutex qp_mutex;
@@ -146,8 +140,12 @@ struct rdma_id_private {
u32 seq_num;
u32 qkey;
u32 qp_num;
+ pid_t owner;
+ u32 options;
u8 srq;
u8 tos;
+ u8 reuseaddr;
+ u8 afonly;
};
struct cma_multicast {
@@ -164,8 +162,8 @@ struct cma_multicast {
struct cma_work {
struct work_struct work;
struct rdma_id_private *id;
- enum cma_state old_state;
- enum cma_state new_state;
+ enum rdma_cm_state old_state;
+ enum rdma_cm_state new_state;
struct rdma_cm_event event;
};
@@ -197,26 +195,9 @@ struct cma_hdr {
union cma_ip_addr dst_addr;
};
-struct sdp_hh {
- u8 bsdh[16];
- u8 sdp_version; /* Major version: 7:4 */
- u8 ip_version; /* IP version: 7:4 */
- u8 sdp_specific1[10];
- __be16 port;
- __be16 sdp_specific2;
- union cma_ip_addr src_addr;
- union cma_ip_addr dst_addr;
-};
-
-struct sdp_hah {
- u8 bsdh[16];
- u8 sdp_version;
-};
-
#define CMA_VERSION 0x00
-#define SDP_MAJ_VERSION 0x2
-static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
+static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp)
{
unsigned long flags;
int ret;
@@ -228,7 +209,7 @@ static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
}
static int cma_comp_exch(struct rdma_id_private *id_priv,
- enum cma_state comp, enum cma_state exch)
+ enum rdma_cm_state comp, enum rdma_cm_state exch)
{
unsigned long flags;
int ret;
@@ -240,11 +221,11 @@ static int cma_comp_exch(struct rdma_id_private *id_priv,
return ret;
}
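/*
 * Sketch (not part of the patch): the body of cma_comp_exch() is elided
 * by the diff context.  The pattern it implements, a spinlock-guarded
 * compare-and-exchange on the id state used throughout this file, is:
 *
 *	spin_lock_irqsave(&id_priv->lock, flags);
 *	ret = (id_priv->state == comp);
 *	if (ret)
 *		id_priv->state = exch;	// transition only from 'comp'
 *	spin_unlock_irqrestore(&id_priv->lock, flags);
 *	return ret;			// nonzero iff we moved to 'exch'
 */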
-static enum cma_state cma_exch(struct rdma_id_private *id_priv,
- enum cma_state exch)
+static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv,
+ enum rdma_cm_state exch)
{
unsigned long flags;
- enum cma_state old;
+ enum rdma_cm_state old;
spin_lock_irqsave(&id_priv->lock, flags);
old = id_priv->state;
@@ -263,26 +244,6 @@ static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}
-static inline u8 sdp_get_majv(u8 sdp_version)
-{
- return sdp_version >> 4;
-}
-
-static inline u8 sdp_get_ip_ver(struct sdp_hh *hh)
-{
- return hh->ip_version >> 4;
-}
-
-static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
-{
- hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF);
-}
-
-static inline int cma_is_ud_ps(enum rdma_port_space ps)
-{
- return (ps == RDMA_PS_UDP || ps == RDMA_PS_IPOIB);
-}
-
static void cma_attach_to_dev(struct rdma_id_private *id_priv,
struct cma_device *cma_dev)
{
@@ -308,23 +269,49 @@ static inline void release_mc(struct kref *kref)
kfree(mc);
}
-static void cma_detach_from_dev(struct rdma_id_private *id_priv)
+static void cma_release_dev(struct rdma_id_private *id_priv)
{
+ mutex_lock(&lock);
list_del(&id_priv->list);
cma_deref_dev(id_priv->cma_dev);
id_priv->cma_dev = NULL;
+ mutex_unlock(&lock);
+}
+
+static inline struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv)
+{
+ return (struct sockaddr *) &id_priv->id.route.addr.src_addr;
+}
+
+static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv)
+{
+ return (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
+}
+
+static inline unsigned short cma_family(struct rdma_id_private *id_priv)
+{
+ return id_priv->id.route.addr.src_addr.ss_family;
}
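/*
 * Sketch (not part of the patch): cma_src_addr(), cma_dst_addr() and
 * cma_family() centralize the sockaddr_storage casts that were
 * previously open-coded at each use; most later hunks are mechanical
 * conversions to them, e.g.:
 *
 *	memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr));
 */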
-static int cma_set_qkey(struct rdma_id_private *id_priv)
+static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
{
struct ib_sa_mcmember_rec rec;
int ret = 0;
- if (id_priv->qkey)
+ if (id_priv->qkey) {
+ if (qkey && id_priv->qkey != qkey)
+ return -EINVAL;
return 0;
+ }
+
+ if (qkey) {
+ id_priv->qkey = qkey;
+ return 0;
+ }
switch (id_priv->id.ps) {
case RDMA_PS_UDP:
+ case RDMA_PS_IB:
id_priv->qkey = RDMA_UDP_QKEY;
break;
case RDMA_PS_IPOIB:
@@ -341,55 +328,83 @@ static int cma_set_qkey(struct rdma_id_private *id_priv)
return ret;
}
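/*
 * Sketch (not part of the patch): precedence in the reworked
 * cma_set_qkey().  An already-set qkey wins, and a conflicting nonzero
 * request fails with -EINVAL; otherwise an explicit nonzero qkey is
 * stored as-is; otherwise a per-port-space default applies
 * (RDMA_UDP_QKEY for RDMA_PS_UDP and RDMA_PS_IB; RDMA_PS_IPOIB asks
 * the SA for the broadcast group's qkey, elided above).
 */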
-static int find_gid_port(struct ib_device *device, union ib_gid *gid, u8 port_num)
+static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr)
{
- int i;
- int err;
- struct ib_port_attr props;
- union ib_gid tmp;
+ dev_addr->dev_type = ARPHRD_INFINIBAND;
+ rdma_addr_set_sgid(dev_addr, (union ib_gid *) &sib->sib_addr);
+ ib_addr_set_pkey(dev_addr, ntohs(sib->sib_pkey));
+}
- err = ib_query_port(device, port_num, &props);
- if (err)
- return 1;
+static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
+{
+ int ret;
- for (i = 0; i < props.gid_tbl_len; ++i) {
- err = ib_query_gid(device, port_num, i, &tmp);
- if (err)
- return 1;
- if (!memcmp(&tmp, gid, sizeof tmp))
- return 0;
+ if (addr->sa_family != AF_IB) {
+ ret = rdma_translate_ip(addr, dev_addr, NULL);
+ } else {
+ cma_translate_ib((struct sockaddr_ib *) addr, dev_addr);
+ ret = 0;
}
- return -EAGAIN;
+ return ret;
}
-static int cma_acquire_dev(struct rdma_id_private *id_priv)
+static int cma_acquire_dev(struct rdma_id_private *id_priv,
+ struct rdma_id_private *listen_id_priv)
{
struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
struct cma_device *cma_dev;
union ib_gid gid, iboe_gid;
int ret = -ENODEV;
- u8 port;
+ u8 port, found_port;
enum rdma_link_layer dev_ll = dev_addr->dev_type == ARPHRD_INFINIBAND ?
IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
- iboe_addr_get_sgid(dev_addr, &iboe_gid);
+ if (dev_ll != IB_LINK_LAYER_INFINIBAND &&
+ id_priv->id.ps == RDMA_PS_IPOIB)
+ return -EINVAL;
+
+ mutex_lock(&lock);
+ rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
+ &iboe_gid);
+
memcpy(&gid, dev_addr->src_dev_addr +
rdma_addr_gid_offset(dev_addr), sizeof gid);
+ if (listen_id_priv &&
+ rdma_port_get_link_layer(listen_id_priv->id.device,
+ listen_id_priv->id.port_num) == dev_ll) {
+ cma_dev = listen_id_priv->cma_dev;
+ port = listen_id_priv->id.port_num;
+ if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB &&
+ rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET)
+ ret = ib_find_cached_gid(cma_dev->device, &iboe_gid,
+ &found_port, NULL);
+ else
+ ret = ib_find_cached_gid(cma_dev->device, &gid,
+ &found_port, NULL);
+
+ if (!ret && (port == found_port)) {
+ id_priv->id.port_num = found_port;
+ goto out;
+ }
+ }
list_for_each_entry(cma_dev, &dev_list, list) {
for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
+ if (listen_id_priv &&
+ listen_id_priv->cma_dev == cma_dev &&
+ listen_id_priv->id.port_num == port)
+ continue;
if (rdma_port_get_link_layer(cma_dev->device, port) == dev_ll) {
if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB &&
rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET)
- ret = find_gid_port(cma_dev->device, &iboe_gid, port);
+ ret = ib_find_cached_gid(cma_dev->device, &iboe_gid, &found_port, NULL);
else
- ret = find_gid_port(cma_dev->device, &gid, port);
+ ret = ib_find_cached_gid(cma_dev->device, &gid, &found_port, NULL);
- if (!ret) {
- id_priv->id.port_num = port;
+ if (!ret && (port == found_port)) {
+ id_priv->id.port_num = found_port;
goto out;
- } else if (ret == 1)
- break;
+ }
}
}
}
@@ -398,9 +413,64 @@ out:
if (!ret)
cma_attach_to_dev(id_priv, cma_dev);
+ mutex_unlock(&lock);
return ret;
}
+/*
+ * Select the source IB device and address to reach the destination IB address.
+ */
+static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
+{
+ struct cma_device *cma_dev, *cur_dev;
+ struct sockaddr_ib *addr;
+ union ib_gid gid, sgid, *dgid;
+ u16 pkey, index;
+ u8 p;
+ int i;
+
+ cma_dev = NULL;
+ addr = (struct sockaddr_ib *) cma_dst_addr(id_priv);
+ dgid = (union ib_gid *) &addr->sib_addr;
+ pkey = ntohs(addr->sib_pkey);
+
+ list_for_each_entry(cur_dev, &dev_list, list) {
+ if (rdma_node_get_transport(cur_dev->device->node_type) != RDMA_TRANSPORT_IB)
+ continue;
+
+ for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
+ if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))
+ continue;
+
+ for (i = 0; !ib_get_cached_gid(cur_dev->device, p, i, &gid); i++) {
+ if (!memcmp(&gid, dgid, sizeof(gid))) {
+ cma_dev = cur_dev;
+ sgid = gid;
+ id_priv->id.port_num = p;
+ goto found;
+ }
+
+ if (!cma_dev && (gid.global.subnet_prefix ==
+ dgid->global.subnet_prefix)) {
+ cma_dev = cur_dev;
+ sgid = gid;
+ id_priv->id.port_num = p;
+ }
+ }
+ }
+ }
+
+ if (!cma_dev)
+ return -ENODEV;
+
+found:
+ cma_attach_to_dev(id_priv, cma_dev);
+ addr = (struct sockaddr_ib *) cma_src_addr(id_priv);
+ memcpy(&addr->sib_addr, &sgid, sizeof sgid);
+ cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
+ return 0;
+}
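/*
 * Sketch (not part of the patch): the matching policy above, reduced to
 * a hypothetical helper.  An exact DGID match on a pkey-capable port
 * wins immediately; otherwise the first GID sharing the destination's
 * subnet prefix is remembered as a fallback:
 */
static int gid_match_rank(const union ib_gid *gid, const union ib_gid *dgid)
{
	if (!memcmp(gid, dgid, sizeof(*gid)))
		return 2;	/* exact match: stop searching */
	if (gid->global.subnet_prefix == dgid->global.subnet_prefix)
		return 1;	/* same subnet: fallback candidate */
	return 0;		/* no match */
}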
+
static void cma_deref_id(struct rdma_id_private *id_priv)
{
if (atomic_dec_and_test(&id_priv->refcount))
@@ -408,7 +478,7 @@ static void cma_deref_id(struct rdma_id_private *id_priv)
}
static int cma_disable_callback(struct rdma_id_private *id_priv,
- enum cma_state state)
+ enum rdma_cm_state state)
{
mutex_lock(&id_priv->handler_mutex);
if (id_priv->state != state) {
@@ -418,13 +488,9 @@ static int cma_disable_callback(struct rdma_id_private *id_priv,
return 0;
}
-static int cma_has_cm_dev(struct rdma_id_private *id_priv)
-{
- return (id_priv->id.device && id_priv->cm_id.ib);
-}
-
struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
- void *context, enum rdma_port_space ps)
+ void *context, enum rdma_port_space ps,
+ enum ib_qp_type qp_type)
{
struct rdma_id_private *id_priv;
@@ -432,10 +498,12 @@ struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
if (!id_priv)
return ERR_PTR(-ENOMEM);
- id_priv->state = CMA_IDLE;
+ id_priv->owner = task_pid_nr(current);
+ id_priv->state = RDMA_CM_IDLE;
id_priv->id.context = context;
id_priv->id.event_handler = event_handler;
id_priv->id.ps = ps;
+ id_priv->id.qp_type = qp_type;
spin_lock_init(&id_priv->lock);
mutex_init(&id_priv->qp_mutex);
init_completion(&id_priv->comp);
@@ -503,7 +571,7 @@ int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
if (IS_ERR(qp))
return PTR_ERR(qp);
- if (cma_is_ud_ps(id_priv->id.ps))
+ if (id->qp_type == IB_QPT_UD)
ret = cma_init_ud_qp(id_priv, qp);
else
ret = cma_init_conn_qp(id_priv, qp);
@@ -537,6 +605,7 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
{
struct ib_qp_attr qp_attr;
int qp_attr_mask, ret;
+ union ib_gid sgid;
mutex_lock(&id_priv->qp_mutex);
if (!id_priv->id.qp) {
@@ -559,6 +628,20 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
if (ret)
goto out;
+ ret = ib_query_gid(id_priv->id.device, id_priv->id.port_num,
+ qp_attr.ah_attr.grh.sgid_index, &sgid);
+ if (ret)
+ goto out;
+
+ if (rdma_node_get_transport(id_priv->cma_dev->device->node_type)
+ == RDMA_TRANSPORT_IB &&
+ rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)
+ == IB_LINK_LAYER_ETHERNET) {
+ ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr.smac, NULL);
+
+ if (ret)
+ goto out;
+ }
if (conn_param)
qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
@@ -631,8 +714,8 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
qp_attr->port_num = id_priv->id.port_num;
*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;
- if (cma_is_ud_ps(id_priv->id.ps)) {
- ret = cma_set_qkey(id_priv);
+ if (id_priv->id.qp_type == IB_QPT_UD) {
+ ret = cma_set_qkey(id_priv, 0);
if (ret)
return ret;
@@ -654,11 +737,12 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
id_priv = container_of(id, struct rdma_id_private, id);
switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
case RDMA_TRANSPORT_IB:
- if (!id_priv->cm_id.ib || cma_is_ud_ps(id_priv->id.ps))
+ if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
else
ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
qp_attr_mask);
+
if (qp_attr->qp_state == IB_QPS_RTR)
qp_attr->rq_psn = id_priv->seq_num;
break;
@@ -681,26 +765,30 @@ EXPORT_SYMBOL(rdma_init_qp_attr);
static inline int cma_zero_addr(struct sockaddr *addr)
{
- struct in6_addr *ip6;
-
- if (addr->sa_family == AF_INET)
- return ipv4_is_zeronet(
- ((struct sockaddr_in *)addr)->sin_addr.s_addr);
- else {
- ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
- return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
- ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;
+ switch (addr->sa_family) {
+ case AF_INET:
+ return ipv4_is_zeronet(((struct sockaddr_in *)addr)->sin_addr.s_addr);
+ case AF_INET6:
+ return ipv6_addr_any(&((struct sockaddr_in6 *) addr)->sin6_addr);
+ case AF_IB:
+ return ib_addr_any(&((struct sockaddr_ib *) addr)->sib_addr);
+ default:
+ return 0;
}
}
static inline int cma_loopback_addr(struct sockaddr *addr)
{
- if (addr->sa_family == AF_INET)
- return ipv4_is_loopback(
- ((struct sockaddr_in *) addr)->sin_addr.s_addr);
- else
- return ipv6_addr_loopback(
- &((struct sockaddr_in6 *) addr)->sin6_addr);
+ switch (addr->sa_family) {
+ case AF_INET:
+ return ipv4_is_loopback(((struct sockaddr_in *) addr)->sin_addr.s_addr);
+ case AF_INET6:
+ return ipv6_addr_loopback(&((struct sockaddr_in6 *) addr)->sin6_addr);
+ case AF_IB:
+ return ib_addr_loopback(&((struct sockaddr_ib *) addr)->sib_addr);
+ default:
+ return 0;
+ }
}
static inline int cma_any_addr(struct sockaddr *addr)
@@ -708,12 +796,40 @@ static inline int cma_any_addr(struct sockaddr *addr)
return cma_zero_addr(addr) || cma_loopback_addr(addr);
}
-static inline __be16 cma_port(struct sockaddr *addr)
+static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst)
{
- if (addr->sa_family == AF_INET)
+ if (src->sa_family != dst->sa_family)
+ return -1;
+
+ switch (src->sa_family) {
+ case AF_INET:
+ return ((struct sockaddr_in *) src)->sin_addr.s_addr !=
+ ((struct sockaddr_in *) dst)->sin_addr.s_addr;
+ case AF_INET6:
+ return ipv6_addr_cmp(&((struct sockaddr_in6 *) src)->sin6_addr,
+ &((struct sockaddr_in6 *) dst)->sin6_addr);
+ default:
+ return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr,
+ &((struct sockaddr_ib *) dst)->sib_addr);
+ }
+}
+
+static __be16 cma_port(struct sockaddr *addr)
+{
+ struct sockaddr_ib *sib;
+
+ switch (addr->sa_family) {
+ case AF_INET:
return ((struct sockaddr_in *) addr)->sin_port;
- else
+ case AF_INET6:
return ((struct sockaddr_in6 *) addr)->sin6_port;
+ case AF_IB:
+ sib = (struct sockaddr_ib *) addr;
+ return htons((u16) (be64_to_cpu(sib->sib_sid) &
+ be64_to_cpu(sib->sib_sid_mask)));
+ default:
+ return 0;
+ }
}
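/*
 * Worked example (hypothetical values, not part of the patch): for
 * AF_IB the port lives in the low 16 bits of the masked service ID.
 * With RDMA_PS_TCP (0x0106) and port 5000 (0x1388):
 *
 *	sib_sid      = cpu_to_be64(0x0000000001061388)
 *	sib_sid_mask = cpu_to_be64(~0ULL)
 *	cma_port()   = htons((u16)(0x0000000001061388 & ~0ULL))
 *	             = htons(0x1388)
 */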
static inline int cma_any_port(struct sockaddr *addr)
@@ -721,83 +837,93 @@ static inline int cma_any_port(struct sockaddr *addr)
return !cma_port(addr);
}
-static int cma_get_net_info(void *hdr, enum rdma_port_space ps,
- u8 *ip_ver, __be16 *port,
- union cma_ip_addr **src, union cma_ip_addr **dst)
+static void cma_save_ib_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
+ struct ib_sa_path_rec *path)
{
- switch (ps) {
- case RDMA_PS_SDP:
- if (sdp_get_majv(((struct sdp_hh *) hdr)->sdp_version) !=
- SDP_MAJ_VERSION)
- return -EINVAL;
-
- *ip_ver = sdp_get_ip_ver(hdr);
- *port = ((struct sdp_hh *) hdr)->port;
- *src = &((struct sdp_hh *) hdr)->src_addr;
- *dst = &((struct sdp_hh *) hdr)->dst_addr;
- break;
- default:
- if (((struct cma_hdr *) hdr)->cma_version != CMA_VERSION)
- return -EINVAL;
+ struct sockaddr_ib *listen_ib, *ib;
- *ip_ver = cma_get_ip_ver(hdr);
- *port = ((struct cma_hdr *) hdr)->port;
- *src = &((struct cma_hdr *) hdr)->src_addr;
- *dst = &((struct cma_hdr *) hdr)->dst_addr;
- break;
- }
+ listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr;
+ ib = (struct sockaddr_ib *) &id->route.addr.src_addr;
+ ib->sib_family = listen_ib->sib_family;
+ ib->sib_pkey = path->pkey;
+ ib->sib_flowinfo = path->flow_label;
+ memcpy(&ib->sib_addr, &path->sgid, 16);
+ ib->sib_sid = listen_ib->sib_sid;
+ ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
+ ib->sib_scope_id = listen_ib->sib_scope_id;
- if (*ip_ver != 4 && *ip_ver != 6)
- return -EINVAL;
- return 0;
+ ib = (struct sockaddr_ib *) &id->route.addr.dst_addr;
+ ib->sib_family = listen_ib->sib_family;
+ ib->sib_pkey = path->pkey;
+ ib->sib_flowinfo = path->flow_label;
+ memcpy(&ib->sib_addr, &path->dgid, 16);
}
-static void cma_save_net_info(struct rdma_addr *addr,
- struct rdma_addr *listen_addr,
- u8 ip_ver, __be16 port,
- union cma_ip_addr *src, union cma_ip_addr *dst)
+static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
+ struct cma_hdr *hdr)
{
struct sockaddr_in *listen4, *ip4;
+
+ listen4 = (struct sockaddr_in *) &listen_id->route.addr.src_addr;
+ ip4 = (struct sockaddr_in *) &id->route.addr.src_addr;
+ ip4->sin_family = listen4->sin_family;
+ ip4->sin_addr.s_addr = hdr->dst_addr.ip4.addr;
+ ip4->sin_port = listen4->sin_port;
+
+ ip4 = (struct sockaddr_in *) &id->route.addr.dst_addr;
+ ip4->sin_family = listen4->sin_family;
+ ip4->sin_addr.s_addr = hdr->src_addr.ip4.addr;
+ ip4->sin_port = hdr->port;
+}
+
+static void cma_save_ip6_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
+ struct cma_hdr *hdr)
+{
struct sockaddr_in6 *listen6, *ip6;
- switch (ip_ver) {
+ listen6 = (struct sockaddr_in6 *) &listen_id->route.addr.src_addr;
+ ip6 = (struct sockaddr_in6 *) &id->route.addr.src_addr;
+ ip6->sin6_family = listen6->sin6_family;
+ ip6->sin6_addr = hdr->dst_addr.ip6;
+ ip6->sin6_port = listen6->sin6_port;
+
+ ip6 = (struct sockaddr_in6 *) &id->route.addr.dst_addr;
+ ip6->sin6_family = listen6->sin6_family;
+ ip6->sin6_addr = hdr->src_addr.ip6;
+ ip6->sin6_port = hdr->port;
+}
+
+static int cma_save_net_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
+ struct ib_cm_event *ib_event)
+{
+ struct cma_hdr *hdr;
+
+ if ((listen_id->route.addr.src_addr.ss_family == AF_IB) &&
+ (ib_event->event == IB_CM_REQ_RECEIVED)) {
+ cma_save_ib_info(id, listen_id, ib_event->param.req_rcvd.primary_path);
+ return 0;
+ }
+
+ hdr = ib_event->private_data;
+ if (hdr->cma_version != CMA_VERSION)
+ return -EINVAL;
+
+ switch (cma_get_ip_ver(hdr)) {
case 4:
- listen4 = (struct sockaddr_in *) &listen_addr->src_addr;
- ip4 = (struct sockaddr_in *) &addr->src_addr;
- ip4->sin_family = listen4->sin_family;
- ip4->sin_addr.s_addr = dst->ip4.addr;
- ip4->sin_port = listen4->sin_port;
-
- ip4 = (struct sockaddr_in *) &addr->dst_addr;
- ip4->sin_family = listen4->sin_family;
- ip4->sin_addr.s_addr = src->ip4.addr;
- ip4->sin_port = port;
+ cma_save_ip4_info(id, listen_id, hdr);
break;
case 6:
- listen6 = (struct sockaddr_in6 *) &listen_addr->src_addr;
- ip6 = (struct sockaddr_in6 *) &addr->src_addr;
- ip6->sin6_family = listen6->sin6_family;
- ip6->sin6_addr = dst->ip6;
- ip6->sin6_port = listen6->sin6_port;
-
- ip6 = (struct sockaddr_in6 *) &addr->dst_addr;
- ip6->sin6_family = listen6->sin6_family;
- ip6->sin6_addr = src->ip6;
- ip6->sin6_port = port;
+ cma_save_ip6_info(id, listen_id, hdr);
break;
default:
- break;
+ return -EINVAL;
}
+ return 0;
}
-static inline int cma_user_data_offset(enum rdma_port_space ps)
+static inline int cma_user_data_offset(struct rdma_id_private *id_priv)
{
- switch (ps) {
- case RDMA_PS_SDP:
- return 0;
- default:
- return sizeof(struct cma_hdr);
- }
+ return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr);
}
static void cma_cancel_route(struct rdma_id_private *id_priv)
@@ -838,18 +964,17 @@ static void cma_cancel_listens(struct rdma_id_private *id_priv)
}
static void cma_cancel_operation(struct rdma_id_private *id_priv,
- enum cma_state state)
+ enum rdma_cm_state state)
{
switch (state) {
- case CMA_ADDR_QUERY:
+ case RDMA_CM_ADDR_QUERY:
rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
break;
- case CMA_ROUTE_QUERY:
+ case RDMA_CM_ROUTE_QUERY:
cma_cancel_route(id_priv);
break;
- case CMA_LISTEN:
- if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)
- && !id_priv->cma_dev)
+ case RDMA_CM_LISTEN:
+ if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev)
cma_cancel_listens(id_priv);
break;
default:
@@ -898,32 +1023,35 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
void rdma_destroy_id(struct rdma_cm_id *id)
{
struct rdma_id_private *id_priv;
- enum cma_state state;
+ enum rdma_cm_state state;
id_priv = container_of(id, struct rdma_id_private, id);
- state = cma_exch(id_priv, CMA_DESTROYING);
+ state = cma_exch(id_priv, RDMA_CM_DESTROYING);
cma_cancel_operation(id_priv, state);
- mutex_lock(&lock);
+ /*
+ * Wait for any active callback to finish. New callbacks will find
+ * the id_priv state set to destroying and abort.
+ */
+ mutex_lock(&id_priv->handler_mutex);
+ mutex_unlock(&id_priv->handler_mutex);
+
if (id_priv->cma_dev) {
- mutex_unlock(&lock);
switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
case RDMA_TRANSPORT_IB:
- if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
+ if (id_priv->cm_id.ib)
ib_destroy_cm_id(id_priv->cm_id.ib);
break;
case RDMA_TRANSPORT_IWARP:
- if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
+ if (id_priv->cm_id.iw)
iw_destroy_cm_id(id_priv->cm_id.iw);
break;
default:
break;
}
cma_leave_mc_groups(id_priv);
- mutex_lock(&lock);
- cma_detach_from_dev(id_priv);
+ cma_release_dev(id_priv);
}
- mutex_unlock(&lock);
cma_release_port(id_priv);
cma_deref_id(id_priv);
@@ -961,16 +1089,6 @@ reject:
return ret;
}
-static int cma_verify_rep(struct rdma_id_private *id_priv, void *data)
-{
- if (id_priv->id.ps == RDMA_PS_SDP &&
- sdp_get_majv(((struct sdp_hah *) data)->sdp_version) !=
- SDP_MAJ_VERSION)
- return -EINVAL;
-
- return 0;
-}
-
static void cma_set_rep_event_data(struct rdma_cm_event *event,
struct ib_cm_rep_event_param *rep_data,
void *private_data)
@@ -992,9 +1110,9 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
int ret = 0;
if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
- cma_disable_callback(id_priv, CMA_CONNECT)) ||
+ cma_disable_callback(id_priv, RDMA_CM_CONNECT)) ||
(ib_event->event == IB_CM_TIMEWAIT_EXIT &&
- cma_disable_callback(id_priv, CMA_DISCONNECT)))
+ cma_disable_callback(id_priv, RDMA_CM_DISCONNECT)))
return 0;
memset(&event, 0, sizeof event);
@@ -1005,15 +1123,13 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
event.status = -ETIMEDOUT;
break;
case IB_CM_REP_RECEIVED:
- event.status = cma_verify_rep(id_priv, ib_event->private_data);
- if (event.status)
- event.event = RDMA_CM_EVENT_CONNECT_ERROR;
- else if (id_priv->id.qp && id_priv->id.ps != RDMA_PS_SDP) {
+ if (id_priv->id.qp) {
event.status = cma_rep_recv(id_priv);
event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
RDMA_CM_EVENT_ESTABLISHED;
- } else
+ } else {
event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
+ }
cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
ib_event->private_data);
break;
@@ -1025,7 +1141,8 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
event.status = -ETIMEDOUT; /* fall through */
case IB_CM_DREQ_RECEIVED:
case IB_CM_DREP_RECEIVED:
- if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT))
+ if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT,
+ RDMA_CM_DISCONNECT))
goto out;
event.event = RDMA_CM_EVENT_DISCONNECTED;
break;
@@ -1052,7 +1169,7 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
if (ret) {
/* Destroy the CM ID by returning a non-zero value. */
id_priv->cm_id.ib = NULL;
- cma_exch(id_priv, CMA_DESTROYING);
+ cma_exch(id_priv, RDMA_CM_DESTROYING);
mutex_unlock(&id_priv->handler_mutex);
rdma_destroy_id(&id_priv->id);
return ret;
@@ -1068,53 +1185,44 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
struct rdma_id_private *id_priv;
struct rdma_cm_id *id;
struct rdma_route *rt;
- union cma_ip_addr *src, *dst;
- __be16 port;
- u8 ip_ver;
int ret;
- if (cma_get_net_info(ib_event->private_data, listen_id->ps,
- &ip_ver, &port, &src, &dst))
- goto err;
-
id = rdma_create_id(listen_id->event_handler, listen_id->context,
- listen_id->ps);
+ listen_id->ps, ib_event->param.req_rcvd.qp_type);
if (IS_ERR(id))
- goto err;
+ return NULL;
- cma_save_net_info(&id->route.addr, &listen_id->route.addr,
- ip_ver, port, src, dst);
+ id_priv = container_of(id, struct rdma_id_private, id);
+ if (cma_save_net_info(id, listen_id, ib_event))
+ goto err;
rt = &id->route;
rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
GFP_KERNEL);
if (!rt->path_rec)
- goto destroy_id;
+ goto err;
rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
if (rt->num_paths == 2)
rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;
- if (cma_any_addr((struct sockaddr *) &rt->addr.src_addr)) {
+ if (cma_any_addr(cma_src_addr(id_priv))) {
rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
- ib_addr_set_pkey(&rt->addr.dev_addr, rt->path_rec[0].pkey);
+ ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
} else {
- ret = rdma_translate_ip((struct sockaddr *) &rt->addr.src_addr,
- &rt->addr.dev_addr);
+ ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr);
if (ret)
- goto destroy_id;
+ goto err;
}
rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
- id_priv = container_of(id, struct rdma_id_private, id);
- id_priv->state = CMA_CONNECT;
+ id_priv->state = RDMA_CM_CONNECT;
return id_priv;
-destroy_id:
- rdma_destroy_id(id);
err:
+ rdma_destroy_id(id);
return NULL;
}
@@ -1123,33 +1231,24 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
{
struct rdma_id_private *id_priv;
struct rdma_cm_id *id;
- union cma_ip_addr *src, *dst;
- __be16 port;
- u8 ip_ver;
int ret;
id = rdma_create_id(listen_id->event_handler, listen_id->context,
- listen_id->ps);
+ listen_id->ps, IB_QPT_UD);
if (IS_ERR(id))
return NULL;
-
- if (cma_get_net_info(ib_event->private_data, listen_id->ps,
- &ip_ver, &port, &src, &dst))
+ id_priv = container_of(id, struct rdma_id_private, id);
+ if (cma_save_net_info(id, listen_id, ib_event))
goto err;
- cma_save_net_info(&id->route.addr, &listen_id->route.addr,
- ip_ver, port, src, dst);
-
if (!cma_any_addr((struct sockaddr *) &id->route.addr.src_addr)) {
- ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr,
- &id->route.addr.dev_addr);
+ ret = cma_translate_addr(cma_src_addr(id_priv), &id->route.addr.dev_addr);
if (ret)
goto err;
}
- id_priv = container_of(id, struct rdma_id_private, id);
- id_priv->state = CMA_CONNECT;
+ id_priv->state = RDMA_CM_CONNECT;
return id_priv;
err:
rdma_destroy_id(id);
@@ -1171,6 +1270,15 @@ static void cma_set_req_event_data(struct rdma_cm_event *event,
event->param.conn.qp_num = req_data->remote_qpn;
}
+static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event)
+{
+ return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
+ (ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
+ ((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
+ (id->qp_type == IB_QPT_UD)) ||
+ (!id->qp_type));
+}
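/*
 * Sketch (not part of the patch): the acceptance rule above.  A REQ
 * must carry the QP type the listen id was created with; a SIDR REQ is
 * only accepted by UD listeners; a listen id created with qp_type 0
 * accepts either:
 *
 *	event			listener qp_type	accepted
 *	IB_CM_REQ_RECEIVED	== req's qp_type	yes
 *	IB_CM_SIDR_REQ_RECEIVED	IB_QPT_UD		yes
 *	any			0 (unset)		yes
 *	anything else		-			no
 */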
+
static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
struct rdma_id_private *listen_id, *conn_id;
@@ -1178,13 +1286,16 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
int offset, ret;
listen_id = cm_id->context;
- if (cma_disable_callback(listen_id, CMA_LISTEN))
+ if (!cma_check_req_qp_type(&listen_id->id, ib_event))
+ return -EINVAL;
+
+ if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
return -ECONNABORTED;
memset(&event, 0, sizeof event);
- offset = cma_user_data_offset(listen_id->id.ps);
+ offset = cma_user_data_offset(listen_id);
event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
- if (cma_is_ud_ps(listen_id->id.ps)) {
+ if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
conn_id = cma_new_udp_id(&listen_id->id, ib_event);
event.param.ud.private_data = ib_event->private_data + offset;
event.param.ud.private_data_len =
@@ -1196,93 +1307,89 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
}
if (!conn_id) {
ret = -ENOMEM;
- goto out;
+ goto err1;
}
mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
- mutex_lock(&lock);
- ret = cma_acquire_dev(conn_id);
- mutex_unlock(&lock);
+ ret = cma_acquire_dev(conn_id, listen_id);
if (ret)
- goto release_conn_id;
+ goto err2;
conn_id->cm_id.ib = cm_id;
cm_id->context = conn_id;
cm_id->cm_handler = cma_ib_handler;
+ /*
+ * Protect against the user destroying conn_id from another thread
+ * until we're done accessing it.
+ */
+ atomic_inc(&conn_id->refcount);
ret = conn_id->id.event_handler(&conn_id->id, &event);
- if (!ret) {
- /*
- * Acquire mutex to prevent user executing rdma_destroy_id()
- * while we're accessing the cm_id.
- */
- mutex_lock(&lock);
- if (cma_comp(conn_id, CMA_CONNECT) &&
- !cma_is_ud_ps(conn_id->id.ps))
- ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
- mutex_unlock(&lock);
- mutex_unlock(&conn_id->handler_mutex);
- goto out;
- }
+ if (ret)
+ goto err3;
+ /*
+ * Acquire mutex to prevent user executing rdma_destroy_id()
+ * while we're accessing the cm_id.
+ */
+ mutex_lock(&lock);
+ if (cma_comp(conn_id, RDMA_CM_CONNECT) &&
+ (conn_id->id.qp_type != IB_QPT_UD))
+ ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
+ mutex_unlock(&lock);
+ mutex_unlock(&conn_id->handler_mutex);
+ mutex_unlock(&listen_id->handler_mutex);
+ cma_deref_id(conn_id);
+ return 0;
+err3:
+ cma_deref_id(conn_id);
/* Destroy the CM ID by returning a non-zero value. */
conn_id->cm_id.ib = NULL;
-
-release_conn_id:
- cma_exch(conn_id, CMA_DESTROYING);
+err2:
+ cma_exch(conn_id, RDMA_CM_DESTROYING);
mutex_unlock(&conn_id->handler_mutex);
- rdma_destroy_id(&conn_id->id);
-
-out:
+err1:
mutex_unlock(&listen_id->handler_mutex);
+ if (conn_id)
+ rdma_destroy_id(&conn_id->id);
return ret;
}
-static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr)
+__be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr)
{
- return cpu_to_be64(((u64)ps << 16) + be16_to_cpu(cma_port(addr)));
+ if (addr->sa_family == AF_IB)
+ return ((struct sockaddr_ib *) addr)->sib_sid;
+
+ return cpu_to_be64(((u64)id->ps << 16) + be16_to_cpu(cma_port(addr)));
}
+EXPORT_SYMBOL(rdma_get_service_id);
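/*
 * Worked example (not part of the patch): for the IP families the
 * service ID packs the port space above the port, so RDMA_PS_TCP
 * (0x0106) on port 5000 yields
 *
 *	cpu_to_be64(((u64)0x0106 << 16) + 5000) == cpu_to_be64(0x01061388)
 *
 * while AF_IB callers supply sib_sid directly and get it back
 * unchanged, which is what lets them choose arbitrary service IDs.
 */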
static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
struct ib_cm_compare_data *compare)
{
struct cma_hdr *cma_data, *cma_mask;
- struct sdp_hh *sdp_data, *sdp_mask;
__be32 ip4_addr;
struct in6_addr ip6_addr;
memset(compare, 0, sizeof *compare);
cma_data = (void *) compare->data;
cma_mask = (void *) compare->mask;
- sdp_data = (void *) compare->data;
- sdp_mask = (void *) compare->mask;
switch (addr->sa_family) {
case AF_INET:
ip4_addr = ((struct sockaddr_in *) addr)->sin_addr.s_addr;
- if (ps == RDMA_PS_SDP) {
- sdp_set_ip_ver(sdp_data, 4);
- sdp_set_ip_ver(sdp_mask, 0xF);
- sdp_data->dst_addr.ip4.addr = ip4_addr;
- sdp_mask->dst_addr.ip4.addr = htonl(~0);
- } else {
- cma_set_ip_ver(cma_data, 4);
- cma_set_ip_ver(cma_mask, 0xF);
+ cma_set_ip_ver(cma_data, 4);
+ cma_set_ip_ver(cma_mask, 0xF);
+ if (!cma_any_addr(addr)) {
cma_data->dst_addr.ip4.addr = ip4_addr;
cma_mask->dst_addr.ip4.addr = htonl(~0);
}
break;
case AF_INET6:
ip6_addr = ((struct sockaddr_in6 *) addr)->sin6_addr;
- if (ps == RDMA_PS_SDP) {
- sdp_set_ip_ver(sdp_data, 6);
- sdp_set_ip_ver(sdp_mask, 0xF);
- sdp_data->dst_addr.ip6 = ip6_addr;
- memset(&sdp_mask->dst_addr.ip6, 0xFF,
- sizeof sdp_mask->dst_addr.ip6);
- } else {
- cma_set_ip_ver(cma_data, 6);
- cma_set_ip_ver(cma_mask, 0xF);
+ cma_set_ip_ver(cma_data, 6);
+ cma_set_ip_ver(cma_mask, 0xF);
+ if (!cma_any_addr(addr)) {
cma_data->dst_addr.ip6 = ip6_addr;
memset(&cma_mask->dst_addr.ip6, 0xFF,
sizeof cma_mask->dst_addr.ip6);
@@ -1297,10 +1404,11 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
struct rdma_id_private *id_priv = iw_id->context;
struct rdma_cm_event event;
- struct sockaddr_in *sin;
int ret = 0;
+ struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
+ struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
- if (cma_disable_callback(id_priv, CMA_CONNECT))
+ if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
return 0;
memset(&event, 0, sizeof event);
@@ -1309,13 +1417,15 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
event.event = RDMA_CM_EVENT_DISCONNECTED;
break;
case IW_CM_EVENT_CONNECT_REPLY:
- sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
- *sin = iw_event->local_addr;
- sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
- *sin = iw_event->remote_addr;
+ memcpy(cma_src_addr(id_priv), laddr,
+ rdma_addr_size(laddr));
+ memcpy(cma_dst_addr(id_priv), raddr,
+ rdma_addr_size(raddr));
switch (iw_event->status) {
case 0:
event.event = RDMA_CM_EVENT_ESTABLISHED;
+ event.param.conn.initiator_depth = iw_event->ird;
+ event.param.conn.responder_resources = iw_event->ord;
break;
case -ECONNRESET:
case -ECONNREFUSED:
@@ -1331,6 +1441,8 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
break;
case IW_CM_EVENT_ESTABLISHED:
event.event = RDMA_CM_EVENT_ESTABLISHED;
+ event.param.conn.initiator_depth = iw_event->ird;
+ event.param.conn.responder_resources = iw_event->ord;
break;
default:
BUG_ON(1);
@@ -1343,7 +1455,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
if (ret) {
/* Destroy the CM ID by returning a non-zero value. */
id_priv->cm_id.iw = NULL;
- cma_exch(id_priv, CMA_DESTROYING);
+ cma_exch(id_priv, RDMA_CM_DESTROYING);
mutex_unlock(&id_priv->handler_mutex);
rdma_destroy_id(&id_priv->id);
return ret;
@@ -1358,45 +1470,36 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
{
struct rdma_cm_id *new_cm_id;
struct rdma_id_private *listen_id, *conn_id;
- struct sockaddr_in *sin;
- struct net_device *dev = NULL;
struct rdma_cm_event event;
int ret;
struct ib_device_attr attr;
+ struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
+ struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
listen_id = cm_id->context;
- if (cma_disable_callback(listen_id, CMA_LISTEN))
+ if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
return -ECONNABORTED;
/* Create a new RDMA id for the new IW CM ID */
new_cm_id = rdma_create_id(listen_id->id.event_handler,
listen_id->id.context,
- RDMA_PS_TCP);
+ RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(new_cm_id)) {
ret = -ENOMEM;
goto out;
}
conn_id = container_of(new_cm_id, struct rdma_id_private, id);
mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
- conn_id->state = CMA_CONNECT;
+ conn_id->state = RDMA_CM_CONNECT;
- dev = ip_dev_find(&init_net, iw_event->local_addr.sin_addr.s_addr);
- if (!dev) {
- ret = -EADDRNOTAVAIL;
- mutex_unlock(&conn_id->handler_mutex);
- rdma_destroy_id(new_cm_id);
- goto out;
- }
- ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
+ ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr, NULL);
if (ret) {
mutex_unlock(&conn_id->handler_mutex);
rdma_destroy_id(new_cm_id);
goto out;
}
- mutex_lock(&lock);
- ret = cma_acquire_dev(conn_id);
- mutex_unlock(&lock);
+ ret = cma_acquire_dev(conn_id, listen_id);
if (ret) {
mutex_unlock(&conn_id->handler_mutex);
rdma_destroy_id(new_cm_id);
@@ -1407,10 +1510,8 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
cm_id->context = conn_id;
cm_id->cm_handler = cma_iw_handler;
- sin = (struct sockaddr_in *) &new_cm_id->route.addr.src_addr;
- *sin = iw_event->local_addr;
- sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr;
- *sin = iw_event->remote_addr;
+ memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr));
+ memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr));
ret = ib_query_device(conn_id->id.device, &attr);
if (ret) {
@@ -1423,23 +1524,29 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
event.param.conn.private_data = iw_event->private_data;
event.param.conn.private_data_len = iw_event->private_data_len;
- event.param.conn.initiator_depth = attr.max_qp_init_rd_atom;
- event.param.conn.responder_resources = attr.max_qp_rd_atom;
+ event.param.conn.initiator_depth = iw_event->ird;
+ event.param.conn.responder_resources = iw_event->ord;
+
+ /*
+ * Protect against the user destroying conn_id from another thread
+ * until we're done accessing it.
+ */
+ atomic_inc(&conn_id->refcount);
ret = conn_id->id.event_handler(&conn_id->id, &event);
if (ret) {
/* User wants to destroy the CM ID */
conn_id->cm_id.iw = NULL;
- cma_exch(conn_id, CMA_DESTROYING);
+ cma_exch(conn_id, RDMA_CM_DESTROYING);
mutex_unlock(&conn_id->handler_mutex);
+ cma_deref_id(conn_id);
rdma_destroy_id(&conn_id->id);
goto out;
}
mutex_unlock(&conn_id->handler_mutex);
+ cma_deref_id(conn_id);
out:
- if (dev)
- dev_put(dev);
mutex_unlock(&listen_id->handler_mutex);
return ret;
}
@@ -1448,17 +1555,19 @@ static int cma_ib_listen(struct rdma_id_private *id_priv)
{
struct ib_cm_compare_data compare_data;
struct sockaddr *addr;
+ struct ib_cm_id *id;
__be64 svc_id;
int ret;
- id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_req_handler,
- id_priv);
- if (IS_ERR(id_priv->cm_id.ib))
- return PTR_ERR(id_priv->cm_id.ib);
+ id = ib_create_cm_id(id_priv->id.device, cma_req_handler, id_priv);
+ if (IS_ERR(id))
+ return PTR_ERR(id);
- addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
- svc_id = cma_get_service_id(id_priv->id.ps, addr);
- if (cma_any_addr(addr))
+ id_priv->cm_id.ib = id;
+
+ addr = cma_src_addr(id_priv);
+ svc_id = rdma_get_service_id(&id_priv->id, addr);
+ if (cma_any_addr(addr) && !id_priv->afonly)
ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
else {
cma_set_compare_data(id_priv->id.ps, addr, &compare_data);
@@ -1476,16 +1585,18 @@ static int cma_ib_listen(struct rdma_id_private *id_priv)
static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
{
int ret;
- struct sockaddr_in *sin;
+ struct iw_cm_id *id;
+
+ id = iw_create_cm_id(id_priv->id.device,
+ iw_conn_req_handler,
+ id_priv);
+ if (IS_ERR(id))
+ return PTR_ERR(id);
- id_priv->cm_id.iw = iw_create_cm_id(id_priv->id.device,
- iw_conn_req_handler,
- id_priv);
- if (IS_ERR(id_priv->cm_id.iw))
- return PTR_ERR(id_priv->cm_id.iw);
+ id_priv->cm_id.iw = id;
- sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
- id_priv->cm_id.iw->local_addr = *sin;
+ memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv),
+ rdma_addr_size(cma_src_addr(id_priv)));
ret = iw_cm_listen(id_priv->cm_id.iw, backlog);
@@ -1514,20 +1625,26 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
struct rdma_cm_id *id;
int ret;
- id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps);
+ if (cma_family(id_priv) == AF_IB &&
+ rdma_node_get_transport(cma_dev->device->node_type) != RDMA_TRANSPORT_IB)
+ return;
+
+ id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps,
+ id_priv->id.qp_type);
if (IS_ERR(id))
return;
dev_id_priv = container_of(id, struct rdma_id_private, id);
- dev_id_priv->state = CMA_ADDR_BOUND;
- memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
- ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr));
+ dev_id_priv->state = RDMA_CM_ADDR_BOUND;
+ memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv),
+ rdma_addr_size(cma_src_addr(id_priv)));
cma_attach_to_dev(dev_id_priv, cma_dev);
list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
atomic_inc(&id_priv->refcount);
dev_id_priv->internal_id = 1;
+ dev_id_priv->afonly = id_priv->afonly;
ret = rdma_listen(id, id_priv->backlog);
if (ret)
@@ -1546,50 +1663,6 @@ static void cma_listen_on_all(struct rdma_id_private *id_priv)
mutex_unlock(&lock);
}
-int rdma_listen(struct rdma_cm_id *id, int backlog)
-{
- struct rdma_id_private *id_priv;
- int ret;
-
- id_priv = container_of(id, struct rdma_id_private, id);
- if (id_priv->state == CMA_IDLE) {
- ((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET;
- ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr);
- if (ret)
- return ret;
- }
-
- if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
- return -EINVAL;
-
- id_priv->backlog = backlog;
- if (id->device) {
- switch (rdma_node_get_transport(id->device->node_type)) {
- case RDMA_TRANSPORT_IB:
- ret = cma_ib_listen(id_priv);
- if (ret)
- goto err;
- break;
- case RDMA_TRANSPORT_IWARP:
- ret = cma_iw_listen(id_priv, backlog);
- if (ret)
- goto err;
- break;
- default:
- ret = -ENOSYS;
- goto err;
- }
- } else
- cma_listen_on_all(id_priv);
-
- return 0;
-err:
- id_priv->backlog = 0;
- cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND);
- return ret;
-}
-EXPORT_SYMBOL(rdma_listen);
-
void rdma_set_service_type(struct rdma_cm_id *id, int tos)
{
struct rdma_id_private *id_priv;
@@ -1611,8 +1684,8 @@ static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
route->num_paths = 1;
*route->path_rec = *path_rec;
} else {
- work->old_state = CMA_ROUTE_QUERY;
- work->new_state = CMA_ADDR_RESOLVED;
+ work->old_state = RDMA_CM_ROUTE_QUERY;
+ work->new_state = RDMA_CM_ADDR_RESOLVED;
work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
work->event.status = status;
}
@@ -1623,31 +1696,39 @@ static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
struct cma_work *work)
{
- struct rdma_addr *addr = &id_priv->id.route.addr;
+ struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
struct ib_sa_path_rec path_rec;
ib_sa_comp_mask comp_mask;
struct sockaddr_in6 *sin6;
+ struct sockaddr_ib *sib;
memset(&path_rec, 0, sizeof path_rec);
- rdma_addr_get_sgid(&addr->dev_addr, &path_rec.sgid);
- rdma_addr_get_dgid(&addr->dev_addr, &path_rec.dgid);
- path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(&addr->dev_addr));
+ rdma_addr_get_sgid(dev_addr, &path_rec.sgid);
+ rdma_addr_get_dgid(dev_addr, &path_rec.dgid);
+ path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
path_rec.numb_path = 1;
path_rec.reversible = 1;
- path_rec.service_id = cma_get_service_id(id_priv->id.ps,
- (struct sockaddr *) &addr->dst_addr);
+ path_rec.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;
- if (addr->src_addr.ss_family == AF_INET) {
+ switch (cma_family(id_priv)) {
+ case AF_INET:
path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
- } else {
- sin6 = (struct sockaddr_in6 *) &addr->src_addr;
+ break;
+ case AF_INET6:
+ sin6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
+ break;
+ case AF_IB:
+ sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
+ path_rec.traffic_class = (u8) (be32_to_cpu(sib->sib_flowinfo) >> 20);
+ comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
+ break;
}
id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
@@ -1670,7 +1751,7 @@ static void cma_work_handler(struct work_struct *_work)
goto out;
if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
- cma_exch(id_priv, CMA_DESTROYING);
+ cma_exch(id_priv, RDMA_CM_DESTROYING);
destroy = 1;
}
out:
@@ -1688,12 +1769,12 @@ static void cma_ndev_work_handler(struct work_struct *_work)
int destroy = 0;
mutex_lock(&id_priv->handler_mutex);
- if (id_priv->state == CMA_DESTROYING ||
- id_priv->state == CMA_DEVICE_REMOVAL)
+ if (id_priv->state == RDMA_CM_DESTROYING ||
+ id_priv->state == RDMA_CM_DEVICE_REMOVAL)
goto out;
if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
- cma_exch(id_priv, CMA_DESTROYING);
+ cma_exch(id_priv, RDMA_CM_DESTROYING);
destroy = 1;
}
@@ -1717,8 +1798,8 @@ static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
work->id = id_priv;
INIT_WORK(&work->work, cma_work_handler);
- work->old_state = CMA_ROUTE_QUERY;
- work->new_state = CMA_ROUTE_RESOLVED;
+ work->old_state = RDMA_CM_ROUTE_QUERY;
+ work->new_state = RDMA_CM_ROUTE_RESOLVED;
work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
@@ -1747,7 +1828,8 @@ int rdma_set_ib_paths(struct rdma_cm_id *id,
int ret;
id_priv = container_of(id, struct rdma_id_private, id);
- if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED))
+ if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
+ RDMA_CM_ROUTE_RESOLVED))
return -EINVAL;
id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths,
@@ -1760,7 +1842,7 @@ int rdma_set_ib_paths(struct rdma_cm_id *id,
id->route.num_paths = num_paths;
return 0;
err:
- cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED);
+ cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED);
return ret;
}
EXPORT_SYMBOL(rdma_set_ib_paths);
@@ -1775,26 +1857,41 @@ static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
work->id = id_priv;
INIT_WORK(&work->work, cma_work_handler);
- work->old_state = CMA_ROUTE_QUERY;
- work->new_state = CMA_ROUTE_RESOLVED;
+ work->old_state = RDMA_CM_ROUTE_QUERY;
+ work->new_state = RDMA_CM_ROUTE_RESOLVED;
work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
queue_work(cma_wq, &work->work);
return 0;
}
+static int iboe_tos_to_sl(struct net_device *ndev, int tos)
+{
+ int prio;
+ struct net_device *dev;
+
+ prio = rt_tos2priority(tos);
+ dev = ndev->priv_flags & IFF_802_1Q_VLAN ?
+ vlan_dev_real_dev(ndev) : ndev;
+
+ if (dev->num_tc)
+ return netdev_get_prio_tc_map(dev, prio);
+
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
+ if (ndev->priv_flags & IFF_802_1Q_VLAN)
+ return (vlan_dev_get_egress_qos_mask(ndev, prio) &
+ VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+#endif
+ return 0;
+}
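/*
 * Sketch (not part of the patch): precedence in iboe_tos_to_sl().  The
 * TOS byte is first folded to an skb priority via rt_tos2priority();
 * if the underlying real (non-VLAN) device has traffic classes
 * configured, the netdev prio-to-tc map decides the SL; failing that,
 * on a VLAN device the 802.1q egress QoS map decides; otherwise SL 0.
 */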
+
static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
{
struct rdma_route *route = &id_priv->id.route;
struct rdma_addr *addr = &route->addr;
struct cma_work *work;
int ret;
- struct sockaddr_in *src_addr = (struct sockaddr_in *)&route->addr.src_addr;
- struct sockaddr_in *dst_addr = (struct sockaddr_in *)&route->addr.dst_addr;
struct net_device *ndev = NULL;
- u16 vid;
- if (src_addr->sin_family != dst_addr->sin_family)
- return -EINVAL;
work = kzalloc(sizeof *work, GFP_KERNEL);
if (!work)
@@ -1818,17 +1915,20 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
goto err2;
}
- vid = rdma_vlan_dev_vlan_id(ndev);
+ route->path_rec->vlan_id = rdma_vlan_dev_vlan_id(ndev);
+ memcpy(route->path_rec->dmac, addr->dev_addr.dst_dev_addr, ETH_ALEN);
+ memcpy(route->path_rec->smac, ndev->dev_addr, ndev->addr_len);
- iboe_mac_vlan_to_ll(&route->path_rec->sgid, addr->dev_addr.src_dev_addr, vid);
- iboe_mac_vlan_to_ll(&route->path_rec->dgid, addr->dev_addr.dst_dev_addr, vid);
+ rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
+ &route->path_rec->sgid);
+ rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr,
+ &route->path_rec->dgid);
route->path_rec->hop_limit = 1;
route->path_rec->reversible = 1;
route->path_rec->pkey = cpu_to_be16(0xffff);
route->path_rec->mtu_selector = IB_SA_EQ;
- route->path_rec->sl = id_priv->tos >> 5;
-
+ route->path_rec->sl = iboe_tos_to_sl(ndev, id_priv->tos);
route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
route->path_rec->rate_selector = IB_SA_EQ;
route->path_rec->rate = iboe_get_rate(ndev);
@@ -1840,8 +1940,8 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
goto err2;
}
- work->old_state = CMA_ROUTE_QUERY;
- work->new_state = CMA_ROUTE_RESOLVED;
+ work->old_state = RDMA_CM_ROUTE_QUERY;
+ work->new_state = RDMA_CM_ROUTE_RESOLVED;
work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
work->event.status = 0;
@@ -1863,7 +1963,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
int ret;
id_priv = container_of(id, struct rdma_id_private, id);
- if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_QUERY))
+ if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
return -EINVAL;
atomic_inc(&id_priv->refcount);
@@ -1892,34 +1992,63 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
return 0;
err:
- cma_comp_exch(id_priv, CMA_ROUTE_QUERY, CMA_ADDR_RESOLVED);
+ cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
cma_deref_id(id_priv);
return ret;
}
EXPORT_SYMBOL(rdma_resolve_route);
+static void cma_set_loopback(struct sockaddr *addr)
+{
+ switch (addr->sa_family) {
+ case AF_INET:
+ ((struct sockaddr_in *) addr)->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+ break;
+ case AF_INET6:
+ ipv6_addr_set(&((struct sockaddr_in6 *) addr)->sin6_addr,
+ 0, 0, 0, htonl(1));
+ break;
+ default:
+ ib_addr_set(&((struct sockaddr_ib *) addr)->sib_addr,
+ 0, 0, 0, htonl(1));
+ break;
+ }
+}
+
static int cma_bind_loopback(struct rdma_id_private *id_priv)
{
- struct cma_device *cma_dev;
+ struct cma_device *cma_dev, *cur_dev;
struct ib_port_attr port_attr;
union ib_gid gid;
u16 pkey;
int ret;
u8 p;
+ cma_dev = NULL;
mutex_lock(&lock);
- if (list_empty(&dev_list)) {
+ list_for_each_entry(cur_dev, &dev_list, list) {
+ if (cma_family(id_priv) == AF_IB &&
+ rdma_node_get_transport(cur_dev->device->node_type) != RDMA_TRANSPORT_IB)
+ continue;
+
+ if (!cma_dev)
+ cma_dev = cur_dev;
+
+ for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
+ if (!ib_query_port(cur_dev->device, p, &port_attr) &&
+ port_attr.state == IB_PORT_ACTIVE) {
+ cma_dev = cur_dev;
+ goto port_found;
+ }
+ }
+ }
+
+ if (!cma_dev) {
ret = -ENODEV;
goto out;
}
- list_for_each_entry(cma_dev, &dev_list, list)
- for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p)
- if (!ib_query_port(cma_dev->device, p, &port_attr) &&
- port_attr.state == IB_PORT_ACTIVE)
- goto port_found;
p = 1;
- cma_dev = list_entry(dev_list.next, struct cma_device, list);
port_found:
ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
@@ -1938,6 +2067,7 @@ port_found:
ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
id_priv->id.port_num = p;
cma_attach_to_dev(id_priv, cma_dev);
+ cma_set_loopback(cma_src_addr(id_priv));
out:
mutex_unlock(&lock);
return ret;
@@ -1951,34 +2081,25 @@ static void addr_handler(int status, struct sockaddr *src_addr,
memset(&event, 0, sizeof event);
mutex_lock(&id_priv->handler_mutex);
-
- /*
- * Grab mutex to block rdma_destroy_id() from removing the device while
- * we're trying to acquire it.
- */
- mutex_lock(&lock);
- if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) {
- mutex_unlock(&lock);
+ if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
+ RDMA_CM_ADDR_RESOLVED))
goto out;
- }
+ memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr));
if (!status && !id_priv->cma_dev)
- status = cma_acquire_dev(id_priv);
- mutex_unlock(&lock);
+ status = cma_acquire_dev(id_priv, NULL);
if (status) {
- if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
+ if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
+ RDMA_CM_ADDR_BOUND))
goto out;
event.event = RDMA_CM_EVENT_ADDR_ERROR;
event.status = status;
- } else {
- memcpy(&id_priv->id.route.addr.src_addr, src_addr,
- ip_addr_size(src_addr));
+ } else
event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
- }
if (id_priv->id.event_handler(&id_priv->id, &event)) {
- cma_exch(id_priv, CMA_DESTROYING);
+ cma_exch(id_priv, RDMA_CM_DESTROYING);
mutex_unlock(&id_priv->handler_mutex);
cma_deref_id(id_priv);
rdma_destroy_id(&id_priv->id);
@@ -1992,7 +2113,6 @@ out:
static int cma_resolve_loopback(struct rdma_id_private *id_priv)
{
struct cma_work *work;
- struct sockaddr *src, *dst;
union ib_gid gid;
int ret;
@@ -2009,22 +2129,40 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
- src = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
- if (cma_zero_addr(src)) {
- dst = (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
- if ((src->sa_family = dst->sa_family) == AF_INET) {
- ((struct sockaddr_in *) src)->sin_addr.s_addr =
- ((struct sockaddr_in *) dst)->sin_addr.s_addr;
- } else {
- ipv6_addr_copy(&((struct sockaddr_in6 *) src)->sin6_addr,
- &((struct sockaddr_in6 *) dst)->sin6_addr);
- }
+ work->id = id_priv;
+ INIT_WORK(&work->work, cma_work_handler);
+ work->old_state = RDMA_CM_ADDR_QUERY;
+ work->new_state = RDMA_CM_ADDR_RESOLVED;
+ work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
+ queue_work(cma_wq, &work->work);
+ return 0;
+err:
+ kfree(work);
+ return ret;
+}
+
+static int cma_resolve_ib_addr(struct rdma_id_private *id_priv)
+{
+ struct cma_work *work;
+ int ret;
+
+ work = kzalloc(sizeof *work, GFP_KERNEL);
+ if (!work)
+ return -ENOMEM;
+
+ if (!id_priv->cma_dev) {
+ ret = cma_resolve_ib_dev(id_priv);
+ if (ret)
+ goto err;
}
+ rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *)
+ &(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr));
+
work->id = id_priv;
INIT_WORK(&work->work, cma_work_handler);
- work->old_state = CMA_ADDR_QUERY;
- work->new_state = CMA_ADDR_RESOLVED;
+ work->old_state = RDMA_CM_ADDR_QUERY;
+ work->new_state = RDMA_CM_ADDR_RESOLVED;
work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
queue_work(cma_wq, &work->work);
return 0;
@@ -2038,9 +2176,13 @@ static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
{
if (!src_addr || !src_addr->sa_family) {
src_addr = (struct sockaddr *) &id->route.addr.src_addr;
- if ((src_addr->sa_family = dst_addr->sa_family) == AF_INET6) {
+ src_addr->sa_family = dst_addr->sa_family;
+ if (dst_addr->sa_family == AF_INET6) {
((struct sockaddr_in6 *) src_addr)->sin6_scope_id =
((struct sockaddr_in6 *) dst_addr)->sin6_scope_id;
+ } else if (dst_addr->sa_family == AF_IB) {
+ ((struct sockaddr_ib *) src_addr)->sib_pkey =
+ ((struct sockaddr_ib *) dst_addr)->sib_pkey;
}
}
return rdma_bind_addr(id, src_addr);
@@ -2053,41 +2195,107 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
int ret;
id_priv = container_of(id, struct rdma_id_private, id);
- if (id_priv->state == CMA_IDLE) {
+ if (id_priv->state == RDMA_CM_IDLE) {
ret = cma_bind_addr(id, src_addr, dst_addr);
if (ret)
return ret;
}
- if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_ADDR_QUERY))
+ if (cma_family(id_priv) != dst_addr->sa_family)
+ return -EINVAL;
+
+ if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY))
return -EINVAL;
atomic_inc(&id_priv->refcount);
- memcpy(&id->route.addr.dst_addr, dst_addr, ip_addr_size(dst_addr));
- if (cma_any_addr(dst_addr))
+ memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
+ if (cma_any_addr(dst_addr)) {
ret = cma_resolve_loopback(id_priv);
- else
- ret = rdma_resolve_ip(&addr_client, (struct sockaddr *) &id->route.addr.src_addr,
- dst_addr, &id->route.addr.dev_addr,
- timeout_ms, addr_handler, id_priv);
+ } else {
+ if (dst_addr->sa_family == AF_IB) {
+ ret = cma_resolve_ib_addr(id_priv);
+ } else {
+ ret = rdma_resolve_ip(&addr_client, cma_src_addr(id_priv),
+ dst_addr, &id->route.addr.dev_addr,
+ timeout_ms, addr_handler, id_priv);
+ }
+ }
if (ret)
goto err;
return 0;
err:
- cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND);
+ cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
cma_deref_id(id_priv);
return ret;
}
EXPORT_SYMBOL(rdma_resolve_addr);
+int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
+{
+ struct rdma_id_private *id_priv;
+ unsigned long flags;
+ int ret;
+
+ id_priv = container_of(id, struct rdma_id_private, id);
+ spin_lock_irqsave(&id_priv->lock, flags);
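+	/* Enabling reuse is always allowed; clearing it requires an idle id. */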
+ if (reuse || id_priv->state == RDMA_CM_IDLE) {
+ id_priv->reuseaddr = reuse;
+ ret = 0;
+ } else {
+ ret = -EINVAL;
+ }
+ spin_unlock_irqrestore(&id_priv->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(rdma_set_reuseaddr);
+
+int rdma_set_afonly(struct rdma_cm_id *id, int afonly)
+{
+ struct rdma_id_private *id_priv;
+ unsigned long flags;
+ int ret;
+
+ id_priv = container_of(id, struct rdma_id_private, id);
+ spin_lock_irqsave(&id_priv->lock, flags);
+ if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) {
+ id_priv->options |= (1 << CMA_OPTION_AFONLY);
+ id_priv->afonly = afonly;
+ ret = 0;
+ } else {
+ ret = -EINVAL;
+ }
+ spin_unlock_irqrestore(&id_priv->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(rdma_set_afonly);
+
static void cma_bind_port(struct rdma_bind_list *bind_list,
struct rdma_id_private *id_priv)
{
- struct sockaddr_in *sin;
+ struct sockaddr *addr;
+ struct sockaddr_ib *sib;
+ u64 sid, mask;
+ __be16 port;
+
+ addr = cma_src_addr(id_priv);
+ port = htons(bind_list->port);
- sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
- sin->sin_port = htons(bind_list->port);
+ switch (addr->sa_family) {
+ case AF_INET:
+ ((struct sockaddr_in *) addr)->sin_port = port;
+ break;
+ case AF_INET6:
+ ((struct sockaddr_in6 *) addr)->sin6_port = port;
+ break;
+ case AF_IB:
+ sib = (struct sockaddr_ib *) addr;
+ sid = be64_to_cpu(sib->sib_sid);
+ mask = be64_to_cpu(sib->sib_sid_mask);
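+		/* Fold the bound port into the low 16 bits of the service ID
+		 * and mark the SID as fully specified.
+		 */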
+ sib->sib_sid = cpu_to_be64((sid & mask) | (u64) ntohs(port));
+ sib->sib_sid_mask = cpu_to_be64(~0ULL);
+ break;
+ }
id_priv->bind_list = bind_list;
hlist_add_head(&id_priv->node, &bind_list->owners);
}
@@ -2096,33 +2304,23 @@ static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
unsigned short snum)
{
struct rdma_bind_list *bind_list;
- int port, ret;
+ int ret;
bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
if (!bind_list)
return -ENOMEM;
- do {
- ret = idr_get_new_above(ps, bind_list, snum, &port);
- } while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));
-
- if (ret)
- goto err1;
-
- if (port != snum) {
- ret = -EADDRNOTAVAIL;
- goto err2;
- }
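+	/* idr_alloc() returns the allocated id (snum) or -ENOSPC if taken. */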
+ ret = idr_alloc(ps, bind_list, snum, snum + 1, GFP_KERNEL);
+ if (ret < 0)
+ goto err;
bind_list->ps = ps;
- bind_list->port = (unsigned short) port;
+ bind_list->port = (unsigned short)ret;
cma_bind_port(bind_list, id_priv);
return 0;
-err2:
- idr_remove(ps, port);
-err1:
+err:
kfree(bind_list);
- return ret;
+ return ret == -ENOSPC ? -EADDRNOTAVAIL : ret;
}
static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
@@ -2131,9 +2329,9 @@ static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
int low, high, remaining;
unsigned int rover;
- inet_get_local_port_range(&low, &high);
+ inet_get_local_port_range(&init_net, &low, &high);
remaining = (high - low) + 1;
- rover = net_random() % remaining + low;
+ rover = prandom_u32() % remaining + low;
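+	/* Probe ports from a random offset, wrapping until the range is exhausted. */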
retry:
if (last_used_port != rover &&
!idr_find(ps, (unsigned short) rover)) {
@@ -2156,67 +2354,135 @@ retry:
return -EADDRNOTAVAIL;
}
-static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
+/*
+ * Check that the requested port is available. This is called when trying to
+ * bind to a specific port, or when trying to listen on a bound port. In
+ * the latter case, the provided id_priv may already be on the bind_list, but
+ * we still need to check that it's okay to start listening.
+ */
+static int cma_check_port(struct rdma_bind_list *bind_list,
+ struct rdma_id_private *id_priv, uint8_t reuseaddr)
{
struct rdma_id_private *cur_id;
- struct sockaddr_in *sin, *cur_sin;
- struct rdma_bind_list *bind_list;
- struct hlist_node *node;
- unsigned short snum;
+ struct sockaddr *addr, *cur_addr;
- sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
- snum = ntohs(sin->sin_port);
- if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
- return -EACCES;
+ addr = cma_src_addr(id_priv);
+ hlist_for_each_entry(cur_id, &bind_list->owners, node) {
+ if (id_priv == cur_id)
+ continue;
- bind_list = idr_find(ps, snum);
- if (!bind_list)
- return cma_alloc_port(ps, id_priv, snum);
+ if ((cur_id->state != RDMA_CM_LISTEN) && reuseaddr &&
+ cur_id->reuseaddr)
+ continue;
- /*
- * We don't support binding to any address if anyone is bound to
- * a specific address on the same port.
- */
- if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr))
- return -EADDRNOTAVAIL;
+ cur_addr = cma_src_addr(cur_id);
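+		/* Two AF-only ids in different families may share the port. */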
+ if (id_priv->afonly && cur_id->afonly &&
+ (addr->sa_family != cur_addr->sa_family))
+ continue;
- hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
- if (cma_any_addr((struct sockaddr *) &cur_id->id.route.addr.src_addr))
+ if (cma_any_addr(addr) || cma_any_addr(cur_addr))
return -EADDRNOTAVAIL;
- cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr;
- if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr)
+ if (!cma_addr_cmp(addr, cur_addr))
return -EADDRINUSE;
}
-
- cma_bind_port(bind_list, id_priv);
return 0;
}
-static int cma_get_port(struct rdma_id_private *id_priv)
+static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
{
- struct idr *ps;
+ struct rdma_bind_list *bind_list;
+ unsigned short snum;
int ret;
+ snum = ntohs(cma_port(cma_src_addr(id_priv)));
+ if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
+ return -EACCES;
+
+ bind_list = idr_find(ps, snum);
+ if (!bind_list) {
+ ret = cma_alloc_port(ps, id_priv, snum);
+ } else {
+ ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr);
+ if (!ret)
+ cma_bind_port(bind_list, id_priv);
+ }
+ return ret;
+}
+
+static int cma_bind_listen(struct rdma_id_private *id_priv)
+{
+ struct rdma_bind_list *bind_list = id_priv->bind_list;
+ int ret = 0;
+
+ mutex_lock(&lock);
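+	/* Only re-validate the port when other ids share this bind list. */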
+ if (bind_list->owners.first->next)
+ ret = cma_check_port(bind_list, id_priv, 0);
+ mutex_unlock(&lock);
+ return ret;
+}
+
+static struct idr *cma_select_inet_ps(struct rdma_id_private *id_priv)
+{
switch (id_priv->id.ps) {
- case RDMA_PS_SDP:
- ps = &sdp_ps;
- break;
case RDMA_PS_TCP:
- ps = &tcp_ps;
- break;
+ return &tcp_ps;
case RDMA_PS_UDP:
- ps = &udp_ps;
- break;
+ return &udp_ps;
case RDMA_PS_IPOIB:
- ps = &ipoib_ps;
- break;
+ return &ipoib_ps;
+ case RDMA_PS_IB:
+ return &ib_ps;
default:
- return -EPROTONOSUPPORT;
+ return NULL;
}
+}
+
+static struct idr *cma_select_ib_ps(struct rdma_id_private *id_priv)
+{
+ struct idr *ps = NULL;
+ struct sockaddr_ib *sib;
+ u64 sid_ps, mask, sid;
+
+ sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
+ mask = be64_to_cpu(sib->sib_sid_mask) & RDMA_IB_IP_PS_MASK;
+ sid = be64_to_cpu(sib->sib_sid) & mask;
+
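+	/* The SID prefix selects the port space; RDMA_PS_IB acts as a
+	 * wildcard that may match the IB, TCP, or UDP prefix.
+	 */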
+ if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) {
+ sid_ps = RDMA_IB_IP_PS_IB;
+ ps = &ib_ps;
+ } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) &&
+ (sid == (RDMA_IB_IP_PS_TCP & mask))) {
+ sid_ps = RDMA_IB_IP_PS_TCP;
+ ps = &tcp_ps;
+ } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) &&
+ (sid == (RDMA_IB_IP_PS_UDP & mask))) {
+ sid_ps = RDMA_IB_IP_PS_UDP;
+ ps = &udp_ps;
+ }
+
+ if (ps) {
+ sib->sib_sid = cpu_to_be64(sid_ps | ntohs(cma_port((struct sockaddr *) sib)));
+ sib->sib_sid_mask = cpu_to_be64(RDMA_IB_IP_PS_MASK |
+ be64_to_cpu(sib->sib_sid_mask));
+ }
+ return ps;
+}
+
+static int cma_get_port(struct rdma_id_private *id_priv)
+{
+ struct idr *ps;
+ int ret;
+
+ if (cma_family(id_priv) != AF_IB)
+ ps = cma_select_inet_ps(id_priv);
+ else
+ ps = cma_select_ib_ps(id_priv);
+ if (!ps)
+ return -EPROTONOSUPPORT;
mutex_lock(&lock);
- if (cma_any_port((struct sockaddr *) &id_priv->id.route.addr.src_addr))
+ if (cma_any_port(cma_src_addr(id_priv)))
ret = cma_alloc_any_port(ps, id_priv);
else
ret = cma_use_port(ps, id_priv);
@@ -2228,15 +2494,18 @@ static int cma_get_port(struct rdma_id_private *id_priv)
static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
struct sockaddr *addr)
{
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
struct sockaddr_in6 *sin6;
if (addr->sa_family != AF_INET6)
return 0;
sin6 = (struct sockaddr_in6 *) addr;
- if ((ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) &&
- !sin6->sin6_scope_id)
+
+ if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL))
+ return 0;
+
+ if (!sin6->sin6_scope_id)
return -EINVAL;
dev_addr->bound_dev_if = sin6->sin6_scope_id;
@@ -2244,108 +2513,132 @@ static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
return 0;
}
+int rdma_listen(struct rdma_cm_id *id, int backlog)
+{
+ struct rdma_id_private *id_priv;
+ int ret;
+
+ id_priv = container_of(id, struct rdma_id_private, id);
+ if (id_priv->state == RDMA_CM_IDLE) {
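+		/* Listening on an unbound id: bind to the IPv4 wildcard first. */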
+ id->route.addr.src_addr.ss_family = AF_INET;
+ ret = rdma_bind_addr(id, cma_src_addr(id_priv));
+ if (ret)
+ return ret;
+ }
+
+ if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN))
+ return -EINVAL;
+
+ if (id_priv->reuseaddr) {
+ ret = cma_bind_listen(id_priv);
+ if (ret)
+ goto err;
+ }
+
+ id_priv->backlog = backlog;
+ if (id->device) {
+ switch (rdma_node_get_transport(id->device->node_type)) {
+ case RDMA_TRANSPORT_IB:
+ ret = cma_ib_listen(id_priv);
+ if (ret)
+ goto err;
+ break;
+ case RDMA_TRANSPORT_IWARP:
+ ret = cma_iw_listen(id_priv, backlog);
+ if (ret)
+ goto err;
+ break;
+ default:
+ ret = -ENOSYS;
+ goto err;
+ }
+ } else
+ cma_listen_on_all(id_priv);
+
+ return 0;
+err:
+ id_priv->backlog = 0;
+ cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND);
+ return ret;
+}
+EXPORT_SYMBOL(rdma_listen);
+
int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
struct rdma_id_private *id_priv;
int ret;
- if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6)
+ if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 &&
+ addr->sa_family != AF_IB)
return -EAFNOSUPPORT;
id_priv = container_of(id, struct rdma_id_private, id);
- if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND))
+ if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
return -EINVAL;
ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
if (ret)
goto err1;
+ memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr));
if (!cma_any_addr(addr)) {
- ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
+ ret = cma_translate_addr(addr, &id->route.addr.dev_addr);
if (ret)
goto err1;
- mutex_lock(&lock);
- ret = cma_acquire_dev(id_priv);
- mutex_unlock(&lock);
+ ret = cma_acquire_dev(id_priv, NULL);
if (ret)
goto err1;
}
- memcpy(&id->route.addr.src_addr, addr, ip_addr_size(addr));
+ if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) {
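+		/* Default: IPv4 ids are AF-only; IPv6 ids follow the bindv6only sysctl. */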
+ if (addr->sa_family == AF_INET)
+ id_priv->afonly = 1;
+#if IS_ENABLED(CONFIG_IPV6)
+ else if (addr->sa_family == AF_INET6)
+ id_priv->afonly = init_net.ipv6.sysctl.bindv6only;
+#endif
+ }
ret = cma_get_port(id_priv);
if (ret)
goto err2;
return 0;
err2:
- if (id_priv->cma_dev) {
- mutex_lock(&lock);
- cma_detach_from_dev(id_priv);
- mutex_unlock(&lock);
- }
+ if (id_priv->cma_dev)
+ cma_release_dev(id_priv);
err1:
- cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE);
+ cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
return ret;
}
EXPORT_SYMBOL(rdma_bind_addr);
-static int cma_format_hdr(void *hdr, enum rdma_port_space ps,
- struct rdma_route *route)
+static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv)
{
struct cma_hdr *cma_hdr;
- struct sdp_hh *sdp_hdr;
- if (route->addr.src_addr.ss_family == AF_INET) {
+ cma_hdr = hdr;
+ cma_hdr->cma_version = CMA_VERSION;
+ if (cma_family(id_priv) == AF_INET) {
struct sockaddr_in *src4, *dst4;
- src4 = (struct sockaddr_in *) &route->addr.src_addr;
- dst4 = (struct sockaddr_in *) &route->addr.dst_addr;
-
- switch (ps) {
- case RDMA_PS_SDP:
- sdp_hdr = hdr;
- if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION)
- return -EINVAL;
- sdp_set_ip_ver(sdp_hdr, 4);
- sdp_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
- sdp_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
- sdp_hdr->port = src4->sin_port;
- break;
- default:
- cma_hdr = hdr;
- cma_hdr->cma_version = CMA_VERSION;
- cma_set_ip_ver(cma_hdr, 4);
- cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
- cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
- cma_hdr->port = src4->sin_port;
- break;
- }
- } else {
+ src4 = (struct sockaddr_in *) cma_src_addr(id_priv);
+ dst4 = (struct sockaddr_in *) cma_dst_addr(id_priv);
+
+ cma_set_ip_ver(cma_hdr, 4);
+ cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
+ cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
+ cma_hdr->port = src4->sin_port;
+ } else if (cma_family(id_priv) == AF_INET6) {
struct sockaddr_in6 *src6, *dst6;
- src6 = (struct sockaddr_in6 *) &route->addr.src_addr;
- dst6 = (struct sockaddr_in6 *) &route->addr.dst_addr;
-
- switch (ps) {
- case RDMA_PS_SDP:
- sdp_hdr = hdr;
- if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION)
- return -EINVAL;
- sdp_set_ip_ver(sdp_hdr, 6);
- sdp_hdr->src_addr.ip6 = src6->sin6_addr;
- sdp_hdr->dst_addr.ip6 = dst6->sin6_addr;
- sdp_hdr->port = src6->sin6_port;
- break;
- default:
- cma_hdr = hdr;
- cma_hdr->cma_version = CMA_VERSION;
- cma_set_ip_ver(cma_hdr, 6);
- cma_hdr->src_addr.ip6 = src6->sin6_addr;
- cma_hdr->dst_addr.ip6 = dst6->sin6_addr;
- cma_hdr->port = src6->sin6_port;
- break;
- }
+ src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
+ dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv);
+
+ cma_set_ip_ver(cma_hdr, 6);
+ cma_hdr->src_addr.ip6 = src6->sin6_addr;
+ cma_hdr->dst_addr.ip6 = dst6->sin6_addr;
+ cma_hdr->port = src6->sin6_port;
}
return 0;
}
@@ -2358,7 +2651,7 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
int ret = 0;
- if (cma_disable_callback(id_priv, CMA_CONNECT))
+ if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
return 0;
memset(&event, 0, sizeof event);
@@ -2375,15 +2668,10 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
event.status = ib_event->param.sidr_rep_rcvd.status;
break;
}
- ret = cma_set_qkey(id_priv);
+ ret = cma_set_qkey(id_priv, rep->qkey);
if (ret) {
event.event = RDMA_CM_EVENT_ADDR_ERROR;
- event.status = -EINVAL;
- break;
- }
- if (id_priv->qkey != rep->qkey) {
- event.event = RDMA_CM_EVENT_UNREACHABLE;
- event.status = -EINVAL;
+ event.status = ret;
break;
}
ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
@@ -2404,7 +2692,7 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
if (ret) {
/* Destroy the CM ID by returning a non-zero value. */
id_priv->cm_id.ib = NULL;
- cma_exch(id_priv, CMA_DESTROYING);
+ cma_exch(id_priv, RDMA_CM_DESTROYING);
mutex_unlock(&id_priv->handler_mutex);
rdma_destroy_id(&id_priv->id);
return ret;
@@ -2418,34 +2706,45 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
struct rdma_conn_param *conn_param)
{
struct ib_cm_sidr_req_param req;
- struct rdma_route *route;
- int ret;
+ struct ib_cm_id *id;
+ void *private_data;
+ int offset, ret;
- req.private_data_len = sizeof(struct cma_hdr) +
- conn_param->private_data_len;
- req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
- if (!req.private_data)
- return -ENOMEM;
+ memset(&req, 0, sizeof req);
+ offset = cma_user_data_offset(id_priv);
+ req.private_data_len = offset + conn_param->private_data_len;
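+	/* private_data_len is a u8; adding the CMA header may wrap, so reject overflow. */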
+ if (req.private_data_len < conn_param->private_data_len)
+ return -EINVAL;
+
+ if (req.private_data_len) {
+ private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
+ if (!private_data)
+ return -ENOMEM;
+ } else {
+ private_data = NULL;
+ }
if (conn_param->private_data && conn_param->private_data_len)
- memcpy((void *) req.private_data + sizeof(struct cma_hdr),
- conn_param->private_data, conn_param->private_data_len);
+ memcpy(private_data + offset, conn_param->private_data,
+ conn_param->private_data_len);
- route = &id_priv->id.route;
- ret = cma_format_hdr((void *) req.private_data, id_priv->id.ps, route);
- if (ret)
- goto out;
+ if (private_data) {
+ ret = cma_format_hdr(private_data, id_priv);
+ if (ret)
+ goto out;
+ req.private_data = private_data;
+ }
- id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device,
- cma_sidr_rep_handler, id_priv);
- if (IS_ERR(id_priv->cm_id.ib)) {
- ret = PTR_ERR(id_priv->cm_id.ib);
+ id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler,
+ id_priv);
+ if (IS_ERR(id)) {
+ ret = PTR_ERR(id);
goto out;
}
+ id_priv->cm_id.ib = id;
- req.path = route->path_rec;
- req.service_id = cma_get_service_id(id_priv->id.ps,
- (struct sockaddr *) &route->addr.dst_addr);
+ req.path = id_priv->id.route.path_rec;
+ req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
req.max_cm_retries = CMA_MAX_CM_RETRIES;
@@ -2455,7 +2754,7 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
id_priv->cm_id.ib = NULL;
}
out:
- kfree(req.private_data);
+ kfree(private_data);
return ret;
}
@@ -2465,46 +2764,55 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
struct ib_cm_req_param req;
struct rdma_route *route;
void *private_data;
+ struct ib_cm_id *id;
int offset, ret;
memset(&req, 0, sizeof req);
- offset = cma_user_data_offset(id_priv->id.ps);
+ offset = cma_user_data_offset(id_priv);
req.private_data_len = offset + conn_param->private_data_len;
- private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
- if (!private_data)
- return -ENOMEM;
+ if (req.private_data_len < conn_param->private_data_len)
+ return -EINVAL;
+
+ if (req.private_data_len) {
+ private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
+ if (!private_data)
+ return -ENOMEM;
+ } else {
+ private_data = NULL;
+ }
if (conn_param->private_data && conn_param->private_data_len)
memcpy(private_data + offset, conn_param->private_data,
conn_param->private_data_len);
- id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_ib_handler,
- id_priv);
- if (IS_ERR(id_priv->cm_id.ib)) {
- ret = PTR_ERR(id_priv->cm_id.ib);
+ id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv);
+ if (IS_ERR(id)) {
+ ret = PTR_ERR(id);
goto out;
}
+ id_priv->cm_id.ib = id;
route = &id_priv->id.route;
- ret = cma_format_hdr(private_data, id_priv->id.ps, route);
- if (ret)
- goto out;
- req.private_data = private_data;
+ if (private_data) {
+ ret = cma_format_hdr(private_data, id_priv);
+ if (ret)
+ goto out;
+ req.private_data = private_data;
+ }
req.primary_path = &route->path_rec[0];
if (route->num_paths == 2)
req.alternate_path = &route->path_rec[1];
- req.service_id = cma_get_service_id(id_priv->id.ps,
- (struct sockaddr *) &route->addr.dst_addr);
+ req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
req.qp_num = id_priv->qp_num;
- req.qp_type = IB_QPT_RC;
+ req.qp_type = id_priv->id.qp_type;
req.starting_psn = id_priv->seq_num;
req.responder_resources = conn_param->responder_resources;
req.initiator_depth = conn_param->initiator_depth;
req.flow_control = conn_param->flow_control;
- req.retry_count = conn_param->retry_count;
- req.rnr_retry_count = conn_param->rnr_retry_count;
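+	/* The CM REQ carries retry counts in 3-bit fields; clamp at 7. */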
+ req.retry_count = min_t(u8, 7, conn_param->retry_count);
+ req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
req.max_cm_retries = CMA_MAX_CM_RETRIES;
@@ -2512,8 +2820,8 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
out:
- if (ret && !IS_ERR(id_priv->cm_id.ib)) {
- ib_destroy_cm_id(id_priv->cm_id.ib);
+ if (ret && !IS_ERR(id)) {
+ ib_destroy_cm_id(id);
id_priv->cm_id.ib = NULL;
}
@@ -2525,39 +2833,37 @@ static int cma_connect_iw(struct rdma_id_private *id_priv,
struct rdma_conn_param *conn_param)
{
struct iw_cm_id *cm_id;
- struct sockaddr_in* sin;
int ret;
struct iw_cm_conn_param iw_param;
cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
- if (IS_ERR(cm_id)) {
- ret = PTR_ERR(cm_id);
- goto out;
- }
+ if (IS_ERR(cm_id))
+ return PTR_ERR(cm_id);
id_priv->cm_id.iw = cm_id;
- sin = (struct sockaddr_in*) &id_priv->id.route.addr.src_addr;
- cm_id->local_addr = *sin;
-
- sin = (struct sockaddr_in*) &id_priv->id.route.addr.dst_addr;
- cm_id->remote_addr = *sin;
+ memcpy(&cm_id->local_addr, cma_src_addr(id_priv),
+ rdma_addr_size(cma_src_addr(id_priv)));
+ memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv),
+ rdma_addr_size(cma_dst_addr(id_priv)));
ret = cma_modify_qp_rtr(id_priv, conn_param);
if (ret)
goto out;
- iw_param.ord = conn_param->initiator_depth;
- iw_param.ird = conn_param->responder_resources;
- iw_param.private_data = conn_param->private_data;
- iw_param.private_data_len = conn_param->private_data_len;
- if (id_priv->id.qp)
+ if (conn_param) {
+ iw_param.ord = conn_param->initiator_depth;
+ iw_param.ird = conn_param->responder_resources;
+ iw_param.private_data = conn_param->private_data;
+ iw_param.private_data_len = conn_param->private_data_len;
+ iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num;
+ } else {
+ memset(&iw_param, 0, sizeof iw_param);
iw_param.qpn = id_priv->qp_num;
- else
- iw_param.qpn = conn_param->qp_num;
+ }
ret = iw_cm_connect(cm_id, &iw_param);
out:
- if (ret && !IS_ERR(cm_id)) {
+ if (ret) {
iw_destroy_cm_id(cm_id);
id_priv->cm_id.iw = NULL;
}
@@ -2570,7 +2876,7 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
int ret;
id_priv = container_of(id, struct rdma_id_private, id);
- if (!cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_CONNECT))
+ if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
return -EINVAL;
if (!id->qp) {
@@ -2580,7 +2886,7 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
switch (rdma_node_get_transport(id->device->node_type)) {
case RDMA_TRANSPORT_IB:
- if (cma_is_ud_ps(id->ps))
+ if (id->qp_type == IB_QPT_UD)
ret = cma_resolve_ib_udp(id_priv, conn_param);
else
ret = cma_connect_ib(id_priv, conn_param);
@@ -2597,7 +2903,7 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
return 0;
err:
- cma_comp_exch(id_priv, CMA_CONNECT, CMA_ROUTE_RESOLVED);
+ cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
return ret;
}
EXPORT_SYMBOL(rdma_connect);
@@ -2625,7 +2931,7 @@ static int cma_accept_ib(struct rdma_id_private *id_priv,
rep.initiator_depth = conn_param->initiator_depth;
rep.failover_accepted = 0;
rep.flow_control = conn_param->flow_control;
- rep.rnr_retry_count = conn_param->rnr_retry_count;
+ rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
rep.srq = id_priv->srq ? 1 : 0;
ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
@@ -2656,7 +2962,7 @@ static int cma_accept_iw(struct rdma_id_private *id_priv,
}
static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
- enum ib_cm_sidr_status status,
+ enum ib_cm_sidr_status status, u32 qkey,
const void *private_data, int private_data_len)
{
struct ib_cm_sidr_rep_param rep;
@@ -2665,7 +2971,7 @@ static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
memset(&rep, 0, sizeof rep);
rep.status = status;
if (status == IB_SIDR_SUCCESS) {
- ret = cma_set_qkey(id_priv);
+ ret = cma_set_qkey(id_priv, qkey);
if (ret)
return ret;
rep.qp_num = id_priv->qp_num;
@@ -2683,7 +2989,10 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
int ret;
id_priv = container_of(id, struct rdma_id_private, id);
- if (!cma_comp(id_priv, CMA_CONNECT))
+
+ id_priv->owner = task_pid_nr(current);
+
+ if (!cma_comp(id_priv, RDMA_CM_CONNECT))
return -EINVAL;
if (!id->qp && conn_param) {
@@ -2693,14 +3002,21 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
switch (rdma_node_get_transport(id->device->node_type)) {
case RDMA_TRANSPORT_IB:
- if (cma_is_ud_ps(id->ps))
- ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
- conn_param->private_data,
- conn_param->private_data_len);
- else if (conn_param)
- ret = cma_accept_ib(id_priv, conn_param);
- else
- ret = cma_rep_recv(id_priv);
+ if (id->qp_type == IB_QPT_UD) {
+ if (conn_param)
+ ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
+ conn_param->qkey,
+ conn_param->private_data,
+ conn_param->private_data_len);
+ else
+ ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
+ 0, NULL, 0);
+ } else {
+ if (conn_param)
+ ret = cma_accept_ib(id_priv, conn_param);
+ else
+ ret = cma_rep_recv(id_priv);
+ }
break;
case RDMA_TRANSPORT_IWARP:
ret = cma_accept_iw(id_priv, conn_param);
@@ -2727,7 +3043,7 @@ int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
int ret;
id_priv = container_of(id, struct rdma_id_private, id);
- if (!cma_has_cm_dev(id_priv))
+ if (!id_priv->cm_id.ib)
return -EINVAL;
switch (id->device->node_type) {
@@ -2749,13 +3065,13 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
int ret;
id_priv = container_of(id, struct rdma_id_private, id);
- if (!cma_has_cm_dev(id_priv))
+ if (!id_priv->cm_id.ib)
return -EINVAL;
switch (rdma_node_get_transport(id->device->node_type)) {
case RDMA_TRANSPORT_IB:
- if (cma_is_ud_ps(id->ps))
- ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT,
+ if (id->qp_type == IB_QPT_UD)
+ ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0,
private_data, private_data_len);
else
ret = ib_send_cm_rej(id_priv->cm_id.ib,
@@ -2780,7 +3096,7 @@ int rdma_disconnect(struct rdma_cm_id *id)
int ret;
id_priv = container_of(id, struct rdma_id_private, id);
- if (!cma_has_cm_dev(id_priv))
+ if (!id_priv->cm_id.ib)
return -EINVAL;
switch (rdma_node_get_transport(id->device->node_type)) {
@@ -2812,14 +3128,16 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
int ret;
id_priv = mc->id_priv;
- if (cma_disable_callback(id_priv, CMA_ADDR_BOUND) &&
- cma_disable_callback(id_priv, CMA_ADDR_RESOLVED))
+ if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) &&
+ cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED))
return 0;
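+	/* Adopt the SA-assigned QKey before attaching the QP to the group. */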
+ if (!status)
+ status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
mutex_lock(&id_priv->qp_mutex);
if (!status && id_priv->id.qp)
status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
- multicast->rec.mlid);
+ be16_to_cpu(multicast->rec.mlid));
mutex_unlock(&id_priv->qp_mutex);
memset(&event, 0, sizeof event);
@@ -2837,7 +3155,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
ret = id_priv->id.event_handler(&id_priv->id, &event);
if (ret) {
- cma_exch(id_priv, CMA_DESTROYING);
+ cma_exch(id_priv, RDMA_CM_DESTROYING);
mutex_unlock(&id_priv->handler_mutex);
rdma_destroy_id(&id_priv->id);
return 0;
@@ -2862,6 +3180,8 @@ static void cma_set_mgid(struct rdma_id_private *id_priv,
0xFF10A01B)) {
/* IPv6 address is an SA assigned MGID. */
memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
+ } else if (addr->sa_family == AF_IB) {
+ memcpy(mgid, &((struct sockaddr_ib *) addr)->sib_addr, sizeof *mgid);
} else if ((addr->sa_family == AF_INET6)) {
ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map);
if (id_priv->id.ps == RDMA_PS_UDP)
@@ -2889,9 +3209,12 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
if (ret)
return ret;
+ ret = cma_set_qkey(id_priv, 0);
+ if (ret)
+ return ret;
+
cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
- if (id_priv->id.ps == RDMA_PS_UDP)
- rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
+ rec.qkey = cpu_to_be32(id_priv->qkey);
rdma_addr_get_sgid(dev_addr, &rec.port_gid);
rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
rec.join_state = 1;
@@ -2904,16 +3227,16 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
if (id_priv->id.ps == RDMA_PS_IPOIB)
comp_mask |= IB_SA_MCMEMBER_REC_RATE |
- IB_SA_MCMEMBER_REC_RATE_SELECTOR;
+ IB_SA_MCMEMBER_REC_RATE_SELECTOR |
+ IB_SA_MCMEMBER_REC_MTU_SELECTOR |
+ IB_SA_MCMEMBER_REC_MTU |
+ IB_SA_MCMEMBER_REC_HOP_LIMIT;
mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
id_priv->id.port_num, &rec,
comp_mask, GFP_KERNEL,
cma_ib_mc_handler, mc);
- if (IS_ERR(mc->multicast.ib))
- return PTR_ERR(mc->multicast.ib);
-
- return 0;
+ return PTR_ERR_OR_ZERO(mc->multicast.ib);
}
static void iboe_mcast_work_handler(struct work_struct *work)
@@ -2996,7 +3319,8 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
err = -EINVAL;
goto out2;
}
- iboe_addr_get_sgid(dev_addr, &mc->multicast.ib->rec.port_gid);
+ rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
+ &mc->multicast.ib->rec.port_gid);
work->id = id_priv;
work->mc = mc;
INIT_WORK(&work->work, iboe_mcast_work_handler);
@@ -3020,15 +3344,15 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
int ret;
id_priv = container_of(id, struct rdma_id_private, id);
- if (!cma_comp(id_priv, CMA_ADDR_BOUND) &&
- !cma_comp(id_priv, CMA_ADDR_RESOLVED))
+ if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
+ !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
return -EINVAL;
mc = kmalloc(sizeof *mc, GFP_KERNEL);
if (!mc)
return -ENOMEM;
- memcpy(&mc->addr, addr, ip_addr_size(addr));
+ memcpy(&mc->addr, addr, rdma_addr_size(addr));
mc->context = context;
mc->id_priv = id_priv;
@@ -3073,14 +3397,14 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
id_priv = container_of(id, struct rdma_id_private, id);
spin_lock_irq(&id_priv->lock);
list_for_each_entry(mc, &id_priv->mc_list, list) {
- if (!memcmp(&mc->addr, addr, ip_addr_size(addr))) {
+ if (!memcmp(&mc->addr, addr, rdma_addr_size(addr))) {
list_del(&mc->list);
spin_unlock_irq(&id_priv->lock);
if (id->qp)
ib_detach_mcast(id->qp,
&mc->multicast.ib->rec.mgid,
- mc->multicast.ib->rec.mlid);
+ be16_to_cpu(mc->multicast.ib->rec.mlid));
if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) == RDMA_TRANSPORT_IB) {
switch (rdma_port_get_link_layer(id->device, id->port_num)) {
case IB_LINK_LAYER_INFINIBAND:
@@ -3127,9 +3451,9 @@ static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id
}
static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
- void *ctx)
+ void *ptr)
{
- struct net_device *ndev = (struct net_device *)ctx;
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
struct cma_device *cma_dev;
struct rdma_id_private *id_priv;
int ret = NOTIFY_DONE;
@@ -3186,19 +3510,19 @@ static void cma_add_one(struct ib_device *device)
static int cma_remove_id_dev(struct rdma_id_private *id_priv)
{
struct rdma_cm_event event;
- enum cma_state state;
+ enum rdma_cm_state state;
int ret = 0;
/* Record that we want to remove the device */
- state = cma_exch(id_priv, CMA_DEVICE_REMOVAL);
- if (state == CMA_DESTROYING)
+ state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL);
+ if (state == RDMA_CM_DESTROYING)
return 0;
cma_cancel_operation(id_priv, state);
mutex_lock(&id_priv->handler_mutex);
/* Check for destruction from another callback. */
- if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
+ if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL))
goto out;
memset(&event, 0, sizeof event);
@@ -3253,6 +3577,85 @@ static void cma_remove_one(struct ib_device *device)
kfree(cma_dev);
}
+static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct nlmsghdr *nlh;
+ struct rdma_cm_id_stats *id_stats;
+ struct rdma_id_private *id_priv;
+ struct rdma_cm_id *id = NULL;
+ struct cma_device *cma_dev;
+ int i_dev = 0, i_id = 0;
+
+ /*
+ * We export all of the IDs as a sequence of messages. Each
+ * ID gets its own netlink message.
+ */
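+	/* cb->args[0]/args[1] hold the device/id cursors so a partial dump can resume. */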
+ mutex_lock(&lock);
+
+ list_for_each_entry(cma_dev, &dev_list, list) {
+ if (i_dev < cb->args[0]) {
+ i_dev++;
+ continue;
+ }
+
+ i_id = 0;
+ list_for_each_entry(id_priv, &cma_dev->id_list, list) {
+ if (i_id < cb->args[1]) {
+ i_id++;
+ continue;
+ }
+
+ id_stats = ibnl_put_msg(skb, &nlh, cb->nlh->nlmsg_seq,
+ sizeof *id_stats, RDMA_NL_RDMA_CM,
+ RDMA_NL_RDMA_CM_ID_STATS,
+ NLM_F_MULTI);
+ if (!id_stats)
+ goto out;
+
+ memset(id_stats, 0, sizeof *id_stats);
+ id = &id_priv->id;
+ id_stats->node_type = id->route.addr.dev_addr.dev_type;
+ id_stats->port_num = id->port_num;
+ id_stats->bound_dev_if =
+ id->route.addr.dev_addr.bound_dev_if;
+
+ if (ibnl_put_attr(skb, nlh,
+ rdma_addr_size(cma_src_addr(id_priv)),
+ cma_src_addr(id_priv),
+ RDMA_NL_RDMA_CM_ATTR_SRC_ADDR))
+ goto out;
+ if (ibnl_put_attr(skb, nlh,
+				  rdma_addr_size(cma_dst_addr(id_priv)),
+ cma_dst_addr(id_priv),
+ RDMA_NL_RDMA_CM_ATTR_DST_ADDR))
+ goto out;
+
+ id_stats->pid = id_priv->owner;
+ id_stats->port_space = id->ps;
+ id_stats->cm_state = id_priv->state;
+ id_stats->qp_num = id_priv->qp_num;
+ id_stats->qp_type = id->qp_type;
+
+ i_id++;
+ }
+
+ cb->args[1] = 0;
+ i_dev++;
+ }
+
+out:
+ mutex_unlock(&lock);
+ cb->args[0] = i_dev;
+ cb->args[1] = i_id;
+
+ return skb->len;
+}
+
+static const struct ibnl_client_cbs cma_cb_table[] = {
+ [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats,
+ .module = THIS_MODULE },
+};
+
static int __init cma_init(void)
{
int ret;
@@ -3268,6 +3671,10 @@ static int __init cma_init(void)
ret = ib_register_client(&cma_client);
if (ret)
goto err;
+
+ if (ibnl_add_client(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_NUM_OPS, cma_cb_table))
+ printk(KERN_WARNING "RDMA CMA: failed to add netlink callback\n");
+
return 0;
err:
@@ -3280,15 +3687,16 @@ err:
static void __exit cma_cleanup(void)
{
+ ibnl_remove_client(RDMA_NL_RDMA_CM);
ib_unregister_client(&cma_client);
unregister_netdevice_notifier(&cma_nb);
rdma_addr_unregister_client(&addr_client);
ib_sa_unregister_client(&sa_client);
destroy_workqueue(cma_wq);
- idr_destroy(&sdp_ps);
idr_destroy(&tcp_ps);
idr_destroy(&udp_ps);
idr_destroy(&ipoib_ps);
+ idr_destroy(&ib_ps);
}
module_init(cma_init);