Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/addr.c                 | 30
-rw-r--r--  drivers/infiniband/core/cache.c                |  3
-rw-r--r--  drivers/infiniband/core/cm.c                   |  4
-rw-r--r--  drivers/infiniband/core/sa_query.c             |  3
-rw-r--r--  drivers/infiniband/core/uverbs.h               |  2
-rw-r--r--  drivers/infiniband/core/uverbs_main.c          |  8
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_allocator.c  | 30
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_main.c       |  6
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c   | 11
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.h   |  4
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c         | 54
-rw-r--r--  drivers/infiniband/ulp/ipoib/Kconfig           |  3
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.c       | 22
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c            | 19
14 files changed, 120 insertions(+), 79 deletions(-)
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index d294bbc42f0..1205e802782 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -35,6 +35,7 @@ #include <net/arp.h> #include <net/neighbour.h> #include <net/route.h> +#include <net/netevent.h> #include <rdma/ib_addr.h> MODULE_AUTHOR("Sean Hefty"); @@ -326,25 +327,22 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr) } EXPORT_SYMBOL(rdma_addr_cancel); -static int addr_arp_recv(struct sk_buff *skb, struct net_device *dev, - struct packet_type *pkt, struct net_device *orig_dev) +static int netevent_callback(struct notifier_block *self, unsigned long event, + void *ctx) { - struct arphdr *arp_hdr; + if (event == NETEVENT_NEIGH_UPDATE) { + struct neighbour *neigh = ctx; - arp_hdr = (struct arphdr *) skb->nh.raw; - - if (arp_hdr->ar_op == htons(ARPOP_REQUEST) || - arp_hdr->ar_op == htons(ARPOP_REPLY)) - set_timeout(jiffies); - - kfree_skb(skb); + if (neigh->dev->type == ARPHRD_INFINIBAND && + (neigh->nud_state & NUD_VALID)) { + set_timeout(jiffies); + } + } return 0; } -static struct packet_type addr_arp = { - .type = __constant_htons(ETH_P_ARP), - .func = addr_arp_recv, - .af_packet_priv = (void*) 1, +static struct notifier_block nb = { + .notifier_call = netevent_callback }; static int addr_init(void) @@ -353,13 +351,13 @@ static int addr_init(void) if (!addr_wq) return -ENOMEM; - dev_add_pack(&addr_arp); + register_netevent_notifier(&nb); return 0; } static void addr_cleanup(void) { - dev_remove_pack(&addr_arp); + unregister_netevent_notifier(&nb); destroy_workqueue(addr_wq); } diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index e05ca2cdc73..75313ade2e0 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -301,7 +301,8 @@ static void ib_cache_event(struct ib_event_handler *handler, event->event == IB_EVENT_PORT_ACTIVE || event->event == IB_EVENT_LID_CHANGE || event->event == IB_EVENT_PKEY_CHANGE || - event->event == IB_EVENT_SM_CHANGE) { + event->event == IB_EVENT_SM_CHANGE || + event->event == IB_EVENT_CLIENT_REREGISTER) { work = kmalloc(sizeof *work, GFP_ATOMIC); if (work) { INIT_WORK(&work->work, ib_cache_task, work); diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index f85c97f7500..0de335b7bfc 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -975,8 +975,10 @@ int ib_send_cm_req(struct ib_cm_id *cm_id, cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv-> id.local_id); - if (IS_ERR(cm_id_priv->timewait_info)) + if (IS_ERR(cm_id_priv->timewait_info)) { + ret = PTR_ERR(cm_id_priv->timewait_info); goto out; + } ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av); if (ret) diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index aeda484ffd8..d6b84226bba 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -405,7 +405,8 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event event->event == IB_EVENT_PORT_ACTIVE || event->event == IB_EVENT_LID_CHANGE || event->event == IB_EVENT_PKEY_CHANGE || - event->event == IB_EVENT_SM_CHANGE) { + event->event == IB_EVENT_SM_CHANGE || + event->event == IB_EVENT_CLIENT_REREGISTER) { struct ib_sa_device *sa_dev; sa_dev = container_of(handler, typeof(*sa_dev), event_handler); diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h index bb9bee56a82..102a59c033f 100644 --- 
a/drivers/infiniband/core/uverbs.h +++ b/drivers/infiniband/core/uverbs.h @@ -42,6 +42,7 @@ #include <linux/kref.h> #include <linux/idr.h> #include <linux/mutex.h> +#include <linux/completion.h> #include <rdma/ib_verbs.h> #include <rdma/ib_user_verbs.h> @@ -69,6 +70,7 @@ struct ib_uverbs_device { struct kref ref; + struct completion comp; int devnum; struct cdev *dev; struct class_device *class_dev; diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index e725cccc7cd..4e16314e8e6 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -122,7 +122,7 @@ static void ib_uverbs_release_dev(struct kref *ref) struct ib_uverbs_device *dev = container_of(ref, struct ib_uverbs_device, ref); - kfree(dev); + complete(&dev->comp); } void ib_uverbs_release_ucq(struct ib_uverbs_file *file, @@ -740,6 +740,7 @@ static void ib_uverbs_add_one(struct ib_device *device) return; kref_init(&uverbs_dev->ref); + init_completion(&uverbs_dev->comp); spin_lock(&map_lock); uverbs_dev->devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES); @@ -793,6 +794,8 @@ err_cdev: err: kref_put(&uverbs_dev->ref, ib_uverbs_release_dev); + wait_for_completion(&uverbs_dev->comp); + kfree(uverbs_dev); return; } @@ -812,7 +815,10 @@ static void ib_uverbs_remove_one(struct ib_device *device) spin_unlock(&map_lock); clear_bit(uverbs_dev->devnum, dev_map); + kref_put(&uverbs_dev->ref, ib_uverbs_release_dev); + wait_for_completion(&uverbs_dev->comp); + kfree(uverbs_dev); } static int uverbs_event_get_sb(struct file_system_type *fs_type, int flags, diff --git a/drivers/infiniband/hw/mthca/mthca_allocator.c b/drivers/infiniband/hw/mthca/mthca_allocator.c index 9ba3211cef7..f930e55b58f 100644 --- a/drivers/infiniband/hw/mthca/mthca_allocator.c +++ b/drivers/infiniband/hw/mthca/mthca_allocator.c @@ -41,9 +41,11 @@ /* Trivial bitmap-based allocator */ u32 mthca_alloc(struct mthca_alloc *alloc) { + unsigned long flags; u32 obj; - spin_lock(&alloc->lock); + spin_lock_irqsave(&alloc->lock, flags); + obj = find_next_zero_bit(alloc->table, alloc->max, alloc->last); if (obj >= alloc->max) { alloc->top = (alloc->top + alloc->max) & alloc->mask; @@ -56,19 +58,24 @@ u32 mthca_alloc(struct mthca_alloc *alloc) } else obj = -1; - spin_unlock(&alloc->lock); + spin_unlock_irqrestore(&alloc->lock, flags); return obj; } void mthca_free(struct mthca_alloc *alloc, u32 obj) { + unsigned long flags; + obj &= alloc->max - 1; - spin_lock(&alloc->lock); + + spin_lock_irqsave(&alloc->lock, flags); + clear_bit(obj, alloc->table); alloc->last = min(alloc->last, obj); alloc->top = (alloc->top + alloc->max) & alloc->mask; - spin_unlock(&alloc->lock); + + spin_unlock_irqrestore(&alloc->lock, flags); } int mthca_alloc_init(struct mthca_alloc *alloc, u32 num, u32 mask, @@ -108,14 +115,15 @@ void mthca_alloc_cleanup(struct mthca_alloc *alloc) * serialize access to the array. 
*/ +#define MTHCA_ARRAY_MASK (PAGE_SIZE / sizeof (void *) - 1) + void *mthca_array_get(struct mthca_array *array, int index) { int p = (index * sizeof (void *)) >> PAGE_SHIFT; - if (array->page_list[p].page) { - int i = index & (PAGE_SIZE / sizeof (void *) - 1); - return array->page_list[p].page[i]; - } else + if (array->page_list[p].page) + return array->page_list[p].page[index & MTHCA_ARRAY_MASK]; + else return NULL; } @@ -130,8 +138,7 @@ int mthca_array_set(struct mthca_array *array, int index, void *value) if (!array->page_list[p].page) return -ENOMEM; - array->page_list[p].page[index & (PAGE_SIZE / sizeof (void *) - 1)] = - value; + array->page_list[p].page[index & MTHCA_ARRAY_MASK] = value; ++array->page_list[p].used; return 0; @@ -144,7 +151,8 @@ void mthca_array_clear(struct mthca_array *array, int index) if (--array->page_list[p].used == 0) { free_page((unsigned long) array->page_list[p].page); array->page_list[p].page = NULL; - } + } else + array->page_list[p].page[index & MTHCA_ARRAY_MASK] = NULL; if (array->page_list[p].used < 0) pr_debug("Array %p index %d page %d with ref count %d < 0\n", diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c index 557cde3a456..7b82c1907f0 100644 --- a/drivers/infiniband/hw/mthca/mthca_main.c +++ b/drivers/infiniband/hw/mthca/mthca_main.c @@ -967,12 +967,12 @@ static struct { } mthca_hca_table[] = { [TAVOR] = { .latest_fw = MTHCA_FW_VER(3, 4, 0), .flags = 0 }, - [ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 7, 400), + [ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 7, 600), .flags = MTHCA_FLAG_PCIE }, - [ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 1, 0), + [ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 1, 400), .flags = MTHCA_FLAG_MEMFREE | MTHCA_FLAG_PCIE }, - [SINAI] = { .latest_fw = MTHCA_FW_VER(1, 0, 800), + [SINAI] = { .latest_fw = MTHCA_FW_VER(1, 1, 0), .flags = MTHCA_FLAG_MEMFREE | MTHCA_FLAG_PCIE | MTHCA_FLAG_SINAI_OPT } diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 230ae21db8f..265b1d1c4a6 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -1287,11 +1287,7 @@ int mthca_register_device(struct mthca_dev *dev) (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) | - (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) | - (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) | - (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) | - (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) | - (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ); + (1ull << IB_USER_VERBS_CMD_DETACH_MCAST); dev->ib_dev.node_type = IB_NODE_CA; dev->ib_dev.phys_port_cnt = dev->limits.num_ports; dev->ib_dev.dma_device = &dev->pdev->dev; @@ -1316,6 +1312,11 @@ int mthca_register_device(struct mthca_dev *dev) dev->ib_dev.modify_srq = mthca_modify_srq; dev->ib_dev.query_srq = mthca_query_srq; dev->ib_dev.destroy_srq = mthca_destroy_srq; + dev->ib_dev.uverbs_cmd_mask |= + (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) | + (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) | + (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) | + (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ); if (mthca_is_memfree(dev)) dev->ib_dev.post_srq_recv = mthca_arbel_post_srq_recv; diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h index 8de2887ba15..9a5bece3fa5 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.h +++ b/drivers/infiniband/hw/mthca/mthca_provider.h @@ -136,8 +136,8 @@ struct mthca_ah 
{ * We have one global lock that protects dev->cq/qp_table. Each * struct mthca_cq/qp also has its own lock. An individual qp lock * may be taken inside of an individual cq lock. Both cqs attached to - * a qp may be locked, with the send cq locked first. No other - * nesting should be done. + * a qp may be locked, with the cq with the lower cqn locked first. + * No other nesting should be done. * * Each struct mthca_cq/qp also has an ref count, protected by the * corresponding table lock. The pointer from the cq/qp_table to the diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c index cd8b6721ac9..2e8f6f36e0a 100644 --- a/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/drivers/infiniband/hw/mthca/mthca_qp.c @@ -99,6 +99,10 @@ enum { MTHCA_QP_BIT_RSC = 1 << 3 }; +enum { + MTHCA_SEND_DOORBELL_FENCE = 1 << 5 +}; + struct mthca_qp_path { __be32 port_pkey; u8 rnr_retry; @@ -1259,6 +1263,32 @@ int mthca_alloc_qp(struct mthca_dev *dev, return 0; } +static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq) +{ + if (send_cq == recv_cq) + spin_lock_irq(&send_cq->lock); + else if (send_cq->cqn < recv_cq->cqn) { + spin_lock_irq(&send_cq->lock); + spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING); + } else { + spin_lock_irq(&recv_cq->lock); + spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING); + } +} + +static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq) +{ + if (send_cq == recv_cq) + spin_unlock_irq(&send_cq->lock); + else if (send_cq->cqn < recv_cq->cqn) { + spin_unlock(&recv_cq->lock); + spin_unlock_irq(&send_cq->lock); + } else { + spin_unlock(&send_cq->lock); + spin_unlock_irq(&recv_cq->lock); + } +} + int mthca_alloc_sqp(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_cq *send_cq, @@ -1311,17 +1341,13 @@ int mthca_alloc_sqp(struct mthca_dev *dev, * Lock CQs here, so that CQ polling code can do QP lookup * without taking a lock. */ - spin_lock_irq(&send_cq->lock); - if (send_cq != recv_cq) - spin_lock(&recv_cq->lock); + mthca_lock_cqs(send_cq, recv_cq); spin_lock(&dev->qp_table.lock); mthca_array_clear(&dev->qp_table.qp, mqpn); spin_unlock(&dev->qp_table.lock); - if (send_cq != recv_cq) - spin_unlock(&recv_cq->lock); - spin_unlock_irq(&send_cq->lock); + mthca_unlock_cqs(send_cq, recv_cq); err_out: dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size, @@ -1355,9 +1381,7 @@ void mthca_free_qp(struct mthca_dev *dev, * Lock CQs here, so that CQ polling code can do QP lookup * without taking a lock. */ - spin_lock_irq(&send_cq->lock); - if (send_cq != recv_cq) - spin_lock(&recv_cq->lock); + mthca_lock_cqs(send_cq, recv_cq); spin_lock(&dev->qp_table.lock); mthca_array_clear(&dev->qp_table.qp, @@ -1365,9 +1389,7 @@ void mthca_free_qp(struct mthca_dev *dev, --qp->refcount; spin_unlock(&dev->qp_table.lock); - if (send_cq != recv_cq) - spin_unlock(&recv_cq->lock); - spin_unlock_irq(&send_cq->lock); + mthca_unlock_cqs(send_cq, recv_cq); wait_event(qp->wait, !get_qp_refcount(dev, qp)); @@ -1502,7 +1524,7 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, int i; int size; int size0 = 0; - u32 f0 = 0; + u32 f0; int ind; u8 op0 = 0; @@ -1686,6 +1708,8 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, if (!size0) { size0 = size; op0 = mthca_opcode[wr->opcode]; + f0 = wr->send_flags & IB_SEND_FENCE ? 
+ MTHCA_SEND_DOORBELL_FENCE : 0; } ++ind; @@ -1843,7 +1867,7 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, int i; int size; int size0 = 0; - u32 f0 = 0; + u32 f0; int ind; u8 op0 = 0; @@ -2051,6 +2075,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, if (!size0) { size0 = size; op0 = mthca_opcode[wr->opcode]; + f0 = wr->send_flags & IB_SEND_FENCE ? + MTHCA_SEND_DOORBELL_FENCE : 0; } ++ind; diff --git a/drivers/infiniband/ulp/ipoib/Kconfig b/drivers/infiniband/ulp/ipoib/Kconfig index 13d6d01c72c..d74653d7de1 100644 --- a/drivers/infiniband/ulp/ipoib/Kconfig +++ b/drivers/infiniband/ulp/ipoib/Kconfig @@ -6,8 +6,7 @@ config INFINIBAND_IPOIB transports IP packets over InfiniBand so you can use your IB device as a fancy NIC. - The IPoIB protocol is defined by the IETF ipoib working - group: <http://www.ietf.org/html.charters/ipoib-charter.html>. + See Documentation/infiniband/ipoib.txt for more information config INFINIBAND_IPOIB_DEBUG bool "IP-over-InfiniBand debugging" if EMBEDDED diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 34b0da5cfa0..1437d7ee3b1 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -378,21 +378,6 @@ iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn) return iser_conn_set_full_featured_mode(conn); } -static void -iscsi_iser_conn_terminate(struct iscsi_conn *conn) -{ - struct iscsi_iser_conn *iser_conn = conn->dd_data; - struct iser_conn *ib_conn = iser_conn->ib_conn; - - BUG_ON(!ib_conn); - /* starts conn teardown process, waits until all previously * - * posted buffers get flushed, deallocates all conn resources */ - iser_conn_terminate(ib_conn); - iser_conn->ib_conn = NULL; - conn->recv_lock = NULL; -} - - static struct iscsi_transport iscsi_iser_transport; static struct iscsi_cls_session * @@ -555,13 +540,13 @@ iscsi_iser_ep_poll(__u64 ep_handle, int timeout_ms) static void iscsi_iser_ep_disconnect(__u64 ep_handle) { - struct iser_conn *ib_conn = iscsi_iser_ib_conn_lookup(ep_handle); + struct iser_conn *ib_conn; + ib_conn = iscsi_iser_ib_conn_lookup(ep_handle); if (!ib_conn) return; iser_err("ib conn %p state %d\n",ib_conn, ib_conn->state); - iser_conn_terminate(ib_conn); } @@ -614,9 +599,6 @@ static struct iscsi_transport iscsi_iser_transport = { .get_session_param = iscsi_session_get_param, .start_conn = iscsi_iser_conn_start, .stop_conn = iscsi_conn_stop, - /* these are called as part of conn recovery */ - .suspend_conn_recv = NULL, /* FIXME is/how this relvant to iser? 
*/ - .terminate_conn = iscsi_iser_conn_terminate, /* IO */ .send_pdu = iscsi_conn_send_pdu, .get_stats = iscsi_iser_conn_get_stats, diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 8f472e7113b..8257d5a2c8f 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -77,6 +77,14 @@ MODULE_PARM_DESC(topspin_workarounds, static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad }; +static int mellanox_workarounds = 1; + +module_param(mellanox_workarounds, int, 0444); +MODULE_PARM_DESC(mellanox_workarounds, + "Enable workarounds for Mellanox SRP target bugs if != 0"); + +static const u8 mellanox_oui[3] = { 0x00, 0x02, 0xc9 }; + static void srp_add_one(struct ib_device *device); static void srp_remove_one(struct ib_device *device); static void srp_completion(struct ib_cq *cq, void *target_ptr); @@ -526,8 +534,10 @@ static int srp_reconnect_target(struct srp_target_port *target) while (ib_poll_cq(target->cq, 1, &wc) > 0) ; /* nothing */ + spin_lock_irq(target->scsi_host->host_lock); list_for_each_entry_safe(req, tmp, &target->req_queue, list) srp_reset_req(target, req); + spin_unlock_irq(target->scsi_host->host_lock); target->rx_head = 0; target->tx_head = 0; @@ -567,7 +577,7 @@ err: return ret; } -static int srp_map_fmr(struct srp_device *dev, struct scatterlist *scat, +static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat, int sg_cnt, struct srp_request *req, struct srp_direct_buf *buf) { @@ -577,10 +587,15 @@ static int srp_map_fmr(struct srp_device *dev, struct scatterlist *scat, int page_cnt; int i, j; int ret; + struct srp_device *dev = target->srp_host->dev; if (!dev->fmr_pool) return -ENODEV; + if ((sg_dma_address(&scat[0]) & ~dev->fmr_page_mask) && + mellanox_workarounds && !memcmp(&target->ioc_guid, mellanox_oui, 3)) + return -EINVAL; + len = page_cnt = 0; for (i = 0; i < sg_cnt; ++i) { if (sg_dma_address(&scat[i]) & ~dev->fmr_page_mask) { @@ -683,7 +698,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target, buf->va = cpu_to_be64(sg_dma_address(scat)); buf->key = cpu_to_be32(target->srp_host->dev->mr->rkey); buf->len = cpu_to_be32(sg_dma_len(scat)); - } else if (srp_map_fmr(target->srp_host->dev, scat, count, req, + } else if (srp_map_fmr(target, scat, count, req, (void *) cmd->add_data)) { /* * FMR mapping failed, and the scatterlist has more |
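Note on the addr.c change: the patch drops the raw ARP packet handler in favour of a netevent notifier that fires on neighbour updates. Below is a minimal, self-contained sketch of that registration pattern; the module and callback names are made up for illustration, and only register_netevent_notifier(), unregister_netevent_notifier() and NETEVENT_NEIGH_UPDATE are taken from the patch itself.

```c
#include <linux/module.h>
#include <linux/notifier.h>
#include <net/netevent.h>
#include <net/neighbour.h>

/* Called for every network event; we only care about neighbour updates. */
static int example_netevent_cb(struct notifier_block *self,
			       unsigned long event, void *ctx)
{
	struct neighbour *neigh = ctx;

	if (event == NETEVENT_NEIGH_UPDATE && (neigh->nud_state & NUD_VALID))
		pr_debug("neighbour on %s became valid\n", neigh->dev->name);

	return 0;
}

static struct notifier_block example_nb = {
	.notifier_call = example_netevent_cb,
};

static int __init example_init(void)
{
	return register_netevent_notifier(&example_nb);
}

static void __exit example_exit(void)
{
	unregister_netevent_notifier(&example_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
```

The payload of a NETEVENT_NEIGH_UPDATE event is the struct neighbour that changed, which is why the callback in the patch can test nud_state and the bound net_device type directly instead of parsing ARP headers.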
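Note on the uverbs_main.c change: the kfree() moves out of the kref release function, which now only signals a completion; whoever tears the device down does kref_put(), waits for the completion, then frees. A stripped-down sketch of that lifetime pattern, with hypothetical example_* names, might look like:

```c
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/completion.h>
#include <linux/slab.h>

struct example_dev {
	struct kref	  ref;
	struct completion comp;
	/* ... device state ... */
};

/* The last kref_put() lands here; don't free yet, just wake the remover. */
static void example_release(struct kref *ref)
{
	struct example_dev *dev = container_of(ref, struct example_dev, ref);

	complete(&dev->comp);
}

static struct example_dev *example_add(void)
{
	struct example_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return NULL;

	kref_init(&dev->ref);
	init_completion(&dev->comp);
	return dev;
}

static void example_remove(struct example_dev *dev)
{
	/* Drop our reference, then wait until every other holder is gone. */
	kref_put(&dev->ref, example_release);
	wait_for_completion(&dev->comp);
	kfree(dev);
}
```

The point of the split, as the diff shows, is that ib_uverbs_remove_one() only returns after every remaining reference has been dropped, rather than letting the release callback free the structure in whatever context the last reference happens to die.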
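Note on the mthca_allocator.c change: mthca_alloc() and mthca_free() switch from spin_lock() to spin_lock_irqsave(), the usual conversion when a lock can now be taken from interrupt context as well as process context. The shape of that conversion, shown here on a toy one-word bitmap allocator (the names and the single-word simplification are illustrative, not from the patch):

```c
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(example_lock);
static unsigned long example_bitmap;	/* stand-in for the allocator state */

/* Safe to call from both process and interrupt context. */
static int example_alloc(void)
{
	unsigned long flags;
	int obj;

	spin_lock_irqsave(&example_lock, flags);
	obj = find_first_zero_bit(&example_bitmap, BITS_PER_LONG);
	if (obj < BITS_PER_LONG)
		__set_bit(obj, &example_bitmap);
	else
		obj = -1;
	spin_unlock_irqrestore(&example_lock, flags);

	return obj;
}

static void example_free(int obj)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	__clear_bit(obj, &example_bitmap);
	spin_unlock_irqrestore(&example_lock, flags);
}
```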
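Note on the mthca_qp.c change: the new mthca_lock_cqs()/mthca_unlock_cqs() helpers, together with the comment fix in mthca_provider.h, change the double-lock order from "send CQ first" to "lower cqn first". Ordering by a stable key is the standard way to lock two objects of the same class without risking an AB-BA deadlock. A generic sketch follows; the struct and field names are illustrative, but the locking logic mirrors the patch.

```c
#include <linux/spinlock.h>
#include <linux/types.h>

struct example_cq {
	spinlock_t lock;
	u32	   cqn;
};

/*
 * Lock two CQs that may be the same object. A fixed order (lower cqn
 * first) guarantees that two threads locking the same pair can never
 * deadlock; SINGLE_DEPTH_NESTING tells lockdep the double-lock is
 * intentional.
 */
static void example_lock_cqs(struct example_cq *send_cq,
			     struct example_cq *recv_cq)
{
	if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}

/* Unlock in the reverse order, re-enabling IRQs on the outermost lock. */
static void example_unlock_cqs(struct example_cq *send_cq,
			       struct example_cq *recv_cq)
{
	if (send_cq == recv_cq) {
		spin_unlock_irq(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}
```

spin_lock_nested() with SINGLE_DEPTH_NESTING only has an effect when lock debugging is enabled; without it, taking two locks of the same lock class would trip the validator even though the cqn ordering makes the code deadlock-free.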