Diffstat (limited to 'drivers/infiniband/ulp/iser')
 drivers/infiniband/ulp/iser/iscsi_iser.c     | 352
 drivers/infiniband/ulp/iser/iscsi_iser.h     | 188
 drivers/infiniband/ulp/iser/iser_initiator.c | 347
 drivers/infiniband/ulp/iser/iser_memory.c    | 546
 drivers/infiniband/ulp/iser/iser_verbs.c     | 682
5 files changed, 1621 insertions, 494 deletions
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 7b2fc98e2f2..eb7973957a6 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -5,6 +5,7 @@
* Copyright (C) 2004 Alex Aizman
* Copyright (C) 2005 Mike Christie
* Copyright (c) 2005, 2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
* maintained by openib-general@openib.org
*
* This software is available to you under a choice of one of two
@@ -57,6 +58,7 @@
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <net/sock.h>
@@ -80,15 +82,24 @@ static unsigned int iscsi_max_lun = 512;
module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
int iser_debug_level = 0;
+bool iser_pi_enable = false;
+int iser_pi_guard = 0;
-MODULE_DESCRIPTION("iSER (iSCSI Extensions for RDMA) Datamover "
- "v" DRV_VER " (" DRV_DATE ")");
+MODULE_DESCRIPTION("iSER (iSCSI Extensions for RDMA) Datamover");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Alex Nezhinsky, Dan Bar Dov, Or Gerlitz");
+MODULE_VERSION(DRV_VER);
module_param_named(debug_level, iser_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:disabled)");
+module_param_named(pi_enable, iser_pi_enable, bool, 0644);
+MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)");
+
+module_param_named(pi_guard, iser_pi_guard, int, 0644);
+MODULE_PARM_DESC(pi_guard, "T10-PI guard_type, 0:CRC|1:IP_CSUM (default:CRC)");
+
+static struct workqueue_struct *release_wq;
struct iser_global ig;
void
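A minimal, self-contained sketch of the module-parameter pattern behind the new pi_enable/pi_guard knobs added above (module and variable names are illustrative, not the driver's):

#include <linux/init.h>
#include <linux/module.h>

static bool demo_pi_enable = false;
module_param_named(pi_enable, demo_pi_enable, bool, 0644);
MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)");

static int __init demo_init(void)
{
        return 0;
}
module_init(demo_init);

static void __exit demo_exit(void)
{
}
module_exit(demo_exit);

MODULE_LICENSE("Dual BSD/GPL");

The 0644 permission makes the parameter writable through /sys/module/<name>/parameters at runtime; note the driver samples iser_pi_guard at session creation time.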
@@ -101,13 +112,17 @@ iscsi_iser_recv(struct iscsi_conn *conn,
/* verify PDU length */
datalen = ntoh24(hdr->dlength);
- if (datalen != rx_data_len) {
- printk(KERN_ERR "iscsi_iser: datalen %d (hdr) != %d (IB) \n",
- datalen, rx_data_len);
+ if (datalen > rx_data_len || (datalen + 4) < rx_data_len) {
+ iser_err("wrong datalen %d (hdr), %d (IB)\n",
+ datalen, rx_data_len);
rc = ISCSI_ERR_DATALEN;
goto error;
}
+ if (datalen != rx_data_len)
+ iser_dbg("aligned datalen (%d) hdr, %d (IB)\n",
+ datalen, rx_data_len);
+
/* read AHS */
ahslen = hdr->hlength * 4;
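The rewritten check above tolerates iSER's 4-byte padding of PDU data: the length reported by the IB transport may exceed the dlength in the iSCSI header by the pad, but must never be smaller. A stand-alone restatement of the predicate (hypothetical helper, same logic):

#include <linux/types.h>

static bool demo_pdu_len_ok(int datalen, int rx_data_len)
{
        /* hdr dlength must not exceed what the wire delivered,
         * and the wire may only add the small 4-byte pad slack */
        return datalen <= rx_data_len && datalen + 4 >= rx_data_len;
}

With datalen 1021 and rx_data_len 1024 (3 pad bytes) this accepts; with rx_data_len 1028 it rejects, where the old exact-match check would have rejected both.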
@@ -132,8 +147,8 @@ static int iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
int iser_initialize_task_headers(struct iscsi_task *task,
struct iser_tx_desc *tx_desc)
{
- struct iscsi_iser_conn *iser_conn = task->conn->dd_data;
- struct iser_device *device = iser_conn->ib_conn->device;
+ struct iser_conn *ib_conn = task->conn->dd_data;
+ struct iser_device *device = ib_conn->device;
struct iscsi_iser_task *iser_task = task->dd_data;
u64 dma_addr;
@@ -147,8 +162,7 @@ int iser_initialize_task_headers(struct iscsi_task *task,
tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
tx_desc->tx_sg[0].lkey = device->mr->lkey;
- iser_task->headers_initialized = 1;
- iser_task->iser_conn = iser_conn;
+ iser_task->ib_conn = ib_conn;
return 0;
}
/**
@@ -162,8 +176,7 @@ iscsi_iser_task_init(struct iscsi_task *task)
{
struct iscsi_iser_task *iser_task = task->dd_data;
- if (!iser_task->headers_initialized)
- if (iser_initialize_task_headers(task, &iser_task->desc))
+ if (iser_initialize_task_headers(task, &iser_task->desc))
return -ENOMEM;
/* mgmt task */
@@ -172,6 +185,8 @@ iscsi_iser_task_init(struct iscsi_task *task)
iser_task->command_sent = 0;
iser_task_rdma_init(iser_task);
+ iser_task->sc = task->sc;
+
return 0;
}
@@ -274,6 +289,12 @@ iscsi_iser_task_xmit(struct iscsi_task *task)
static void iscsi_iser_cleanup_task(struct iscsi_task *task)
{
struct iscsi_iser_task *iser_task = task->dd_data;
+ struct iser_tx_desc *tx_desc = &iser_task->desc;
+ struct iser_conn *ib_conn = task->conn->dd_data;
+ struct iser_device *device = ib_conn->device;
+
+ ib_dma_unmap_single(device->ib_device,
+ tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);
/* mgmt tasks do not need special cleanup */
if (!task->sc)
@@ -285,14 +306,25 @@ static void iscsi_iser_cleanup_task(struct iscsi_task *task)
}
}
+static u8 iscsi_iser_check_protection(struct iscsi_task *task, sector_t *sector)
+{
+ struct iscsi_iser_task *iser_task = task->dd_data;
+
+ if (iser_task->dir[ISER_DIR_IN])
+ return iser_check_task_pi_status(iser_task, ISER_DIR_IN,
+ sector);
+ else
+ return iser_check_task_pi_status(iser_task, ISER_DIR_OUT,
+ sector);
+}
+
static struct iscsi_cls_conn *
iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
{
struct iscsi_conn *conn;
struct iscsi_cls_conn *cls_conn;
- struct iscsi_iser_conn *iser_conn;
- cls_conn = iscsi_conn_setup(cls_session, sizeof(*iser_conn), conn_idx);
+ cls_conn = iscsi_conn_setup(cls_session, 0, conn_idx);
if (!cls_conn)
return NULL;
conn = cls_conn->dd_data;
@@ -303,39 +335,16 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
*/
conn->max_recv_dlength = ISER_RECV_DATA_SEG_LEN;
- iser_conn = conn->dd_data;
- conn->dd_data = iser_conn;
- iser_conn->iscsi_conn = conn;
-
return cls_conn;
}
-static void
-iscsi_iser_conn_destroy(struct iscsi_cls_conn *cls_conn)
-{
- struct iscsi_conn *conn = cls_conn->dd_data;
- struct iscsi_iser_conn *iser_conn = conn->dd_data;
- struct iser_conn *ib_conn = iser_conn->ib_conn;
-
- iscsi_conn_teardown(cls_conn);
- /*
- * Userspace will normally call the stop callback and
- * already have freed the ib_conn, but if it goofed up then
- * we free it here.
- */
- if (ib_conn) {
- ib_conn->iser_conn = NULL;
- iser_conn_put(ib_conn, 1); /* deref iscsi/ib conn unbinding */
- }
-}
-
static int
iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
int is_leading)
{
struct iscsi_conn *conn = cls_conn->dd_data;
- struct iscsi_iser_conn *iser_conn;
+ struct iscsi_session *session;
struct iser_conn *ib_conn;
struct iscsi_endpoint *ep;
int error;
@@ -354,51 +363,51 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
}
ib_conn = ep->dd_data;
+ session = conn->session;
+ if (iser_alloc_rx_descriptors(ib_conn, session))
+ return -ENOMEM;
+
/* binds the iSER connection retrieved from the previously
* connected ep_handle to the iSCSI layer connection. exchanges
* connection pointers */
- iser_err("binding iscsi/iser conn %p %p to ib_conn %p\n",
- conn, conn->dd_data, ib_conn);
- iser_conn = conn->dd_data;
- ib_conn->iser_conn = iser_conn;
- iser_conn->ib_conn = ib_conn;
- iser_conn_get(ib_conn); /* ref iscsi/ib conn binding */
+ iser_info("binding iscsi conn %p to ib_conn %p\n", conn, ib_conn);
+
+ conn->dd_data = ib_conn;
+ ib_conn->iscsi_conn = conn;
+
return 0;
}
+static int
+iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
+{
+ struct iscsi_conn *iscsi_conn;
+ struct iser_conn *ib_conn;
+
+ iscsi_conn = cls_conn->dd_data;
+ ib_conn = iscsi_conn->dd_data;
+ reinit_completion(&ib_conn->stop_completion);
+
+ return iscsi_conn_start(cls_conn);
+}
+
static void
iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
{
struct iscsi_conn *conn = cls_conn->dd_data;
- struct iscsi_iser_conn *iser_conn = conn->dd_data;
- struct iser_conn *ib_conn = iser_conn->ib_conn;
+ struct iser_conn *ib_conn = conn->dd_data;
+
+ iser_dbg("stopping iscsi_conn: %p, ib_conn: %p\n", conn, ib_conn);
+ iscsi_conn_stop(cls_conn, flag);
/*
* Userspace may have goofed up and not bound the connection or
* might have only partially setup the connection.
*/
if (ib_conn) {
- iscsi_conn_stop(cls_conn, flag);
- /*
- * There is no unbind event so the stop callback
- * must release the ref from the bind.
- */
- iser_conn_put(ib_conn, 1); /* deref iscsi/ib conn unbinding */
+ conn->dd_data = NULL;
+ complete(&ib_conn->stop_completion);
}
- iser_conn->ib_conn = NULL;
-}
-
-static int
-iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
-{
- struct iscsi_conn *conn = cls_conn->dd_data;
- int err;
-
- err = iser_conn_set_full_featured_mode(conn);
- if (err)
- return err;
-
- return iscsi_conn_start(cls_conn);
}
static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
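The start/stop rework above replaces the old refcount dance with a completion handshake: conn_start re-arms stop_completion, conn_stop signals it after unbinding, and the deferred release path can wait on it before freeing. A reduced sketch of the pattern, with illustrative names:

#include <linux/completion.h>
#include <linux/slab.h>

struct demo_conn {
        struct completion stop_completion;
        /* ... connection resources ... */
};

static void demo_start(struct demo_conn *c)
{
        reinit_completion(&c->stop_completion);   /* re-arm for this session */
}

static void demo_stop(struct demo_conn *c)
{
        complete(&c->stop_completion);            /* unblock the releaser */
}

static void demo_release(struct demo_conn *c)
{
        wait_for_completion(&c->stop_completion); /* stop ran; safe to free */
        kfree(c);
}

The completion must be initialized once with init_completion() at connection setup, presumably in iser_conn_init() in iser_verbs.c, which is outside this excerpt.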
@@ -410,6 +419,17 @@ static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
iscsi_host_free(shost);
}
+static inline unsigned int
+iser_dif_prot_caps(int prot_caps)
+{
+ return ((prot_caps & IB_PROT_T10DIF_TYPE_1) ? SHOST_DIF_TYPE1_PROTECTION |
+ SHOST_DIX_TYPE1_PROTECTION : 0) |
+ ((prot_caps & IB_PROT_T10DIF_TYPE_2) ? SHOST_DIF_TYPE2_PROTECTION |
+ SHOST_DIX_TYPE2_PROTECTION : 0) |
+ ((prot_caps & IB_PROT_T10DIF_TYPE_3) ? SHOST_DIF_TYPE3_PROTECTION |
+ SHOST_DIX_TYPE3_PROTECTION : 0);
+}
+
static struct iscsi_cls_session *
iscsi_iser_session_create(struct iscsi_endpoint *ep,
uint16_t cmds_max, uint16_t qdepth,
@@ -418,12 +438,13 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
struct iscsi_cls_session *cls_session;
struct iscsi_session *session;
struct Scsi_Host *shost;
- struct iser_conn *ib_conn;
+ struct iser_conn *ib_conn = NULL;
shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0);
if (!shost)
return NULL;
shost->transportt = iscsi_iser_scsi_transport;
+ shost->cmd_per_lun = qdepth;
shost->max_lun = iscsi_max_lun;
shost->max_id = 0;
shost->max_channel = 0;
@@ -433,19 +454,31 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
* older userspace tools (before 2.0-870) did not pass us
* the leading conn's ep so this will be NULL;
*/
- if (ep)
+ if (ep) {
ib_conn = ep->dd_data;
+ if (ib_conn->pi_support) {
+ u32 sig_caps = ib_conn->device->dev_attr.sig_prot_cap;
+
+ scsi_host_set_prot(shost, iser_dif_prot_caps(sig_caps));
+ if (iser_pi_guard)
+ scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP);
+ else
+ scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
+ }
+ }
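The pi_support branch above advertises DIF/DIX capability on the SCSI host before it is added. A minimal illustration with a fixed Type-1 capability mask (hypothetical function; the driver derives the actual mask from the device's sig_prot_cap via iser_dif_prot_caps() as shown):

#include <scsi/scsi_host.h>

static void demo_set_pi_caps(struct Scsi_Host *shost, bool ip_csum_guard)
{
        scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION |
                                  SHOST_DIX_TYPE1_PROTECTION);
        scsi_host_set_guard(shost, ip_csum_guard ? SHOST_DIX_GUARD_IP :
                                                   SHOST_DIX_GUARD_CRC);
}

Both calls must happen before iscsi_host_add(), exactly as ordered in the hunk.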
if (iscsi_host_add(shost,
ep ? ib_conn->device->ib_device->dma_device : NULL))
goto free_host;
- /*
- * we do not support setting can_queue cmd_per_lun from userspace yet
- * because we preallocate so many resources
- */
+ if (cmds_max > ISER_DEF_XMIT_CMDS_MAX) {
+ iser_info("cmds_max changed from %u to %u\n",
+ cmds_max, ISER_DEF_XMIT_CMDS_MAX);
+ cmds_max = ISER_DEF_XMIT_CMDS_MAX;
+ }
+
cls_session = iscsi_session_setup(&iscsi_iser_transport, shost,
- ISCSI_DEF_XMIT_CMDS_MAX, 0,
+ cmds_max, 0,
sizeof(struct iscsi_iser_task),
initial_cmdsn, 0);
if (!cls_session)
@@ -475,28 +508,28 @@ iscsi_iser_set_param(struct iscsi_cls_conn *cls_conn,
case ISCSI_PARAM_HDRDGST_EN:
sscanf(buf, "%d", &value);
if (value) {
- printk(KERN_ERR "DataDigest wasn't negotiated to None");
+ iser_err("DataDigest wasn't negotiated to None\n");
return -EPROTO;
}
break;
case ISCSI_PARAM_DATADGST_EN:
sscanf(buf, "%d", &value);
if (value) {
- printk(KERN_ERR "DataDigest wasn't negotiated to None");
+ iser_err("DataDigest wasn't negotiated to None\n");
return -EPROTO;
}
break;
case ISCSI_PARAM_IFMARKER_EN:
sscanf(buf, "%d", &value);
if (value) {
- printk(KERN_ERR "IFMarker wasn't negotiated to No");
+ iser_err("IFMarker wasn't negotiated to No\n");
return -EPROTO;
}
break;
case ISCSI_PARAM_OFMARKER_EN:
sscanf(buf, "%d", &value);
if (value) {
- printk(KERN_ERR "OFMarker wasn't negotiated to No");
+ iser_err("OFMarker wasn't negotiated to No\n");
return -EPROTO;
}
break;
@@ -532,6 +565,29 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s
stats->custom[3].value = conn->fmr_unalign_cnt;
}
+static int iscsi_iser_get_ep_param(struct iscsi_endpoint *ep,
+ enum iscsi_param param, char *buf)
+{
+ struct iser_conn *ib_conn = ep->dd_data;
+ int len;
+
+ switch (param) {
+ case ISCSI_PARAM_CONN_PORT:
+ case ISCSI_PARAM_CONN_ADDRESS:
+ if (!ib_conn || !ib_conn->cma_id)
+ return -ENOTCONN;
+
+ return iscsi_conn_get_addr_param((struct sockaddr_storage *)
+ &ib_conn->cma_id->route.addr.dst_addr,
+ param, buf);
+ break;
+ default:
+ return -ENOSYS;
+ }
+
+ return len;
+}
+
static struct iscsi_endpoint *
iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
int non_blocking)
@@ -550,10 +606,9 @@ iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr,
non_blocking);
- if (err) {
- iscsi_destroy_endpoint(ep);
+ if (err)
return ERR_PTR(err);
- }
+
return ep;
}
@@ -574,7 +629,7 @@ iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
ib_conn->state == ISER_CONN_DOWN))
rc = -1;
- iser_err("ib conn %p rc = %d\n", ib_conn, rc);
+ iser_info("ib conn %p rc = %d\n", ib_conn, rc);
if (rc > 0)
return 1; /* success, this is the equivalent of POLLOUT */
@@ -590,24 +645,79 @@ iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
struct iser_conn *ib_conn;
ib_conn = ep->dd_data;
- if (ib_conn->iser_conn)
- /*
- * Must suspend xmit path if the ep is bound to the
- * iscsi_conn, so we know we are not accessing the ib_conn
- * when we free it.
- *
- * This may not be bound if the ep poll failed.
- */
- iscsi_suspend_tx(ib_conn->iser_conn->iscsi_conn);
-
-
- iser_err("ib conn %p state %d\n",ib_conn, ib_conn->state);
+ iser_info("ep %p ib conn %p state %d\n", ep, ib_conn, ib_conn->state);
iser_conn_terminate(ib_conn);
+
+ /*
+ * if iser_conn and iscsi_conn are bound, we must wait for the
+ * iscsi_conn_stop call and ISER_CONN_DOWN state before freeing the
+ * iser resources.
+ * otherwise we are safe to free resources immediately.
+ */
+ if (ib_conn->iscsi_conn) {
+ INIT_WORK(&ib_conn->release_work, iser_release_work);
+ queue_work(release_wq, &ib_conn->release_work);
+ } else {
+ iser_conn_release(ib_conn);
+ }
+}
+
+static umode_t iser_attr_is_visible(int param_type, int param)
+{
+ switch (param_type) {
+ case ISCSI_HOST_PARAM:
+ switch (param) {
+ case ISCSI_HOST_PARAM_NETDEV_NAME:
+ case ISCSI_HOST_PARAM_HWADDRESS:
+ case ISCSI_HOST_PARAM_INITIATOR_NAME:
+ return S_IRUGO;
+ default:
+ return 0;
+ }
+ case ISCSI_PARAM:
+ switch (param) {
+ case ISCSI_PARAM_MAX_RECV_DLENGTH:
+ case ISCSI_PARAM_MAX_XMIT_DLENGTH:
+ case ISCSI_PARAM_HDRDGST_EN:
+ case ISCSI_PARAM_DATADGST_EN:
+ case ISCSI_PARAM_CONN_ADDRESS:
+ case ISCSI_PARAM_CONN_PORT:
+ case ISCSI_PARAM_EXP_STATSN:
+ case ISCSI_PARAM_PERSISTENT_ADDRESS:
+ case ISCSI_PARAM_PERSISTENT_PORT:
+ case ISCSI_PARAM_PING_TMO:
+ case ISCSI_PARAM_RECV_TMO:
+ case ISCSI_PARAM_INITIAL_R2T_EN:
+ case ISCSI_PARAM_MAX_R2T:
+ case ISCSI_PARAM_IMM_DATA_EN:
+ case ISCSI_PARAM_FIRST_BURST:
+ case ISCSI_PARAM_MAX_BURST:
+ case ISCSI_PARAM_PDU_INORDER_EN:
+ case ISCSI_PARAM_DATASEQ_INORDER_EN:
+ case ISCSI_PARAM_TARGET_NAME:
+ case ISCSI_PARAM_TPGT:
+ case ISCSI_PARAM_USERNAME:
+ case ISCSI_PARAM_PASSWORD:
+ case ISCSI_PARAM_USERNAME_IN:
+ case ISCSI_PARAM_PASSWORD_IN:
+ case ISCSI_PARAM_FAST_ABORT:
+ case ISCSI_PARAM_ABORT_TMO:
+ case ISCSI_PARAM_LU_RESET_TMO:
+ case ISCSI_PARAM_TGT_RESET_TMO:
+ case ISCSI_PARAM_IFACE_NAME:
+ case ISCSI_PARAM_INITIATOR_NAME:
+ case ISCSI_PARAM_DISCOVERY_SESS:
+ return S_IRUGO;
+ default:
+ return 0;
+ }
+ }
+
+ return 0;
}
static struct scsi_host_template iscsi_iser_sht = {
.module = THIS_MODULE,
- .name = "iSCSI Initiator over iSER, v." DRV_VER,
+ .name = "iSCSI Initiator over iSER",
.queuecommand = iscsi_queuecommand,
.change_queue_depth = iscsi_change_queue_depth,
.sg_tablesize = ISCSI_ISER_SG_TABLESIZE,
@@ -625,40 +735,18 @@ static struct scsi_host_template iscsi_iser_sht = {
static struct iscsi_transport iscsi_iser_transport = {
.owner = THIS_MODULE,
.name = "iser",
- .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T,
- .param_mask = ISCSI_MAX_RECV_DLENGTH |
- ISCSI_MAX_XMIT_DLENGTH |
- ISCSI_HDRDGST_EN |
- ISCSI_DATADGST_EN |
- ISCSI_INITIAL_R2T_EN |
- ISCSI_MAX_R2T |
- ISCSI_IMM_DATA_EN |
- ISCSI_FIRST_BURST |
- ISCSI_MAX_BURST |
- ISCSI_PDU_INORDER_EN |
- ISCSI_DATASEQ_INORDER_EN |
- ISCSI_EXP_STATSN |
- ISCSI_PERSISTENT_PORT |
- ISCSI_PERSISTENT_ADDRESS |
- ISCSI_TARGET_NAME | ISCSI_TPGT |
- ISCSI_USERNAME | ISCSI_PASSWORD |
- ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
- ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
- ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO |
- ISCSI_PING_TMO | ISCSI_RECV_TMO |
- ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
- .host_param_mask = ISCSI_HOST_HWADDRESS |
- ISCSI_HOST_NETDEV_NAME |
- ISCSI_HOST_INITIATOR_NAME,
+ .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_TEXT_NEGO,
/* session management */
.create_session = iscsi_iser_session_create,
.destroy_session = iscsi_iser_session_destroy,
/* connection management */
.create_conn = iscsi_iser_conn_create,
.bind_conn = iscsi_iser_conn_bind,
- .destroy_conn = iscsi_iser_conn_destroy,
+ .destroy_conn = iscsi_conn_teardown,
+ .attr_is_visible = iser_attr_is_visible,
.set_param = iscsi_iser_set_param,
.get_conn_param = iscsi_conn_get_param,
+ .get_ep_param = iscsi_iser_get_ep_param,
.get_session_param = iscsi_session_get_param,
.start_conn = iscsi_iser_conn_start,
.stop_conn = iscsi_iser_conn_stop,
@@ -672,6 +760,7 @@ static struct iscsi_transport iscsi_iser_transport = {
.xmit_task = iscsi_iser_task_xmit,
.cleanup_task = iscsi_iser_cleanup_task,
.alloc_pdu = iscsi_iser_pdu_alloc,
+ .check_protection = iscsi_iser_check_protection,
/* recovery */
.session_recovery_timedout = iscsi_session_recovery_timedout,
@@ -687,7 +776,7 @@ static int __init iser_init(void)
iser_dbg("Starting iSER datamover...\n");
if (iscsi_max_lun < 1) {
- printk(KERN_ERR "Invalid max_lun value of %u\n", iscsi_max_lun);
+ iser_err("Invalid max_lun value of %u\n", iscsi_max_lun);
return -EINVAL;
}
@@ -706,6 +795,12 @@ static int __init iser_init(void)
mutex_init(&ig.connlist_mutex);
INIT_LIST_HEAD(&ig.connlist);
+ release_wq = alloc_workqueue("release workqueue", 0, 0);
+ if (!release_wq) {
+ iser_err("failed to allocate release workqueue\n");
+ return -ENOMEM;
+ }
+
iscsi_iser_scsi_transport = iscsi_register_transport(
&iscsi_iser_transport);
if (!iscsi_iser_scsi_transport) {
@@ -724,7 +819,24 @@ register_transport_failure:
static void __exit iser_exit(void)
{
+ struct iser_conn *ib_conn, *n;
+ int connlist_empty;
+
iser_dbg("Removing iSER datamover...\n");
+ destroy_workqueue(release_wq);
+
+ mutex_lock(&ig.connlist_mutex);
+ connlist_empty = list_empty(&ig.connlist);
+ mutex_unlock(&ig.connlist_mutex);
+
+ if (!connlist_empty) {
+ iser_err("Error cleanup stage completed but we still have iser "
+ "connections, destroying them anyway.\n");
+ list_for_each_entry_safe(ib_conn, n, &ig.connlist, conn_list) {
+ iser_conn_release(ib_conn);
+ }
+ }
+
iscsi_unregister_transport(&iscsi_iser_transport);
kmem_cache_destroy(ig.desc_cache);
}
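Taken together, the init/exit and ep_disconnect hunks above implement a deferred-release pattern: a dedicated workqueue created at module init, per-connection release work queued on disconnect, and a draining destroy_workqueue() at exit. A compact sketch with illustrative names:

#include <linux/slab.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_release_wq;

struct demo_conn {
        struct work_struct release_work;
};

static void demo_release_work_fn(struct work_struct *work)
{
        struct demo_conn *c = container_of(work, struct demo_conn,
                                           release_work);
        kfree(c);                       /* free connection resources */
}

static int demo_module_init(void)
{
        demo_release_wq = alloc_workqueue("demo_release_wq", 0, 0);
        return demo_release_wq ? 0 : -ENOMEM;
}

static void demo_disconnect(struct demo_conn *c)
{
        INIT_WORK(&c->release_work, demo_release_work_fn);
        queue_work(demo_release_wq, &c->release_work);
}

static void demo_module_exit(void)
{
        destroy_workqueue(demo_release_wq);     /* drains pending releases */
}

destroy_workqueue() flushes outstanding work items first, which is why the leftover-connection check in iser_exit() only catches connections whose release was never queued.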
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index f1df01567bb..97cd385bf7f 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -8,6 +8,7 @@
*
* Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
* Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
+ * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -42,9 +43,13 @@
#include <linux/types.h>
#include <linux/net.h>
+#include <linux/printk.h>
#include <scsi/libiscsi.h>
#include <scsi/scsi_transport_iscsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/list.h>
@@ -64,12 +69,11 @@
#define DRV_NAME "iser"
#define PFX DRV_NAME ": "
-#define DRV_VER "0.1"
-#define DRV_DATE "May 7th, 2006"
+#define DRV_VER "1.4"
#define iser_dbg(fmt, arg...) \
do { \
- if (iser_debug_level > 1) \
+ if (iser_debug_level > 2) \
printk(KERN_DEBUG PFX "%s:" fmt,\
__func__ , ## arg); \
} while (0)
@@ -77,7 +81,14 @@
#define iser_warn(fmt, arg...) \
do { \
if (iser_debug_level > 0) \
- printk(KERN_DEBUG PFX "%s:" fmt,\
+ pr_warn(PFX "%s:" fmt, \
+ __func__ , ## arg); \
+ } while (0)
+
+#define iser_info(fmt, arg...) \
+ do { \
+ if (iser_debug_level > 1) \
+ pr_info(PFX "%s:" fmt, \
__func__ , ## arg); \
} while (0)
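A quick map of the thresholds (my reading of the macros): iser_warn fires at debug_level > 0, the new iser_info at > 1, and iser_dbg, bumped in the preceding hunk, at > 2, so raising debug_level progressively adds detail without silencing the coarser levels.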
@@ -88,12 +99,18 @@
} while (0)
#define SHIFT_4K 12
-#define SIZE_4K (1UL << SHIFT_4K)
+#define SIZE_4K (1ULL << SHIFT_4K)
#define MASK_4K (~(SIZE_4K-1))
- /* support upto 512KB in one RDMA */
+ /* support up to 512KB in one RDMA */
#define ISCSI_ISER_SG_TABLESIZE (0x80000 >> SHIFT_4K)
-#define ISER_DEF_CMD_PER_LUN 128
+#define ISER_DEF_XMIT_CMDS_DEFAULT 512
+#if ISCSI_DEF_XMIT_CMDS_MAX > ISER_DEF_XMIT_CMDS_DEFAULT
+ #define ISER_DEF_XMIT_CMDS_MAX ISCSI_DEF_XMIT_CMDS_MAX
+#else
+ #define ISER_DEF_XMIT_CMDS_MAX ISER_DEF_XMIT_CMDS_DEFAULT
+#endif
+#define ISER_DEF_CMD_PER_LUN ISER_DEF_XMIT_CMDS_MAX
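Quick arithmetic on the constants above (my numbers, not in the patch): 0x80000 bytes is 512 KB, and 0x80000 >> SHIFT_4K (12) yields 128 scatter-gather entries of 4 KB each, matching the 512 KB-per-RDMA comment. ISER_DEF_XMIT_CMDS_MAX therefore resolves to 512 on stock kernels, where libiscsi's ISCSI_DEF_XMIT_CMDS_MAX (128 at the time) is the smaller of the two.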
/* QP settings */
/* Maximal bounds on received asynchronous PDUs */
@@ -102,9 +119,9 @@
#define ISER_MAX_TX_MISC_PDUS 6 /* NOOP_OUT(2), TEXT(1), *
* SCSI_TMFUNC(2), LOGOUT(1) */
-#define ISER_QP_MAX_RECV_DTOS (ISCSI_DEF_XMIT_CMDS_MAX)
+#define ISER_QP_MAX_RECV_DTOS (ISER_DEF_XMIT_CMDS_MAX)
-#define ISER_MIN_POSTED_RX (ISCSI_DEF_XMIT_CMDS_MAX >> 2)
+#define ISER_MIN_POSTED_RX (ISER_DEF_XMIT_CMDS_MAX >> 2)
/* the max TX (send) WR supported by the iSER QP is defined by *
* max_send_wr = T * (1 + D) + C ; D is how many inflight dataouts we expect *
@@ -114,15 +131,26 @@
#define ISER_INFLIGHT_DATAOUTS 8
-#define ISER_QP_MAX_REQ_DTOS (ISCSI_DEF_XMIT_CMDS_MAX * \
+#define ISER_QP_MAX_REQ_DTOS (ISER_DEF_XMIT_CMDS_MAX * \
(1 + ISER_INFLIGHT_DATAOUTS) + \
ISER_MAX_TX_MISC_PDUS + \
ISER_MAX_RX_MISC_PDUS)
+/* Max registration work requests per command */
+#define ISER_MAX_REG_WR_PER_CMD 5
+
+/* For Signature we don't support DATAOUTs so no need to make room for them */
+#define ISER_QP_SIG_MAX_REQ_DTOS (ISER_DEF_XMIT_CMDS_MAX * \
+ (1 + ISER_MAX_REG_WR_PER_CMD) + \
+ ISER_MAX_TX_MISC_PDUS + \
+ ISER_MAX_RX_MISC_PDUS)
+
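Plugging in the defaults (my arithmetic): with T = ISER_DEF_XMIT_CMDS_MAX = 512, D = ISER_INFLIGHT_DATAOUTS = 8, and C = ISER_MAX_TX_MISC_PDUS + ISER_MAX_RX_MISC_PDUS = 6 + 4 (taking the RX misc count as 4, its value in this driver, not visible in this excerpt), ISER_QP_MAX_REQ_DTOS comes to 512 * 9 + 10 = 4618 work requests. The signature variant, which trades DATAOUTs for up to ISER_MAX_REG_WR_PER_CMD = 5 registration WRs per command, comes to 512 * 6 + 10 = 3082.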
#define ISER_VER 0x10
#define ISER_WSV 0x08
#define ISER_RSV 0x04
+#define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL
+
struct iser_hdr {
u8 flags;
u8 rsvd[3];
@@ -132,6 +160,15 @@ struct iser_hdr {
__be64 read_va;
} __attribute__((packed));
+
+#define ISER_ZBVA_NOT_SUPPORTED 0x80
+#define ISER_SEND_W_INV_NOT_SUPPORTED 0x40
+
+struct iser_cm_hdr {
+ u8 flags;
+ u8 rsvd[3];
+} __packed;
+
/* Constant PDU lengths calculations */
#define ISER_HEADERS_LEN (sizeof(struct iser_hdr) + sizeof(struct iscsi_hdr))
@@ -176,7 +213,7 @@ struct iser_data_buf {
/* fwd declarations */
struct iser_device;
-struct iscsi_iser_conn;
+struct iser_cq_desc;
struct iscsi_iser_task;
struct iscsi_endpoint;
@@ -186,7 +223,7 @@ struct iser_mem_reg {
u64 va;
u64 len;
void *mem_h;
- int is_fmr;
+ int is_mr;
};
struct iser_regd_buf {
@@ -225,20 +262,63 @@ struct iser_rx_desc {
char pad[ISER_RX_PAD_SIZE];
} __attribute__((packed));
+#define ISER_MAX_CQ 4
+
+struct iser_conn;
+struct iscsi_iser_task;
+
struct iser_device {
struct ib_device *ib_device;
struct ib_pd *pd;
- struct ib_cq *rx_cq;
- struct ib_cq *tx_cq;
+ struct ib_device_attr dev_attr;
+ struct ib_cq *rx_cq[ISER_MAX_CQ];
+ struct ib_cq *tx_cq[ISER_MAX_CQ];
struct ib_mr *mr;
- struct tasklet_struct cq_tasklet;
+ struct tasklet_struct cq_tasklet[ISER_MAX_CQ];
struct ib_event_handler event_handler;
struct list_head ig_list; /* entry in ig devices list */
int refcount;
+ int cq_active_qps[ISER_MAX_CQ];
+ int cqs_used;
+ struct iser_cq_desc *cq_desc;
+ int (*iser_alloc_rdma_reg_res)(struct iser_conn *ib_conn,
+ unsigned cmds_max);
+ void (*iser_free_rdma_reg_res)(struct iser_conn *ib_conn);
+ int (*iser_reg_rdma_mem)(struct iscsi_iser_task *iser_task,
+ enum iser_data_dir cmd_dir);
+ void (*iser_unreg_rdma_mem)(struct iscsi_iser_task *iser_task,
+ enum iser_data_dir cmd_dir);
+};
+
+#define ISER_CHECK_GUARD 0xc0
+#define ISER_CHECK_REFTAG 0x0f
+#define ISER_CHECK_APPTAG 0x30
+
+enum iser_reg_indicator {
+ ISER_DATA_KEY_VALID = 1 << 0,
+ ISER_PROT_KEY_VALID = 1 << 1,
+ ISER_SIG_KEY_VALID = 1 << 2,
+ ISER_FASTREG_PROTECTED = 1 << 3,
+};
+
+struct iser_pi_context {
+ struct ib_mr *prot_mr;
+ struct ib_fast_reg_page_list *prot_frpl;
+ struct ib_mr *sig_mr;
+};
+
+struct fast_reg_descriptor {
+ struct list_head list;
+ /* For fast registration - FRWR */
+ struct ib_mr *data_mr;
+ struct ib_fast_reg_page_list *data_frpl;
+ struct iser_pi_context *pi_ctx;
+ /* registration indicators container */
+ u8 reg_indicators;
};
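The function pointers added to struct iser_device above let one code path serve both registration schemes: the callbacks are wired once, at device discovery, to either the FMR or the fast-registration (FRWR) implementations, mirrored by the per-connection union further down that holds either an FMR pool or a fastreg descriptor list. A reduced sketch of the dispatch (names illustrative):

struct demo_task;

static int demo_reg_fmr(struct demo_task *t)     { return 0; }
static int demo_reg_fastreg(struct demo_task *t) { return 0; }

struct demo_device {
        int (*reg_rdma_mem)(struct demo_task *task);
};

static void demo_pick_reg_method(struct demo_device *dev, bool have_frwr)
{
        /* decided once at device discovery; hot paths just call
         * dev->reg_rdma_mem() without branching on the scheme */
        dev->reg_rdma_mem = have_frwr ? demo_reg_fastreg : demo_reg_fmr;
}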
struct iser_conn {
- struct iscsi_iser_conn *iser_conn; /* iser conn for upcalls */
+ struct iscsi_conn *iscsi_conn;
struct iscsi_endpoint *ep;
enum iser_ib_conn_state state; /* rdma connection state */
atomic_t refcount;
@@ -246,37 +326,51 @@ struct iser_conn {
struct iser_device *device; /* device context */
struct rdma_cm_id *cma_id; /* CMA ID */
struct ib_qp *qp; /* QP */
- struct ib_fmr_pool *fmr_pool; /* pool of IB FMRs */
wait_queue_head_t wait; /* waitq for conn/disconn */
+ unsigned qp_max_recv_dtos; /* num of rx buffers */
+ unsigned qp_max_recv_dtos_mask; /* above minus 1 */
+ unsigned min_posted_rx; /* qp_max_recv_dtos >> 2 */
int post_recv_buf_count; /* posted rx count */
atomic_t post_send_buf_count; /* posted tx count */
char name[ISER_OBJECT_NAME_SIZE];
- struct iser_page_vec *page_vec; /* represents SG to fmr maps*
- * maps serialized as tx is*/
+ struct work_struct release_work;
+ struct completion stop_completion;
struct list_head conn_list; /* entry in ig conn list */
char *login_buf;
- u64 login_dma;
+ char *login_req_buf, *login_resp_buf;
+ u64 login_req_dma, login_resp_dma;
unsigned int rx_desc_head;
struct iser_rx_desc *rx_descs;
struct ib_recv_wr rx_wr[ISER_MIN_POSTED_RX];
-};
-
-struct iscsi_iser_conn {
- struct iscsi_conn *iscsi_conn;/* ptr to iscsi conn */
- struct iser_conn *ib_conn; /* iSER IB conn */
+ bool pi_support;
+
+ /* Connection memory registration pool */
+ union {
+ struct {
+ struct ib_fmr_pool *pool; /* pool of IB FMRs */
+ struct iser_page_vec *page_vec; /* represents SG to fmr maps*
+ * maps serialized as tx is*/
+ } fmr;
+ struct {
+ struct list_head pool;
+ int pool_size;
+ } fastreg;
+ };
};
struct iscsi_iser_task {
struct iser_tx_desc desc;
- struct iscsi_iser_conn *iser_conn;
+ struct iser_conn *ib_conn;
enum iser_task_status status;
+ struct scsi_cmnd *sc;
int command_sent; /* set if command sent */
int dir[ISER_DIRS_NUM]; /* set if dir use*/
struct iser_regd_buf rdma_regd[ISER_DIRS_NUM];/* regd rdma buf */
struct iser_data_buf data[ISER_DIRS_NUM]; /* orig. data des*/
struct iser_data_buf data_copy[ISER_DIRS_NUM];/* contig. copy */
- int headers_initialized;
+ struct iser_data_buf prot[ISER_DIRS_NUM]; /* prot desc */
+ struct iser_data_buf prot_copy[ISER_DIRS_NUM];/* prot copy */
};
struct iser_page_vec {
@@ -286,6 +380,11 @@ struct iser_page_vec {
int data_size;
};
+struct iser_cq_desc {
+ struct iser_device *device;
+ int cq_index;
+};
+
struct iser_global {
struct mutex device_list_mutex;/* */
struct list_head device_list; /* all iSER devices */
@@ -297,6 +396,8 @@ struct iser_global {
extern struct iser_global ig;
extern int iser_debug_level;
+extern bool iser_pi_enable;
+extern int iser_pi_guard;
/* allocate connection resources needed for rdma functionality */
int iser_conn_set_full_featured_mode(struct iscsi_conn *conn);
@@ -318,12 +419,12 @@ void iscsi_iser_recv(struct iscsi_conn *conn,
void iser_conn_init(struct iser_conn *ib_conn);
-void iser_conn_get(struct iser_conn *ib_conn);
-
-int iser_conn_put(struct iser_conn *ib_conn, int destroy_cma_id_allowed);
+void iser_conn_release(struct iser_conn *ib_conn);
void iser_conn_terminate(struct iser_conn *ib_conn);
+void iser_release_work(struct work_struct *work);
+
void iser_rcv_completion(struct iser_rx_desc *desc,
unsigned long dto_xfer_len,
struct iser_conn *ib_conn);
@@ -336,11 +437,15 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *task);
void iser_free_rx_descriptors(struct iser_conn *ib_conn);
-void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *task,
- enum iser_data_dir cmd_dir);
+void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
+ struct iser_data_buf *mem,
+ struct iser_data_buf *mem_copy,
+ enum iser_data_dir cmd_dir);
-int iser_reg_rdma_mem(struct iscsi_iser_task *task,
- enum iser_data_dir cmd_dir);
+int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *task,
+ enum iser_data_dir cmd_dir);
+int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *task,
+ enum iser_data_dir cmd_dir);
int iser_connect(struct iser_conn *ib_conn,
struct sockaddr_in *src_addr,
@@ -351,7 +456,10 @@ int iser_reg_page_vec(struct iser_conn *ib_conn,
struct iser_page_vec *page_vec,
struct iser_mem_reg *mem_reg);
-void iser_unreg_mem(struct iser_mem_reg *mem_reg);
+void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
+ enum iser_data_dir cmd_dir);
+void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
+ enum iser_data_dir cmd_dir);
int iser_post_recvl(struct iser_conn *ib_conn);
int iser_post_recvm(struct iser_conn *ib_conn, int count);
@@ -362,7 +470,15 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
enum iser_data_dir iser_dir,
enum dma_data_direction dma_dir);
-void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task);
+void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
+ struct iser_data_buf *data);
int iser_initialize_task_headers(struct iscsi_task *task,
struct iser_tx_desc *tx_desc);
+int iser_alloc_rx_descriptors(struct iser_conn *ib_conn, struct iscsi_session *session);
+int iser_create_fmr_pool(struct iser_conn *ib_conn, unsigned cmds_max);
+void iser_free_fmr_pool(struct iser_conn *ib_conn);
+int iser_create_fastreg_pool(struct iser_conn *ib_conn, unsigned cmds_max);
+void iser_free_fastreg_pool(struct iser_conn *ib_conn);
+u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
+ enum iser_data_dir cmd_dir, sector_t *sector);
#endif
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 95a08a8ca8a..8d44a406063 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -40,14 +41,15 @@
#include "iscsi_iser.h"
/* Register user buffer memory and initialize passive rdma
- * dto descriptor. Total data size is stored in
- * iser_task->data[ISER_DIR_IN].data_len
+ * dto descriptor. Data size is stored in
+ * task->data[ISER_DIR_IN].data_len, Protection size
+ * is stored in task->prot[ISER_DIR_IN].data_len
*/
-static int iser_prepare_read_cmd(struct iscsi_task *task,
- unsigned int edtl)
+static int iser_prepare_read_cmd(struct iscsi_task *task)
{
struct iscsi_iser_task *iser_task = task->dd_data;
+ struct iser_device *device = iser_task->ib_conn->device;
struct iser_regd_buf *regd_buf;
int err;
struct iser_hdr *hdr = &iser_task->desc.iser_header;
@@ -60,15 +62,18 @@ static int iser_prepare_read_cmd(struct iscsi_task *task,
if (err)
return err;
- if (edtl > iser_task->data[ISER_DIR_IN].data_len) {
- iser_err("Total data length: %ld, less than EDTL: "
- "%d, in READ cmd BHS itt: %d, conn: 0x%p\n",
- iser_task->data[ISER_DIR_IN].data_len, edtl,
- task->itt, iser_task->iser_conn);
- return -EINVAL;
+ if (scsi_prot_sg_count(iser_task->sc)) {
+ struct iser_data_buf *pbuf_in = &iser_task->prot[ISER_DIR_IN];
+
+ err = iser_dma_map_task_data(iser_task,
+ pbuf_in,
+ ISER_DIR_IN,
+ DMA_FROM_DEVICE);
+ if (err)
+ return err;
}
- err = iser_reg_rdma_mem(iser_task,ISER_DIR_IN);
+ err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_IN);
if (err) {
iser_err("Failed to set up Data-IN RDMA\n");
return err;
@@ -87,8 +92,9 @@ static int iser_prepare_read_cmd(struct iscsi_task *task,
}
/* Register user buffer memory and initialize passive rdma
- * dto descriptor. Total data size is stored in
- * task->data[ISER_DIR_OUT].data_len
+ * dto descriptor. Data size is stored in
+ * task->data[ISER_DIR_OUT].data_len, Protection size
+ * is stored at task->prot[ISER_DIR_OUT].data_len
*/
static int
iser_prepare_write_cmd(struct iscsi_task *task,
@@ -97,6 +103,7 @@ iser_prepare_write_cmd(struct iscsi_task *task,
unsigned int edtl)
{
struct iscsi_iser_task *iser_task = task->dd_data;
+ struct iser_device *device = iser_task->ib_conn->device;
struct iser_regd_buf *regd_buf;
int err;
struct iser_hdr *hdr = &iser_task->desc.iser_header;
@@ -110,15 +117,18 @@ iser_prepare_write_cmd(struct iscsi_task *task,
if (err)
return err;
- if (edtl > iser_task->data[ISER_DIR_OUT].data_len) {
- iser_err("Total data length: %ld, less than EDTL: %d, "
- "in WRITE cmd BHS itt: %d, conn: 0x%p\n",
- iser_task->data[ISER_DIR_OUT].data_len,
- edtl, task->itt, task->conn);
- return -EINVAL;
+ if (scsi_prot_sg_count(iser_task->sc)) {
+ struct iser_data_buf *pbuf_out = &iser_task->prot[ISER_DIR_OUT];
+
+ err = iser_dma_map_task_data(iser_task,
+ pbuf_out,
+ ISER_DIR_OUT,
+ DMA_TO_DEVICE);
+ if (err)
+ return err;
}
- err = iser_reg_rdma_mem(iser_task,ISER_DIR_OUT);
+ err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_OUT);
if (err != 0) {
iser_err("Failed to register write cmd RDMA mem\n");
return err;
@@ -169,8 +179,78 @@ static void iser_create_send_desc(struct iser_conn *ib_conn,
}
}
+static void iser_free_login_buf(struct iser_conn *ib_conn)
+{
+ if (!ib_conn->login_buf)
+ return;
+
+ if (ib_conn->login_req_dma)
+ ib_dma_unmap_single(ib_conn->device->ib_device,
+ ib_conn->login_req_dma,
+ ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);
+
+ if (ib_conn->login_resp_dma)
+ ib_dma_unmap_single(ib_conn->device->ib_device,
+ ib_conn->login_resp_dma,
+ ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
-static int iser_alloc_rx_descriptors(struct iser_conn *ib_conn)
+ kfree(ib_conn->login_buf);
+
+ /* make sure we never redo any unmapping */
+ ib_conn->login_req_dma = 0;
+ ib_conn->login_resp_dma = 0;
+ ib_conn->login_buf = NULL;
+}
+
+static int iser_alloc_login_buf(struct iser_conn *ib_conn)
+{
+ struct iser_device *device;
+ int req_err, resp_err;
+
+ BUG_ON(ib_conn->device == NULL);
+
+ device = ib_conn->device;
+
+ ib_conn->login_buf = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
+ ISER_RX_LOGIN_SIZE, GFP_KERNEL);
+ if (!ib_conn->login_buf)
+ goto out_err;
+
+ ib_conn->login_req_buf = ib_conn->login_buf;
+ ib_conn->login_resp_buf = ib_conn->login_buf +
+ ISCSI_DEF_MAX_RECV_SEG_LEN;
+
+ ib_conn->login_req_dma = ib_dma_map_single(ib_conn->device->ib_device,
+ (void *)ib_conn->login_req_buf,
+ ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);
+
+ ib_conn->login_resp_dma = ib_dma_map_single(ib_conn->device->ib_device,
+ (void *)ib_conn->login_resp_buf,
+ ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
+
+ req_err = ib_dma_mapping_error(device->ib_device,
+ ib_conn->login_req_dma);
+ resp_err = ib_dma_mapping_error(device->ib_device,
+ ib_conn->login_resp_dma);
+
+ if (req_err || resp_err) {
+ if (req_err)
+ ib_conn->login_req_dma = 0;
+ if (resp_err)
+ ib_conn->login_resp_dma = 0;
+ goto free_login_buf;
+ }
+ return 0;
+
+free_login_buf:
+ iser_free_login_buf(ib_conn);
+
+out_err:
+ iser_err("unable to alloc or map login buf\n");
+ return -ENOMEM;
+}
+
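The login-buffer setup above leans on the rule that every streaming DMA mapping must be checked before use. A generic stand-alone sketch of that map-and-verify step, using the plain DMA API rather than the ib_dma_* wrappers the driver uses:

#include <linux/dma-mapping.h>
#include <linux/slab.h>

static void *demo_map_buf(struct device *dev, size_t len, dma_addr_t *dma)
{
        void *buf = kmalloc(len, GFP_KERNEL);

        if (!buf)
                return NULL;

        *dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, *dma)) {     /* mapping can fail */
                kfree(buf);
                return NULL;
        }
        return buf;
}

Note how iser_alloc_login_buf() zeroes the dma cookies on the error path, so iser_free_login_buf() never unmaps an address that was not successfully mapped.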
+int iser_alloc_rx_descriptors(struct iser_conn *ib_conn, struct iscsi_session *session)
{
int i, j;
u64 dma_addr;
@@ -178,14 +258,24 @@ static int iser_alloc_rx_descriptors(struct iser_conn *ib_conn)
struct ib_sge *rx_sg;
struct iser_device *device = ib_conn->device;
- ib_conn->rx_descs = kmalloc(ISER_QP_MAX_RECV_DTOS *
+ ib_conn->qp_max_recv_dtos = session->cmds_max;
+ ib_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */
+ ib_conn->min_posted_rx = ib_conn->qp_max_recv_dtos >> 2;
+
+ if (device->iser_alloc_rdma_reg_res(ib_conn, session->scsi_cmds_max))
+ goto create_rdma_reg_res_failed;
+
+ if (iser_alloc_login_buf(ib_conn))
+ goto alloc_login_buf_fail;
+
+ ib_conn->rx_descs = kmalloc(session->cmds_max *
sizeof(struct iser_rx_desc), GFP_KERNEL);
if (!ib_conn->rx_descs)
goto rx_desc_alloc_fail;
rx_desc = ib_conn->rx_descs;
- for (i = 0; i < ISER_QP_MAX_RECV_DTOS; i++, rx_desc++) {
+ for (i = 0; i < ib_conn->qp_max_recv_dtos; i++, rx_desc++) {
dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc,
ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
if (ib_dma_mapping_error(device->ib_device, dma_addr))
@@ -206,10 +296,14 @@ rx_desc_dma_map_failed:
rx_desc = ib_conn->rx_descs;
for (j = 0; j < i; j++, rx_desc++)
ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
- ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+ ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
kfree(ib_conn->rx_descs);
ib_conn->rx_descs = NULL;
rx_desc_alloc_fail:
+ iser_free_login_buf(ib_conn);
+alloc_login_buf_fail:
+ device->iser_free_rdma_reg_res(ib_conn);
+create_rdma_reg_res_failed:
iser_err("failed allocating rx descriptors / data buffers\n");
return -ENOMEM;
}
@@ -220,41 +314,51 @@ void iser_free_rx_descriptors(struct iser_conn *ib_conn)
struct iser_rx_desc *rx_desc;
struct iser_device *device = ib_conn->device;
- if (ib_conn->login_buf) {
- ib_dma_unmap_single(device->ib_device, ib_conn->login_dma,
- ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
- kfree(ib_conn->login_buf);
- }
-
if (!ib_conn->rx_descs)
- return;
+ goto free_login_buf;
+
+ if (device->iser_free_rdma_reg_res)
+ device->iser_free_rdma_reg_res(ib_conn);
rx_desc = ib_conn->rx_descs;
- for (i = 0; i < ISER_QP_MAX_RECV_DTOS; i++, rx_desc++)
+ for (i = 0; i < ib_conn->qp_max_recv_dtos; i++, rx_desc++)
ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
- ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+ ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
kfree(ib_conn->rx_descs);
+ /* make sure we never redo any unmapping */
+ ib_conn->rx_descs = NULL;
+
+free_login_buf:
+ iser_free_login_buf(ib_conn);
}
-/**
- * iser_conn_set_full_featured_mode - (iSER API)
- */
-int iser_conn_set_full_featured_mode(struct iscsi_conn *conn)
+static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
{
- struct iscsi_iser_conn *iser_conn = conn->dd_data;
+ struct iser_conn *ib_conn = conn->dd_data;
+ struct iscsi_session *session = conn->session;
- iser_dbg("Initially post: %d\n", ISER_MIN_POSTED_RX);
+ iser_dbg("req op %x flags %x\n", req->opcode, req->flags);
+ /* check if this is the last login - going to full feature phase */
+ if ((req->flags & ISCSI_FULL_FEATURE_PHASE) != ISCSI_FULL_FEATURE_PHASE)
+ return 0;
- /* Check that there is no posted recv or send buffers left - */
- /* they must be consumed during the login phase */
- BUG_ON(iser_conn->ib_conn->post_recv_buf_count != 0);
- BUG_ON(atomic_read(&iser_conn->ib_conn->post_send_buf_count) != 0);
+ /*
+ * Check that there is one posted recv buffer (for the last login
+ * response) and no posted send buffers left - they must have been
+ * consumed during previous login phases.
+ */
+ WARN_ON(ib_conn->post_recv_buf_count != 1);
+ WARN_ON(atomic_read(&ib_conn->post_send_buf_count) != 0);
- if (iser_alloc_rx_descriptors(iser_conn->ib_conn))
- return -ENOMEM;
+ if (session->discovery_sess) {
+ iser_info("Discovery session, re-using login RX buffer\n");
+ return 0;
+ } else
+ iser_info("Normal session, posting batch of RX %d buffers\n",
+ ib_conn->min_posted_rx);
/* Initial post receive buffers */
- if (iser_post_recvm(iser_conn->ib_conn, ISER_MIN_POSTED_RX))
+ if (iser_post_recvm(ib_conn, ib_conn->min_posted_rx))
return -ENOMEM;
return 0;
@@ -266,12 +370,12 @@ int iser_conn_set_full_featured_mode(struct iscsi_conn *conn)
int iser_send_command(struct iscsi_conn *conn,
struct iscsi_task *task)
{
- struct iscsi_iser_conn *iser_conn = conn->dd_data;
+ struct iser_conn *ib_conn = conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data;
unsigned long edtl;
int err;
- struct iser_data_buf *data_buf;
- struct iscsi_cmd *hdr = (struct iscsi_cmd *)task->hdr;
+ struct iser_data_buf *data_buf, *prot_buf;
+ struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
struct scsi_cmnd *sc = task->sc;
struct iser_tx_desc *tx_desc = &iser_task->desc;
@@ -279,22 +383,31 @@ int iser_send_command(struct iscsi_conn *conn,
/* build the tx desc regd header and add it to the tx desc dto */
tx_desc->type = ISCSI_TX_SCSI_COMMAND;
- iser_create_send_desc(iser_conn->ib_conn, tx_desc);
+ iser_create_send_desc(ib_conn, tx_desc);
- if (hdr->flags & ISCSI_FLAG_CMD_READ)
+ if (hdr->flags & ISCSI_FLAG_CMD_READ) {
data_buf = &iser_task->data[ISER_DIR_IN];
- else
+ prot_buf = &iser_task->prot[ISER_DIR_IN];
+ } else {
data_buf = &iser_task->data[ISER_DIR_OUT];
+ prot_buf = &iser_task->prot[ISER_DIR_OUT];
+ }
if (scsi_sg_count(sc)) { /* using a scatter list */
data_buf->buf = scsi_sglist(sc);
data_buf->size = scsi_sg_count(sc);
}
-
data_buf->data_len = scsi_bufflen(sc);
+ if (scsi_prot_sg_count(sc)) {
+ prot_buf->buf = scsi_prot_sglist(sc);
+ prot_buf->size = scsi_prot_sg_count(sc);
+ prot_buf->data_len = data_buf->data_len >>
+ ilog2(sc->device->sector_size) * 8;
+ }
+
if (hdr->flags & ISCSI_FLAG_CMD_READ) {
- err = iser_prepare_read_cmd(task, edtl);
+ err = iser_prepare_read_cmd(task);
if (err)
goto send_command_error;
}
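A worked example of the protection sizing above (my numbers): for a 4 KB transfer on a 512-byte-sector device carrying one 8-byte DIF tuple per sector, the intended result is (4096 >> ilog2(512)) * 8 = 8 * 8 = 64 bytes of protection data. Note that as written, C precedence binds the multiplication first, so the expression actually evaluates as data_len >> (ilog2(sector_size) * 8); worth keeping in mind when reading this hunk.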
@@ -310,7 +423,7 @@ int iser_send_command(struct iscsi_conn *conn,
iser_task->status = ISER_TASK_STATUS_STARTED;
- err = iser_post_send(iser_conn->ib_conn, tx_desc);
+ err = iser_post_send(ib_conn, tx_desc);
if (!err)
return 0;
@@ -326,7 +439,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
struct iscsi_task *task,
struct iscsi_data *hdr)
{
- struct iscsi_iser_conn *iser_conn = conn->dd_data;
+ struct iser_conn *ib_conn = conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_tx_desc *tx_desc = NULL;
struct iser_regd_buf *regd_buf;
@@ -375,7 +488,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
itt, buf_offset, data_seg_len);
- err = iser_post_send(iser_conn->ib_conn, tx_desc);
+ err = iser_post_send(ib_conn, tx_desc);
if (!err)
return 0;
@@ -388,7 +501,7 @@ send_data_out_error:
int iser_send_control(struct iscsi_conn *conn,
struct iscsi_task *task)
{
- struct iscsi_iser_conn *iser_conn = conn->dd_data;
+ struct iser_conn *ib_conn = conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_tx_desc *mdesc = &iser_task->desc;
unsigned long data_seg_len;
@@ -397,9 +510,9 @@ int iser_send_control(struct iscsi_conn *conn,
/* build the tx desc regd header and add it to the tx desc dto */
mdesc->type = ISCSI_TX_CONTROL;
- iser_create_send_desc(iser_conn->ib_conn, mdesc);
+ iser_create_send_desc(ib_conn, mdesc);
- device = iser_conn->ib_conn->device;
+ device = ib_conn->device;
data_seg_len = ntoh24(task->hdr->dlength);
@@ -409,21 +522,35 @@ int iser_send_control(struct iscsi_conn *conn,
iser_err("data present on non login task!!!\n");
goto send_control_error;
}
- memcpy(iser_conn->ib_conn->login_buf, task->data,
- task->data_count);
- tx_dsg->addr = iser_conn->ib_conn->login_dma;
- tx_dsg->length = data_seg_len;
+
+ ib_dma_sync_single_for_cpu(device->ib_device,
+ ib_conn->login_req_dma, task->data_count,
+ DMA_TO_DEVICE);
+
+ memcpy(ib_conn->login_req_buf, task->data, task->data_count);
+
+ ib_dma_sync_single_for_device(device->ib_device,
+ ib_conn->login_req_dma, task->data_count,
+ DMA_TO_DEVICE);
+
+ tx_dsg->addr = ib_conn->login_req_dma;
+ tx_dsg->length = task->data_count;
tx_dsg->lkey = device->mr->lkey;
mdesc->num_sge = 2;
}
if (task == conn->login_task) {
- err = iser_post_recvl(iser_conn->ib_conn);
+ iser_dbg("op %x dsl %lx, posting login rx buffer\n",
+ task->hdr->opcode, data_seg_len);
+ err = iser_post_recvl(ib_conn);
+ if (err)
+ goto send_control_error;
+ err = iser_post_rx_bufs(conn, task->hdr);
if (err)
goto send_control_error;
}
- err = iser_post_send(iser_conn->ib_conn, mdesc);
+ err = iser_post_send(ib_conn, mdesc);
if (!err)
return 0;
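The memcpy into the login request buffer is now bracketed by DMA sync calls, which is mandatory for a long-lived streaming mapping: the CPU may only touch the buffer between sync_for_cpu and sync_for_device. A generic sketch of the sequence (plain DMA API, illustrative names):

#include <linux/dma-mapping.h>
#include <linux/string.h>

static void demo_fill_mapped_buf(struct device *dev, dma_addr_t dma,
                                 void *buf, const void *src, size_t len)
{
        dma_sync_single_for_cpu(dev, dma, len, DMA_TO_DEVICE);
        memcpy(buf, src, len);          /* CPU owns the buffer here */
        dma_sync_single_for_device(dev, dma, len, DMA_TO_DEVICE);
}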
@@ -439,14 +566,13 @@ void iser_rcv_completion(struct iser_rx_desc *rx_desc,
unsigned long rx_xfer_len,
struct iser_conn *ib_conn)
{
- struct iscsi_iser_conn *conn = ib_conn->iser_conn;
struct iscsi_hdr *hdr;
u64 rx_dma;
int rx_buflen, outstanding, count, err;
/* differentiate between login to all other PDUs */
- if ((char *)rx_desc == ib_conn->login_buf) {
- rx_dma = ib_conn->login_dma;
+ if ((char *)rx_desc == ib_conn->login_resp_buf) {
+ rx_dma = ib_conn->login_resp_dma;
rx_buflen = ISER_RX_LOGIN_SIZE;
} else {
rx_dma = rx_desc->dma_addr;
@@ -461,25 +587,25 @@ void iser_rcv_completion(struct iser_rx_desc *rx_desc,
iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
hdr->itt, (int)(rx_xfer_len - ISER_HEADERS_LEN));
- iscsi_iser_recv(conn->iscsi_conn, hdr,
- rx_desc->data, rx_xfer_len - ISER_HEADERS_LEN);
+ iscsi_iser_recv(ib_conn->iscsi_conn, hdr, rx_desc->data,
+ rx_xfer_len - ISER_HEADERS_LEN);
ib_dma_sync_single_for_device(ib_conn->device->ib_device, rx_dma,
- rx_buflen, DMA_FROM_DEVICE);
+ rx_buflen, DMA_FROM_DEVICE);
/* decrementing conn->post_recv_buf_count only --after-- freeing the *
* task eliminates the need to worry on tasks which are completed in *
* parallel to the execution of iser_conn_term. So the code that waits *
* for the posted rx bufs refcount to become zero handles everything */
- conn->ib_conn->post_recv_buf_count--;
+ ib_conn->post_recv_buf_count--;
- if (rx_dma == ib_conn->login_dma)
+ if (rx_dma == ib_conn->login_resp_dma)
return;
outstanding = ib_conn->post_recv_buf_count;
- if (outstanding + ISER_MIN_POSTED_RX <= ISER_QP_MAX_RECV_DTOS) {
- count = min(ISER_QP_MAX_RECV_DTOS - outstanding,
- ISER_MIN_POSTED_RX);
+ if (outstanding + ib_conn->min_posted_rx <= ib_conn->qp_max_recv_dtos) {
+ count = min(ib_conn->qp_max_recv_dtos - outstanding,
+ ib_conn->min_posted_rx);
err = iser_post_recvm(ib_conn, count);
if (err)
iser_err("posting %d rx bufs err %d\n", count, err);
@@ -496,11 +622,12 @@ void iser_snd_completion(struct iser_tx_desc *tx_desc,
ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
ISER_HEADERS_LEN, DMA_TO_DEVICE);
kmem_cache_free(ig.desc_cache, tx_desc);
+ tx_desc = NULL;
}
atomic_dec(&ib_conn->post_send_buf_count);
- if (tx_desc->type == ISCSI_TX_CONTROL) {
+ if (tx_desc && tx_desc->type == ISCSI_TX_CONTROL) {
/* this arithmetic is legal by libiscsi dd_data allocation */
task = (void *) ((long)(void *)tx_desc -
sizeof(struct iscsi_task));
@@ -520,6 +647,9 @@ void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
iser_task->data[ISER_DIR_IN].data_len = 0;
iser_task->data[ISER_DIR_OUT].data_len = 0;
+ iser_task->prot[ISER_DIR_IN].data_len = 0;
+ iser_task->prot[ISER_DIR_OUT].data_len = 0;
+
memset(&iser_task->rdma_regd[ISER_DIR_IN], 0,
sizeof(struct iser_regd_buf));
memset(&iser_task->rdma_regd[ISER_DIR_OUT], 0,
@@ -528,34 +658,63 @@ void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
{
- int is_rdma_aligned = 1;
- struct iser_regd_buf *regd;
+ struct iser_device *device = iser_task->ib_conn->device;
+ int is_rdma_data_aligned = 1;
+ int is_rdma_prot_aligned = 1;
+ int prot_count = scsi_prot_sg_count(iser_task->sc);
/* if we were reading, copy back to unaligned sglist,
* anyway dma_unmap and free the copy
*/
if (iser_task->data_copy[ISER_DIR_IN].copy_buf != NULL) {
- is_rdma_aligned = 0;
- iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_IN);
+ is_rdma_data_aligned = 0;
+ iser_finalize_rdma_unaligned_sg(iser_task,
+ &iser_task->data[ISER_DIR_IN],
+ &iser_task->data_copy[ISER_DIR_IN],
+ ISER_DIR_IN);
}
+
if (iser_task->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
- is_rdma_aligned = 0;
- iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_OUT);
+ is_rdma_data_aligned = 0;
+ iser_finalize_rdma_unaligned_sg(iser_task,
+ &iser_task->data[ISER_DIR_OUT],
+ &iser_task->data_copy[ISER_DIR_OUT],
+ ISER_DIR_OUT);
+ }
+
+ if (iser_task->prot_copy[ISER_DIR_IN].copy_buf != NULL) {
+ is_rdma_prot_aligned = 0;
+ iser_finalize_rdma_unaligned_sg(iser_task,
+ &iser_task->prot[ISER_DIR_IN],
+ &iser_task->prot_copy[ISER_DIR_IN],
+ ISER_DIR_IN);
+ }
+
+ if (iser_task->prot_copy[ISER_DIR_OUT].copy_buf != NULL) {
+ is_rdma_prot_aligned = 0;
+ iser_finalize_rdma_unaligned_sg(iser_task,
+ &iser_task->prot[ISER_DIR_OUT],
+ &iser_task->prot_copy[ISER_DIR_OUT],
+ ISER_DIR_OUT);
}
if (iser_task->dir[ISER_DIR_IN]) {
- regd = &iser_task->rdma_regd[ISER_DIR_IN];
- if (regd->reg.is_fmr)
- iser_unreg_mem(&regd->reg);
+ device->iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
+ if (is_rdma_data_aligned)
+ iser_dma_unmap_task_data(iser_task,
+ &iser_task->data[ISER_DIR_IN]);
+ if (prot_count && is_rdma_prot_aligned)
+ iser_dma_unmap_task_data(iser_task,
+ &iser_task->prot[ISER_DIR_IN]);
}
if (iser_task->dir[ISER_DIR_OUT]) {
- regd = &iser_task->rdma_regd[ISER_DIR_OUT];
- if (regd->reg.is_fmr)
- iser_unreg_mem(&regd->reg);
+ device->iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
+ if (is_rdma_data_aligned)
+ iser_dma_unmap_task_data(iser_task,
+ &iser_task->data[ISER_DIR_OUT]);
+ if (prot_count && is_rdma_prot_aligned)
+ iser_dma_unmap_task_data(iser_task,
+ &iser_task->prot[ISER_DIR_OUT]);
}
-
- /* if the data was unaligned, it was already unmapped and then copied */
- if (is_rdma_aligned)
- iser_dma_unmap_task_data(iser_task);
}
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index fb88d6896b6..47acd3ad3a1 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -44,13 +45,19 @@
* iser_start_rdma_unaligned_sg
*/
static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
+ struct iser_data_buf *data,
+ struct iser_data_buf *data_copy,
enum iser_data_dir cmd_dir)
{
- int dma_nents;
- struct ib_device *dev;
+ struct ib_device *dev = iser_task->ib_conn->device->ib_device;
+ struct scatterlist *sgl = (struct scatterlist *)data->buf;
+ struct scatterlist *sg;
char *mem = NULL;
- struct iser_data_buf *data = &iser_task->data[cmd_dir];
- unsigned long cmd_data_len = data->data_len;
+ unsigned long cmd_data_len = 0;
+ int dma_nents, i;
+
+ for_each_sg(sgl, sg, data->size, i)
+ cmd_data_len += ib_sg_dma_len(dev, sg);
if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
mem = (void *)__get_free_pages(GFP_ATOMIC,
@@ -60,61 +67,58 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
if (mem == NULL) {
iser_err("Failed to allocate mem size %d %d for copying sglist\n",
- data->size,(int)cmd_data_len);
+ data->size, (int)cmd_data_len);
return -ENOMEM;
}
if (cmd_dir == ISER_DIR_OUT) {
/* copy the unaligned sg the buffer which is used for RDMA */
- struct scatterlist *sgl = (struct scatterlist *)data->buf;
- struct scatterlist *sg;
int i;
char *p, *from;
+ sgl = (struct scatterlist *)data->buf;
p = mem;
for_each_sg(sgl, sg, data->size, i) {
- from = kmap_atomic(sg_page(sg), KM_USER0);
+ from = kmap_atomic(sg_page(sg));
memcpy(p,
from + sg->offset,
sg->length);
- kunmap_atomic(from, KM_USER0);
+ kunmap_atomic(from);
p += sg->length;
}
}
- sg_init_one(&iser_task->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
- iser_task->data_copy[cmd_dir].buf =
- &iser_task->data_copy[cmd_dir].sg_single;
- iser_task->data_copy[cmd_dir].size = 1;
+ sg_init_one(&data_copy->sg_single, mem, cmd_data_len);
+ data_copy->buf = &data_copy->sg_single;
+ data_copy->size = 1;
+ data_copy->copy_buf = mem;
- iser_task->data_copy[cmd_dir].copy_buf = mem;
-
- dev = iser_task->iser_conn->ib_conn->device->ib_device;
- dma_nents = ib_dma_map_sg(dev,
- &iser_task->data_copy[cmd_dir].sg_single,
- 1,
+ dma_nents = ib_dma_map_sg(dev, &data_copy->sg_single, 1,
(cmd_dir == ISER_DIR_OUT) ?
DMA_TO_DEVICE : DMA_FROM_DEVICE);
BUG_ON(dma_nents == 0);
- iser_task->data_copy[cmd_dir].dma_nents = dma_nents;
+ data_copy->dma_nents = dma_nents;
+ data_copy->data_len = cmd_data_len;
+
return 0;
}
/**
* iser_finalize_rdma_unaligned_sg
*/
+
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
- enum iser_data_dir cmd_dir)
+ struct iser_data_buf *data,
+ struct iser_data_buf *data_copy,
+ enum iser_data_dir cmd_dir)
{
struct ib_device *dev;
- struct iser_data_buf *mem_copy;
unsigned long cmd_data_len;
- dev = iser_task->iser_conn->ib_conn->device->ib_device;
- mem_copy = &iser_task->data_copy[cmd_dir];
+ dev = iser_task->ib_conn->device->ib_device;
- ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1,
+ ib_dma_unmap_sg(dev, &data_copy->sg_single, 1,
(cmd_dir == ISER_DIR_OUT) ?
DMA_TO_DEVICE : DMA_FROM_DEVICE);
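The bounce-buffer copy above also drops the KM_USER0/KM_SOFTIRQ0 slot arguments, reflecting the stackless kmap_atomic() API. A self-contained sketch of the linearizing copy loop (hypothetical helper, same shape as the ISER_DIR_OUT branch):

#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

static void demo_copy_from_sg(char *dst, struct scatterlist *sgl, int nents)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                char *src = kmap_atomic(sg_page(sg));   /* may be highmem */

                memcpy(dst, src + sg->offset, sg->length);
                kunmap_atomic(src);
                dst += sg->length;
        }
}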
@@ -126,31 +130,31 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
int i;
/* copy back read RDMA to unaligned sg */
- mem = mem_copy->copy_buf;
+ mem = data_copy->copy_buf;
- sgl = (struct scatterlist *)iser_task->data[ISER_DIR_IN].buf;
- sg_size = iser_task->data[ISER_DIR_IN].size;
+ sgl = (struct scatterlist *)data->buf;
+ sg_size = data->size;
p = mem;
for_each_sg(sgl, sg, sg_size, i) {
- to = kmap_atomic(sg_page(sg), KM_SOFTIRQ0);
+ to = kmap_atomic(sg_page(sg));
memcpy(to + sg->offset,
p,
sg->length);
- kunmap_atomic(to, KM_SOFTIRQ0);
+ kunmap_atomic(to);
p += sg->length;
}
}
- cmd_data_len = iser_task->data[cmd_dir].data_len;
+ cmd_data_len = data->data_len;
if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
- free_pages((unsigned long)mem_copy->copy_buf,
+ free_pages((unsigned long)data_copy->copy_buf,
ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
else
- kfree(mem_copy->copy_buf);
+ kfree(data_copy->copy_buf);
- mem_copy->copy_buf = NULL;
+ data_copy->copy_buf = NULL;
}
#define IS_4K_ALIGNED(addr) ((((unsigned long)addr) & ~MASK_4K) == 0)
@@ -169,8 +173,8 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
*/
static int iser_sg_to_page_vec(struct iser_data_buf *data,
- struct iser_page_vec *page_vec,
- struct ib_device *ibdev)
+ struct ib_device *ibdev, u64 *pages,
+ int *offset, int *data_size)
{
struct scatterlist *sg, *sgl = (struct scatterlist *)data->buf;
u64 start_addr, end_addr, page, chunk_start = 0;
@@ -179,7 +183,7 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
int i, new_chunk, cur_page, last_ent = data->dma_nents - 1;
/* compute the offset of first element */
- page_vec->offset = (u64) sgl[0].offset & ~MASK_4K;
+ *offset = (u64) sgl[0].offset & ~MASK_4K;
new_chunk = 1;
cur_page = 0;
@@ -203,13 +207,14 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
which might be unaligned */
page = chunk_start & MASK_4K;
do {
- page_vec->pages[cur_page++] = page;
+ pages[cur_page++] = page;
page += SIZE_4K;
} while (page < end_addr);
}
- page_vec->data_size = total_sz;
- iser_dbg("page_vec->data_size:%d cur_page %d\n", page_vec->data_size,cur_page);
+ *data_size = total_sz;
+ iser_dbg("page_vec->data_size:%d cur_page %d\n",
+ *data_size, cur_page);
return cur_page;
}
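A worked example of the expansion loop above (my numbers): a single DMA chunk starting at 0x1000 with length 0x2800 (10 KB) gives end_addr = 0x3800; the do/while then emits pages 0x1000, 0x2000, and 0x3000, stopping because 0x4000 is not below end_addr, so three 4 KB pages cover the chunk.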
@@ -266,11 +271,8 @@ static void iser_data_buf_dump(struct iser_data_buf *data,
struct scatterlist *sg;
int i;
- if (iser_debug_level == 0)
- return;
-
for_each_sg(sgl, sg, data->dma_nents, i)
- iser_warn("sg[%d] dma_addr:0x%lX page:0x%p "
+ iser_dbg("sg[%d] dma_addr:0x%lX page:0x%p "
"off:0x%x sz:0x%x dma_len:0x%x\n",
i, (unsigned long)ib_sg_dma_address(ibdev, sg),
sg_page(sg), sg->offset,
@@ -297,8 +299,10 @@ static void iser_page_vec_build(struct iser_data_buf *data,
page_vec->offset = 0;
iser_dbg("Translating sg sz: %d\n", data->dma_nents);
- page_vec_len = iser_sg_to_page_vec(data, page_vec, ibdev);
- iser_dbg("sg len %d page_vec_len %d\n", data->dma_nents,page_vec_len);
+ page_vec_len = iser_sg_to_page_vec(data, ibdev, page_vec->pages,
+ &page_vec->offset,
+ &page_vec->data_size);
+ iser_dbg("sg len %d page_vec_len %d\n", data->dma_nents, page_vec_len);
page_vec->length = page_vec_len;
@@ -318,7 +322,7 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
struct ib_device *dev;
iser_task->dir[iser_dir] = 1;
- dev = iser_task->iser_conn->ib_conn->device->ib_device;
+ dev = iser_task->ib_conn->device->ib_device;
data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
if (data->dma_nents == 0) {
@@ -328,35 +332,52 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
return 0;
}
-void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task)
+void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
+ struct iser_data_buf *data)
{
struct ib_device *dev;
- struct iser_data_buf *data;
- dev = iser_task->iser_conn->ib_conn->device->ib_device;
+ dev = iser_task->ib_conn->device->ib_device;
+ ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
+}
- if (iser_task->dir[ISER_DIR_IN]) {
- data = &iser_task->data[ISER_DIR_IN];
- ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
- }
+static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
+ struct ib_device *ibdev,
+ struct iser_data_buf *mem,
+ struct iser_data_buf *mem_copy,
+ enum iser_data_dir cmd_dir,
+ int aligned_len)
+{
+ struct iscsi_conn *iscsi_conn = iser_task->ib_conn->iscsi_conn;
- if (iser_task->dir[ISER_DIR_OUT]) {
- data = &iser_task->data[ISER_DIR_OUT];
- ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE);
- }
+ iscsi_conn->fmr_unalign_cnt++;
+ iser_warn("rdma alignment violation (%d/%d aligned) or FMR not supported\n",
+ aligned_len, mem->size);
+
+ if (iser_debug_level > 0)
+ iser_data_buf_dump(mem, ibdev);
+
+ /* unmap the command data before accessing it */
+ iser_dma_unmap_task_data(iser_task, mem);
+
+ /* allocate a copy buffer; if we are writing, copy the
+ * unaligned scatterlist, then dma map the copy */
+ if (iser_start_rdma_unaligned_sg(iser_task, mem, mem_copy, cmd_dir) != 0)
+ return -ENOMEM;
+
+ return 0;
}
/**
- * iser_reg_rdma_mem - Registers memory intended for RDMA,
- * obtaining rkey and va
+ * iser_reg_rdma_mem_fmr - Registers memory intended for RDMA,
+ * using FMR (if possible) obtaining rkey and va
*
* returns 0 on success, errno code on failure
*/
-int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
- enum iser_data_dir cmd_dir)
+int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
+ enum iser_data_dir cmd_dir)
{
- struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
- struct iser_conn *ib_conn = iser_task->iser_conn->ib_conn;
+ struct iser_conn *ib_conn = iser_task->ib_conn;
struct iser_device *device = ib_conn->device;
struct ib_device *ibdev = device->ib_device;
struct iser_data_buf *mem = &iser_task->data[cmd_dir];
@@ -370,18 +391,13 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
aligned_len = iser_data_buf_aligned_len(mem, ibdev);
if (aligned_len != mem->dma_nents) {
- iscsi_conn->fmr_unalign_cnt++;
- iser_warn("rdma alignment violation %d/%d aligned\n",
- aligned_len, mem->size);
- iser_data_buf_dump(mem, ibdev);
-
- /* unmap the command data before accessing it */
- iser_dma_unmap_task_data(iser_task);
-
- /* allocate copy buf, if we are writing, copy the */
- /* unaligned scatterlist, dma map the copy */
- if (iser_start_rdma_unaligned_sg(iser_task, cmd_dir) != 0)
- return -ENOMEM;
+ err = fall_to_bounce_buf(iser_task, ibdev, mem,
+ &iser_task->data_copy[cmd_dir],
+ cmd_dir, aligned_len);
+ if (err) {
+ iser_err("failed to allocate bounce buffer\n");
+ return err;
+ }
mem = &iser_task->data_copy[cmd_dir];
}
@@ -393,7 +409,7 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
regd_buf->reg.rkey = device->mr->rkey;
regd_buf->reg.len = ib_sg_dma_len(ibdev, &sg[0]);
regd_buf->reg.va = ib_sg_dma_address(ibdev, &sg[0]);
- regd_buf->reg.is_fmr = 0;
+ regd_buf->reg.is_mr = 0;
iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X "
"va: 0x%08lX sz: %ld]\n",
@@ -402,21 +418,383 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
(unsigned long)regd_buf->reg.va,
(unsigned long)regd_buf->reg.len);
} else { /* use FMR for multiple dma entries */
- iser_page_vec_build(mem, ib_conn->page_vec, ibdev);
- err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
- if (err) {
+ iser_page_vec_build(mem, ib_conn->fmr.page_vec, ibdev);
+ err = iser_reg_page_vec(ib_conn, ib_conn->fmr.page_vec,
+ &regd_buf->reg);
+ if (err && err != -EAGAIN) {
iser_data_buf_dump(mem, ibdev);
iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
mem->dma_nents,
ntoh24(iser_task->desc.iscsi_header.dlength));
iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
- ib_conn->page_vec->data_size, ib_conn->page_vec->length,
- ib_conn->page_vec->offset);
- for (i=0 ; i<ib_conn->page_vec->length ; i++)
+ ib_conn->fmr.page_vec->data_size,
+ ib_conn->fmr.page_vec->length,
+ ib_conn->fmr.page_vec->offset);
+ for (i = 0; i < ib_conn->fmr.page_vec->length; i++)
iser_err("page_vec[%d] = 0x%llx\n", i,
- (unsigned long long) ib_conn->page_vec->pages[i]);
+ (unsigned long long) ib_conn->fmr.page_vec->pages[i]);
+ }
+ if (err)
return err;
+ }
+ return 0;
+}
+
+static inline enum ib_t10_dif_type
+scsi2ib_prot_type(unsigned char prot_type)
+{
+ switch (prot_type) {
+ case SCSI_PROT_DIF_TYPE0:
+ return IB_T10DIF_NONE;
+ case SCSI_PROT_DIF_TYPE1:
+ return IB_T10DIF_TYPE1;
+ case SCSI_PROT_DIF_TYPE2:
+ return IB_T10DIF_TYPE2;
+ case SCSI_PROT_DIF_TYPE3:
+ return IB_T10DIF_TYPE3;
+ default:
+ return IB_T10DIF_NONE;
+ }
+}
+
+
+static int
+iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs)
+{
+ unsigned char scsi_ptype = scsi_get_prot_type(sc);
+
+ sig_attrs->mem.sig_type = IB_SIG_TYPE_T10_DIF;
+ sig_attrs->wire.sig_type = IB_SIG_TYPE_T10_DIF;
+ sig_attrs->mem.sig.dif.pi_interval = sc->device->sector_size;
+ sig_attrs->wire.sig.dif.pi_interval = sc->device->sector_size;
+
+ switch (scsi_get_prot_op(sc)) {
+ case SCSI_PROT_WRITE_INSERT:
+ case SCSI_PROT_READ_STRIP:
+ sig_attrs->mem.sig.dif.type = IB_T10DIF_NONE;
+ sig_attrs->wire.sig.dif.type = scsi2ib_prot_type(scsi_ptype);
+ sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
+ sig_attrs->wire.sig.dif.ref_tag = scsi_get_lba(sc) &
+ 0xffffffff;
+ break;
+ case SCSI_PROT_READ_INSERT:
+ case SCSI_PROT_WRITE_STRIP:
+ sig_attrs->mem.sig.dif.type = scsi2ib_prot_type(scsi_ptype);
+ sig_attrs->mem.sig.dif.bg_type = IB_T10DIF_CRC;
+ sig_attrs->mem.sig.dif.ref_tag = scsi_get_lba(sc) &
+ 0xffffffff;
+ sig_attrs->wire.sig.dif.type = IB_T10DIF_NONE;
+ break;
+ case SCSI_PROT_READ_PASS:
+ case SCSI_PROT_WRITE_PASS:
+ sig_attrs->mem.sig.dif.type = scsi2ib_prot_type(scsi_ptype);
+ sig_attrs->mem.sig.dif.bg_type = IB_T10DIF_CRC;
+ sig_attrs->mem.sig.dif.ref_tag = scsi_get_lba(sc) &
+ 0xffffffff;
+ sig_attrs->wire.sig.dif.type = scsi2ib_prot_type(scsi_ptype);
+ sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
+ sig_attrs->wire.sig.dif.ref_tag = scsi_get_lba(sc) &
+ 0xffffffff;
+ break;
+ default:
+ iser_err("Unsupported PI operation %d\n",
+ scsi_get_prot_op(sc));
+ return -EINVAL;
+ }
+ return 0;
+}
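A standalone print-out, with hypothetical labels, summarizing which DIF domain the switch above configures with protection information for each class of SCSI protection operation.

#include <stdio.h>

int main(void)
{
	/* INSERT/STRIP put PI on exactly one side; PASS keeps it on both */
	puts("WRITE_INSERT/READ_STRIP: mem=NONE    wire=TYPE<n>");
	puts("READ_INSERT/WRITE_STRIP: mem=TYPE<n> wire=NONE");
	puts("READ_PASS/WRITE_PASS:    mem=TYPE<n> wire=TYPE<n>");
	return 0;
}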
+
+static int
+iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
+{
+ switch (scsi_get_prot_type(sc)) {
+ case SCSI_PROT_DIF_TYPE0:
+ *mask = 0x0;
+ break;
+ case SCSI_PROT_DIF_TYPE1:
+ case SCSI_PROT_DIF_TYPE2:
+ *mask = ISER_CHECK_GUARD | ISER_CHECK_REFTAG;
+ break;
+ case SCSI_PROT_DIF_TYPE3:
+ *mask = ISER_CHECK_GUARD;
+ break;
+ default:
+ iser_err("Unsupported protection type %d\n",
+ scsi_get_prot_type(sc));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
+ struct fast_reg_descriptor *desc, struct ib_sge *data_sge,
+ struct ib_sge *prot_sge, struct ib_sge *sig_sge)
+{
+ struct iser_conn *ib_conn = iser_task->ib_conn;
+ struct iser_pi_context *pi_ctx = desc->pi_ctx;
+ struct ib_send_wr sig_wr, inv_wr;
+ struct ib_send_wr *bad_wr, *wr = NULL;
+ struct ib_sig_attrs sig_attrs;
+ int ret;
+ u32 key;
+
+ memset(&sig_attrs, 0, sizeof(sig_attrs));
+ ret = iser_set_sig_attrs(iser_task->sc, &sig_attrs);
+ if (ret)
+ goto err;
+
+ ret = iser_set_prot_checks(iser_task->sc, &sig_attrs.check_mask);
+ if (ret)
+ goto err;
+
+ if (!(desc->reg_indicators & ISER_SIG_KEY_VALID)) {
+ memset(&inv_wr, 0, sizeof(inv_wr));
+ inv_wr.opcode = IB_WR_LOCAL_INV;
+ inv_wr.wr_id = ISER_FASTREG_LI_WRID;
+ inv_wr.ex.invalidate_rkey = pi_ctx->sig_mr->rkey;
+ wr = &inv_wr;
+ /* Bump the key */
+ key = (u8)(pi_ctx->sig_mr->rkey & 0x000000FF);
+ ib_update_fast_reg_key(pi_ctx->sig_mr, ++key);
+ }
+
+ memset(&sig_wr, 0, sizeof(sig_wr));
+ sig_wr.opcode = IB_WR_REG_SIG_MR;
+ sig_wr.wr_id = ISER_FASTREG_LI_WRID;
+ sig_wr.sg_list = data_sge;
+ sig_wr.num_sge = 1;
+ sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
+ sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
+ if (scsi_prot_sg_count(iser_task->sc))
+ sig_wr.wr.sig_handover.prot = prot_sge;
+ sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_REMOTE_WRITE;
+
+ if (!wr)
+ wr = &sig_wr;
+ else
+ wr->next = &sig_wr;
+
+ ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
+ if (ret) {
+ iser_err("reg_sig_mr failed, ret:%d\n", ret);
+ goto err;
+ }
+ desc->reg_indicators &= ~ISER_SIG_KEY_VALID;
+
+ sig_sge->lkey = pi_ctx->sig_mr->lkey;
+ sig_sge->addr = 0;
+ sig_sge->length = data_sge->length + prot_sge->length;
+ if (scsi_get_prot_op(iser_task->sc) == SCSI_PROT_WRITE_INSERT ||
+ scsi_get_prot_op(iser_task->sc) == SCSI_PROT_READ_STRIP) {
+ sig_sge->length += (data_sge->length /
+ iser_task->sc->device->sector_size) * 8;
+ }
+
+ iser_dbg("sig_sge: addr: 0x%llx length: %u lkey: 0x%x\n",
+ sig_sge->addr, sig_sge->length,
+ sig_sge->lkey);
+err:
+ return ret;
+}
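A worked example, with hypothetical sizes, of the sig_sge length set above: for WRITE_INSERT/READ_STRIP the HCA generates 8 bytes of PI per sector on the wire, so the signature region exceeds the plain data length.

#include <stdio.h>

int main(void)
{
	unsigned data_len = 4096, prot_len = 0, sector_size = 512;
	unsigned sig_len = data_len + prot_len +
			   (data_len / sector_size) * 8;	/* 8B PI/sector */

	printf("sig_sge.length = %u\n", sig_len);	/* 4096 + 64 = 4160 */
	return 0;
}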
+
+static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
+ struct iser_regd_buf *regd_buf,
+ struct iser_data_buf *mem,
+ enum iser_reg_indicator ind,
+ struct ib_sge *sge)
+{
+ struct fast_reg_descriptor *desc = regd_buf->reg.mem_h;
+ struct iser_conn *ib_conn = iser_task->ib_conn;
+ struct iser_device *device = ib_conn->device;
+ struct ib_device *ibdev = device->ib_device;
+ struct ib_mr *mr;
+ struct ib_fast_reg_page_list *frpl;
+ struct ib_send_wr fastreg_wr, inv_wr;
+ struct ib_send_wr *bad_wr, *wr = NULL;
+ u8 key;
+ int ret, offset, size, plen;
+
+ /* if there is a single dma entry, the dma mr suffices */
+ if (mem->dma_nents == 1) {
+ struct scatterlist *sg = (struct scatterlist *)mem->buf;
+
+ sge->lkey = device->mr->lkey;
+ sge->addr = ib_sg_dma_address(ibdev, &sg[0]);
+ sge->length = ib_sg_dma_len(ibdev, &sg[0]);
+
+ iser_dbg("Single DMA entry: lkey=0x%x, addr=0x%llx, length=0x%x\n",
+ sge->lkey, sge->addr, sge->length);
+ return 0;
+ }
+
+ if (ind == ISER_DATA_KEY_VALID) {
+ mr = desc->data_mr;
+ frpl = desc->data_frpl;
+ } else {
+ mr = desc->pi_ctx->prot_mr;
+ frpl = desc->pi_ctx->prot_frpl;
+ }
+
+ plen = iser_sg_to_page_vec(mem, device->ib_device, frpl->page_list,
+ &offset, &size);
+ if (plen * SIZE_4K < size) {
+ iser_err("fast reg page_list too short to hold this SG\n");
+ return -EINVAL;
+ }
+
+ if (!(desc->reg_indicators & ind)) {
+ memset(&inv_wr, 0, sizeof(inv_wr));
+ inv_wr.wr_id = ISER_FASTREG_LI_WRID;
+ inv_wr.opcode = IB_WR_LOCAL_INV;
+ inv_wr.ex.invalidate_rkey = mr->rkey;
+ wr = &inv_wr;
+ /* Bump the key */
+ key = (u8)(mr->rkey & 0x000000FF);
+ ib_update_fast_reg_key(mr, ++key);
+ }
+
+ /* Prepare FASTREG WR */
+ memset(&fastreg_wr, 0, sizeof(fastreg_wr));
+ fastreg_wr.wr_id = ISER_FASTREG_LI_WRID;
+ fastreg_wr.opcode = IB_WR_FAST_REG_MR;
+ fastreg_wr.wr.fast_reg.iova_start = frpl->page_list[0] + offset;
+ fastreg_wr.wr.fast_reg.page_list = frpl;
+ fastreg_wr.wr.fast_reg.page_list_len = plen;
+ fastreg_wr.wr.fast_reg.page_shift = SHIFT_4K;
+ fastreg_wr.wr.fast_reg.length = size;
+ fastreg_wr.wr.fast_reg.rkey = mr->rkey;
+ fastreg_wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_READ);
+
+ if (!wr)
+ wr = &fastreg_wr;
+ else
+ wr->next = &fastreg_wr;
+
+ ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
+ if (ret) {
+ iser_err("fast registration failed, ret:%d\n", ret);
+ return ret;
+ }
+ desc->reg_indicators &= ~ind;
+
+ sge->lkey = mr->lkey;
+ sge->addr = frpl->page_list[0] + offset;
+ sge->length = size;
+
+ return ret;
+}
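A standalone model, with a made-up rkey, of the key-bump step above: the low 8 bits of an rkey are the consumer-owned key, and incrementing them before re-registration invalidates stale remote references to the previous mapping.

#include <stdio.h>
#include <stdint.h>

/* hypothetical stand-in for ib_update_fast_reg_key() */
static uint32_t update_fast_reg_key(uint32_t rkey, uint8_t newkey)
{
	return (rkey & 0xffffff00) | newkey;	/* keep index bits, swap key */
}

int main(void)
{
	uint32_t rkey = 0x00abcdff;
	uint8_t key = (uint8_t)(rkey & 0xff);

	rkey = update_fast_reg_key(rkey, ++key);	/* 0xff wraps to 0x00 */
	printf("new rkey: 0x%08x\n", (unsigned)rkey);	/* 0x00abcd00 */
	return 0;
}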
+
+/**
+ * iser_reg_rdma_mem_fastreg - Registers memory intended for RDMA,
+ * using Fast Registration WR (if possible) obtaining rkey and va
+ *
+ * returns 0 on success, errno code on failure
+ */
+int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task,
+ enum iser_data_dir cmd_dir)
+{
+ struct iser_conn *ib_conn = iser_task->ib_conn;
+ struct iser_device *device = ib_conn->device;
+ struct ib_device *ibdev = device->ib_device;
+ struct iser_data_buf *mem = &iser_task->data[cmd_dir];
+ struct iser_regd_buf *regd_buf = &iser_task->rdma_regd[cmd_dir];
+ struct fast_reg_descriptor *desc = NULL;
+ struct ib_sge data_sge;
+ int err, aligned_len;
+ unsigned long flags;
+
+ aligned_len = iser_data_buf_aligned_len(mem, ibdev);
+ if (aligned_len != mem->dma_nents) {
+ err = fall_to_bounce_buf(iser_task, ibdev, mem,
+ &iser_task->data_copy[cmd_dir],
+ cmd_dir, aligned_len);
+ if (err) {
+ iser_err("failed to allocate bounce buffer\n");
+ return err;
+ }
+ mem = &iser_task->data_copy[cmd_dir];
+ }
+
+ if (mem->dma_nents != 1 ||
+ scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) {
+ spin_lock_irqsave(&ib_conn->lock, flags);
+ desc = list_first_entry(&ib_conn->fastreg.pool,
+ struct fast_reg_descriptor, list);
+ list_del(&desc->list);
+ spin_unlock_irqrestore(&ib_conn->lock, flags);
+ regd_buf->reg.mem_h = desc;
+ }
+
+ err = iser_fast_reg_mr(iser_task, regd_buf, mem,
+ ISER_DATA_KEY_VALID, &data_sge);
+ if (err)
+ goto err_reg;
+
+ if (scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) {
+ struct ib_sge prot_sge, sig_sge;
+
+ memset(&prot_sge, 0, sizeof(prot_sge));
+ if (scsi_prot_sg_count(iser_task->sc)) {
+ mem = &iser_task->prot[cmd_dir];
+ aligned_len = iser_data_buf_aligned_len(mem, ibdev);
+ if (aligned_len != mem->dma_nents) {
+ err = fall_to_bounce_buf(iser_task, ibdev, mem,
+ &iser_task->prot_copy[cmd_dir],
+ cmd_dir, aligned_len);
+ if (err) {
+ iser_err("failed to allocate bounce buffer\n");
+ return err;
+ }
+ mem = &iser_task->prot_copy[cmd_dir];
+ }
+
+ err = iser_fast_reg_mr(iser_task, regd_buf, mem,
+ ISER_PROT_KEY_VALID, &prot_sge);
+ if (err)
+ goto err_reg;
}
+
+ err = iser_reg_sig_mr(iser_task, desc, &data_sge,
+ &prot_sge, &sig_sge);
+ if (err) {
+ iser_err("Failed to register signature mr\n");
+ return err;
+ }
+ desc->reg_indicators |= ISER_FASTREG_PROTECTED;
+
+ regd_buf->reg.lkey = sig_sge.lkey;
+ regd_buf->reg.rkey = desc->pi_ctx->sig_mr->rkey;
+ regd_buf->reg.va = sig_sge.addr;
+ regd_buf->reg.len = sig_sge.length;
+ regd_buf->reg.is_mr = 1;
+ } else {
+ if (desc) {
+ regd_buf->reg.rkey = desc->data_mr->rkey;
+ regd_buf->reg.is_mr = 1;
+ } else {
+ regd_buf->reg.rkey = device->mr->rkey;
+ regd_buf->reg.is_mr = 0;
+ }
+
+ regd_buf->reg.lkey = data_sge.lkey;
+ regd_buf->reg.va = data_sge.addr;
+ regd_buf->reg.len = data_sge.length;
}
+
return 0;
+err_reg:
+ if (desc) {
+ spin_lock_irqsave(&ib_conn->lock, flags);
+ list_add_tail(&desc->list, &ib_conn->fastreg.pool);
+ spin_unlock_irqrestore(&ib_conn->lock, flags);
+ }
+
+ return err;
}
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 9876865732f..ea01075f9f9 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -1,6 +1,7 @@
/*
* Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
* Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
+ * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -70,32 +71,78 @@ static void iser_event_handler(struct ib_event_handler *handler,
*/
static int iser_create_device_ib_res(struct iser_device *device)
{
+ struct iser_cq_desc *cq_desc;
+ struct ib_device_attr *dev_attr = &device->dev_attr;
+ int ret, i, j;
+
+ ret = ib_query_device(device->ib_device, dev_attr);
+ if (ret) {
+ pr_warn("Query device failed for %s\n", device->ib_device->name);
+ return ret;
+ }
+
+ /* Assign function handles - based on FMR support */
+ if (device->ib_device->alloc_fmr && device->ib_device->dealloc_fmr &&
+ device->ib_device->map_phys_fmr && device->ib_device->unmap_fmr) {
+ iser_info("FMR supported, using FMR for registration\n");
+ device->iser_alloc_rdma_reg_res = iser_create_fmr_pool;
+ device->iser_free_rdma_reg_res = iser_free_fmr_pool;
+ device->iser_reg_rdma_mem = iser_reg_rdma_mem_fmr;
+ device->iser_unreg_rdma_mem = iser_unreg_mem_fmr;
+ } else if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
+ iser_info("FastReg supported, using FastReg for registration\n");
+ device->iser_alloc_rdma_reg_res = iser_create_fastreg_pool;
+ device->iser_free_rdma_reg_res = iser_free_fastreg_pool;
+ device->iser_reg_rdma_mem = iser_reg_rdma_mem_fastreg;
+ device->iser_unreg_rdma_mem = iser_unreg_mem_fastreg;
+ } else {
+ iser_err("IB device does not support FMRs nor FastRegs, can't register memory\n");
+ return -1;
+ }
+
+ device->cqs_used = min(ISER_MAX_CQ, device->ib_device->num_comp_vectors);
+ iser_info("using %d CQs, device %s supports %d vectors\n",
+ device->cqs_used, device->ib_device->name,
+ device->ib_device->num_comp_vectors);
+
+ device->cq_desc = kmalloc(sizeof(struct iser_cq_desc) * device->cqs_used,
+ GFP_KERNEL);
+ if (device->cq_desc == NULL)
+ goto cq_desc_err;
+ cq_desc = device->cq_desc;
+
device->pd = ib_alloc_pd(device->ib_device);
if (IS_ERR(device->pd))
goto pd_err;
- device->rx_cq = ib_create_cq(device->ib_device,
- iser_cq_callback,
- iser_cq_event_callback,
- (void *)device,
- ISER_MAX_RX_CQ_LEN, 0);
- if (IS_ERR(device->rx_cq))
- goto rx_cq_err;
+ for (i = 0; i < device->cqs_used; i++) {
+ cq_desc[i].device = device;
+ cq_desc[i].cq_index = i;
- device->tx_cq = ib_create_cq(device->ib_device,
- NULL, iser_cq_event_callback,
- (void *)device,
- ISER_MAX_TX_CQ_LEN, 0);
+ device->rx_cq[i] = ib_create_cq(device->ib_device,
+ iser_cq_callback,
+ iser_cq_event_callback,
+ (void *)&cq_desc[i],
+ ISER_MAX_RX_CQ_LEN, i);
+ if (IS_ERR(device->rx_cq[i]))
+ goto cq_err;
- if (IS_ERR(device->tx_cq))
- goto tx_cq_err;
+ device->tx_cq[i] = ib_create_cq(device->ib_device,
+ NULL, iser_cq_event_callback,
+ (void *)&cq_desc[i],
+ ISER_MAX_TX_CQ_LEN, i);
- if (ib_req_notify_cq(device->rx_cq, IB_CQ_NEXT_COMP))
- goto cq_arm_err;
+ if (IS_ERR(device->tx_cq[i]))
+ goto cq_err;
- tasklet_init(&device->cq_tasklet,
- iser_cq_tasklet_fn,
- (unsigned long)device);
+ if (ib_req_notify_cq(device->rx_cq[i], IB_CQ_NEXT_COMP))
+ goto cq_err;
+
+ tasklet_init(&device->cq_tasklet[i],
+ iser_cq_tasklet_fn,
+ (unsigned long)&cq_desc[i]);
+ }
device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_WRITE |
@@ -113,14 +160,19 @@ static int iser_create_device_ib_res(struct iser_device *device)
handler_err:
ib_dereg_mr(device->mr);
dma_mr_err:
- tasklet_kill(&device->cq_tasklet);
-cq_arm_err:
- ib_destroy_cq(device->tx_cq);
-tx_cq_err:
- ib_destroy_cq(device->rx_cq);
-rx_cq_err:
+ for (j = 0; j < device->cqs_used; j++)
+ tasklet_kill(&device->cq_tasklet[j]);
+cq_err:
+ for (j = 0; j < i; j++) {
+ if (device->tx_cq[j])
+ ib_destroy_cq(device->tx_cq[j]);
+ if (device->rx_cq[j])
+ ib_destroy_cq(device->rx_cq[j]);
+ }
ib_dealloc_pd(device->pd);
pd_err:
+ kfree(device->cq_desc);
+cq_desc_err:
iser_err("failed to allocate an IB resource\n");
return -1;
}
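The capability probe at the top of iser_create_device_ib_res() installs one registration scheme behind function pointers. A minimal user-space model of that dispatch pattern; the types, names, and capability flags here are hypothetical.

#include <stdio.h>

struct dev_ops {
	int (*reg_rdma_mem)(void *task, int dir);
};

static int reg_fmr(void *task, int dir)     { puts("FMR path");     return 0; }
static int reg_fastreg(void *task, int dir) { puts("FastReg path"); return 0; }

int main(void)
{
	int have_fmr = 0, have_fastreg = 1;	/* pretend capability probe */
	struct dev_ops ops;

	/* pick the scheme once; the fast path just calls through */
	ops.reg_rdma_mem = have_fmr ? reg_fmr :
			   have_fastreg ? reg_fastreg : NULL;
	return ops.reg_rdma_mem ? ops.reg_rdma_mem(NULL, 0) : -1;
}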
@@ -131,52 +183,45 @@ pd_err:
*/
static void iser_free_device_ib_res(struct iser_device *device)
{
+ int i;
BUG_ON(device->mr == NULL);
- tasklet_kill(&device->cq_tasklet);
+ for (i = 0; i < device->cqs_used; i++) {
+ tasklet_kill(&device->cq_tasklet[i]);
+ (void)ib_destroy_cq(device->tx_cq[i]);
+ (void)ib_destroy_cq(device->rx_cq[i]);
+ device->tx_cq[i] = NULL;
+ device->rx_cq[i] = NULL;
+ }
+
(void)ib_unregister_event_handler(&device->event_handler);
(void)ib_dereg_mr(device->mr);
- (void)ib_destroy_cq(device->tx_cq);
- (void)ib_destroy_cq(device->rx_cq);
(void)ib_dealloc_pd(device->pd);
+ kfree(device->cq_desc);
+
device->mr = NULL;
- device->tx_cq = NULL;
- device->rx_cq = NULL;
device->pd = NULL;
}
/**
- * iser_create_ib_conn_res - Creates FMR pool and Queue-Pair (QP)
+ * iser_create_fmr_pool - Creates FMR pool and page_vector
*
- * returns 0 on success, -1 on failure
+ * returns 0 on success, or errno code on failure
*/
-static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
+int iser_create_fmr_pool(struct iser_conn *ib_conn, unsigned cmds_max)
{
- struct iser_device *device;
- struct ib_qp_init_attr init_attr;
- int ret = -ENOMEM;
+ struct iser_device *device = ib_conn->device;
struct ib_fmr_pool_param params;
+ int ret = -ENOMEM;
- BUG_ON(ib_conn->device == NULL);
-
- device = ib_conn->device;
-
- ib_conn->login_buf = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL);
- if (!ib_conn->login_buf)
- goto out_err;
-
- ib_conn->login_dma = ib_dma_map_single(ib_conn->device->ib_device,
- (void *)ib_conn->login_buf, ISER_RX_LOGIN_SIZE,
- DMA_FROM_DEVICE);
+ ib_conn->fmr.page_vec = kmalloc(sizeof(*ib_conn->fmr.page_vec) +
+ (sizeof(u64)*(ISCSI_ISER_SG_TABLESIZE + 1)),
+ GFP_KERNEL);
+ if (!ib_conn->fmr.page_vec)
+ return ret;
- ib_conn->page_vec = kmalloc(sizeof(struct iser_page_vec) +
- (sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE +1)),
- GFP_KERNEL);
- if (!ib_conn->page_vec)
- goto out_err;
-
- ib_conn->page_vec->pages = (u64 *) (ib_conn->page_vec + 1);
+ ib_conn->fmr.page_vec->pages = (u64 *)(ib_conn->fmr.page_vec + 1);
params.page_shift = SHIFT_4K;
/* when the first/last SG element are not start/end *
@@ -184,42 +229,260 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE + 1;
/* make the pool size twice the max number of SCSI commands *
* the ML is expected to queue, watermark for unmap at 50% */
- params.pool_size = ISCSI_DEF_XMIT_CMDS_MAX * 2;
- params.dirty_watermark = ISCSI_DEF_XMIT_CMDS_MAX;
+ params.pool_size = cmds_max * 2;
+ params.dirty_watermark = cmds_max;
params.cache = 0;
params.flush_function = NULL;
params.access = (IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_WRITE |
IB_ACCESS_REMOTE_READ);
- ib_conn->fmr_pool = ib_create_fmr_pool(device->pd, &params);
- if (IS_ERR(ib_conn->fmr_pool)) {
- ret = PTR_ERR(ib_conn->fmr_pool);
- ib_conn->fmr_pool = NULL;
- goto out_err;
+ ib_conn->fmr.pool = ib_create_fmr_pool(device->pd, &params);
+ if (!IS_ERR(ib_conn->fmr.pool))
+ return 0;
+
+ /* no FMR => no need for page_vec */
+ kfree(ib_conn->fmr.page_vec);
+ ib_conn->fmr.page_vec = NULL;
+
+ ret = PTR_ERR(ib_conn->fmr.pool);
+ ib_conn->fmr.pool = NULL;
+ if (ret != -ENOSYS) {
+ iser_err("FMR allocation failed, err %d\n", ret);
+ return ret;
+ } else {
+ iser_warn("FMRs are not supported, using unaligned mode\n");
+ return 0;
+ }
+}
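The pool sizing rule quoted in the comment above, as a trivial standalone computation; the cmds_max value is hypothetical.

#include <stdio.h>

int main(void)
{
	unsigned cmds_max = 128;	/* ML queue depth (made up) */

	/* pool twice the queue depth, unmap watermark at 50% */
	printf("pool_size=%u dirty_watermark=%u\n",
	       cmds_max * 2, cmds_max);
	return 0;
}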
+
+/**
+ * iser_free_fmr_pool - releases the FMR pool and page vec
+ */
+void iser_free_fmr_pool(struct iser_conn *ib_conn)
+{
+ iser_info("freeing conn %p fmr pool %p\n",
+ ib_conn, ib_conn->fmr.pool);
+
+ if (ib_conn->fmr.pool != NULL)
+ ib_destroy_fmr_pool(ib_conn->fmr.pool);
+
+ ib_conn->fmr.pool = NULL;
+
+ kfree(ib_conn->fmr.page_vec);
+ ib_conn->fmr.page_vec = NULL;
+}
+
+static int
+iser_create_fastreg_desc(struct ib_device *ib_device, struct ib_pd *pd,
+ bool pi_enable, struct fast_reg_descriptor *desc)
+{
+ int ret;
+
+ desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
+ ISCSI_ISER_SG_TABLESIZE + 1);
+ if (IS_ERR(desc->data_frpl)) {
+ ret = PTR_ERR(desc->data_frpl);
+ iser_err("Failed to allocate ib_fast_reg_page_list err=%d\n",
+ ret);
+ return ret;
+ }
+
+ desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE + 1);
+ if (IS_ERR(desc->data_mr)) {
+ ret = PTR_ERR(desc->data_mr);
+ iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret);
+ goto fast_reg_mr_failure;
+ }
+ desc->reg_indicators |= ISER_DATA_KEY_VALID;
+
+ if (pi_enable) {
+ struct ib_mr_init_attr mr_init_attr = {0};
+ struct iser_pi_context *pi_ctx = NULL;
+
+ desc->pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
+ if (!desc->pi_ctx) {
+ iser_err("Failed to allocate pi context\n");
+ ret = -ENOMEM;
+ goto pi_ctx_alloc_failure;
+ }
+ pi_ctx = desc->pi_ctx;
+
+ pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device,
+ ISCSI_ISER_SG_TABLESIZE);
+ if (IS_ERR(pi_ctx->prot_frpl)) {
+ ret = PTR_ERR(pi_ctx->prot_frpl);
+ iser_err("Failed to allocate prot frpl ret=%d\n",
+ ret);
+ goto prot_frpl_failure;
+ }
+
+ pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd,
+ ISCSI_ISER_SG_TABLESIZE + 1);
+ if (IS_ERR(pi_ctx->prot_mr)) {
+ ret = PTR_ERR(pi_ctx->prot_mr);
+ iser_err("Failed to allocate prot frmr ret=%d\n",
+ ret);
+ goto prot_mr_failure;
+ }
+ desc->reg_indicators |= ISER_PROT_KEY_VALID;
+
+ mr_init_attr.max_reg_descriptors = 2;
+ mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
+ pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
+ if (IS_ERR(pi_ctx->sig_mr)) {
+ ret = PTR_ERR(pi_ctx->sig_mr);
+ iser_err("Failed to allocate signature enabled mr err=%d\n",
+ ret);
+ goto sig_mr_failure;
+ }
+ desc->reg_indicators |= ISER_SIG_KEY_VALID;
+ }
+ desc->reg_indicators &= ~ISER_FASTREG_PROTECTED;
+
+ iser_dbg("Create fr_desc %p page_list %p\n",
+ desc, desc->data_frpl->page_list);
+
+ return 0;
+sig_mr_failure:
+ ib_dereg_mr(desc->pi_ctx->prot_mr);
+prot_mr_failure:
+ ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
+prot_frpl_failure:
+ kfree(desc->pi_ctx);
+pi_ctx_alloc_failure:
+ ib_dereg_mr(desc->data_mr);
+fast_reg_mr_failure:
+ ib_free_fast_reg_page_list(desc->data_frpl);
+
+ return ret;
+}
+
+/**
+ * iser_create_fastreg_pool - Creates pool of fast_reg descriptors
+ * for fast registration work requests.
+ * returns 0 on success, or errno code on failure
+ */
+int iser_create_fastreg_pool(struct iser_conn *ib_conn, unsigned cmds_max)
+{
+ struct iser_device *device = ib_conn->device;
+ struct fast_reg_descriptor *desc;
+ int i, ret;
+
+ INIT_LIST_HEAD(&ib_conn->fastreg.pool);
+ ib_conn->fastreg.pool_size = 0;
+ for (i = 0; i < cmds_max; i++) {
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc) {
+ iser_err("Failed to allocate a new fast_reg descriptor\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = iser_create_fastreg_desc(device->ib_device, device->pd,
+ ib_conn->pi_support, desc);
+ if (ret) {
+ iser_err("Failed to create fastreg descriptor err=%d\n",
+ ret);
+ kfree(desc);
+ goto err;
+ }
+
+ list_add_tail(&desc->list, &ib_conn->fastreg.pool);
+ ib_conn->fastreg.pool_size++;
}
+ return 0;
+
+err:
+ iser_free_fastreg_pool(ib_conn);
+ return ret;
+}
+
+/**
+ * iser_free_fastreg_pool - releases the pool of fast_reg descriptors
+ */
+void iser_free_fastreg_pool(struct iser_conn *ib_conn)
+{
+ struct fast_reg_descriptor *desc, *tmp;
+ int i = 0;
+
+ if (list_empty(&ib_conn->fastreg.pool))
+ return;
+
+ iser_info("freeing conn %p fr pool\n", ib_conn);
+
+ list_for_each_entry_safe(desc, tmp, &ib_conn->fastreg.pool, list) {
+ list_del(&desc->list);
+ ib_free_fast_reg_page_list(desc->data_frpl);
+ ib_dereg_mr(desc->data_mr);
+ if (desc->pi_ctx) {
+ ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
+ ib_dereg_mr(desc->pi_ctx->prot_mr);
+ ib_destroy_mr(desc->pi_ctx->sig_mr);
+ kfree(desc->pi_ctx);
+ }
+ kfree(desc);
+ ++i;
+ }
+
+ if (i < ib_conn->fastreg.pool_size)
+ iser_warn("pool still has %d regions registered\n",
+ ib_conn->fastreg.pool_size - i);
+}
+
+/**
+ * iser_create_ib_conn_res - Creates a Queue-Pair (QP)
+ *
+ * returns 0 on success, -1 on failure
+ */
+static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
+{
+ struct iser_device *device;
+ struct ib_qp_init_attr init_attr;
+ int ret = -ENOMEM;
+ int index, min_index = 0;
+
+ BUG_ON(ib_conn->device == NULL);
+
+ device = ib_conn->device;
+
memset(&init_attr, 0, sizeof init_attr);
+ mutex_lock(&ig.connlist_mutex);
+ /* select the CQ with the fewest active QPs */
+ for (index = 0; index < device->cqs_used; index++)
+ if (device->cq_active_qps[index] <
+ device->cq_active_qps[min_index])
+ min_index = index;
+ device->cq_active_qps[min_index]++;
+ mutex_unlock(&ig.connlist_mutex);
+ iser_info("cq index %d used for ib_conn %p\n", min_index, ib_conn);
+
init_attr.event_handler = iser_qp_event_callback;
init_attr.qp_context = (void *)ib_conn;
- init_attr.send_cq = device->tx_cq;
- init_attr.recv_cq = device->rx_cq;
- init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS;
+ init_attr.send_cq = device->tx_cq[min_index];
+ init_attr.recv_cq = device->rx_cq[min_index];
init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS;
init_attr.cap.max_send_sge = 2;
init_attr.cap.max_recv_sge = 1;
init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
init_attr.qp_type = IB_QPT_RC;
+ if (ib_conn->pi_support) {
+ init_attr.cap.max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS;
+ init_attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
+ } else {
+ init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS;
+ }
ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
if (ret)
goto out_err;
ib_conn->qp = ib_conn->cma_id->qp;
- iser_err("setting conn %p cma_id %p: fmr_pool %p qp %p\n",
- ib_conn, ib_conn->cma_id,
- ib_conn->fmr_pool, ib_conn->cma_id->qp);
+ iser_info("setting conn %p cma_id %p qp %p\n",
+ ib_conn, ib_conn->cma_id,
+ ib_conn->cma_id->qp);
return ret;
out_err:
@@ -228,32 +491,28 @@ out_err:
}
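A standalone sketch, with hypothetical counters, of the least-used CQ selection that iser_create_ib_conn_res() performs under connlist_mutex.

#include <stdio.h>

int main(void)
{
	int cq_active_qps[4] = { 3, 1, 2, 1 };	/* made-up per-CQ QP counts */
	int index, min_index = 0;

	for (index = 0; index < 4; index++)
		if (cq_active_qps[index] < cq_active_qps[min_index])
			min_index = index;
	cq_active_qps[min_index]++;	/* account for the new QP */
	printf("cq index %d selected\n", min_index);	/* prints 1 */
	return 0;
}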
/**
- * releases the FMR pool, QP and CMA ID objects, returns 0 on success,
+ * releases the QP objects, returns 0 on success,
* -1 on failure
*/
-static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
+static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
{
+ int cq_index;
BUG_ON(ib_conn == NULL);
- iser_err("freeing conn %p cma_id %p fmr pool %p qp %p\n",
- ib_conn, ib_conn->cma_id,
- ib_conn->fmr_pool, ib_conn->qp);
+ iser_info("freeing conn %p cma_id %p qp %p\n",
+ ib_conn, ib_conn->cma_id,
+ ib_conn->qp);
/* qp is created only once both addr & route are resolved */
- if (ib_conn->fmr_pool != NULL)
- ib_destroy_fmr_pool(ib_conn->fmr_pool);
- if (ib_conn->qp != NULL)
- rdma_destroy_qp(ib_conn->cma_id);
+ if (ib_conn->qp != NULL) {
+ cq_index = ((struct iser_cq_desc *)ib_conn->qp->recv_cq->cq_context)->cq_index;
+ ib_conn->device->cq_active_qps[cq_index]--;
- /* if cma handler context, the caller acts s.t the cma destroy the id */
- if (ib_conn->cma_id != NULL && can_destroy_id)
- rdma_destroy_id(ib_conn->cma_id);
+ rdma_destroy_qp(ib_conn->cma_id);
+ }
- ib_conn->fmr_pool = NULL;
ib_conn->qp = NULL;
- ib_conn->cma_id = NULL;
- kfree(ib_conn->page_vec);
return 0;
}
@@ -300,7 +559,7 @@ static void iser_device_try_release(struct iser_device *device)
{
mutex_lock(&ig.device_list_mutex);
device->refcount--;
- iser_err("device %p refcount %d\n",device,device->refcount);
+ iser_info("device %p refcount %d\n", device, device->refcount);
if (!device->refcount) {
iser_free_device_ib_res(device);
list_del(&device->ig_list);
@@ -322,39 +581,46 @@ static int iser_conn_state_comp_exch(struct iser_conn *ib_conn,
return ret;
}
+void iser_release_work(struct work_struct *work)
+{
+ struct iser_conn *ib_conn;
+
+ ib_conn = container_of(work, struct iser_conn, release_work);
+
+ /* wait for .conn_stop callback */
+ wait_for_completion(&ib_conn->stop_completion);
+
+ /* wait for the qp's post send and post receive buffers to empty */
+ wait_event_interruptible(ib_conn->wait,
+ ib_conn->state == ISER_CONN_DOWN);
+
+ iser_conn_release(ib_conn);
+}
+
/**
* Frees all conn objects and deallocs conn descriptor
*/
-static void iser_conn_release(struct iser_conn *ib_conn, int can_destroy_id)
+void iser_conn_release(struct iser_conn *ib_conn)
{
struct iser_device *device = ib_conn->device;
- BUG_ON(ib_conn->state != ISER_CONN_DOWN);
+ BUG_ON(ib_conn->state == ISER_CONN_UP);
mutex_lock(&ig.connlist_mutex);
list_del(&ib_conn->conn_list);
mutex_unlock(&ig.connlist_mutex);
iser_free_rx_descriptors(ib_conn);
- iser_free_ib_conn_res(ib_conn, can_destroy_id);
+ iser_free_ib_conn_res(ib_conn);
ib_conn->device = NULL;
/* on EVENT_ADDR_ERROR there's no device yet for this conn */
if (device != NULL)
iser_device_try_release(device);
- iscsi_destroy_endpoint(ib_conn->ep);
-}
-
-void iser_conn_get(struct iser_conn *ib_conn)
-{
- atomic_inc(&ib_conn->refcount);
-}
-
-int iser_conn_put(struct iser_conn *ib_conn, int can_destroy_id)
-{
- if (atomic_dec_and_test(&ib_conn->refcount)) {
- iser_conn_release(ib_conn, can_destroy_id);
- return 1;
+ /* in the cma handler context, the caller actually destroys the id */
+ if (ib_conn->cma_id != NULL) {
+ rdma_destroy_id(ib_conn->cma_id);
+ ib_conn->cma_id = NULL;
}
- return 0;
+ iscsi_destroy_endpoint(ib_conn->ep);
}
/**
@@ -374,24 +640,19 @@ void iser_conn_terminate(struct iser_conn *ib_conn)
if (err)
iser_err("Failed to disconnect, conn: 0x%p err %d\n",
ib_conn,err);
-
- wait_event_interruptible(ib_conn->wait,
- ib_conn->state == ISER_CONN_DOWN);
-
- iser_conn_put(ib_conn, 1); /* deref ib conn deallocate */
}
-static int iser_connect_error(struct rdma_cm_id *cma_id)
+static void iser_connect_error(struct rdma_cm_id *cma_id)
{
struct iser_conn *ib_conn;
+
ib_conn = (struct iser_conn *)cma_id->context;
ib_conn->state = ISER_CONN_DOWN;
wake_up_interruptible(&ib_conn->wait);
- return iser_conn_put(ib_conn, 0); /* deref ib conn's cma id */
}
-static int iser_addr_handler(struct rdma_cm_id *cma_id)
+static void iser_addr_handler(struct rdma_cm_id *cma_id)
{
struct iser_device *device;
struct iser_conn *ib_conn;
@@ -400,25 +661,39 @@ static int iser_addr_handler(struct rdma_cm_id *cma_id)
device = iser_device_find_by_ib_device(cma_id);
if (!device) {
iser_err("device lookup/creation failed\n");
- return iser_connect_error(cma_id);
+ iser_connect_error(cma_id);
+ return;
}
ib_conn = (struct iser_conn *)cma_id->context;
ib_conn->device = device;
+ /* connection T10-PI support */
+ if (iser_pi_enable) {
+ if (!(device->dev_attr.device_cap_flags &
+ IB_DEVICE_SIGNATURE_HANDOVER)) {
+ iser_warn("T10-PI requested but not supported on %s, "
+ "continue without T10-PI\n",
+ ib_conn->device->ib_device->name);
+ ib_conn->pi_support = false;
+ } else {
+ ib_conn->pi_support = true;
+ }
+ }
+
ret = rdma_resolve_route(cma_id, 1000);
if (ret) {
iser_err("resolve route failed: %d\n", ret);
- return iser_connect_error(cma_id);
+ iser_connect_error(cma_id);
+ return;
}
-
- return 0;
}
-static int iser_route_handler(struct rdma_cm_id *cma_id)
+static void iser_route_handler(struct rdma_cm_id *cma_id)
{
struct rdma_conn_param conn_param;
int ret;
+ struct iser_cm_hdr req_hdr;
ret = iser_create_ib_conn_res((struct iser_conn *)cma_id->context);
if (ret)
@@ -430,39 +705,52 @@ static int iser_route_handler(struct rdma_cm_id *cma_id)
conn_param.retry_count = 7;
conn_param.rnr_retry_count = 6;
+ memset(&req_hdr, 0, sizeof(req_hdr));
+ req_hdr.flags = (ISER_ZBVA_NOT_SUPPORTED |
+ ISER_SEND_W_INV_NOT_SUPPORTED);
+ conn_param.private_data = (void *)&req_hdr;
+ conn_param.private_data_len = sizeof(struct iser_cm_hdr);
+
ret = rdma_connect(cma_id, &conn_param);
if (ret) {
iser_err("failure connecting: %d\n", ret);
goto failure;
}
- return 0;
+ return;
failure:
- return iser_connect_error(cma_id);
+ iser_connect_error(cma_id);
}
static void iser_connected_handler(struct rdma_cm_id *cma_id)
{
struct iser_conn *ib_conn;
+ struct ib_qp_attr attr;
+ struct ib_qp_init_attr init_attr;
+
+ (void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr);
+ iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num, cma_id->qp->qp_num);
ib_conn = (struct iser_conn *)cma_id->context;
- ib_conn->state = ISER_CONN_UP;
- wake_up_interruptible(&ib_conn->wait);
+ if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_PENDING, ISER_CONN_UP))
+ wake_up_interruptible(&ib_conn->wait);
}
-static int iser_disconnected_handler(struct rdma_cm_id *cma_id)
+static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
{
struct iser_conn *ib_conn;
- int ret;
ib_conn = (struct iser_conn *)cma_id->context;
/* getting here when the state is UP means that the conn is being *
* terminated asynchronously from the iSCSI layer's perspective. */
if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP,
- ISER_CONN_TERMINATING))
- iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
- ISCSI_ERR_CONN_FAILED);
+ ISER_CONN_TERMINATING)) {
+ if (ib_conn->iscsi_conn)
+ iscsi_conn_failure(ib_conn->iscsi_conn, ISCSI_ERR_CONN_FAILED);
+ else
+ iser_err("iscsi_iser connection isn't bound\n");
+ }
/* Complete the termination process if no posts are pending */
if (ib_conn->post_recv_buf_count == 0 &&
@@ -470,24 +758,19 @@ static int iser_disconnected_handler(struct rdma_cm_id *cma_id)
ib_conn->state = ISER_CONN_DOWN;
wake_up_interruptible(&ib_conn->wait);
}
-
- ret = iser_conn_put(ib_conn, 0); /* deref ib conn's cma id */
- return ret;
}
static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
- int ret = 0;
-
- iser_err("event %d status %d conn %p id %p\n",
- event->event, event->status, cma_id->context, cma_id);
+ iser_info("event %d status %d conn %p id %p\n",
+ event->event, event->status, cma_id->context, cma_id);
switch (event->event) {
case RDMA_CM_EVENT_ADDR_RESOLVED:
- ret = iser_addr_handler(cma_id);
+ iser_addr_handler(cma_id);
break;
case RDMA_CM_EVENT_ROUTE_RESOLVED:
- ret = iser_route_handler(cma_id);
+ iser_route_handler(cma_id);
break;
case RDMA_CM_EVENT_ESTABLISHED:
iser_connected_handler(cma_id);
@@ -497,18 +780,18 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
case RDMA_CM_EVENT_CONNECT_ERROR:
case RDMA_CM_EVENT_UNREACHABLE:
case RDMA_CM_EVENT_REJECTED:
- ret = iser_connect_error(cma_id);
+ iser_connect_error(cma_id);
break;
case RDMA_CM_EVENT_DISCONNECTED:
case RDMA_CM_EVENT_DEVICE_REMOVAL:
case RDMA_CM_EVENT_ADDR_CHANGE:
- ret = iser_disconnected_handler(cma_id);
+ iser_disconnected_handler(cma_id);
break;
default:
iser_err("Unexpected RDMA CM event (%d)\n", event->event);
break;
}
- return ret;
+ return 0;
}
void iser_conn_init(struct iser_conn *ib_conn)
@@ -517,7 +800,7 @@ void iser_conn_init(struct iser_conn *ib_conn)
init_waitqueue_head(&ib_conn->wait);
ib_conn->post_recv_buf_count = 0;
atomic_set(&ib_conn->post_send_buf_count, 0);
- atomic_set(&ib_conn->refcount, 1); /* ref ib conn allocation */
+ init_completion(&ib_conn->stop_completion);
INIT_LIST_HEAD(&ib_conn->conn_list);
spin_lock_init(&ib_conn->lock);
}
@@ -540,15 +823,14 @@ int iser_connect(struct iser_conn *ib_conn,
/* the device is known only --after-- address resolution */
ib_conn->device = NULL;
- iser_err("connecting to: %pI4, port 0x%x\n",
- &dst_addr->sin_addr, dst_addr->sin_port);
+ iser_info("connecting to: %pI4, port 0x%x\n",
+ &dst_addr->sin_addr, dst_addr->sin_port);
ib_conn->state = ISER_CONN_PENDING;
- iser_conn_get(ib_conn); /* ref ib conn's cma id */
ib_conn->cma_id = rdma_create_id(iser_cma_handler,
(void *)ib_conn,
- RDMA_PS_TCP);
+ RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(ib_conn->cma_id)) {
err = PTR_ERR(ib_conn->cma_id);
iser_err("rdma_create_id failed: %d\n", err);
@@ -583,7 +865,7 @@ id_failure:
addr_failure:
ib_conn->state = ISER_CONN_DOWN;
connect_failure:
- iser_conn_release(ib_conn, 1);
+ iser_conn_release(ib_conn);
return err;
}
@@ -604,7 +886,7 @@ int iser_reg_page_vec(struct iser_conn *ib_conn,
page_list = page_vec->pages;
io_addr = page_list[0];
- mem = ib_fmr_pool_map_phys(ib_conn->fmr_pool,
+ mem = ib_fmr_pool_map_phys(ib_conn->fmr.pool,
page_list,
page_vec->length,
io_addr);
@@ -619,7 +901,7 @@ int iser_reg_page_vec(struct iser_conn *ib_conn,
mem_reg->rkey = mem->fmr->rkey;
mem_reg->len = page_vec->length * SIZE_4K;
mem_reg->va = io_addr;
- mem_reg->is_fmr = 1;
+ mem_reg->is_mr = 1;
mem_reg->mem_h = (void *)mem;
mem_reg->va += page_vec->offset;
@@ -637,12 +919,18 @@ int iser_reg_page_vec(struct iser_conn *ib_conn,
}
/**
- * Unregister (previosuly registered) memory.
+ * Unregister (previously registered using FMR) memory.
+ * If the memory was not registered using FMR, this does nothing.
*/
-void iser_unreg_mem(struct iser_mem_reg *reg)
+void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
+ enum iser_data_dir cmd_dir)
{
+ struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
int ret;
+ if (!reg->is_mr)
+ return;
+
iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n",reg->mem_h);
ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
@@ -652,17 +940,34 @@ void iser_unreg_mem(struct iser_mem_reg *reg)
reg->mem_h = NULL;
}
+void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
+ enum iser_data_dir cmd_dir)
+{
+ struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
+ struct iser_conn *ib_conn = iser_task->ib_conn;
+ struct fast_reg_descriptor *desc = reg->mem_h;
+
+ if (!reg->is_mr)
+ return;
+
+ reg->mem_h = NULL;
+ reg->is_mr = 0;
+ spin_lock_bh(&ib_conn->lock);
+ list_add_tail(&desc->list, &ib_conn->fastreg.pool);
+ spin_unlock_bh(&ib_conn->lock);
+}
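iser_reg_rdma_mem_fastreg() pops a descriptor from ib_conn->fastreg.pool and iser_unreg_mem_fastreg() pushes it back, as seen above. A minimal user-space model of that recycle pattern; the types are hypothetical and the spinlock is omitted.

#include <stdio.h>

struct fr_desc { struct fr_desc *next; int id; };

static struct fr_desc *pool_get(struct fr_desc **pool)
{
	struct fr_desc *d = *pool;

	if (d)
		*pool = d->next;	/* unlink the head descriptor */
	return d;
}

static void pool_put(struct fr_desc **pool, struct fr_desc *d)
{
	d->next = *pool;	/* push back for reuse */
	*pool = d;
}

int main(void)
{
	struct fr_desc a = { NULL, 0 }, b = { &a, 1 };
	struct fr_desc *pool = &b;
	struct fr_desc *d = pool_get(&pool);	/* registration takes desc 1 */

	printf("got desc %d\n", d->id);
	pool_put(&pool, d);			/* unregistration recycles it */
	return 0;
}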
+
int iser_post_recvl(struct iser_conn *ib_conn)
{
struct ib_recv_wr rx_wr, *rx_wr_failed;
struct ib_sge sge;
int ib_ret;
- sge.addr = ib_conn->login_dma;
+ sge.addr = ib_conn->login_resp_dma;
sge.length = ISER_RX_LOGIN_SIZE;
sge.lkey = ib_conn->device->mr->lkey;
- rx_wr.wr_id = (unsigned long)ib_conn->login_buf;
+ rx_wr.wr_id = (unsigned long)ib_conn->login_resp_buf;
rx_wr.sg_list = &sge;
rx_wr.num_sge = 1;
rx_wr.next = NULL;
@@ -689,7 +994,7 @@ int iser_post_recvm(struct iser_conn *ib_conn, int count)
rx_wr->sg_list = &rx_desc->rx_sg;
rx_wr->num_sge = 1;
rx_wr->next = rx_wr + 1;
- my_rx_head = (my_rx_head + 1) & (ISER_QP_MAX_RECV_DTOS - 1);
+ my_rx_head = (my_rx_head + 1) & ib_conn->qp_max_recv_dtos_mask;
}
rx_wr--;
@@ -749,7 +1054,7 @@ static void iser_handle_comp_error(struct iser_tx_desc *desc,
* perspective. */
if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP,
ISER_CONN_TERMINATING))
- iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
+ iscsi_conn_failure(ib_conn->iscsi_conn,
ISCSI_ERR_CONN_FAILED);
/* no more non completed posts to the QP, complete the
@@ -759,9 +1064,9 @@ static void iser_handle_comp_error(struct iser_tx_desc *desc,
}
}
-static int iser_drain_tx_cq(struct iser_device *device)
+static int iser_drain_tx_cq(struct iser_device *device, int cq_index)
{
- struct ib_cq *cq = device->tx_cq;
+ struct ib_cq *cq = device->tx_cq[cq_index];
struct ib_wc wc;
struct iser_tx_desc *tx_desc;
struct iser_conn *ib_conn;
@@ -778,9 +1083,11 @@ static int iser_drain_tx_cq(struct iser_device *device)
IB_WC_SEND, wc.opcode);
} else {
iser_err("tx id %llx status %d vend_err %x\n",
- wc.wr_id, wc.status, wc.vendor_err);
- atomic_dec(&ib_conn->post_send_buf_count);
- iser_handle_comp_error(tx_desc, ib_conn);
+ wc.wr_id, wc.status, wc.vendor_err);
+ if (wc.wr_id != ISER_FASTREG_LI_WRID) {
+ atomic_dec(&ib_conn->post_send_buf_count);
+ iser_handle_comp_error(tx_desc, ib_conn);
+ }
}
completed_tx++;
}
@@ -790,14 +1097,20 @@ static int iser_drain_tx_cq(struct iser_device *device)
static void iser_cq_tasklet_fn(unsigned long data)
{
- struct iser_device *device = (struct iser_device *)data;
- struct ib_cq *cq = device->rx_cq;
+ struct iser_cq_desc *cq_desc = (struct iser_cq_desc *)data;
+ struct iser_device *device = cq_desc->device;
+ int cq_index = cq_desc->cq_index;
+ struct ib_cq *cq = device->rx_cq[cq_index];
struct ib_wc wc;
struct iser_rx_desc *desc;
unsigned long xfer_len;
struct iser_conn *ib_conn;
- int completed_tx, completed_rx;
- completed_tx = completed_rx = 0;
+ int completed_tx, completed_rx = 0;
+
+ /* Drain tx first, so that in a case where we get rx flushes and a
+ * successful tx completion we will still go through completion
+ * error handling.
+ */
+ completed_tx = iser_drain_tx_cq(device, cq_index);
while (ib_poll_cq(cq, 1, &wc) == 1) {
desc = (struct iser_rx_desc *) (unsigned long) wc.wr_id;
@@ -819,19 +1132,68 @@ static void iser_cq_tasklet_fn(unsigned long data)
}
completed_rx++;
if (!(completed_rx & 63))
- completed_tx += iser_drain_tx_cq(device);
+ completed_tx += iser_drain_tx_cq(device, cq_index);
}
/* #warning "it is assumed here that arming CQ only once its empty" *
* " would not cause interrupts to be missed" */
ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
- completed_tx += iser_drain_tx_cq(device);
iser_dbg("got %d rx %d tx completions\n", completed_rx, completed_tx);
}
static void iser_cq_callback(struct ib_cq *cq, void *cq_context)
{
- struct iser_device *device = (struct iser_device *)cq_context;
+ struct iser_cq_desc *cq_desc = (struct iser_cq_desc *)cq_context;
+ struct iser_device *device = cq_desc->device;
+ int cq_index = cq_desc->cq_index;
- tasklet_schedule(&device->cq_tasklet);
+ tasklet_schedule(&device->cq_tasklet[cq_index]);
+}
+
+u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
+ enum iser_data_dir cmd_dir, sector_t *sector)
+{
+ struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
+ struct fast_reg_descriptor *desc = reg->mem_h;
+ unsigned long sector_size = iser_task->sc->device->sector_size;
+ struct ib_mr_status mr_status;
+ int ret;
+
+ if (desc && desc->reg_indicators & ISER_FASTREG_PROTECTED) {
+ desc->reg_indicators &= ~ISER_FASTREG_PROTECTED;
+ ret = ib_check_mr_status(desc->pi_ctx->sig_mr,
+ IB_MR_CHECK_SIG_STATUS, &mr_status);
+ if (ret) {
+ pr_err("ib_check_mr_status failed, ret %d\n", ret);
+ goto err;
+ }
+
+ if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
+ sector_t sector_off = mr_status.sig_err.sig_err_offset;
+
+ do_div(sector_off, sector_size + 8);
+ *sector = scsi_get_lba(iser_task->sc) + sector_off;
+
+ pr_err("PI error found type %d at sector %llx "
+ "expected %x vs actual %x\n",
+ mr_status.sig_err.err_type,
+ (unsigned long long)*sector,
+ mr_status.sig_err.expected,
+ mr_status.sig_err.actual);
+
+ switch (mr_status.sig_err.err_type) {
+ case IB_SIG_BAD_GUARD:
+ return 0x1;
+ case IB_SIG_BAD_REFTAG:
+ return 0x3;
+ case IB_SIG_BAD_APPTAG:
+ return 0x2;
+ }
+ }
+ }
+
+ return 0;
+err:
+ /* Not a lot we can do here, return an ambiguous guard error */
+ return 0x1;
}
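A worked example, with hypothetical numbers, of the bad-sector arithmetic above: each wire sector carries sector_size data bytes plus an 8-byte DIF tuple, so the byte offset of a signature error is divided by (sector_size + 8) before being added to the command's start LBA.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t sig_err_offset = 2080;	/* bytes into protected region (made up) */
	unsigned long sector_size = 512;
	uint64_t lba = 1000;		/* command start LBA (made up) */
	uint64_t sector_off = sig_err_offset / (sector_size + 8);

	printf("PI error at sector %llu\n",
	       (unsigned long long)(lba + sector_off));	/* 1000 + 4 = 1004 */
	return 0;
}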