From 3f0dd4194b3866c605906130f57d536e48287d1c Mon Sep 17 00:00:00 2001 From: Vladimir Sokolovsky Date: Thu, 19 Jan 2017 13:03:40 +0200 Subject: [PATCH] Updated ISER and SRP backports Signed-off-by: Vladimir Sokolovsky --- patches/0008-BACKPORT-ib_srp.patch | 198 ++++++-- patches/0009-BACKPORT-ib_iser.patch | 724 +++++++++++++++++++++++++++- 2 files changed, 884 insertions(+), 38 deletions(-) diff --git a/patches/0008-BACKPORT-ib_srp.patch b/patches/0008-BACKPORT-ib_srp.patch index adae045..a7918ca 100644 --- a/patches/0008-BACKPORT-ib_srp.patch +++ b/patches/0008-BACKPORT-ib_srp.patch @@ -1,30 +1,45 @@ From: Israel Rukshin Subject: [PATCH] BACKPORT: ib_srp +Change-Id: Ic90bc43f6bd61818530da7fb700962a8e1ef4aa5 Signed-off-by: Israel Rukshin --- - drivers/infiniband/ulp/srp/ib_srp.c | 105 ++++++++++++++++++++++++++++++++++++ - drivers/infiniband/ulp/srp/ib_srp.h | 7 +++ - 2 files changed, 112 insertions(+) + drivers/infiniband/ulp/srp/ib_srp.c | 175 ++++++++++++++++++++++++++++++++++++ + drivers/infiniband/ulp/srp/ib_srp.h | 7 ++ + 2 files changed, 182 insertions(+) diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index xxxxxxx..xxxxxxx xxxxxx --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c -@@ -129,10 +129,12 @@ MODULE_PARM_DESC(dev_loss_tmo, - " if fast_io_fail_tmo has not been set. 
\"off\" means that" - " this functionality is disabled."); +@@ -81,8 +81,13 @@ MODULE_PARM_DESC(cmd_sg_entries, + "Default number of gather/scatter entries in the SRP command (default is 12, max 255)"); + + module_param(indirect_sg_entries, uint, 0444); ++#ifdef HAVE_SG_MAX_SEGMENTS + MODULE_PARM_DESC(indirect_sg_entries, + "Default max number of gather/scatter entries (default is 12, max is " __stringify(SG_MAX_SEGMENTS) ")"); ++#else ++MODULE_PARM_DESC(indirect_sg_entries, ++ "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")"); ++#endif + + module_param(allow_ext_sg, bool, 0444); + MODULE_PARM_DESC(allow_ext_sg, +@@ -131,8 +136,12 @@ MODULE_PARM_DESC(dev_loss_tmo, -+#ifdef HAVE_BLK_MQ_UNIQUE_TAG static unsigned ch_count; module_param(ch_count, uint, 0444); ++#ifdef HAVE_BLK_MQ_UNIQUE_TAG MODULE_PARM_DESC(ch_count, "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA."); ++#else ++MODULE_PARM_DESC(ch_count, "Number of RDMA channels to use for communication with an SRP target. 
[deprecated (using 1 channel)]"); +#endif static void srp_add_one(struct ib_device *device); static void srp_remove_one(struct ib_device *device, void *client_data); -@@ -846,6 +848,9 @@ static int srp_alloc_req_data(struct srp_rdma_ch *ch) +@@ -846,6 +855,9 @@ static int srp_alloc_req_data(struct srp_rdma_ch *ch) dma_addr_t dma_addr; int i, ret = -ENOMEM; @@ -34,7 +49,7 @@ index xxxxxxx..xxxxxxx xxxxxx ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring), GFP_KERNEL); if (!ch->req_ring) -@@ -877,6 +882,10 @@ static int srp_alloc_req_data(struct srp_rdma_ch *ch) +@@ -877,6 +889,10 @@ static int srp_alloc_req_data(struct srp_rdma_ch *ch) goto out; req->indirect_dma_addr = dma_addr; @@ -45,7 +60,7 @@ index xxxxxxx..xxxxxxx xxxxxx } ret = 0; -@@ -1130,6 +1139,9 @@ static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req, +@@ -1130,6 +1146,9 @@ static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req, spin_lock_irqsave(&ch->lock, flags); ch->req_lim += req_lim_delta; @@ -55,7 +70,7 @@ index xxxxxxx..xxxxxxx xxxxxx spin_unlock_irqrestore(&ch->lock, flags); } -@@ -1874,11 +1886,16 @@ static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp) +@@ -1874,11 +1893,16 @@ static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp) ch->tsk_mgmt_status = rsp->data[3]; complete(&ch->tsk_mgmt_done); } else { @@ -72,7 +87,21 @@ index xxxxxxx..xxxxxxx xxxxxx if (!scmnd) { shost_printk(KERN_ERR, target->scsi_host, "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n", -@@ -2084,8 +2101,10 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd) +@@ -1974,8 +1998,13 @@ static void srp_process_aer_req(struct srp_rdma_ch *ch, + }; + s32 delta = be32_to_cpu(req->req_lim_delta); + ++#ifdef HAVE_SCSI_DEVICE_U64_LUN + shost_printk(KERN_ERR, target->scsi_host, PFX + "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun)); ++#else ++ shost_printk(KERN_ERR, target->scsi_host, PFX ++ 
"ignoring AER for LUN %u\n", scsilun_to_int(&req->lun)); ++#endif + + if (srp_response_common(ch, delta, &rsp, sizeof(rsp))) + shost_printk(KERN_ERR, target->scsi_host, PFX +@@ -2084,8 +2113,10 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd) struct srp_cmd *cmd; struct ib_device *dev; unsigned long flags; @@ -83,7 +112,7 @@ index xxxxxxx..xxxxxxx xxxxxx int len, ret; const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler; -@@ -2102,6 +2121,7 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd) +@@ -2102,6 +2133,7 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd) if (unlikely(scmnd->result)) goto err; @@ -91,7 +120,7 @@ index xxxxxxx..xxxxxxx xxxxxx WARN_ON_ONCE(scmnd->request->tag < 0); tag = blk_mq_unique_tag(scmnd->request); ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)]; -@@ -2109,15 +2129,26 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd) +@@ -2109,15 +2141,27 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd) WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n", dev_name(&shost->shost_gendev), tag, idx, target->req_ring_size); @@ -101,9 +130,9 @@ index xxxxxxx..xxxxxxx xxxxxx spin_lock_irqsave(&ch->lock, flags); iu = __srp_get_tx_iu(ch, SRP_IU_CMD); ++#ifdef HAVE_BLK_MQ_UNIQUE_TAG spin_unlock_irqrestore(&ch->lock, flags); -+#ifdef HAVE_BLK_MQ_UNIQUE_TAG if (!iu) goto err; @@ -114,11 +143,12 @@ index xxxxxxx..xxxxxxx xxxxxx + + req = list_first_entry(&ch->free_reqs, struct srp_request, list); + list_del(&req->list); ++ spin_unlock_irqrestore(&ch->lock, flags); +#endif dev = target->srp_host->srp_dev->dev; ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len, DMA_TO_DEVICE); -@@ -2129,7 +2160,11 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd) +@@ -2129,7 +2173,11 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd 
*scmnd) cmd->opcode = SRP_CMD; int_to_scsilun(scmnd->device->lun, &cmd->lun); @@ -130,7 +160,7 @@ index xxxxxxx..xxxxxxx xxxxxx memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len); req->scmnd = scmnd; -@@ -2178,6 +2213,14 @@ err_iu: +@@ -2178,6 +2226,14 @@ err_iu: */ req->scmnd = NULL; @@ -145,7 +175,39 @@ index xxxxxxx..xxxxxxx xxxxxx err: if (scmnd->result) { scmnd->scsi_done(scmnd); -@@ -2500,13 +2543,40 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) +@@ -2493,6 +2549,31 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) + return 0; + } + ++#if defined(HAVE_SCSI_HOST_TEMPLATE_CHANGE_QUEUE_TYPE) && \ ++ !defined(HAVE_SCSI_TCQ_SCSI_CHANGE_QUEUE_TYPE) ++/** ++ * srp_change_queue_type - changing device queue tag type ++ * @sdev: scsi device struct ++ * @tag_type: requested tag type ++ * ++ * Returns queue tag type. ++ */ ++static int ++srp_change_queue_type(struct scsi_device *sdev, int tag_type) ++{ ++ if (sdev->tagged_supported) { ++ scsi_set_tag_type(sdev, tag_type); ++ if (tag_type) ++ scsi_activate_tcq(sdev, sdev->queue_depth); ++ else ++ scsi_deactivate_tcq(sdev, sdev->queue_depth); ++ } else ++ tag_type = 0; ++ ++ return tag_type; ++} ++#endif ++ + /** + * srp_change_queue_depth - setting device queue depth + * @sdev: scsi device struct +@@ -2500,13 +2581,40 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) * * Returns queue depth. 
*/ @@ -155,12 +217,12 @@ index xxxxxxx..xxxxxxx xxxxxx { if (!sdev->tagged_supported) qdepth = 1; -+#ifdef HAVE_SCSCI_CHANGE_QUEUE_DEPTH ++#ifdef HAVE_SCSI_CHANGE_QUEUE_DEPTH return scsi_change_queue_depth(sdev, qdepth); +#else + scsi_adjust_queue_depth(sdev, qdepth); + return sdev->queue_depth; -+#endif //HAVE_SCSCI_CHANGE_QUEUE_DEPTH ++#endif //HAVE_SCSI_CHANGE_QUEUE_DEPTH } +#else +static int @@ -186,7 +248,7 @@ index xxxxxxx..xxxxxxx xxxxxx static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun, u8 func) -@@ -2569,8 +2639,10 @@ static int srp_abort(struct scsi_cmnd *scmnd) +@@ -2569,8 +2677,10 @@ static int srp_abort(struct scsi_cmnd *scmnd) { struct srp_target_port *target = host_to_target(scmnd->device->host); struct srp_request *req = (struct srp_request *) scmnd->host_scribble; @@ -197,7 +259,7 @@ index xxxxxxx..xxxxxxx xxxxxx struct srp_rdma_ch *ch; int ret; -@@ -2578,6 +2650,7 @@ static int srp_abort(struct scsi_cmnd *scmnd) +@@ -2578,6 +2688,7 @@ static int srp_abort(struct scsi_cmnd *scmnd) if (!req) return SUCCESS; @@ -205,7 +267,7 @@ index xxxxxxx..xxxxxxx xxxxxx tag = blk_mq_unique_tag(scmnd->request); ch_idx = blk_mq_unique_tag_to_hwq(tag); if (WARN_ON_ONCE(ch_idx >= target->ch_count)) -@@ -2589,6 +2662,16 @@ static int srp_abort(struct scsi_cmnd *scmnd) +@@ -2589,6 +2700,16 @@ static int srp_abort(struct scsi_cmnd *scmnd) "Sending SRP abort for tag %#x\n", tag); if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun, SRP_TSK_ABORT_TASK) == 0) @@ -222,7 +284,15 @@ index xxxxxxx..xxxxxxx xxxxxx ret = SUCCESS; else if (target->rport->state == SRP_RPORT_LOST) ret = FAST_IO_FAIL; -@@ -2645,8 +2728,12 @@ static int srp_slave_alloc(struct scsi_device *sdev) +@@ -2637,6 +2758,7 @@ static int srp_reset_host(struct scsi_cmnd *scmnd) + return srp_reconnect_rport(target->rport) == 0 ? 
SUCCESS : FAILED; + } + ++#if defined(HAVE_QUEUE_FLAG_SG_GAPS) || defined(HAVE_BLK_QUEUE_VIRT_BOUNDARY) + static int srp_slave_alloc(struct scsi_device *sdev) + { + struct Scsi_Host *shost = sdev->host; +@@ -2645,11 +2767,16 @@ static int srp_slave_alloc(struct scsi_device *sdev) struct ib_device *ibdev = srp_dev->dev; if (!(ibdev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)) @@ -235,17 +305,48 @@ index xxxxxxx..xxxxxxx xxxxxx return 0; } -@@ -2857,7 +2944,9 @@ static struct scsi_host_template srp_template = { ++#endif + + static int srp_slave_configure(struct scsi_device *sdev) + { +@@ -2842,11 +2969,20 @@ static struct scsi_host_template srp_template = { + .module = THIS_MODULE, + .name = "InfiniBand SRP initiator", + .proc_name = DRV_NAME, ++#if defined(HAVE_QUEUE_FLAG_SG_GAPS) || defined(HAVE_BLK_QUEUE_VIRT_BOUNDARY) + .slave_alloc = srp_slave_alloc, ++#endif + .slave_configure = srp_slave_configure, + .info = srp_target_info, + .queuecommand = srp_queuecommand, + .change_queue_depth = srp_change_queue_depth, ++#ifdef HAVE_SCSI_HOST_TEMPLATE_CHANGE_QUEUE_TYPE ++#ifdef HAVE_SCSI_TCQ_SCSI_CHANGE_QUEUE_TYPE ++ .change_queue_type = scsi_change_queue_type, ++#else ++ .change_queue_type = srp_change_queue_type, ++#endif ++#endif + .eh_abort_handler = srp_abort, + .eh_device_reset_handler = srp_reset_device, + .eh_host_reset_handler = srp_reset_host, +@@ -2857,7 +2993,15 @@ static struct scsi_host_template srp_template = { .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE, .use_clustering = ENABLE_CLUSTERING, .shost_attrs = srp_host_attrs, ++#ifdef HAVE_SCSI_HOST_TEMPLATE_USE_HOST_WIDE_TAGS ++ .use_host_wide_tags = 1, ++#endif ++#ifdef HAVE_SCSI_HOST_TEMPLATE_USE_BLK_TAGS ++ .use_blk_tags = 1, ++#endif +#ifdef HAVE_SCSI_HOST_TEMPLATE_TRACK_QUEUE_DEPTH .track_queue_depth = 1, +#endif }; static int srp_sdev_count(struct Scsi_Host *host) -@@ -2906,8 +2995,13 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target) +@@ -2906,8 +3050,13 @@ static int 
srp_add_target(struct srp_host *host, struct srp_target_port *target) list_add_tail(&target->list, &host->target_list); spin_unlock(&host->target_lock); @@ -259,21 +360,54 @@ index xxxxxxx..xxxxxxx xxxxxx if (srp_connected_ch(target) < target->ch_count || target->qp_in_error) { -@@ -3184,8 +3278,13 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target) +@@ -3184,12 +3333,21 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target) break; case SRP_OPT_SG_TABLESIZE: +#ifdef HAVE_SG_MAX_SEGMENTS if (match_int(args, &token) || token < 1 || token > SG_MAX_SEGMENTS) { + pr_warn("bad max sg_tablesize parameter '%s'\n", + p); + goto out; + } +#else + if (match_int(args, &token) || token < 1 || + token > SCSI_MAX_SG_CHAIN_SEGMENTS) { ++ pr_warn("bad max sg_tablesize parameter '%s'\n", ++ p); ++ goto out; ++ } +#endif - pr_warn("bad max sg_tablesize parameter '%s'\n", - p); - goto out; -@@ -3347,11 +3446,15 @@ static ssize_t srp_create_target(struct device *dev, + target->sg_tablesize = token; + break; + +@@ -3257,6 +3415,11 @@ static ssize_t srp_create_target(struct device *dev, + if (!target_host) + return -ENOMEM; + ++#if defined(HAVE_SCSI_HOST_USE_BLK_MQ) && \ ++ defined(HAVE_SCSI_TCQ_SCSI_INIT_SHARED_TAG_MAP) && \ ++ !defined(HAVE_SCSI_HOST_TEMPLATE_USE_BLK_TAGS) ++ target_host->use_blk_mq = true; ++#endif + target_host->transportt = ib_srp_transport_template; + target_host->max_channel = 0; + target_host->max_id = 1; +@@ -3288,6 +3451,12 @@ static ssize_t srp_create_target(struct device *dev, + if (ret) + goto out; + ++#ifdef HAVE_SCSI_HOST_TEMPLATE_USE_BLK_TAGS ++ ret = scsi_init_shared_tag_map(target_host, target_host->can_queue); ++ if (ret) ++ goto out; ++#endif ++ + target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE; + + if (!srp_conn_unique(target->srp_host, target)) { +@@ -3347,11 +3516,15 @@ static ssize_t srp_create_target(struct device *dev, goto out; ret = -ENOMEM; @@ -289,7 +423,7 @@ index 
xxxxxxx..xxxxxxx xxxxxx target->ch = kcalloc(target->ch_count, sizeof(*target->ch), GFP_KERNEL); if (!target->ch) -@@ -3417,7 +3520,9 @@ static ssize_t srp_create_target(struct device *dev, +@@ -3417,7 +3590,9 @@ static ssize_t srp_create_target(struct device *dev, } connected: diff --git a/patches/0009-BACKPORT-ib_iser.patch b/patches/0009-BACKPORT-ib_iser.patch index 30352b2..c46b073 100644 --- a/patches/0009-BACKPORT-ib_iser.patch +++ b/patches/0009-BACKPORT-ib_iser.patch @@ -1,16 +1,81 @@ From: Vladimir Neyelov Subject: [PATCH] BACKPORT: ib_iser +Change-Id: Ib85ca82399e22f4c6d053b764ce79ff4aefb25d9 Signed-off-by: Vladimir Neyelov --- - drivers/infiniband/ulp/iser/iscsi_iser.c | 13 ++++++++++++- - 1 file changed, 12 insertions(+), 1 deletion(-) + drivers/infiniband/ulp/iser/iscsi_iser.c | 37 ++- + drivers/infiniband/ulp/iser/iscsi_iser.h | 9 + + drivers/infiniband/ulp/iser/iser_initiator.c | 73 +++++- + drivers/infiniband/ulp/iser/iser_memory.c | 364 ++++++++++++++++++++++++++- + drivers/infiniband/ulp/iser/iser_verbs.c | 2 + + 5 files changed, 478 insertions(+), 7 deletions(-) diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index xxxxxxx..xxxxxxx xxxxxx --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c -@@ -985,8 +985,13 @@ static int iscsi_iser_slave_alloc(struct scsi_device *sdev) +@@ -108,7 +108,11 @@ MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)"); + + int iser_pi_guard; + module_param_named(pi_guard, iser_pi_guard, int, S_IRUGO); ++#ifdef HAVE_SCSI_CMND_PROT_FLAGS + MODULE_PARM_DESC(pi_guard, "T10-PI guard_type [deprecated]"); ++#else ++MODULE_PARM_DESC(pi_guard, "T10-PI guard_type, 0:CRC|1:IP_CSUM (default:IP_CSUM)"); ++#endif + + /* + * iscsi_iser_recv() - Process a successful recv completion +@@ -390,6 +394,7 @@ static void iscsi_iser_cleanup_task(struct iscsi_task *task) + } + } + ++#ifdef HAVE_ISCSI_TRANSPORT_CHECK_PROTECTION + 
/** + * iscsi_iser_check_protection() - check protection information status of task. + * @task: iscsi task +@@ -414,6 +419,7 @@ iscsi_iser_check_protection(struct iscsi_task *task, sector_t *sector) + return iser_check_task_pi_status(iser_task, ISER_DIR_OUT, + sector); + } ++#endif + + /** + * iscsi_iser_conn_create() - create a new iscsi-iser connection +@@ -769,7 +775,13 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s + stats->r2t_pdus = conn->r2t_pdus_cnt; /* always 0 */ + stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt; + stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt; ++#if defined(HAVE_QUEUE_FLAG_SG_GAPS) || defined(HAVE_BLK_QUEUE_VIRT_BOUNDARY) + stats->custom_length = 0; ++#else ++ stats->custom_length = 1; ++ strcpy(stats->custom[0].desc, "fmr_unalign_cnt"); ++ stats->custom[0].value = conn->fmr_unalign_cnt; ++#endif + } + + static int iscsi_iser_get_ep_param(struct iscsi_endpoint *ep, +@@ -964,7 +976,9 @@ static umode_t iser_attr_is_visible(int param_type, int param) + case ISCSI_PARAM_TGT_RESET_TMO: + case ISCSI_PARAM_IFACE_NAME: + case ISCSI_PARAM_INITIATOR_NAME: ++#ifdef HAVE_ISCSI_PARAM_DISCOVERY_SESS + case ISCSI_PARAM_DISCOVERY_SESS: ++#endif + return S_IRUGO; + default: + return 0; +@@ -974,6 +988,7 @@ static umode_t iser_attr_is_visible(int param_type, int param) + return 0; + } + ++#if defined(HAVE_QUEUE_FLAG_SG_GAPS) || defined(HAVE_BLK_QUEUE_VIRT_BOUNDARY) + static int iscsi_iser_slave_alloc(struct scsi_device *sdev) + { + struct iscsi_session *session; +@@ -985,16 +1000,26 @@ static int iscsi_iser_slave_alloc(struct scsi_device *sdev) ib_dev = iser_conn->ib_conn.device->ib_device; if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)) @@ -24,12 +89,13 @@ index xxxxxxx..xxxxxxx xxxxxx + } return 0; } ++#endif -@@ -994,7 +999,11 @@ static struct scsi_host_template iscsi_iser_sht = { + static struct scsi_host_template iscsi_iser_sht = { .module = THIS_MODULE, .name = "iSCSI Initiator over iSER", .queuecommand = 
iscsi_queuecommand, -+#ifdef HAVE_SCSCI_CHANGE_QUEUE_DEPTH ++#ifdef HAVE_SCSI_CHANGE_QUEUE_DEPTH .change_queue_depth = scsi_change_queue_depth, +#else + .change_queue_depth = iscsi_change_queue_depth, @@ -37,8 +103,13 @@ index xxxxxxx..xxxxxxx xxxxxx .sg_tablesize = ISCSI_ISER_DEF_SG_TABLESIZE, .cmd_per_lun = ISER_DEF_CMD_PER_LUN, .eh_abort_handler = iscsi_eh_abort, -@@ -1005,7 +1014,9 @@ static struct scsi_host_template iscsi_iser_sht = { +@@ -1002,16 +1027,24 @@ static struct scsi_host_template iscsi_iser_sht = { + .eh_target_reset_handler = iscsi_eh_recover_target, + .target_alloc = iscsi_target_alloc, + .use_clustering = ENABLE_CLUSTERING, ++#if defined(HAVE_QUEUE_FLAG_SG_GAPS) || defined(HAVE_BLK_QUEUE_VIRT_BOUNDARY) .slave_alloc = iscsi_iser_slave_alloc, ++#endif .proc_name = "iscsi_iser", .this_id = -1, +#ifdef HAVE_SCSI_HOST_TEMPLATE_TRACK_QUEUE_DEPTH @@ -47,3 +118,644 @@ index xxxxxxx..xxxxxxx xxxxxx }; static struct iscsi_transport iscsi_iser_transport = { + .owner = THIS_MODULE, + .name = "iser", ++#ifdef HAVE_DISCOVERY_SESSION + .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_TEXT_NEGO, ++#else ++ .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T, ++#endif + /* session management */ + .create_session = iscsi_iser_session_create, + .destroy_session = iscsi_iser_session_destroy, +@@ -1036,7 +1069,9 @@ static struct iscsi_transport iscsi_iser_transport = { + .xmit_task = iscsi_iser_task_xmit, + .cleanup_task = iscsi_iser_cleanup_task, + .alloc_pdu = iscsi_iser_pdu_alloc, ++#ifdef HAVE_ISCSI_TRANSPORT_CHECK_PROTECTION + .check_protection = iscsi_iser_check_protection, ++#endif + /* recovery */ + .session_recovery_timedout = iscsi_session_recovery_timedout, + +diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h +index xxxxxxx..xxxxxxx xxxxxx +--- a/drivers/infiniband/ulp/iser/iscsi_iser.h ++++ b/drivers/infiniband/ulp/iser/iscsi_iser.h +@@ -68,6 +68,11 @@ + #include + #include + ++#if defined(CONFIG_COMPAT_RHEL_7_3) || 
defined(CONFIG_COMPAT_RHEL_7_2) ++ #undef HAVE_QUEUE_FLAG_SG_GAPS ++ #undef HAVE_BLK_QUEUE_VIRT_BOUNDARY ++#endif ++ + #define DRV_NAME "iser" + #define PFX DRV_NAME ": " + #define DRV_VER "1.6" +@@ -198,6 +203,10 @@ struct iser_data_buf { + int size; + unsigned long data_len; + unsigned int dma_nents; ++#if !defined(HAVE_QUEUE_FLAG_SG_GAPS) && !defined(HAVE_BLK_QUEUE_VIRT_BOUNDARY) ++ struct scatterlist *orig_sg; ++ unsigned int orig_size; ++#endif + }; + + /* fwd declarations */ +diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c +index xxxxxxx..xxxxxxx xxxxxx +--- a/drivers/infiniband/ulp/iser/iser_initiator.c ++++ b/drivers/infiniband/ulp/iser/iser_initiator.c +@@ -322,7 +322,9 @@ static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req) + { + struct iser_conn *iser_conn = conn->dd_data; + struct ib_conn *ib_conn = &iser_conn->ib_conn; ++#ifdef HAVE_DISCOVERY_SESSION + struct iscsi_session *session = conn->session; ++#endif + + iser_dbg("req op %x flags %x\n", req->opcode, req->flags); + /* check if this is the last login - going to full feature phase */ +@@ -334,14 +336,15 @@ static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req) + * (for the last login response). 
+ */ + WARN_ON(ib_conn->post_recv_buf_count != 1); +- ++ ++#ifdef HAVE_DISCOVERY_SESSION + if (session->discovery_sess) { + iser_info("Discovery session, re-using login RX buffer\n"); + return 0; + } else + iser_info("Normal session, posting batch of RX %d buffers\n", + iser_conn->min_posted_rx); +- ++#endif + /* Initial post receive buffers */ + if (iser_post_recvm(iser_conn, iser_conn->min_posted_rx)) + return -ENOMEM; +@@ -365,7 +368,11 @@ int iser_send_command(struct iscsi_conn *conn, + unsigned long edtl; + int err; + struct iser_data_buf *data_buf, *prot_buf; ++#ifdef HAVE_ISCSI_CMD ++ struct iscsi_cmd *hdr = (struct iscsi_cmd *)task->hdr; ++#else + struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr; ++#endif + struct scsi_cmnd *sc = task->sc; + struct iser_tx_desc *tx_desc = &iser_task->desc; + u8 sig_count = ++iser_conn->ib_conn.sig_count; +@@ -752,7 +759,11 @@ void iser_task_rdma_init(struct iscsi_iser_task *iser_task) + void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task) + { + int prot_count = scsi_prot_sg_count(iser_task->sc); +- ++#if !defined(HAVE_QUEUE_FLAG_SG_GAPS) && !defined(HAVE_BLK_QUEUE_VIRT_BOUNDARY) ++ int is_rdma_data_aligned = 1; ++ int is_rdma_prot_aligned = 1; ++#endif ++#if defined(HAVE_QUEUE_FLAG_SG_GAPS) || defined(HAVE_BLK_QUEUE_VIRT_BOUNDARY) + if (iser_task->dir[ISER_DIR_IN]) { + iser_unreg_rdma_mem(iser_task, ISER_DIR_IN); + iser_dma_unmap_task_data(iser_task, +@@ -774,4 +785,60 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task) + &iser_task->prot[ISER_DIR_OUT], + DMA_TO_DEVICE); + } ++#else ++ /* if we were reading, copy back to unaligned sglist, ++ * anyway dma_unmap and free the copy ++ */ ++ if (iser_task->data[ISER_DIR_IN].orig_sg) { ++ is_rdma_data_aligned = 0; ++ iser_finalize_rdma_unaligned_sg(iser_task, ++ &iser_task->data[ISER_DIR_IN], ++ ISER_DIR_IN); ++ } ++ ++ if (iser_task->data[ISER_DIR_OUT].orig_sg) { ++ is_rdma_data_aligned = 0; ++ iser_finalize_rdma_unaligned_sg(iser_task, 
++ &iser_task->data[ISER_DIR_OUT], ++ ISER_DIR_OUT); ++ } ++ ++ if (iser_task->prot[ISER_DIR_IN].orig_sg) { ++ is_rdma_prot_aligned = 0; ++ iser_finalize_rdma_unaligned_sg(iser_task, ++ &iser_task->prot[ISER_DIR_IN], ++ ISER_DIR_IN); ++ } ++ ++ if (iser_task->prot[ISER_DIR_OUT].orig_sg) { ++ is_rdma_prot_aligned = 0; ++ iser_finalize_rdma_unaligned_sg(iser_task, ++ &iser_task->prot[ISER_DIR_OUT], ++ ISER_DIR_OUT); ++ } ++ ++ if (iser_task->dir[ISER_DIR_IN]) { ++ iser_unreg_rdma_mem(iser_task, ISER_DIR_IN); ++ if (is_rdma_data_aligned) ++ iser_dma_unmap_task_data(iser_task, ++ &iser_task->data[ISER_DIR_IN], ++ DMA_FROM_DEVICE); ++ if (prot_count && is_rdma_prot_aligned) ++ iser_dma_unmap_task_data(iser_task, ++ &iser_task->prot[ISER_DIR_IN], ++ DMA_FROM_DEVICE); ++ } ++ ++ if (iser_task->dir[ISER_DIR_OUT]) { ++ iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT); ++ if (is_rdma_data_aligned) ++ iser_dma_unmap_task_data(iser_task, ++ &iser_task->data[ISER_DIR_OUT], ++ DMA_TO_DEVICE); ++ if (prot_count && is_rdma_prot_aligned) ++ iser_dma_unmap_task_data(iser_task, ++ &iser_task->prot[ISER_DIR_OUT], ++ DMA_TO_DEVICE); ++ } ++#endif + } +diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c +index xxxxxxx..xxxxxxx xxxxxx +--- a/drivers/infiniband/ulp/iser/iser_memory.c ++++ b/drivers/infiniband/ulp/iser/iser_memory.c +@@ -67,6 +67,258 @@ static const struct iser_reg_ops fmr_ops = { + .reg_desc_put = iser_reg_desc_put_fmr, + }; + ++#if !defined(HAVE_QUEUE_FLAG_SG_GAPS) && !defined(HAVE_BLK_QUEUE_VIRT_BOUNDARY) ++#define IS_4K_ALIGNED(addr) ((((unsigned long)addr) & ~MASK_4K) == 0) ++static void ++iser_free_bounce_sg(struct iser_data_buf *data) ++{ ++ struct scatterlist *sg; ++ int count; ++ ++ for_each_sg(data->sg, sg, data->size, count) ++ __free_page(sg_page(sg)); ++ ++ kfree(data->sg); ++ ++ data->sg = data->orig_sg; ++ data->size = data->orig_size; ++ data->orig_sg = NULL; ++ data->orig_size = 0; ++} ++ ++static int 
++iser_alloc_bounce_sg(struct iser_data_buf *data) ++{ ++ struct scatterlist *sg; ++ struct page *page; ++ unsigned long length = data->data_len; ++ int i = 0, nents = DIV_ROUND_UP(length, PAGE_SIZE); ++ ++ sg = kcalloc(nents, sizeof(*sg), GFP_ATOMIC); ++ if (!sg) ++ goto err; ++ ++ sg_init_table(sg, nents); ++ while (length) { ++ u32 page_len = min_t(u32, length, PAGE_SIZE); ++ ++ page = alloc_page(GFP_ATOMIC); ++ if (!page) ++ goto err; ++ ++ sg_set_page(&sg[i], page, page_len, 0); ++ length -= page_len; ++ i++; ++ } ++ ++ data->orig_sg = data->sg; ++ data->orig_size = data->size; ++ data->sg = sg; ++ data->size = nents; ++ ++ return 0; ++ ++err: ++ for (; i > 0; i--) ++ __free_page(sg_page(&sg[i - 1])); ++ kfree(sg); ++ ++ return -ENOMEM; ++} ++ ++static void ++iser_copy_bounce(struct iser_data_buf *data, bool to_buffer) ++{ ++ struct scatterlist *osg, *bsg = data->sg; ++ void *oaddr, *baddr; ++ unsigned int left = data->data_len; ++ unsigned int bsg_off = 0; ++ int i; ++ ++ for_each_sg(data->orig_sg, osg, data->orig_size, i) { ++ unsigned int copy_len, osg_off = 0; ++ ++#ifdef HAVE_KM_TYPE ++ if (to_buffer) ++ oaddr = kmap_atomic(sg_page(osg), KM_USER0) + osg->offset; ++ else ++ oaddr = kmap_atomic(sg_page(osg), KM_SOFTIRQ0) + osg->offset; ++#else ++ oaddr = kmap_atomic(sg_page(osg)) + osg->offset; ++#endif ++ copy_len = min(left, osg->length); ++ while (copy_len) { ++ unsigned int len = min(copy_len, bsg->length - bsg_off); ++ ++#ifdef HAVE_KM_TYPE ++ if (to_buffer) ++ baddr = kmap_atomic(sg_page(bsg), KM_USER0) + bsg->offset; ++ else ++ baddr = kmap_atomic(sg_page(bsg), KM_SOFTIRQ0) + bsg->offset; ++#else ++ baddr = kmap_atomic(sg_page(bsg)) + bsg->offset; ++#endif ++ if (to_buffer) ++ memcpy(baddr + bsg_off, oaddr + osg_off, len); ++ else ++ memcpy(oaddr + osg_off, baddr + bsg_off, len); ++ ++#ifdef HAVE_KM_TYPE ++ if (to_buffer) ++ kunmap_atomic(baddr - bsg->offset, KM_USER0); ++ else ++ kunmap_atomic(baddr - bsg->offset, KM_SOFTIRQ0); ++#else ++ 
kunmap_atomic(baddr - bsg->offset); ++#endif ++ osg_off += len; ++ bsg_off += len; ++ copy_len -= len; ++ ++ if (bsg_off >= bsg->length) { ++ bsg = sg_next(bsg); ++ bsg_off = 0; ++ } ++ } ++#ifdef HAVE_KM_TYPE ++ if (to_buffer) ++ kunmap_atomic(oaddr - osg->offset, KM_USER0); ++ else ++ kunmap_atomic(oaddr - osg->offset, KM_SOFTIRQ0); ++#else ++ kunmap_atomic(oaddr - osg->offset); ++#endif ++ left -= osg_off; ++ } ++} ++ ++static inline void ++iser_copy_from_bounce(struct iser_data_buf *data) ++{ ++ iser_copy_bounce(data, false); ++} ++ ++static inline void ++iser_copy_to_bounce(struct iser_data_buf *data) ++{ ++ iser_copy_bounce(data, true); ++} ++ ++/** ++ * iser_start_rdma_unaligned_sg ++*/ ++static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, ++ struct iser_data_buf *data, ++ enum iser_data_dir cmd_dir) ++{ ++ struct ib_device *dev = iser_task->iser_conn->ib_conn.device->ib_device; ++ int rc; ++ ++ rc = iser_alloc_bounce_sg(data); ++ if (rc) { ++ iser_err("Failed to allocate bounce for data len %lu\n", ++ data->data_len); ++ return rc; ++ } ++ ++ if (cmd_dir == ISER_DIR_OUT) ++ iser_copy_to_bounce(data); ++ ++ data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size, ++ (cmd_dir == ISER_DIR_OUT) ? ++ DMA_TO_DEVICE : DMA_FROM_DEVICE); ++ if (!data->dma_nents) { ++ iser_err("Got dma_nents %d, something went wrong...\n", ++ data->dma_nents); ++ rc = -ENOMEM; ++ goto err; ++ } ++ ++ return 0; ++err: ++ iser_free_bounce_sg(data); ++ return rc; ++} ++ ++/** ++ * iser_finalize_rdma_unaligned_sg ++ */ ++ ++void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, ++ struct iser_data_buf *data, ++ enum iser_data_dir cmd_dir) ++{ ++ struct ib_device *dev = iser_task->iser_conn->ib_conn.device->ib_device; ++ ++ ib_dma_unmap_sg(dev, data->sg, data->size, ++ (cmd_dir == ISER_DIR_OUT) ? 
++ DMA_TO_DEVICE : DMA_FROM_DEVICE); ++ ++ if (cmd_dir == ISER_DIR_IN) ++ iser_copy_from_bounce(data); ++ ++ iser_free_bounce_sg(data); ++} ++ ++/** ++ * iser_data_buf_aligned_len - Tries to determine the maximal correctly aligned ++ * for RDMA sub-list of a scatter-gather list of memory buffers, and returns ++ * the number of entries which are aligned correctly. Supports the case where ++ * consecutive SG elements are actually fragments of the same physical page. ++ */ ++static int iser_data_buf_aligned_len(struct iser_data_buf *data, ++ struct ib_device *ibdev, ++ unsigned sg_tablesize) ++{ ++ struct scatterlist *sg, *sgl, *next_sg = NULL; ++ u64 start_addr, end_addr; ++ int i, ret_len, start_check = 0; ++ ++ if (data->dma_nents == 1) ++ return 1; ++ ++ sgl = data->sg; ++ start_addr = ib_sg_dma_address(ibdev, sgl); ++ ++ if (unlikely(sgl[0].offset && ++ data->data_len >= sg_tablesize * PAGE_SIZE)) { ++ iser_dbg("can't register length %lx with offset %x " ++ "fall to bounce buffer\n", data->data_len, ++ sgl[0].offset); ++ return 0; ++ } ++ ++ for_each_sg(sgl, sg, data->dma_nents, i) { ++ if (start_check && !IS_4K_ALIGNED(start_addr)) ++ break; ++ ++ next_sg = sg_next(sg); ++ if (!next_sg) ++ break; ++ ++ end_addr = start_addr + ib_sg_dma_len(ibdev, sg); ++ start_addr = ib_sg_dma_address(ibdev, next_sg); ++ ++ if (end_addr == start_addr) { ++ start_check = 0; ++ continue; ++ } else ++ start_check = 1; ++ ++ if (!IS_4K_ALIGNED(end_addr)) ++ break; ++ } ++ ret_len = (next_sg) ? 
i : i+1; ++ ++ if (unlikely(ret_len != data->dma_nents)) ++ iser_warn("rdma alignment violation (%d/%d aligned)\n", ++ ret_len, data->dma_nents); ++ ++ return ret_len; ++} ++ ++#endif ++ + void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc) + { + iser_err_comp(wc, "memreg"); +@@ -160,6 +412,54 @@ static void iser_dump_page_vec(struct iser_page_vec *page_vec) + iser_err("vec[%d]: %llx\n", i, page_vec->pages[i]); + } + ++#if !defined(HAVE_QUEUE_FLAG_SG_GAPS) && !defined(HAVE_BLK_QUEUE_VIRT_BOUNDARY) ++static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task, ++ struct iser_data_buf *mem, ++ enum iser_data_dir cmd_dir) ++{ ++ struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn; ++ struct iser_device *device = iser_task->iser_conn->ib_conn.device; ++ ++ iscsi_conn->fmr_unalign_cnt++; ++ ++ if (iser_debug_level > 0) ++ iser_data_buf_dump(mem, device->ib_device); ++ ++ /* unmap the command data before accessing it */ ++ iser_dma_unmap_task_data(iser_task, mem, ++ (cmd_dir == ISER_DIR_OUT) ? 
++ DMA_TO_DEVICE : DMA_FROM_DEVICE); ++ ++ /* allocate copy buf, if we are writing, copy the */ ++ /* unaligned scatterlist, dma map the copy */ ++ if (iser_start_rdma_unaligned_sg(iser_task, mem, cmd_dir) != 0) ++ return -ENOMEM; ++ ++ return 0; ++} ++ ++static int ++iser_handle_unaligned_buf(struct iscsi_iser_task *task, ++ struct iser_data_buf *mem, ++ enum iser_data_dir dir) ++{ ++ struct iser_conn *iser_conn = task->iser_conn; ++ struct iser_device *device = iser_conn->ib_conn.device; ++ int err, aligned_len; ++ ++ aligned_len = iser_data_buf_aligned_len(mem, device->ib_device, ++ iser_conn->scsi_sg_tablesize); ++ if (aligned_len != mem->dma_nents) { ++ err = fall_to_bounce_buf(task, mem, dir); ++ if (err) ++ return err; ++ } ++ ++ return 0; ++} ++ ++#endif ++ + int iser_dma_map_task_data(struct iscsi_iser_task *iser_task, + struct iser_data_buf *data, + enum iser_data_dir iser_dir, +@@ -306,8 +606,13 @@ iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs, + struct ib_sig_domain *domain) + { + domain->sig_type = IB_SIG_TYPE_T10_DIF; ++#ifdef HAVE_SCSI_CMND_PROT_FLAGS + domain->sig.dif.pi_interval = scsi_prot_interval(sc); + domain->sig.dif.ref_tag = scsi_prot_ref_tag(sc); ++#else ++ domain->sig.dif.pi_interval = sc->device->sector_size; ++ domain->sig.dif.ref_tag = scsi_get_lba(sc) & 0xffffffff; ++#endif + /* + * At the moment we hard code those, but in the future + * we will take them from sc. 
+@@ -315,9 +620,15 @@ iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs, + domain->sig.dif.apptag_check_mask = 0xffff; + domain->sig.dif.app_escape = true; + domain->sig.dif.ref_escape = true; ++#ifdef HAVE_SCSI_CMND_PROT_FLAGS + if (sc->prot_flags & SCSI_PROT_REF_INCREMENT) + domain->sig.dif.ref_remap = true; +-}; ++#else ++ if (scsi_get_prot_type(sc) == SCSI_PROT_DIF_TYPE1 || ++ scsi_get_prot_type(sc) == SCSI_PROT_DIF_TYPE2) ++ domain->sig.dif.ref_remap = true; ++#endif ++} + + static int + iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs) +@@ -333,16 +644,26 @@ iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs) + case SCSI_PROT_WRITE_STRIP: + sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE; + iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem); ++#ifdef HAVE_SCSI_CMND_PROT_FLAGS + sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ? + IB_T10DIF_CSUM : IB_T10DIF_CRC; ++#else ++ sig_attrs->mem.sig.dif.bg_type = iser_pi_guard ? IB_T10DIF_CSUM : ++ IB_T10DIF_CRC; ++#endif + break; + case SCSI_PROT_READ_PASS: + case SCSI_PROT_WRITE_PASS: + iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire); + sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC; + iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem); ++#ifdef HAVE_SCSI_CMND_PROT_FLAGS + sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ? + IB_T10DIF_CSUM : IB_T10DIF_CRC; ++#else ++ sig_attrs->mem.sig.dif.bg_type = iser_pi_guard ? 
IB_T10DIF_CSUM : ++ IB_T10DIF_CRC; ++#endif + break; + default: + iser_err("Unsupported PI operation %d\n", +@@ -353,6 +674,7 @@ iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs) + return 0; + } + ++#ifdef HAVE_SCSI_CMND_PROT_FLAGS + static inline void + iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask) + { +@@ -362,6 +684,30 @@ iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask) + if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) + *mask |= ISER_CHECK_GUARD; + } ++#else ++static int ++iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask) ++{ ++ switch (scsi_get_prot_type(sc)) { ++ case SCSI_PROT_DIF_TYPE0: ++ *mask = 0x0; ++ break; ++ case SCSI_PROT_DIF_TYPE1: ++ case SCSI_PROT_DIF_TYPE2: ++ *mask = ISER_CHECK_GUARD | ISER_CHECK_REFTAG; ++ break; ++ case SCSI_PROT_DIF_TYPE3: ++ *mask = ISER_CHECK_GUARD; ++ break; ++ default: ++ iser_err("Unsupported protection type %d\n", ++ scsi_get_prot_type(sc)); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++#endif + + static inline void + iser_inv_rkey(struct ib_send_wr *inv_wr, +@@ -417,12 +763,14 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task, + IB_ACCESS_REMOTE_READ | + IB_ACCESS_REMOTE_WRITE; + pi_ctx->sig_mr_valid = 1; +- + sig_reg->sge.lkey = mr->lkey; + sig_reg->rkey = mr->rkey; + sig_reg->sge.addr = 0; ++#ifdef HAVE_SCSI_TRANSFER_LENGTH + sig_reg->sge.length = scsi_transfer_length(iser_task->sc); +- ++#else ++ sig_reg->sge.length = scsi_bufflen(iser_task->sc); ++#endif + iser_dbg("lkey=0x%x rkey=0x%x addr=0x%llx length=%u\n", + sig_reg->sge.lkey, sig_reg->rkey, sig_reg->sge.addr, + sig_reg->sge.length); +@@ -520,6 +868,11 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task, + bool use_dma_key; + int err; + ++#if !defined(HAVE_QUEUE_FLAG_SG_GAPS) && !defined(HAVE_BLK_QUEUE_VIRT_BOUNDARY) ++ err = iser_handle_unaligned_buf(task, mem, dir); ++ if (unlikely(err)) ++ return err; ++#endif + use_dma_key = mem->dma_nents == 1 && (all_imm || !iser_always_reg) && + scsi_get_prot_op(task->sc) == 
SCSI_PROT_NORMAL; + +@@ -542,6 +895,11 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task, + + if (scsi_prot_sg_count(task->sc)) { + mem = &task->prot[dir]; ++#if !defined(HAVE_QUEUE_FLAG_SG_GAPS) && !defined(HAVE_BLK_QUEUE_VIRT_BOUNDARY) ++ err = iser_handle_unaligned_buf(task, mem, dir); ++ if (unlikely(err)) ++ goto err_reg; ++#endif + err = iser_reg_prot_sg(task, mem, desc, + use_dma_key, prot_reg); + if (unlikely(err)) +diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c +index xxxxxxx..xxxxxxx xxxxxx +--- a/drivers/infiniband/ulp/iser/iser_verbs.c ++++ b/drivers/infiniband/ulp/iser/iser_verbs.c +@@ -1109,6 +1109,7 @@ int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc, + return ib_ret; + } + ++#ifdef HAVE_ISCSI_TRANSPORT_CHECK_PROTECTION + u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task, + enum iser_data_dir cmd_dir, sector_t *sector) + { +@@ -1156,6 +1157,7 @@ err: + /* Not alot we can do here, return ambiguous guard error */ + return 0x1; + } ++#endif + + void iser_err_comp(struct ib_wc *wc, const char *type) + { -- 2.41.0