git.openfabrics.org - compat-rdma/compat-rdma.git/commitdiff
Added SRP backport for RHEL7.6
author    Vladimir Sokolovsky <vlad@mellanox.com>
          Wed, 28 Aug 2019 19:31:00 +0000 (14:31 -0500)
committer Vladimir Sokolovsky <vlad@mellanox.com>
          Wed, 28 Aug 2019 19:31:00 +0000 (14:31 -0500)
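
Wrap kernel APIs that are missing from the RHEL 7.6 base kernel in
HAVE_* feature macros and add fallbacks where needed: a private
16-bit channel / 16-bit request-index tag encoding plus a per-channel
free_reqs list where blk-mq unique tags (HAVE_BLK_TAGS) are not
usable, module_param_call() where struct kernel_param_ops is missing,
SCSI_MAX_SG_CHAIN_SEGMENTS where SG_MAX_SEGMENTS does not exist, the
reason-based srp_change_queue_depth() variant, a slave_alloc hook
that calls blk_queue_virt_boundary() where struct Scsi_Host has no
virt_boundary_mask, and host_lock-based device iteration in
scsi_transport_srp where scsi_device has no state_mutex.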
Signed-off-by: Vladimir Sokolovsky <vlad@mellanox.com>
patches/0007-BACKPORT-srp.patch [new file with mode: 0644]
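
Note on the fallback tag scheme: the non-HAVE_BLK_TAGS paths in
srp_queuecommand(), srp_process_rsp() and srp_abort() all rely on the
private tag encoding added to ib_srp.h. Below is a minimal userspace
sketch of the round trip; the three helpers are copied from the
ib_srp.h hunk, while the type aliases and the main() harness are added
here only for illustration:

    #include <assert.h>
    #include <stdint.h>

    typedef uint16_t u16;
    typedef uint32_t u32;

    /* From the ib_srp.h hunk: channel index in the upper 16 bits,
     * request-ring index in the lower 16 bits. */
    static inline u32 build_srp_tag(u16 ch, u16 req_idx)
    {
            return ch << 16 | req_idx;
    }

    static inline u16 srp_tag_ch(u32 tag)
    {
            return tag >> 16;
    }

    static inline u16 srp_tag_idx(u32 tag)
    {
            return tag & ((1 << 16) - 1);
    }

    int main(void)
    {
            /* Hypothetical values: channel 3, request-ring slot 42. */
            u32 tag = build_srp_tag(3, 42);

            assert(srp_tag_ch(tag) == 3);
            assert(srp_tag_idx(tag) == 42);
            return 0;
    }

Packing the channel index into the upper half mirrors what
blk_mq_unique_tag()/blk_mq_unique_tag_to_hwq() provide on newer
kernels, which is why both code paths can share the u32 tag handling
in srp_queuecommand().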

diff --git a/patches/0007-BACKPORT-srp.patch b/patches/0007-BACKPORT-srp.patch
new file mode 100644 (file)
index 0000000..aeaed0d
--- /dev/null
@@ -0,0 +1,706 @@
+From: Vladimir Sokolovsky <vlad@mellanox.com>
+Subject: [PATCH] BACKPORT: srp
+
+Signed-off-by: Vladimir Sokolovsky <vlad@mellanox.com>
+---
+ drivers/infiniband/ulp/srp/ib_srp.c | 246 ++++++++++++++++++++++++++++
+ drivers/infiniband/ulp/srp/ib_srp.h |  36 ++++
+ drivers/scsi/scsi_transport_srp.c   |   9 +
+ 3 files changed, 291 insertions(+)
+
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
+index xxxxxxx..xxxxxxx 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -30,6 +30,9 @@
+  * SOFTWARE.
+  */
++#ifdef pr_fmt
++#undef pr_fmt
++#endif
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+ #include <linux/module.h>
+@@ -67,6 +70,13 @@ MODULE_LICENSE("Dual BSD/GPL");
+ #define DYNAMIC_DEBUG_BRANCH(descriptor) false
+ #endif
++#ifndef DEFINE_DYNAMIC_DEBUG_METADATA
++#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt)
++#endif
++#ifndef DYNAMIC_DEBUG_BRANCH
++#define DYNAMIC_DEBUG_BRANCH(descriptor) false
++#endif
++
+ static unsigned int srp_sg_tablesize;
+ static unsigned int cmd_sg_entries;
+ static unsigned int indirect_sg_entries;
+@@ -84,8 +94,13 @@ MODULE_PARM_DESC(cmd_sg_entries,
+                "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
+ module_param(indirect_sg_entries, uint, 0444);
++#ifdef HAVE_SG_MAX_SEGMENTS
+ MODULE_PARM_DESC(indirect_sg_entries,
+                "Default max number of gather/scatter entries (default is 12, max is " __stringify(SG_MAX_SEGMENTS) ")");
++#else
++MODULE_PARM_DESC(indirect_sg_entries,
++               "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");
++#endif
+ module_param(allow_ext_sg, bool, 0444);
+ MODULE_PARM_DESC(allow_ext_sg,
+@@ -106,6 +121,7 @@ MODULE_PARM_DESC(register_always,
+ module_param(never_register, bool, 0444);
+ MODULE_PARM_DESC(never_register, "Never register memory");
++#ifdef HAVE_MODULEPARAM_KERNEL_PARAM_OPS
+ static const struct kernel_param_ops srp_tmo_ops;
+ static int srp_reconnect_delay = 10;
+@@ -124,6 +140,30 @@ MODULE_PARM_DESC(fast_io_fail_tmo,
+ static int srp_dev_loss_tmo = 600;
+ module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
+               S_IRUGO | S_IWUSR);
++#else
++static int srp_tmo_get(char *buffer, const struct kernel_param *kp);
++static int srp_tmo_set(const char *val, const struct kernel_param *kp);
++
++static int srp_reconnect_delay = 10;
++module_param_call(reconnect_delay, (param_set_fn)srp_tmo_set,
++                (param_get_fn)srp_tmo_get, &srp_reconnect_delay,
++                S_IRUGO | S_IWUSR);
++MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");
++
++static int srp_fast_io_fail_tmo = 15;
++module_param_call(fast_io_fail_tmo, (param_set_fn)srp_tmo_set,
++                (param_get_fn)srp_tmo_get, &srp_fast_io_fail_tmo,
++                S_IRUGO | S_IWUSR);
++MODULE_PARM_DESC(fast_io_fail_tmo,
++               "Number of seconds between the observation of a transport"
++               " layer error and failing all I/O. \"off\" means that this"
++               " functionality is disabled.");
++
++static int srp_dev_loss_tmo = 600;
++module_param_call(dev_loss_tmo, (param_set_fn)srp_tmo_set,
++                (param_get_fn)srp_tmo_get, &srp_dev_loss_tmo,
++                S_IRUGO | S_IWUSR);
++#endif
+ MODULE_PARM_DESC(dev_loss_tmo,
+                "Maximum number of seconds that the SRP transport should"
+                " insulate transport layer errors. After this time has been"
+@@ -203,10 +243,12 @@ out:
+       return res;
+ }
++#ifdef HAVE_MODULEPARAM_KERNEL_PARAM_OPS
+ static const struct kernel_param_ops srp_tmo_ops = {
+       .get = srp_tmo_get,
+       .set = srp_tmo_set,
+ };
++#endif
+ static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
+ {
+@@ -1042,6 +1084,9 @@ static int srp_alloc_req_data(struct srp_rdma_ch *ch)
+       dma_addr_t dma_addr;
+       int i, ret = -ENOMEM;
++#ifndef HAVE_BLK_TAGS
++      INIT_LIST_HEAD(&ch->free_reqs);
++#endif
+       ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
+                              GFP_KERNEL);
+       if (!ch->req_ring)
+@@ -1074,6 +1119,10 @@ static int srp_alloc_req_data(struct srp_rdma_ch *ch)
+                       goto out;
+               req->indirect_dma_addr = dma_addr;
++#ifndef HAVE_BLK_TAGS
++              req->tag = build_srp_tag(ch - target->ch, i);
++              list_add_tail(&req->list, &ch->free_reqs);
++#endif
+       }
+       ret = 0;
+@@ -1120,6 +1169,10 @@ static void srp_remove_target(struct srp_target_port *target)
+               ch = &target->ch[i];
+               srp_free_req_data(target, ch);
+       }
++#ifndef HAVE_BLK_TAGS
++      kfree(target->mq_map);
++      target->mq_map = NULL;
++#endif
+       kfree(target->ch);
+       target->ch = NULL;
+@@ -1328,6 +1381,9 @@ static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
+       spin_lock_irqsave(&ch->lock, flags);
+       ch->req_lim += req_lim_delta;
++#ifndef HAVE_BLK_TAGS
++      list_add_tail(&req->list, &ch->free_reqs);
++#endif
+       spin_unlock_irqrestore(&ch->lock, flags);
+ }
+@@ -1763,6 +1819,7 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
+       return 0;
+ }
++#if defined(DYNAMIC_DEBUG)
+ static void srp_check_mapping(struct srp_map_state *state,
+                             struct srp_rdma_ch *ch, struct srp_request *req,
+                             struct scatterlist *scat, int count)
+@@ -1786,6 +1843,7 @@ static void srp_check_mapping(struct srp_map_state *state,
+                      scsi_bufflen(req->scmnd), desc_len, mr_len,
+                      state->ndesc, state->nmdesc);
+ }
++#endif
+ /**
+  * srp_map_data() - map SCSI data buffer onto an SRP request
+@@ -1902,12 +1960,14 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
+       if (ret < 0)
+               goto unmap;
++#if defined(DYNAMIC_DEBUG)
+       {
+               DEFINE_DYNAMIC_DEBUG_METADATA(ddm,
+                       "Memory mapping consistency check");
+               if (DYNAMIC_DEBUG_BRANCH(ddm))
+                       srp_check_mapping(&state, ch, req, scat, count);
+       }
++#endif
+       /* We've mapped the request, now pull as much of the indirect
+        * descriptor table as we can into the command buffer. If this
+@@ -2118,6 +2178,9 @@ static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
+       struct srp_request *req;
+       struct scsi_cmnd *scmnd;
+       unsigned long flags;
++#ifndef HAVE_BLK_TAGS
++      unsigned i;
++#endif
+       if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
+               spin_lock_irqsave(&ch->lock, flags);
+@@ -2134,6 +2197,7 @@ static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
+               }
+               spin_unlock_irqrestore(&ch->lock, flags);
+       } else {
++#ifdef HAVE_BLK_TAGS
+               scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
+               if (scmnd && scmnd->host_scribble) {
+                       req = (void *)scmnd->host_scribble;
+@@ -2141,6 +2205,18 @@ static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
+               } else {
+                       scmnd = NULL;
+               }
++#else
++              if (srp_tag_ch(rsp->tag) != ch - target->ch)
++                      pr_err("Channel idx mismatch: tag %#llx <> ch %#lx\n",
++                              rsp->tag, ch - target->ch);
++              i = srp_tag_idx(rsp->tag);
++              if (i < target->req_ring_size) {
++                      req = &ch->req_ring[i];
++                      scmnd = srp_claim_req(ch, req, NULL, NULL);
++              } else {
++                      scmnd = NULL;
++              }
++#endif
+               if (!scmnd) {
+                       shost_printk(KERN_ERR, target->scsi_host,
+                                    "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
+@@ -2237,8 +2313,13 @@ static void srp_process_aer_req(struct srp_rdma_ch *ch,
+       };
+       s32 delta = be32_to_cpu(req->req_lim_delta);
++#ifdef HAVE_SCSI_DEVICE_U64_LUN
+       shost_printk(KERN_ERR, target->scsi_host, PFX
+                    "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
++#else
++      shost_printk(KERN_ERR, target->scsi_host, PFX
++                   "ignoring AER for LUN %u\n", scsilun_to_int(&req->lun));
++#endif
+       if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
+               shost_printk(KERN_ERR, target->scsi_host, PFX
+@@ -2337,6 +2418,13 @@ static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
+       target->qp_in_error = true;
+ }
++#ifndef HAVE_BLK_TAGS
++static struct srp_rdma_ch *srp_map_cpu_to_ch(struct srp_target_port *target)
++{
++      return &target->ch[target->mq_map[raw_smp_processor_id()]];
++}
++#endif
++
+ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
+ {
+       struct srp_target_port *target = host_to_target(shost);
+@@ -2347,13 +2435,16 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
+       struct ib_device *dev;
+       unsigned long flags;
+       u32 tag;
++#ifdef HAVE_BLK_TAGS
+       u16 idx;
++#endif
+       int len, ret;
+       scmnd->result = srp_chkready(target->rport);
+       if (unlikely(scmnd->result))
+               goto err;
++#ifdef HAVE_BLK_TAGS
+       WARN_ON_ONCE(scmnd->request->tag < 0);
+       tag = blk_mq_unique_tag(scmnd->request);
+       ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
+@@ -2361,15 +2452,28 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
+       WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
+                 dev_name(&shost->shost_gendev), tag, idx,
+                 target->req_ring_size);
++#else
++      ch = srp_map_cpu_to_ch(target);
++#endif
+       spin_lock_irqsave(&ch->lock, flags);
+       iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
++#ifdef HAVE_BLK_TAGS
+       spin_unlock_irqrestore(&ch->lock, flags);
+       if (!iu)
+               goto err;
+       req = &ch->req_ring[idx];
++#else
++      if (!iu)
++              goto err_unlock;
++
++      req = list_first_entry(&ch->free_reqs, struct srp_request, list);
++      list_del(&req->list);
++      tag = req->tag;
++      spin_unlock_irqrestore(&ch->lock, flags);
++#endif
+       dev = target->srp_host->srp_dev->dev;
+       ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_it_iu_len,
+                                  DMA_TO_DEVICE);
+@@ -2431,6 +2535,14 @@ err_iu:
+        */
+       req->scmnd = NULL;
++#ifndef HAVE_BLK_TAGS
++      spin_lock_irqsave(&ch->lock, flags);
++      list_add(&req->list, &ch->free_reqs);
++
++err_unlock:
++      spin_unlock_irqrestore(&ch->lock, flags);
++
++#endif
+ err:
+       if (scmnd->result) {
+               scmnd->scsi_done(scmnd);
+@@ -2897,13 +3009,40 @@ static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
+  *
+  * Returns queue depth.
+  */
++#ifdef HAVE_SCSI_HOST_TEMPLATE_TRACK_QUEUE_DEPTH
+ static int
+ srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
+ {
+       if (!sdev->tagged_supported)
+               qdepth = 1;
++#ifdef HAVE_SCSI_CHANGE_QUEUE_DEPTH
+       return scsi_change_queue_depth(sdev, qdepth);
++#else
++      scsi_adjust_queue_depth(sdev, qdepth);
++      return sdev->queue_depth;
++#endif //HAVE_SCSI_CHANGE_QUEUE_DEPTH
+ }
++#else
++static int
++srp_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
++{
++      struct Scsi_Host *shost = sdev->host;
++      int max_depth;
++      if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP) {
++              max_depth = shost->can_queue;
++              if (!sdev->tagged_supported)
++                      max_depth = 1;
++              if (qdepth > max_depth)
++                      qdepth = max_depth;
++              scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
++      } else if (reason == SCSI_QDEPTH_QFULL)
++              scsi_track_queue_full(sdev, qdepth);
++      else
++              return -EOPNOTSUPP;
++
++      return sdev->queue_depth;
++}
++#endif //HAVE_SCSI_HOST_TEMPLATE_TRACK_QUEUE_DEPTH
+ static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
+                            u8 func, u8 *status)
+@@ -2984,8 +3123,13 @@ static int srp_abort(struct scsi_cmnd *scmnd)
+       if (!req)
+               return SUCCESS;
++#ifdef HAVE_BLK_TAGS
+       tag = blk_mq_unique_tag(scmnd->request);
+       ch_idx = blk_mq_unique_tag_to_hwq(tag);
++#else
++      tag = req->tag;
++      ch_idx = srp_tag_ch(tag);
++#endif
+       if (WARN_ON_ONCE(ch_idx >= target->ch_count))
+               return SUCCESS;
+       ch = &target->ch[ch_idx];
+@@ -3046,6 +3190,21 @@ static int srp_target_alloc(struct scsi_target *starget)
+       return 0;
+ }
++#ifndef HAVE_SCSI_HOST_VIRT_BOUNDARY_MASK
++static int srp_slave_alloc(struct scsi_device *sdev)
++{
++      struct Scsi_Host *shost = sdev->host;
++      struct srp_target_port *target = host_to_target(shost);
++      struct srp_device *srp_dev = target->srp_host->srp_dev;
++
++      /* No virt_boundary_mask in struct Scsi_Host; set it on the queue. */
++      blk_queue_virt_boundary(sdev->request_queue,
++                              ~srp_dev->mr_page_mask);
++
++      return 0;
++}
++#endif
++
+ static int srp_slave_configure(struct scsi_device *sdev)
+ {
+       struct Scsi_Host *shost = sdev->host;
+@@ -3248,6 +3407,9 @@ static struct scsi_host_template srp_template = {
+       .name                           = "InfiniBand SRP initiator",
+       .proc_name                      = DRV_NAME,
+       .target_alloc                   = srp_target_alloc,
++#ifndef HAVE_SCSI_HOST_VIRT_BOUNDARY_MASK
++      .slave_alloc                    = srp_slave_alloc,
++#endif
+       .slave_configure                = srp_slave_configure,
+       .info                           = srp_target_info,
+       .queuecommand                   = srp_queuecommand,
+@@ -3262,7 +3424,9 @@ static struct scsi_host_template srp_template = {
+       .this_id                        = -1,
+       .cmd_per_lun                    = SRP_DEFAULT_CMD_SQ_SIZE,
+       .shost_attrs                    = srp_host_attrs,
++#ifdef HAVE_SCSI_HOST_TEMPLATE_TRACK_QUEUE_DEPTH
+       .track_queue_depth              = 1,
++#endif
+ };
+ static int srp_sdev_count(struct Scsi_Host *host)
+@@ -3311,8 +3475,13 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
+       list_add_tail(&target->list, &host->target_list);
+       spin_unlock(&host->target_lock);
++#ifdef HAVE_SCSI_SCAN_INITIAL
+       scsi_scan_target(&target->scsi_host->shost_gendev,
+                        0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
++#else
++      scsi_scan_target(&target->scsi_host->shost_gendev,
++                       0, target->scsi_id, SCAN_WILD_CARD, 0);
++#endif
+       if (srp_connected_ch(target) < target->ch_count ||
+           target->qp_in_error) {
+@@ -3710,12 +3879,21 @@ static int srp_parse_options(struct net *net, const char *buf,
+                       break;
+               case SRP_OPT_SG_TABLESIZE:
++#ifdef HAVE_SG_MAX_SEGMENTS
+                       if (match_int(args, &token) || token < 1 ||
+                                       token > SG_MAX_SEGMENTS) {
+                               pr_warn("bad max sg_tablesize parameter '%s'\n",
+                                       p);
+                               goto out;
+                       }
++#else
++                      if (match_int(args, &token) || token < 1 ||
++                                      token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
++                              pr_warn("bad max sg_tablesize parameter '%s'\n",
++                                      p);
++                              goto out;
++                      }
++#endif
+                       target->sg_tablesize = token;
+                       break;
+@@ -3775,7 +3953,11 @@ static ssize_t srp_create_target(struct device *dev,
+       struct srp_device *srp_dev = host->srp_dev;
+       struct ib_device *ibdev = srp_dev->dev;
+       int ret, node_idx, node, cpu, i;
++#ifdef HAVE_BLK_QUEUE_VIRT_BOUNDARY
+       unsigned int max_sectors_per_mr, mr_per_cmd = 0;
++#else
++      unsigned int mr_per_cmd = 0;
++#endif
+       bool multich = false;
+       uint32_t max_iu_len;
+@@ -3789,10 +3971,14 @@ static ssize_t srp_create_target(struct device *dev,
+       target_host->max_id      = 1;
+       target_host->max_lun     = -1LL;
+       target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
++#ifdef HAVE_SCSI_HOST_TEMPLATE_MAX_SEGMENT_SIZE
+       target_host->max_segment_size = ib_dma_max_seg_size(ibdev);
++#endif
++#ifdef HAVE_SCSI_HOST_VIRT_BOUNDARY_MASK
+       if (!(ibdev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
+               target_host->virt_boundary_mask = ~srp_dev->mr_page_mask;
++#endif
+       target = host_to_target(target_host);
+@@ -3803,7 +3989,25 @@ static ssize_t srp_create_target(struct device *dev,
+       target->lkey            = host->srp_dev->pd->local_dma_lkey;
+       target->global_rkey     = host->srp_dev->global_rkey;
+       target->cmd_sg_cnt      = cmd_sg_entries;
++#ifndef HAVE_BLK_QUEUE_VIRT_BOUNDARY
++      if (never_register) {
++              target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
++      } else {
++              if (!(ibdev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG) &&
++                  (target->cmd_sg_cnt > 12)) {
++                      target->cmd_sg_cnt = 12;
++                      pr_warn("Clamping cmd_sg_entries and "
++                              "indirect_sg_entries to 12 because %s "
++                              "does not support MRs with gaps; values "
++                              "above 12 can cause MR pool allocation "
++                              "failures.\n",
++                              dev_name(&ibdev->dev));
++              }
++              target->sg_tablesize = target->cmd_sg_cnt;
++      }
++#else
+       target->sg_tablesize    = indirect_sg_entries ? : cmd_sg_entries;
++#endif
+       target->allow_ext_sg    = allow_ext_sg;
+       target->tl_retry_count  = 7;
+       target->queue_size      = SRP_DEFAULT_QUEUE_SIZE;
+@@ -3852,9 +4056,12 @@ static ssize_t srp_create_target(struct device *dev,
+               bool gaps_reg = (ibdev->attrs.device_cap_flags &
+                                IB_DEVICE_SG_GAPS_REG);
++#ifdef HAVE_BLK_QUEUE_VIRT_BOUNDARY
+               max_sectors_per_mr = srp_dev->max_pages_per_mr <<
+                                 (ilog2(srp_dev->mr_page_size) - 9);
++#endif
+               if (!gaps_reg) {
++#ifdef HAVE_BLK_QUEUE_VIRT_BOUNDARY
+                       /*
+                        * FR and FMR can only map one HCA page per entry. If
+                        * the start address is not aligned on a HCA page
+@@ -3871,15 +4078,26 @@ static ssize_t srp_create_target(struct device *dev,
+                       mr_per_cmd = register_always +
+                               (target->scsi_host->max_sectors + 1 +
+                                max_sectors_per_mr - 1) / max_sectors_per_mr;
++                      mr_per_cmd = max(2U, mr_per_cmd);
++#else
++                      mr_per_cmd = target->cmd_sg_cnt + register_always;
++#endif
+               } else {
+                       mr_per_cmd = register_always +
+                               (target->sg_tablesize +
+                                srp_dev->max_pages_per_mr - 1) /
+                               srp_dev->max_pages_per_mr;
+               }
++#ifdef HAVE_BLK_QUEUE_VIRT_BOUNDARY
+               pr_debug("max_sectors = %u; max_pages_per_mr = %u; mr_page_size = %u; max_sectors_per_mr = %u; mr_per_cmd = %u\n",
+                        target->scsi_host->max_sectors, srp_dev->max_pages_per_mr, srp_dev->mr_page_size,
+                        max_sectors_per_mr, mr_per_cmd);
++#else
++              pr_debug("IB_DEVICE_SG_GAPS_REG = %d; max_sectors = %u; max_pages_per_mr = %u; mr_page_size = %u; mr_per_cmd = %u\n",
++                       gaps_reg, target->scsi_host->max_sectors,
++                       srp_dev->max_pages_per_mr, srp_dev->mr_page_size,
++                       mr_per_cmd);
++#endif
+       }
+       target_host->sg_tablesize = target->sg_tablesize;
+@@ -3907,6 +4125,12 @@ static ssize_t srp_create_target(struct device *dev,
+       if (!target->ch)
+               goto out;
++#ifndef HAVE_BLK_TAGS
++      target->mq_map = kcalloc(nr_cpu_ids, sizeof(*target->mq_map),
++                               GFP_KERNEL);
++      if (!target->mq_map)
++              goto err_free_ch;
++#endif
+       node_idx = 0;
+       for_each_online_node(node) {
+               const int ch_start = (node_idx * target->ch_count /
+@@ -3922,6 +4146,10 @@ static ssize_t srp_create_target(struct device *dev,
+               for_each_online_cpu(cpu) {
+                       if (cpu_to_node(cpu) != node)
+                               continue;
++#ifndef HAVE_BLK_TAGS
++                      target->mq_map[cpu] = ch_start == ch_end ? ch_start :
++                              ch_start + cpu_idx % (ch_end - ch_start);
++#endif
+                       if (ch_start + cpu_idx >= ch_end)
+                               continue;
+                       ch = &target->ch[ch_start + cpu_idx];
+@@ -3961,6 +4189,9 @@ static ssize_t srp_create_target(struct device *dev,
+                               } else {
+                                       srp_free_ch_ib(target, ch);
+                                       srp_free_req_data(target, ch);
++#ifndef HAVE_BLK_TAGS
++                                      target->mq_map[cpu] = 0;
++#endif
+                                       target->ch_count = ch - target->ch;
+                                       goto connected;
+                               }
+@@ -3973,7 +4204,9 @@ static ssize_t srp_create_target(struct device *dev,
+       }
+ connected:
++#ifdef HAVE_SCSI_HOST_NR_HW_QUEUES
+       target->scsi_host->nr_hw_queues = target->ch_count;
++#endif
+       ret = srp_add_target(host, target);
+       if (ret)
+@@ -4028,6 +4261,11 @@ free_ch:
+               srp_free_req_data(target, ch);
+       }
++#ifndef HAVE_BLK_TAGS
++      kfree(target->mq_map);
++
++err_free_ch:
++#endif
+       kfree(target->ch);
+       goto out;
+ }
+@@ -4276,11 +4514,19 @@ static int __init srp_init_module(void)
+               indirect_sg_entries = cmd_sg_entries;
+       }
++#ifdef HAVE_SG_MAX_SEGMENTS
+       if (indirect_sg_entries > SG_MAX_SEGMENTS) {
+               pr_warn("Clamping indirect_sg_entries to %u\n",
+                       SG_MAX_SEGMENTS);
+               indirect_sg_entries = SG_MAX_SEGMENTS;
+       }
++#else
++      if (indirect_sg_entries > SCSI_MAX_SG_CHAIN_SEGMENTS) {
++              pr_warn("Clamping indirect_sg_entries to %u\n",
++                      SCSI_MAX_SG_CHAIN_SEGMENTS);
++              indirect_sg_entries = SCSI_MAX_SG_CHAIN_SEGMENTS;
++      }
++#endif
+       srp_remove_wq = create_workqueue("srp_remove");
+       if (!srp_remove_wq) {
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
+index xxxxxxx..xxxxxxx 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.h
++++ b/drivers/infiniband/ulp/srp/ib_srp.h
+@@ -92,6 +92,30 @@ enum srp_iu_type {
+       SRP_IU_RSP,
+ };
++#ifdef HAVE_BLK_MQ_UNIQUE_TAG
++#if !(defined(RHEL_MAJOR) && RHEL_MAJOR -0 == 7 && \
++      !defined(HAVE_SCSI_HOST_TEMPLATE_USE_HOST_WIDE_TAGS))
++#define HAVE_BLK_TAGS 1
++#endif
++#endif /* HAVE_BLK_MQ_UNIQUE_TAG */
++
++#ifndef HAVE_BLK_TAGS
++static inline u32 build_srp_tag(u16 ch, u16 req_idx)
++{
++      return ch << 16 | req_idx;
++}
++
++static inline u16 srp_tag_ch(u32 tag)
++{
++      return tag >> 16;
++}
++
++static inline u16 srp_tag_idx(u32 tag)
++{
++      return tag & ((1 << 16) - 1);
++}
++#endif
++
+ /*
+  * @mr_page_mask: HCA memory registration page mask.
+  * @mr_page_size: HCA memory registration page size.
+@@ -125,6 +149,9 @@ struct srp_host {
+ };
+ struct srp_request {
++#ifndef HAVE_BLK_TAGS
++      struct list_head        list;
++#endif
+       struct scsi_cmnd       *scmnd;
+       struct srp_iu          *cmd;
+       union {
+@@ -135,6 +162,9 @@ struct srp_request {
+       struct srp_direct_buf  *indirect_desc;
+       dma_addr_t              indirect_dma_addr;
+       short                   nmdesc;
++#ifndef HAVE_BLK_TAGS
++      uint32_t                tag;
++#endif
+       struct ib_cqe           reg_cqe;
+ };
+@@ -147,6 +177,9 @@ struct srp_request {
+ struct srp_rdma_ch {
+       /* These are RW in the hot path, and commonly used together */
+       struct list_head        free_tx;
++#ifndef HAVE_BLK_TAGS
++      struct list_head        free_reqs;
++#endif
+       spinlock_t              lock;
+       s32                     req_lim;
+@@ -206,6 +239,9 @@ struct srp_target_port {
+       u32                     global_rkey;
+       struct srp_rdma_ch      *ch;
+       struct net              *net;
++#ifndef HAVE_BLK_TAGS
++      int                     *mq_map;
++#endif
+       u32                     ch_count;
+       u32                     lkey;
+       enum srp_target_state   state;
+diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
+index xxxxxxx..xxxxxxx 100644
+--- a/drivers/scsi/scsi_transport_srp.c
++++ b/drivers/scsi/scsi_transport_srp.c
+@@ -557,12 +557,21 @@ int srp_reconnect_rport(struct srp_rport *rport)
+                * invoking scsi_target_unblock() won't change the state of
+                * these devices into running so do that explicitly.
+                */
++#ifdef HAVE_SCSI_DEVICE_STATE_MUTEX
+               shost_for_each_device(sdev, shost) {
+                       mutex_lock(&sdev->state_mutex);
++#else
++              spin_lock_irq(shost->host_lock);
++              __shost_for_each_device(sdev, shost)
++#endif
+                       if (sdev->sdev_state == SDEV_OFFLINE)
+                               sdev->sdev_state = SDEV_RUNNING;
++#ifdef HAVE_SCSI_DEVICE_STATE_MUTEX
+                       mutex_unlock(&sdev->state_mutex);
+               }
++#else
++              spin_unlock_irq(shost->host_lock);
++#endif
+       } else if (rport->state == SRP_RPORT_RUNNING) {
+               /*
+                * srp_reconnect_rport() has been invoked with fast_io_fail