From e6c36eac70f4871aaf535365aa5e6c5142d9a1c6 Mon Sep 17 00:00:00 2001
From: Vladimir Sokolovsky
Date: Mon, 3 Dec 2018 13:23:16 -0600
Subject: [PATCH] Fixed SRP backport

Bug: https://bugs.openfabrics.org/show_bug.cgi?id=2690
Signed-off-by: Vladimir Sokolovsky
---
 patches/0018-BACKPORT-srp.patch | 560 +++++++++++++++++++++++++++++++-
 1 file changed, 554 insertions(+), 6 deletions(-)

diff --git a/patches/0018-BACKPORT-srp.patch b/patches/0018-BACKPORT-srp.patch
index e004172..1e1122d 100644
--- a/patches/0018-BACKPORT-srp.patch
+++ b/patches/0018-BACKPORT-srp.patch
@@ -3,9 +3,10 @@ Subject: [PATCH] BACKPORT: srp
 Signed-off-by: Vladimir Sokolovsky
 ---
- drivers/infiniband/ulp/srp/ib_srp.c | 44 +++++++++++++++++++++++++++++++++++++
- drivers/scsi/scsi_transport_srp.c | 9 ++++++++
- 2 files changed, 53 insertions(+)
+ drivers/infiniband/ulp/srp/ib_srp.c | 228 ++++++++++++++++++++++++++++++++++++
+ drivers/infiniband/ulp/srp/ib_srp.h | 36 ++++++
+ drivers/scsi/scsi_transport_srp.c | 9 ++
+ 3 files changed, 273 insertions(+)
 diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
 index xxxxxxx..xxxxxxx xxxxxx
@@ -35,7 +36,173 @@ index xxxxxxx..xxxxxxx xxxxxx
 static unsigned int srp_sg_tablesize;
 static unsigned int cmd_sg_entries;
 static unsigned int indirect_sg_entries;
-@@ -2179,8 +2189,13 @@ static void srp_process_aer_req(struct srp_rdma_ch *ch,
+@@ -87,8 +97,13 @@ MODULE_PARM_DESC(cmd_sg_entries,
+ "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
+
+ module_param(indirect_sg_entries, uint, 0444);
++#ifdef HAVE_SG_MAX_SEGMENTS
+ MODULE_PARM_DESC(indirect_sg_entries,
+ "Default max number of gather/scatter entries (default is 12, max is " __stringify(SG_MAX_SEGMENTS) ")");
++#else
++MODULE_PARM_DESC(indirect_sg_entries,
++ "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");
++#endif
+
+ module_param(allow_ext_sg, bool, 0444);
+ MODULE_PARM_DESC(allow_ext_sg,
+@@ -109,6 +124,7 @@ MODULE_PARM_DESC(register_always,
+ module_param(never_register, bool, 0444);
+ MODULE_PARM_DESC(never_register, "Never register memory");
+
++#ifdef HAVE_MODULEPARAM_KERNEL_PARAM_OPS
+ static const struct kernel_param_ops srp_tmo_ops;
+
+ static int srp_reconnect_delay = 10;
+@@ -127,6 +143,30 @@ MODULE_PARM_DESC(fast_io_fail_tmo,
+ static int srp_dev_loss_tmo = 600;
+ module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
+ S_IRUGO | S_IWUSR);
++#else
++static int srp_tmo_get(char *buffer, const struct kernel_param *kp);
++static int srp_tmo_set(const char *val, const struct kernel_param *kp);
++
++static int srp_reconnect_delay = 20;
++module_param_call(reconnect_delay, (param_set_fn)srp_tmo_set,
++ (param_get_fn)srp_tmo_get, &srp_reconnect_delay,
++ S_IRUGO | S_IWUSR);
++MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");
++
++static int srp_fast_io_fail_tmo = 15;
++module_param_call(fast_io_fail_tmo, (param_set_fn)srp_tmo_set,
++ (param_get_fn)srp_tmo_get, &srp_fast_io_fail_tmo,
++ S_IRUGO | S_IWUSR);
++MODULE_PARM_DESC(fast_io_fail_tmo,
++ "Number of seconds between the observation of a transport"
++ " layer error and failing all I/O. \"off\" means that this"
++ " functionality is disabled.");
++
++static int srp_dev_loss_tmo = 600;
++module_param_call(dev_loss_tmo, (param_set_fn)srp_tmo_set,
++ (param_get_fn)srp_tmo_get, &srp_dev_loss_tmo,
++ S_IRUGO | S_IWUSR);
++#endif
+ MODULE_PARM_DESC(dev_loss_tmo,
+ "Maximum number of seconds that the SRP transport should"
+ " insulate transport layer errors. After this time has been"
+@@ -194,10 +234,12 @@ out:
+ return res;
+ }
+
++#ifdef HAVE_MODULEPARAM_KERNEL_PARAM_OPS
+ static const struct kernel_param_ops srp_tmo_ops = {
+ .get = srp_tmo_get,
+ .set = srp_tmo_set,
+ };
++#endif
+
+ static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
+ {
+@@ -1028,6 +1070,9 @@ static int srp_alloc_req_data(struct srp_rdma_ch *ch)
+ dma_addr_t dma_addr;
+ int i, ret = -ENOMEM;
+
++#ifndef HAVE_BLK_TAGS
++ INIT_LIST_HEAD(&ch->free_reqs);
++#endif
+ ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
+ GFP_KERNEL);
+ if (!ch->req_ring)
+@@ -1059,6 +1104,10 @@ static int srp_alloc_req_data(struct srp_rdma_ch *ch)
+ goto out;
+
+ req->indirect_dma_addr = dma_addr;
++#ifndef HAVE_BLK_TAGS
++ req->tag = build_srp_tag(ch - target->ch, i);
++ list_add_tail(&req->list, &ch->free_reqs);
++#endif
+ }
+ ret = 0;
+
+@@ -1105,6 +1154,10 @@ static void srp_remove_target(struct srp_target_port *target)
+ ch = &target->ch[i];
+ srp_free_req_data(target, ch);
+ }
++#ifndef HAVE_BLK_TAGS
++ kfree(target->mq_map);
++ target->mq_map = NULL;
++#endif
+ kfree(target->ch);
+ target->ch = NULL;
+
+@@ -1313,6 +1366,9 @@ static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
+
+ spin_lock_irqsave(&ch->lock, flags);
+ ch->req_lim += req_lim_delta;
++#ifndef HAVE_BLK_TAGS
++ list_add_tail(&req->list, &ch->free_reqs);
++#endif
+ spin_unlock_irqrestore(&ch->lock, flags);
+ }
+
+@@ -1332,16 +1388,20 @@ static void srp_terminate_io(struct srp_rport *rport)
+ {
+ struct srp_target_port *target = rport->lld_data;
+ struct srp_rdma_ch *ch;
++#ifdef HAVE_REQUEST_QUEUE_REQUEST_FN_ACTIVE
+ struct Scsi_Host *shost = target->scsi_host;
+ struct scsi_device *sdev;
++#endif
+ int i, j;
+
+ /*
+ * Invoking srp_terminate_io() while srp_queuecommand() is running
+ * is not safe. Hence the warning statement below.
+ */
++#ifdef HAVE_REQUEST_QUEUE_REQUEST_FN_ACTIVE
+ shost_for_each_device(sdev, shost)
+ WARN_ON_ONCE(sdev->request_queue->request_fn_active);
++#endif
+
+ for (i = 0; i < target->ch_count; i++) {
+ ch = &target->ch[i];
+@@ -2061,6 +2121,9 @@ static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
+ struct srp_request *req;
+ struct scsi_cmnd *scmnd;
+ unsigned long flags;
++#ifndef HAVE_BLK_TAGS
++ unsigned i;
++#endif
+
+ if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
+ spin_lock_irqsave(&ch->lock, flags);
+@@ -2077,6 +2140,7 @@ static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
+ }
+ spin_unlock_irqrestore(&ch->lock, flags);
+ } else {
++#ifdef HAVE_BLK_TAGS
+ scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
+ if (scmnd && scmnd->host_scribble) {
+ req = (void *)scmnd->host_scribble;
+@@ -2084,6 +2148,18 @@ static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
+ } else {
+ scmnd = NULL;
+ }
++#else
++ if (srp_tag_ch(rsp->tag) != ch - target->ch)
++ pr_err("Channel idx mismatch: tag %#llx <> ch %#lx\n",
++ rsp->tag, ch - target->ch);
++ i = srp_tag_idx(rsp->tag);
++ if (i < target->req_ring_size) {
++ req = &ch->req_ring[i];
++ scmnd = srp_claim_req(ch, req, NULL, NULL);
++ } else {
++ scmnd = NULL;
++ }
++#endif
+ if (!scmnd) {
+ shost_printk(KERN_ERR, target->scsi_host,
+ "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
+@@ -2179,8 +2255,13 @@ static void srp_process_aer_req(struct srp_rdma_ch *ch,
 };
 s32 delta = be32_to_cpu(req->req_lim_delta);

@@ -49,7 +216,83 @@ index xxxxxxx..xxxxxxx xxxxxx
 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
 shost_printk(KERN_ERR, target->scsi_host, PFX
-@@ -2839,13 +2854,40 @@ static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
+@@ -2279,6 +2360,13 @@ static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
+ target->qp_in_error = true;
+ }
+
++#ifndef HAVE_BLK_TAGS
++static struct srp_rdma_ch *srp_map_cpu_to_ch(struct srp_target_port *target)
++{
++ return &target->ch[target->mq_map[raw_smp_processor_id()]];
++}
++#endif
++
+ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
+ {
+ struct srp_target_port *target = host_to_target(shost);
+@@ -2290,7 +2378,9 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
+ struct ib_device *dev;
+ unsigned long flags;
+ u32 tag;
++#ifdef HAVE_BLK_TAGS
+ u16 idx;
++#endif
+ int len, ret;
+ const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
+
+@@ -2307,6 +2397,7 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
+ if (unlikely(scmnd->result))
+ goto err;
+
++#ifdef HAVE_BLK_TAGS
+ WARN_ON_ONCE(scmnd->request->tag < 0);
+ tag = blk_mq_unique_tag(scmnd->request);
+ ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
+@@ -2314,15 +2405,28 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
+ WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
+ dev_name(&shost->shost_gendev), tag, idx,
+ target->req_ring_size);
++#else
++ ch = srp_map_cpu_to_ch(target);
++#endif
+
+ spin_lock_irqsave(&ch->lock, flags);
+ iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
++#ifdef HAVE_BLK_TAGS
+ spin_unlock_irqrestore(&ch->lock, flags);
+
+ if (!iu)
+ goto err;
+
+ req = &ch->req_ring[idx];
++#else
++ if (!iu)
++ goto err_unlock;
++
++ req = list_first_entry(&ch->free_reqs, struct srp_request, list);
++ list_del(&req->list);
++ tag = req->tag;
++ spin_unlock_irqrestore(&ch->lock, flags);
++#endif
+
+ dev = target->srp_host->srp_dev->dev;
+ ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
+ DMA_TO_DEVICE);
+@@ -2383,6 +2487,14 @@ err_iu:
+ */
+ req->scmnd = NULL;
+
++#ifndef HAVE_BLK_TAGS
++ spin_lock_irqsave(&ch->lock, flags);
++ list_add(&req->list, &ch->free_reqs);
++
++err_unlock:
++ spin_unlock_irqrestore(&ch->lock, flags);
++
++#endif
+ err:
+ if (scmnd->result) {
+ scmnd->scsi_done(scmnd);
+@@ -2839,13 +2951,40 @@ static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
 *
 * Returns queue depth.
 */
 static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
 u8 func, u8 *status)
+@@ -2924,8 +3063,13 @@ static int srp_abort(struct scsi_cmnd *scmnd)
+
+ if (!req)
+ return SUCCESS;
++#ifdef HAVE_BLK_TAGS
+ tag = blk_mq_unique_tag(scmnd->request);
+ ch_idx = blk_mq_unique_tag_to_hwq(tag);
++#else
++ tag = req->tag;
++ ch_idx = srp_tag_ch(tag);
++#endif
+ if (WARN_ON_ONCE(ch_idx >= target->ch_count))
+ return SUCCESS;
+ ch = &target->ch[ch_idx];
+@@ -2996,6 +3140,7 @@ static int srp_target_alloc(struct scsi_target *starget)
+ return 0;
+ }
+
++#ifdef HAVE_BLK_QUEUE_VIRT_BOUNDARY
+ static int srp_slave_alloc(struct scsi_device *sdev)
+ {
+ struct Scsi_Host *shost = sdev->host;
+@@ -3009,6 +3154,7 @@ static int srp_slave_alloc(struct scsi_device *sdev)
+
+ return 0;
+ }
++#endif
+
+ static int srp_slave_configure(struct scsi_device *sdev)
+ {
+@@ -3211,7 +3357,9 @@ static struct scsi_host_template srp_template = {
+ .name = "InfiniBand SRP initiator",
+ .proc_name = DRV_NAME,
+ .target_alloc = srp_target_alloc,
++#ifdef HAVE_BLK_QUEUE_VIRT_BOUNDARY
+ .slave_alloc = srp_slave_alloc,
++#endif
+ .slave_configure = srp_slave_configure,
+ .info = srp_target_info,
+ .queuecommand = srp_queuecommand,
-@@ -3227,7 +3269,9 @@ static struct scsi_host_template srp_template = {
+@@ -3227,7 +3375,9 @@ static struct scsi_host_template srp_template = {
 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
 .use_clustering = ENABLE_CLUSTERING,
 .shost_attrs = srp_host_attrs,
+#ifdef HAVE_SCSI_HOST_TEMPLATE_USE_HOST_WIDE_TAGS
+ .use_host_wide_tags = 1,
+#endif
 };

 static int srp_sdev_count(struct Scsi_Host *host)
+@@ -3276,8 +3426,13 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
+ list_add_tail(&target->list, &host->target_list);
+ spin_unlock(&host->target_lock);
+
++#ifdef HAVE_SCSI_SCAN_INITIAL
+ scsi_scan_target(&target->scsi_host->shost_gendev,
+ 0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
++#else
++ scsi_scan_target(&target->scsi_host->shost_gendev,
++ 0, target->scsi_id, SCAN_WILD_CARD, 0);
++#endif
+
+ if (srp_connected_ch(target) < target->ch_count ||
+ target->qp_in_error) {
+@@ -3663,12 +3818,21 @@ static int srp_parse_options(struct net *net, const char *buf,
+ break;
+
+ case SRP_OPT_SG_TABLESIZE:
++#ifdef HAVE_SG_MAX_SEGMENTS
+ if (match_int(args, &token) || token < 1 ||
+ token > SG_MAX_SEGMENTS) {
+ pr_warn("bad max sg_tablesize parameter '%s'\n",
+ p);
+ goto out;
+ }
++#else
++ if (match_int(args, &token) || token < 1 ||
++ token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
++ pr_warn("bad max sg_tablesize parameter '%s'\n",
++ p);
++ goto out;
++ }
++#endif
+ target->sg_tablesize = token;
+ break;
+
+@@ -3728,7 +3892,11 @@ static ssize_t srp_create_target(struct device *dev,
+ struct srp_device *srp_dev = host->srp_dev;
+ struct ib_device *ibdev = srp_dev->dev;
+ int ret, node_idx, node, cpu, i;
++#ifdef HAVE_BLK_QUEUE_VIRT_BOUNDARY
+ unsigned int max_sectors_per_mr, mr_per_cmd = 0;
++#else
++ unsigned int mr_per_cmd = 0;
++#endif
+ bool multich = false;
+
+ target_host = scsi_host_alloc(&srp_template,
+@@ -3751,7 +3919,25 @@ static ssize_t srp_create_target(struct device *dev,
+ target->lkey = host->srp_dev->pd->local_dma_lkey;
+ target->global_rkey = host->srp_dev->global_rkey;
+ target->cmd_sg_cnt = cmd_sg_entries;
++#ifndef HAVE_BLK_QUEUE_VIRT_BOUNDARY
++ if (never_register) {
++ target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
++ } else {
++ if (!(ibdev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG) &&
++ (target->cmd_sg_cnt > 12)) {
++ target->cmd_sg_cnt = 12;
++ pr_warn("Clamping cmd_sg_entries and "
++ "indirect_sg_entries to 12: %s does not "
++ "support MRs with gaps, and values larger "
++ "than 12 can cause MR pool allocation "
++ "errors.\n",
++ dev_name(&ibdev->dev));
++ }
++ target->sg_tablesize = target->cmd_sg_cnt;
++ }
++#else
+ target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
++#endif
+ target->allow_ext_sg = allow_ext_sg;
+ target->tl_retry_count = 7;
+ target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
+@@ -3800,9 +3986,12 @@ static ssize_t srp_create_target(struct device *dev,
+ bool gaps_reg = (ibdev->attrs.device_cap_flags &
+ IB_DEVICE_SG_GAPS_REG);
+
++#ifdef HAVE_BLK_QUEUE_VIRT_BOUNDARY
+ max_sectors_per_mr = srp_dev->max_pages_per_mr <<
+ (ilog2(srp_dev->mr_page_size) - 9);
++#endif
+ if (!gaps_reg) {
++#ifdef HAVE_BLK_QUEUE_VIRT_BOUNDARY
+ /*
+ * FR and FMR can only map one HCA page per entry. If
+ * the start address is not aligned on a HCA page
+@@ -3819,15 +4008,26 @@ static ssize_t srp_create_target(struct device *dev,
+ mr_per_cmd = register_always +
+ (target->scsi_host->max_sectors + 1 +
+ max_sectors_per_mr - 1) / max_sectors_per_mr;
++ mr_per_cmd = max(2U, mr_per_cmd);
++#else
++ mr_per_cmd = target->cmd_sg_cnt + register_always;
++#endif
+ } else {
+ mr_per_cmd = register_always +
+ (target->sg_tablesize +
+ srp_dev->max_pages_per_mr - 1) /
+ srp_dev->max_pages_per_mr;
+ }
++#ifdef HAVE_BLK_QUEUE_VIRT_BOUNDARY
+ pr_debug("max_sectors = %u; max_pages_per_mr = %u; mr_page_size = %u; max_sectors_per_mr = %u; mr_per_cmd = %u\n",
+ target->scsi_host->max_sectors, srp_dev->max_pages_per_mr, srp_dev->mr_page_size,
+ max_sectors_per_mr, mr_per_cmd);
++#else
++ pr_debug("IB_DEVICE_SG_GAPS_REG = %d; max_sectors = %u; max_pages_per_mr = %u; mr_page_size = %u; mr_per_cmd = %u\n",
++ gaps_reg, target->scsi_host->max_sectors,
++ srp_dev->max_pages_per_mr, srp_dev->mr_page_size,
++ mr_per_cmd);
++#endif
+ }
+
+ target_host->sg_tablesize = target->sg_tablesize;
+@@ -3857,6 +4057,12 @@ static ssize_t srp_create_target(struct device *dev,
+ if (!target->ch)
+ goto out;
+
++#ifndef HAVE_BLK_TAGS
++ target->mq_map = kcalloc(nr_cpu_ids, sizeof(*target->mq_map),
++ GFP_KERNEL);
++ if (!target->mq_map)
++ goto err_free_ch;
++#endif
+ node_idx = 0;
+ for_each_online_node(node) {
+ const int ch_start = (node_idx * target->ch_count /
+@@ -3872,6 +4078,10 @@ static ssize_t srp_create_target(struct device *dev,
+ for_each_online_cpu(cpu) {
+ if (cpu_to_node(cpu) != node)
+ continue;
++#ifndef HAVE_BLK_TAGS
++ target->mq_map[cpu] = ch_start == ch_end ? ch_start :
++ ch_start + cpu_idx % (ch_end - ch_start);
++#endif
+ if (ch_start + cpu_idx >= ch_end)
+ continue;
+ ch = &target->ch[ch_start + cpu_idx];
+@@ -3911,6 +4121,9 @@ static ssize_t srp_create_target(struct device *dev,
+ } else {
+ srp_free_ch_ib(target, ch);
+ srp_free_req_data(target, ch);
++#ifndef HAVE_BLK_TAGS
++ target->mq_map[cpu] = 0;
++#endif
+ target->ch_count = ch - target->ch;
+ goto connected;
+ }
+@@ -3923,7 +4136,9 @@ static ssize_t srp_create_target(struct device *dev,
+ }
+
+ connected:
++#ifdef HAVE_SCSI_HOST_NR_HW_QUEUES
+ target->scsi_host->nr_hw_queues = target->ch_count;
++#endif
+
+ ret = srp_add_target(host, target);
+ if (ret)
+@@ -3978,6 +4193,11 @@ free_ch:
+ srp_free_req_data(target, ch);
+ }
+
++#ifndef HAVE_BLK_TAGS
++ kfree(target->mq_map);
++
++err_free_ch:
++#endif
+ kfree(target->ch);
+ goto out;
+ }
+@@ -4203,11 +4423,19 @@ static int __init srp_init_module(void)
+ indirect_sg_entries = cmd_sg_entries;
+ }
+
++#ifdef HAVE_SG_MAX_SEGMENTS
+ if (indirect_sg_entries > SG_MAX_SEGMENTS) {
+ pr_warn("Clamping indirect_sg_entries to %u\n",
+ SG_MAX_SEGMENTS);
+ indirect_sg_entries = SG_MAX_SEGMENTS;
+ }
++#else
++ if (indirect_sg_entries > SCSI_MAX_SG_CHAIN_SEGMENTS) {
++ pr_warn("Clamping indirect_sg_entries to %u\n",
++ SCSI_MAX_SG_CHAIN_SEGMENTS);
++ indirect_sg_entries = SCSI_MAX_SG_CHAIN_SEGMENTS;
++ }
++#endif
+
+ srp_remove_wq = create_workqueue("srp_remove");
+ if (!srp_remove_wq) {
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index xxxxxxx..xxxxxxx xxxxxx
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
+@@ -81,6 +81,30 @@ enum srp_iu_type {
+ SRP_IU_RSP,
+ };
+
++#ifdef HAVE_BLK_MQ_UNIQUE_TAG
++#if !(defined(RHEL_MAJOR) && RHEL_MAJOR -0 == 7 && \
++ !defined(HAVE_SCSI_HOST_TEMPLATE_USE_HOST_WIDE_TAGS))
++#define HAVE_BLK_TAGS 1
++#endif
++#endif /* HAVE_BLK_MQ_UNIQUE_TAG */
++
++#ifndef HAVE_BLK_TAGS
++static inline u32 build_srp_tag(u16 ch, u16 req_idx)
++{
++ return ch << 16 | req_idx;
++}
++
++static inline u16 srp_tag_ch(u32 tag)
++{
++ return tag >> 16;
++}
++
++static inline u16 srp_tag_idx(u32 tag)
++{
++ return tag & ((1 << 16) - 1);
++}
++#endif
++
+ /*
+ * @mr_page_mask: HCA memory registration page mask.
+ * @mr_page_size: HCA memory registration page size.
+@@ -114,6 +138,9 @@ struct srp_host {
+ };
+
+ struct srp_request {
++#ifndef HAVE_BLK_TAGS
++ struct list_head list;
++#endif
+ struct scsi_cmnd *scmnd;
+ struct srp_iu *cmd;
+ union {
+@@ -124,6 +151,9 @@ struct srp_request {
+ struct srp_direct_buf *indirect_desc;
+ dma_addr_t indirect_dma_addr;
+ short nmdesc;
++#ifndef HAVE_BLK_TAGS
++ uint32_t tag;
++#endif
+ struct ib_cqe reg_cqe;
+ };
+
+@@ -134,6 +164,9 @@ struct srp_request {
+ struct srp_rdma_ch {
+ /* These are RW in the hot path, and commonly used together */
+ struct list_head free_tx;
++#ifndef HAVE_BLK_TAGS
++ struct list_head free_reqs;
++#endif
+ spinlock_t lock;
+ s32 req_lim;
+
+@@ -191,6 +224,9 @@ struct srp_target_port {
+ u32 global_rkey;
+ struct srp_rdma_ch *ch;
+ struct net *net;
++#ifndef HAVE_BLK_TAGS
++ int *mq_map;
++#endif
+ u32 ch_count;
+ u32 lkey;
+ enum srp_target_state state;
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index xxxxxxx..xxxxxxx xxxxxx
--- a/drivers/scsi/scsi_transport_srp.c
-- 
2.41.0
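
Note on the fallback tag scheme (illustration only, not part of the patch):
on kernels without HAVE_BLK_TAGS, the backport cannot use blk-mq's unique
tags, so the helpers added to ib_srp.h pack the channel number into the
upper 16 bits of the tag and the request-ring index into the lower 16 bits.
The following standalone C sketch mirrors those helpers in a hypothetical
userspace harness so the round trip can be checked outside the kernel:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same packing as the build_srp_tag()/srp_tag_ch()/srp_tag_idx()
 * helpers the patch adds to ib_srp.h: channel in the upper 16 bits,
 * request-ring slot in the lower 16 bits. */
static inline uint32_t build_srp_tag(uint16_t ch, uint16_t req_idx)
{
	return (uint32_t)ch << 16 | req_idx;
}

static inline uint16_t srp_tag_ch(uint32_t tag)
{
	return tag >> 16;
}

static inline uint16_t srp_tag_idx(uint32_t tag)
{
	return tag & ((1 << 16) - 1);
}

int main(void)
{
	/* A request on channel 2, ring slot 5. */
	uint32_t tag = build_srp_tag(2, 5);

	assert(srp_tag_ch(tag) == 2);
	assert(srp_tag_idx(tag) == 5);
	printf("tag %#x -> ch %u idx %u\n",
	       tag, srp_tag_ch(tag), srp_tag_idx(tag));
	return 0;
}

This is why srp_process_rsp() can recover the owning channel and request
from rsp->tag alone on the non-BLK_TAGS path, and why srp_abort() derives
ch_idx via srp_tag_ch(req->tag).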
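Likewise, without HAVE_BLK_TAGS the driver cannot ask blk-mq which hardware
queue a request belongs to, so srp_create_target() builds a per-CPU mq_map
and srp_map_cpu_to_ch() indexes it with raw_smp_processor_id(). The sketch
below is a simplified single-node illustration of the mapping expression
used in the patch; the CPU and channel counts are made-up constants, where
the real code derives them from for_each_online_node()/for_each_online_cpu():

#include <stdio.h>

#define NR_CPUS 8	/* hypothetical online CPUs on one node */
#define CH_COUNT 4	/* hypothetical RDMA channels */

int main(void)
{
	int mq_map[NR_CPUS];
	/* Single node: ch_start == 0 and ch_end == CH_COUNT. */
	int ch_start = 0, ch_end = CH_COUNT;

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		int cpu_idx = cpu; /* index of this CPU within its node */

		/* Same expression as the patch: spread the node's CPUs
		 * round-robin over the node's channel range. */
		mq_map[cpu] = ch_start == ch_end ? ch_start :
			ch_start + cpu_idx % (ch_end - ch_start);
		printf("cpu %d -> channel %d\n", cpu, mq_map[cpu]);
	}
	return 0;
}

With 8 CPUs and 4 channels this prints cpu 0..7 mapping to channels
0,1,2,3,0,1,2,3, which is the behavior the mq_map fallback emulates in
place of blk_mq_unique_tag_to_hwq().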