--- /dev/null
+From: Vladimir Sokolovsky <vlad@mellanox.com>
+Subject: [PATCH] BACKPORT: blk-mq-rdma
+
+Signed-off-by: Vladimir Sokolovsky <vlad@mellanox.com>
+---
+ block/blk-mq-rdma.c | 15 +++++++++------
+ 1 file changed, 9 insertions(+), 6 deletions(-)
+
+diff --git a/block/blk-mq-rdma.c b/block/blk-mq-rdma.c
+index xxxxxxx..xxxxxxx 100644
+--- a/block/blk-mq-rdma.c
++++ b/block/blk-mq-rdma.c
+@@ -1,3 +1,4 @@
++#if defined(HAVE_BLK_MQ_MAP_QUEUES) && defined(HAVE_BLK_MQ_TAG_SET_HAS_MQ_MAP)
+ // SPDX-License-Identifier: GPL-2.0
+ /*
+ * Copyright (c) 2017 Sagi Grimberg.
+@@ -8,8 +9,8 @@
+
+ /**
+ * blk_mq_rdma_map_queues - provide a default queue mapping for rdma device
+- * @map: CPU to hardware queue map.
+- * @dev: rdma device to provide a mapping for.
++ * @set: tagset to provide the mapping for
++ * @dev: rdma device associated with @set.
+ * @first_vec: first interrupt vectors to use for queues (usually 0)
+ *
+ * This function assumes the rdma device @dev has at least as many available
+@@ -21,24 +22,26 @@
+ * @set->nr_hw_queues, or @dev does not provide an affinity mask for a
+ * vector, we fallback to the naive mapping.
+ */
+-int blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
++int blk_mq_rdma_map_queues(struct blk_mq_tag_set *set,
+ struct ib_device *dev, int first_vec)
+ {
+ const struct cpumask *mask;
+ unsigned int queue, cpu;
+
+- for (queue = 0; queue < map->nr_queues; queue++) {
++ for (queue = 0; queue < set->nr_hw_queues; queue++) {
+ mask = ib_get_vector_affinity(dev, first_vec + queue);
+ if (!mask)
+ goto fallback;
+
+ for_each_cpu(cpu, mask)
+- map->mq_map[cpu] = map->queue_offset + queue;
++ set->mq_map[cpu] = queue;
+ }
+
+ return 0;
+
+ fallback:
+- return blk_mq_map_queues(map);
++ return blk_mq_map_queues(set);
+ }
+ EXPORT_SYMBOL_GPL(blk_mq_rdma_map_queues);
++
++#endif /* HAVE_BLK_MQ_MAP_QUEUES && HAVE_BLK_MQ_TAG_SET_HAS_MQ_MAP */