git.openfabrics.org - compat-rdma/compat-rdma.git/commitdiff
Added ib_core backport patch
author Vladimir Sokolovsky <vlad@mellanox.com>
Tue, 20 Aug 2019 20:31:38 +0000 (15:31 -0500)
committer Vladimir Sokolovsky <vlad@mellanox.com>
Tue, 27 Aug 2019 20:03:21 +0000 (15:03 -0500)
Signed-off-by: Vladimir Sokolovsky <vlad@mellanox.com>
patches/0001-drivers-scsi-Makefile-Remove-unused-components.patch [new file with mode: 0644]
patches/0002-BACKPORT-ib_core.patch [new file with mode: 0644]
patches/README [new file with mode: 0644]
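
The two patches below adapt upstream code to build out of tree against a range of
kernels. Patch 0001 trims drivers/scsi/Makefile down to the few SCSI components
compat-rdma actually rebuilds (scsi_transport_srp plus the cxgbi and qedi iSCSI
initiators); patch 0002 wraps every ib_core use of an API that changed across
kernel releases behind HAVE_* feature macros, which are assumed here to be
defined by the compat-rdma configure step rather than by the kernel itself.
As a minimal sketch of the pattern (taken from the netlink hunks in patch 0002),
a .doit handler gains or loses the netlink_ext_ack parameter depending on
whether the target kernel provides it:

    #ifdef HAVE_NETLINK_EXT_ACK
    int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
                                 struct nlmsghdr *nlh,
                                 struct netlink_ext_ack *extack)
    {
    #else
    int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
                                 struct netlink_callback *cb)
    {
            const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
    #endif
            /* shared body: both variants fall through to the same logic */
    }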

diff --git a/patches/0001-drivers-scsi-Makefile-Remove-unused-components.patch b/patches/0001-drivers-scsi-Makefile-Remove-unused-components.patch
new file mode 100644 (file)
index 0000000..c4ade94
--- /dev/null
@@ -0,0 +1,219 @@
+From: Vladimir Sokolovsky <vlad@mellanox.com>
+Subject: [PATCH] drivers/scsi/Makefile: Remove unused components
+
+Signed-off-by: Vladimir Sokolovsky <vlad@mellanox.com>
+---
+ drivers/scsi/Makefile | 202 ------------------------------------------
+ 1 file changed, 202 deletions(-)
+
+diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
+index xxxxxxx..xxxxxxx 100644
+--- a/drivers/scsi/Makefile
++++ b/drivers/scsi/Makefile
+@@ -1,206 +1,4 @@
+-# SPDX-License-Identifier: GPL-2.0
+-#
+-# Makefile for linux/drivers/scsi
+-#
+-# 30 May 2000, Christoph Hellwig <hch@infradead.org>
+-# Rewritten to use lists instead of if-statements.
+-#
+-# 20 Sep 2000, Torben Mathiasen <tmm@image.dk>
+-# Changed link order to reflect new scsi initialization.
+-#
+-# *!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!
+-# The link order must be, SCSI Core, SCSI HBA drivers, and
+-# lastly SCSI peripheral drivers (disk/tape/cdrom/etc.) to
+-# satisfy certain initialization assumptions in the SCSI layer.
+-# *!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!
+-
+-
+-CFLAGS_aha152x.o =   -DAHA152X_STAT -DAUTOCONF
+-CFLAGS_gdth.o    = # -DDEBUG_GDTH=2 -D__SERIAL__ -D__COM2__ -DGDTH_STATISTICS
+-
+-obj-$(CONFIG_PCMCIA)          += pcmcia/
+-
+-obj-$(CONFIG_SCSI)            += scsi_mod.o
+-obj-$(CONFIG_BLK_SCSI_REQUEST)        += scsi_common.o
+-
+-obj-$(CONFIG_RAID_ATTRS)      += raid_class.o
+-
+-# --- NOTE ORDERING HERE ---
+-# For kernel non-modular link, transport attributes need to
+-# be initialised before drivers
+-# --------------------------
+-obj-$(CONFIG_SCSI_SPI_ATTRS)  += scsi_transport_spi.o
+-obj-$(CONFIG_SCSI_FC_ATTRS)   += scsi_transport_fc.o
+-obj-$(CONFIG_SCSI_ISCSI_ATTRS)        += scsi_transport_iscsi.o
+-obj-$(CONFIG_SCSI_SAS_ATTRS)  += scsi_transport_sas.o
+-obj-$(CONFIG_SCSI_SAS_LIBSAS) += libsas/
+ obj-$(CONFIG_SCSI_SRP_ATTRS)  += scsi_transport_srp.o
+-obj-$(CONFIG_SCSI_DH)         += device_handler/
+-
+-obj-$(CONFIG_LIBFC)           += libfc/
+-obj-$(CONFIG_LIBFCOE)         += fcoe/
+-obj-$(CONFIG_FCOE)            += fcoe/
+-obj-$(CONFIG_FCOE_FNIC)               += fnic/
+-obj-$(CONFIG_SCSI_SNIC)               += snic/
+-obj-$(CONFIG_SCSI_BNX2X_FCOE) += libfc/ fcoe/ bnx2fc/
+-obj-$(CONFIG_QEDF)            += qedf/
+-obj-$(CONFIG_ISCSI_TCP)       += libiscsi.o   libiscsi_tcp.o iscsi_tcp.o
+-obj-$(CONFIG_INFINIBAND_ISER)         += libiscsi.o
+-obj-$(CONFIG_ISCSI_BOOT_SYSFS)        += iscsi_boot_sysfs.o
+-obj-$(CONFIG_SCSI_A4000T)     += 53c700.o     a4000t.o
+-obj-$(CONFIG_SCSI_ZORRO7XX)   += 53c700.o     zorro7xx.o
+-obj-$(CONFIG_SCSI_ZORRO_ESP)  += esp_scsi.o   zorro_esp.o
+-obj-$(CONFIG_A3000_SCSI)      += a3000.o      wd33c93.o
+-obj-$(CONFIG_A2091_SCSI)      += a2091.o      wd33c93.o
+-obj-$(CONFIG_GVP11_SCSI)      += gvp11.o      wd33c93.o
+-obj-$(CONFIG_MVME147_SCSI)    += mvme147.o    wd33c93.o
+-obj-$(CONFIG_SGIWD93_SCSI)    += sgiwd93.o    wd33c93.o
+-obj-$(CONFIG_ATARI_SCSI)      += atari_scsi.o
+-obj-$(CONFIG_MAC_SCSI)                += mac_scsi.o
+-obj-$(CONFIG_SCSI_MAC_ESP)    += esp_scsi.o   mac_esp.o
+-obj-$(CONFIG_SUN3_SCSI)               += sun3_scsi.o  sun3_scsi_vme.o
+-obj-$(CONFIG_MVME16x_SCSI)    += 53c700.o     mvme16x_scsi.o
+-obj-$(CONFIG_BVME6000_SCSI)   += 53c700.o     bvme6000_scsi.o
+-obj-$(CONFIG_SCSI_SIM710)     += 53c700.o     sim710.o
+-obj-$(CONFIG_SCSI_ADVANSYS)   += advansys.o
+-obj-$(CONFIG_SCSI_BUSLOGIC)   += BusLogic.o
+-obj-$(CONFIG_SCSI_DPT_I2O)    += dpt_i2o.o
+-obj-$(CONFIG_SCSI_ARCMSR)     += arcmsr/
+-obj-$(CONFIG_SCSI_AHA152X)    += aha152x.o
+-obj-$(CONFIG_SCSI_AHA1542)    += aha1542.o
+-obj-$(CONFIG_SCSI_AHA1740)    += aha1740.o
+-obj-$(CONFIG_SCSI_AIC7XXX)    += aic7xxx/
+-obj-$(CONFIG_SCSI_AIC79XX)    += aic7xxx/
+-obj-$(CONFIG_SCSI_AACRAID)    += aacraid/
+-obj-$(CONFIG_SCSI_AIC94XX)    += aic94xx/
+-obj-$(CONFIG_SCSI_PM8001)     += pm8001/
+-obj-$(CONFIG_SCSI_ISCI)               += isci/
+-obj-$(CONFIG_SCSI_IPS)                += ips.o
+-obj-$(CONFIG_SCSI_FDOMAIN)    += fdomain.o
+-obj-$(CONFIG_SCSI_FDOMAIN_PCI)        += fdomain_pci.o
+-obj-$(CONFIG_SCSI_FDOMAIN_ISA)        += fdomain_isa.o
+-obj-$(CONFIG_SCSI_GENERIC_NCR5380) += g_NCR5380.o
+-obj-$(CONFIG_SCSI_QLOGIC_FAS) += qlogicfas408.o       qlogicfas.o
+-obj-$(CONFIG_PCMCIA_QLOGIC)   += qlogicfas408.o
+-obj-$(CONFIG_SCSI_QLOGIC_1280)        += qla1280.o 
+-obj-$(CONFIG_SCSI_QLA_FC)     += qla2xxx/
+-obj-$(CONFIG_SCSI_QLA_ISCSI)  += libiscsi.o qla4xxx/
+-obj-$(CONFIG_SCSI_LPFC)               += lpfc/
+-obj-$(CONFIG_SCSI_BFA_FC)     += bfa/
+-obj-$(CONFIG_SCSI_CHELSIO_FCOE)       += csiostor/
+-obj-$(CONFIG_SCSI_DMX3191D)   += dmx3191d.o
+-obj-$(CONFIG_SCSI_HPSA)               += hpsa.o
+-obj-$(CONFIG_SCSI_SMARTPQI)   += smartpqi/
+-obj-$(CONFIG_SCSI_SYM53C8XX_2)        += sym53c8xx_2/
+-obj-$(CONFIG_SCSI_ZALON)      += zalon7xx.o
+-obj-$(CONFIG_SCSI_DC395x)     += dc395x.o
+-obj-$(CONFIG_SCSI_AM53C974)   += esp_scsi.o   am53c974.o
+-obj-$(CONFIG_CXLFLASH)                += cxlflash/
+-obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o
+-obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/
+-obj-$(CONFIG_MEGARAID_SAS)    += megaraid/
+-obj-$(CONFIG_SCSI_MPT3SAS)    += mpt3sas/
+-obj-$(CONFIG_SCSI_UFSHCD)     += ufs/
+-obj-$(CONFIG_SCSI_ACARD)      += atp870u.o
+-obj-$(CONFIG_SCSI_SUNESP)     += esp_scsi.o   sun_esp.o
+-obj-$(CONFIG_SCSI_GDTH)               += gdth.o
+-obj-$(CONFIG_SCSI_INITIO)     += initio.o
+-obj-$(CONFIG_SCSI_INIA100)    += a100u2w.o
+-obj-$(CONFIG_SCSI_QLOGICPTI)  += qlogicpti.o
+-obj-$(CONFIG_SCSI_MESH)               += mesh.o
+-obj-$(CONFIG_SCSI_MAC53C94)   += mac53c94.o
+-obj-$(CONFIG_SCSI_MYRB)               += myrb.o
+-obj-$(CONFIG_SCSI_MYRS)               += myrs.o
+-obj-$(CONFIG_BLK_DEV_3W_XXXX_RAID) += 3w-xxxx.o
+-obj-$(CONFIG_SCSI_3W_9XXX)    += 3w-9xxx.o
+-obj-$(CONFIG_SCSI_3W_SAS)     += 3w-sas.o
+-obj-$(CONFIG_SCSI_PPA)                += ppa.o
+-obj-$(CONFIG_SCSI_IMM)                += imm.o
+-obj-$(CONFIG_JAZZ_ESP)                += esp_scsi.o   jazz_esp.o
+-obj-$(CONFIG_SUN3X_ESP)               += esp_scsi.o   sun3x_esp.o
+-obj-$(CONFIG_SCSI_LASI700)    += 53c700.o lasi700.o
+-obj-$(CONFIG_SCSI_SNI_53C710) += 53c700.o sni_53c710.o
+-obj-$(CONFIG_SCSI_NSP32)      += nsp32.o
+-obj-$(CONFIG_SCSI_IPR)                += ipr.o
+-obj-$(CONFIG_SCSI_IBMVSCSI)   += ibmvscsi/
+-obj-$(CONFIG_SCSI_IBMVSCSIS)  += ibmvscsi_tgt/
+-obj-$(CONFIG_SCSI_IBMVFC)     += ibmvscsi/
+-obj-$(CONFIG_SCSI_HPTIOP)     += hptiop.o
+-obj-$(CONFIG_SCSI_STEX)               += stex.o
+-obj-$(CONFIG_SCSI_MVSAS)      += mvsas/
+-obj-$(CONFIG_SCSI_MVUMI)      += mvumi.o
+-obj-$(CONFIG_PS3_ROM)         += ps3rom.o
+ obj-$(CONFIG_SCSI_CXGB3_ISCSI)        += libiscsi.o libiscsi_tcp.o cxgbi/
+ obj-$(CONFIG_SCSI_CXGB4_ISCSI)        += libiscsi.o libiscsi_tcp.o cxgbi/
+-obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/
+ obj-$(CONFIG_QEDI)          += libiscsi.o qedi/
+-obj-$(CONFIG_BE2ISCSI)                += libiscsi.o be2iscsi/
+-obj-$(CONFIG_SCSI_ESAS2R)     += esas2r/
+-obj-$(CONFIG_SCSI_PMCRAID)    += pmcraid.o
+-obj-$(CONFIG_SCSI_VIRTIO)     += virtio_scsi.o
+-obj-$(CONFIG_VMWARE_PVSCSI)   += vmw_pvscsi.o
+-obj-$(CONFIG_XEN_SCSI_FRONTEND)       += xen-scsifront.o
+-obj-$(CONFIG_HYPERV_STORAGE)  += hv_storvsc.o
+-obj-$(CONFIG_SCSI_WD719X)     += wd719x.o
+-
+-obj-$(CONFIG_ARM)             += arm/
+-
+-obj-$(CONFIG_CHR_DEV_ST)      += st.o
+-obj-$(CONFIG_BLK_DEV_SD)      += sd_mod.o
+-obj-$(CONFIG_BLK_DEV_SR)      += sr_mod.o
+-obj-$(CONFIG_CHR_DEV_SG)      += sg.o
+-obj-$(CONFIG_CHR_DEV_SCH)     += ch.o
+-obj-$(CONFIG_SCSI_ENCLOSURE)  += ses.o
+-
+-obj-$(CONFIG_SCSI_HISI_SAS) += hisi_sas/
+-
+-# This goes last, so that "real" scsi devices probe earlier
+-obj-$(CONFIG_SCSI_DEBUG)      += scsi_debug.o
+-scsi_mod-y                    += scsi.o hosts.o scsi_ioctl.o \
+-                                 scsicam.o scsi_error.o scsi_lib.o
+-scsi_mod-$(CONFIG_SCSI_CONSTANTS) += constants.o
+-scsi_mod-$(CONFIG_SCSI_DMA)   += scsi_lib_dma.o
+-scsi_mod-y                    += scsi_scan.o scsi_sysfs.o scsi_devinfo.o
+-scsi_mod-$(CONFIG_SCSI_NETLINK)       += scsi_netlink.o
+-scsi_mod-$(CONFIG_SYSCTL)     += scsi_sysctl.o
+-scsi_mod-$(CONFIG_SCSI_PROC_FS)       += scsi_proc.o
+-scsi_mod-$(CONFIG_BLK_DEBUG_FS)       += scsi_debugfs.o
+-scsi_mod-y                    += scsi_trace.o scsi_logging.o
+-scsi_mod-$(CONFIG_PM)         += scsi_pm.o
+-scsi_mod-$(CONFIG_SCSI_DH)    += scsi_dh.o
+-
+-hv_storvsc-y                  := storvsc_drv.o
+-
+-sd_mod-objs   := sd.o
+-sd_mod-$(CONFIG_BLK_DEV_INTEGRITY) += sd_dif.o
+-sd_mod-$(CONFIG_BLK_DEV_ZONED) += sd_zbc.o
+-
+-sr_mod-objs   := sr.o sr_ioctl.o sr_vendor.o
+-ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
+-              := -DCONFIG_NCR53C8XX_PREFETCH -DSCSI_NCR_BIG_ENDIAN \
+-                      -DCONFIG_SCSI_NCR53C8XX_NO_WORD_TRANSFERS
+-CFLAGS_ncr53c8xx.o    := $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m)
+-zalon7xx-objs := zalon.o ncr53c8xx.o
+-
+-# Files generated that shall be removed upon make clean
+-clean-files :=        53c700_d.h 53c700_u.h scsi_devinfo_tbl.c
+-
+-$(obj)/53c700.o: $(obj)/53c700_d.h
+-
+-$(obj)/scsi_sysfs.o: $(obj)/scsi_devinfo_tbl.c
+-
+-quiet_cmd_bflags = GEN     $@
+-      cmd_bflags = sed -n 's/.*define *BLIST_\([A-Z0-9_]*\) *.*/BLIST_FLAG_NAME(\1),/p' $< > $@
+-
+-$(obj)/scsi_devinfo_tbl.c: include/scsi/scsi_devinfo.h
+-      $(call if_changed,bflags)
+-
+-# If you want to play with the firmware, uncomment
+-# GENERATE_FIRMWARE := 1
+-
+-ifdef GENERATE_FIRMWARE
+-
+-$(obj)/53c700_d.h: $(src)/53c700.scr $(src)/script_asm.pl
+-      $(PERL) -s $(src)/script_asm.pl -ncr7x0_family $@ $(@:_d.h=_u.h) < $<
+-
+-endif
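
The ib_core backport that follows applies the same HAVE_* convention throughout.
Where an upstream struct changed layout rather than just a function signature,
the patch carries a full fallback chain; the gateway test in the addr.c hunks,
for example, degrades from rt_gw_family on newer kernels to rt_uses_gateway and
finally to comparing rt_gateway against the destination address. A condensed
sketch of that chain (has_gateway_ipv4 is a stand-in for the patch's
has_gateway(), shown here for the IPv4 leg only):

    static bool has_gateway_ipv4(const struct rtable *rt, const void *daddr)
    {
    #ifdef HAVE_RT_GW_FAMILY
            return rt->rt_gw_family == AF_INET;        /* newest rtable layout */
    #elif defined(HAVE_RT_USES_GATEWAY)
            return rt->rt_uses_gateway;                /* intermediate kernels */
    #else
            return rt->rt_gateway != *(__be32 *)daddr; /* oldest: address compare */
    #endif
    }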
diff --git a/patches/0002-BACKPORT-ib_core.patch b/patches/0002-BACKPORT-ib_core.patch
new file mode 100644 (file)
index 0000000..268ac52
--- /dev/null
@@ -0,0 +1,3738 @@
+From: Vladimir Sokolovsky <vlad@mellanox.com>
+Subject: [PATCH] BACKPORT: ib_core
+
+Signed-off-by: Vladimir Sokolovsky <vlad@mellanox.com>
+---
+ drivers/infiniband/core/addr.c          |  51 +++-
+ drivers/infiniband/core/cma.c           |  27 ++
+ drivers/infiniband/core/cma_configfs.c  | 104 ++++++++
+ drivers/infiniband/core/core_priv.h     |  38 +++
+ drivers/infiniband/core/cq.c            |  35 ++-
+ drivers/infiniband/core/device.c        |  75 ++++++
+ drivers/infiniband/core/fmr_pool.c      |  55 +++++
+ drivers/infiniband/core/iwpm_util.c     |   8 +
+ drivers/infiniband/core/mad.c           |   7 +
+ drivers/infiniband/core/netlink.c       |  29 +++
+ drivers/infiniband/core/nldev.c         | 316 ++++++++++++++++++++++++
+ drivers/infiniband/core/rdma_core.c     | 131 +++++++++-
+ drivers/infiniband/core/restrack.c      |   4 +
+ drivers/infiniband/core/restrack.h      |  15 ++
+ drivers/infiniband/core/roce_gid_mgmt.c |  58 +++++
+ drivers/infiniband/core/rw.c            |   6 +
+ drivers/infiniband/core/sa_query.c      |  87 ++++++-
+ drivers/infiniband/core/ucma.c          |  19 ++
+ drivers/infiniband/core/umem.c          |  78 ++++++
+ drivers/infiniband/core/user_mad.c      |  61 +++++
+ drivers/infiniband/core/uverbs.h        |   6 +
+ drivers/infiniband/core/uverbs_cmd.c    |  13 +
+ drivers/infiniband/core/uverbs_ioctl.c  |   6 +
+ drivers/infiniband/core/uverbs_main.c   |  75 ++++++
+ drivers/infiniband/core/uverbs_uapi.c   |   8 +
+ drivers/infiniband/core/verbs.c         |  14 ++
+ include/rdma/ib_addr.h                  |  31 +++
+ include/rdma/ib_verbs.h                 | 155 ++++++++++++
+ include/rdma/rdma_netlink.h             |   4 +
+ include/rdma/restrack.h                 |   9 +
+ include/rdma/uverbs_ioctl.h             |   3 +
+ include/trace/events/ib_mad.h           |   7 +
+ 32 files changed, 1528 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
+index xxxxxxx..xxxxxxx 100644
+--- a/drivers/infiniband/core/addr.c
++++ b/drivers/infiniband/core/addr.c
+@@ -42,7 +42,9 @@
+ #include <net/neighbour.h>
+ #include <net/route.h>
+ #include <net/netevent.h>
++#ifdef HAVE_IPV6_STUBS
+ #include <net/ipv6_stubs.h>
++#endif
+ #include <net/ip6_route.h>
+ #include <rdma/ib_addr.h>
+ #include <rdma/ib_cache.h>
+@@ -87,7 +89,11 @@ static inline bool ib_nl_is_good_ip_resp(const struct nlmsghdr *nlh)
+       if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
+               return false;
++#ifdef HAVE_NLA_PARSE_DEPRECATED
+       ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
++#else
++      ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
++#endif
+                                  nlmsg_len(nlh), ib_nl_addr_policy, NULL);
+       if (ret)
+               return false;
+@@ -129,11 +135,26 @@ static void ib_nl_process_good_ip_rsep(const struct nlmsghdr *nlh)
+ }
+ int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
++#ifdef HAVE_NETLINK_EXT_ACK
+                            struct nlmsghdr *nlh,
+                            struct netlink_ext_ack *extack)
+ {
++#else
++                           struct netlink_callback *cb)
++{
++      const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
++
++#endif
+       if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
++#ifdef HAVE_NETLINK_CAPABLE
++#ifdef HAVE_NETLINK_SKB_PARMS_SK
+           !(NETLINK_CB(skb).sk))
++#else
++          !(NETLINK_CB(skb).ssk))
++#endif
++#else
++          sock_net(skb->sk) != &init_net)
++#endif
+               return -EPERM;
+       if (ib_nl_is_good_ip_resp(nlh))
+@@ -177,7 +198,8 @@ static int ib_nl_ip_send_msg(struct rdma_dev_addr *dev_addr,
+       }
+       /* Construct the family header first */
+-      header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
++      header = (struct rdma_ls_ip_resolve_header *)
++              skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
+       header->ifindex = dev_addr->bound_dev_if;
+       nla_put(skb, attrtype, size, daddr);
+@@ -345,14 +367,24 @@ static int dst_fetch_ha(const struct dst_entry *dst,
+       return ret;
+ }
++#ifdef HAVE_RT_USES_GATEWAY
+ static bool has_gateway(const struct dst_entry *dst, sa_family_t family)
++#else
++static bool has_gateway(const struct dst_entry *dst, const void *daddr, sa_family_t family)
++#endif
+ {
+       struct rtable *rt;
+       struct rt6_info *rt6;
+       if (family == AF_INET) {
+               rt = container_of(dst, struct rtable, dst);
++#ifdef HAVE_RT_GW_FAMILY
+               return rt->rt_gw_family == AF_INET;
++#elif defined (HAVE_RT_USES_GATEWAY)
++              return rt->rt_uses_gateway;
++#else
++              return (rt->rt_gateway != *(__be32 *)daddr);
++#endif
+       }
+       rt6 = container_of(dst, struct rt6_info, dst);
+@@ -371,8 +403,12 @@ static int fetch_ha(const struct dst_entry *dst, struct rdma_dev_addr *dev_addr,
+               (const void *)&dst_in6->sin6_addr;
+       sa_family_t family = dst_in->sa_family;
++#ifndef HAVE_RT_USES_GATEWAY
++      if (seq && has_gateway(dst, daddr, family) && dst->dev->type == ARPHRD_INFINIBAND)
++#else
+       /* If we have a gateway in IB mode then it must be an IB network */
+       if (has_gateway(dst, family) && dev_addr->network == RDMA_NETWORK_IB)
++#endif
+               return ib_nl_fetch_ha(dev_addr, daddr, seq, family);
+       else
+               return dst_fetch_ha(dst, dev_addr, daddr);
+@@ -475,6 +511,15 @@ static int copy_src_l2_addr(struct rdma_dev_addr *dev_addr,
+                           const struct net_device *ndev)
+ {
+       int ret = 0;
++#ifndef HAVE_RT_USES_GATEWAY
++      const struct sockaddr_in *dst_in4 =
++              (const struct sockaddr_in *)dst_in;
++      const struct sockaddr_in6 *dst_in6 =
++              (const struct sockaddr_in6 *)dst_in;
++      const void *daddr = (dst_in->sa_family == AF_INET) ?
++              (const void *)&dst_in4->sin_addr.s_addr :
++              (const void *)&dst_in6->sin6_addr;
++#endif
+       if (dst->dev->flags & IFF_LOOPBACK)
+               ret = rdma_translate_ip(dst_in, dev_addr);
+@@ -486,7 +531,11 @@ static int copy_src_l2_addr(struct rdma_dev_addr *dev_addr,
+        * we're definitely in RoCE v2 (as RoCE v1 isn't routable) set the
+        * network type accordingly.
+        */
++#ifdef HAVE_RT_USES_GATEWAY
+       if (has_gateway(dst, dst_in->sa_family) &&
++#else
++      if (has_gateway(dst, daddr, dst_in->sa_family) &&
++#endif
+           ndev->type != ARPHRD_INFINIBAND)
+               dev_addr->network = dst_in->sa_family == AF_INET ?
+                                               RDMA_NETWORK_IPV4 :
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index xxxxxxx..xxxxxxx 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -1428,7 +1428,11 @@ static bool validate_ipv4_net_dev(struct net_device *net_dev,
+       fl4.saddr = saddr;
+       rcu_read_lock();
++#ifdef HAVE_FIB_LOOKUP_4_PARAMS
+       err = fib_lookup(dev_net(net_dev), &fl4, &res, 0);
++#else
++      err = fib_lookup(dev_net(net_dev), &fl4, &res);
++#endif
+       ret = err == 0 && FIB_RES_DEV(res) == net_dev;
+       rcu_read_unlock();
+@@ -1444,7 +1448,11 @@ static bool validate_ipv6_net_dev(struct net_device *net_dev,
+                          IPV6_ADDR_LINKLOCAL;
+       struct rt6_info *rt = rt6_lookup(dev_net(net_dev), &dst_addr->sin6_addr,
+                                        &src_addr->sin6_addr, net_dev->ifindex,
++#ifdef HAVE_RT6_LOOKUP_6_PARAMS
+                                        NULL, strict);
++#else
++                                       strict);
++#endif
+       bool ret;
+       if (!rt)
+@@ -2827,6 +2835,7 @@ static int cma_resolve_iw_route(struct rdma_id_private *id_priv)
+       return 0;
+ }
++#if defined(HAVE_VLAN_DEV_GET_EGRESS_QOS_MASK) && defined(HAVE_NETDEV_GET_PRIO_TC_MAP)
+ static int iboe_tos_to_sl(struct net_device *ndev, int tos)
+ {
+       int prio;
+@@ -2844,6 +2853,7 @@ static int iboe_tos_to_sl(struct net_device *ndev, int tos)
+ #endif
+       return 0;
+ }
++#endif
+ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
+ {
+@@ -2889,7 +2899,16 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
+       route->path_rec->reversible = 1;
+       route->path_rec->pkey = cpu_to_be16(0xffff);
+       route->path_rec->mtu_selector = IB_SA_EQ;
++#if defined(HAVE_VLAN_DEV_GET_EGRESS_QOS_MASK) && defined(HAVE_NETDEV_GET_PRIO_TC_MAP)
+       route->path_rec->sl = iboe_tos_to_sl(ndev, tos);
++#elif defined(HAVE_NETDEV_GET_PRIO_TC_MAP)
++      route->path_rec->sl = netdev_get_prio_tc_map(
++                      ndev->priv_flags & IFF_802_1Q_VLAN ?
++                              vlan_dev_real_dev(ndev) : ndev,
++                      rt_tos2priority(tos));
++#else
++      route->path_rec->sl = tos >> 5;
++#endif
+       route->path_rec->traffic_class = tos;
+       route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
+       route->path_rec->rate_selector = IB_SA_EQ;
+@@ -3327,7 +3346,11 @@ static int cma_alloc_any_port(enum rdma_ucm_port_space ps,
+       unsigned int rover;
+       struct net *net = id_priv->id.route.addr.dev_addr.net;
++#ifdef HAVE_INET_GET_LOCAL_PORT_RANGE_3_PARAMS
+       inet_get_local_port_range(net, &low, &high);
++#else
++      inet_get_local_port_range(&low, &high);
++#endif
+       remaining = (high - low) + 1;
+       rover = prandom_u32() % remaining + low;
+ retry:
+@@ -4287,7 +4310,11 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
+                                               id_priv->id.port_num, &rec,
+                                               comp_mask, GFP_KERNEL,
+                                               cma_ib_mc_handler, mc);
++#ifdef HAVE_PTR_ERR_OR_ZERO
+       return PTR_ERR_OR_ZERO(mc->multicast.ib);
++#else
++      return PTR_RET(mc->multicast.ib);
++#endif
+ }
+ static void iboe_mcast_work_handler(struct work_struct *work)
+diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c
+index xxxxxxx..xxxxxxx 100644
+--- a/drivers/infiniband/core/cma_configfs.c
++++ b/drivers/infiniband/core/cma_configfs.c
+@@ -38,6 +38,10 @@
+ #include "core_priv.h"
+ #include "cma_priv.h"
++#ifndef CONFIGFS_ATTR
++#define HAVE_OLD_CONFIGFS_API
++#endif
++
+ struct cma_device;
+ struct cma_dev_group;
+@@ -55,6 +59,23 @@ struct cma_dev_group {
+       struct cma_dev_port_group       *ports;
+ };
++#ifdef HAVE_OLD_CONFIGFS_API
++struct cma_configfs_attr {
++      struct configfs_attribute       attr;
++      ssize_t                         (*show)(struct config_item *item,
++                                              char *buf);
++      ssize_t                         (*store)(struct config_item *item,
++                                               const char *buf, size_t count);
++};
++#define CONFIGFS_ATTR(dummy, _name)                           \
++static struct cma_configfs_attr attr_##_name =        \
++      __CONFIGFS_ATTR(_name, S_IRUGO | S_IWUSR, _name##_show, _name##_store)
++
++#define CONFIGFS_ATTR_ADD(name) &name.attr
++#else
++#define CONFIGFS_ATTR_ADD(name) &name
++#endif /* HAVE_OLD_CONFIGFS_API */
++
+ static struct cma_dev_port_group *to_dev_port_group(struct config_item *item)
+ {
+       struct config_group *group;
+@@ -71,6 +92,34 @@ static bool filter_by_name(struct ib_device *ib_dev, void *cookie)
+       return !strcmp(dev_name(&ib_dev->dev), cookie);
+ }
++#ifdef HAVE_OLD_CONFIGFS_API
++static ssize_t cma_configfs_attr_show(struct config_item *item,
++                                    struct configfs_attribute *attr,
++                                    char *buf)
++{
++      struct cma_configfs_attr *ca =
++              container_of(attr, struct cma_configfs_attr, attr);
++
++      if (ca->show)
++              return ca->show(item, buf);
++
++      return -EINVAL;
++}
++
++static ssize_t cma_configfs_attr_store(struct config_item *item,
++                                     struct configfs_attribute *attr,
++                                     const char *buf, size_t count)
++{
++      struct cma_configfs_attr *ca =
++              container_of(attr, struct cma_configfs_attr, attr);
++
++      if (ca->store)
++              return ca->store(item, buf, count);
++
++      return -EINVAL;
++}
++#endif /* HAVE_OLD_CONFIGFS_API */
++
+ static int cma_configfs_params_get(struct config_item *item,
+                                  struct cma_device **pcma_dev,
+                                  struct cma_dev_port_group **pgroup)
+@@ -189,7 +238,22 @@ static struct configfs_attribute *cma_configfs_attributes[] = {
+       NULL,
+ };
++#ifdef HAVE_OLD_CONFIGFS_API
++static struct configfs_item_operations cma_item_ops = {
++      .show_attribute         = cma_configfs_attr_show,
++      .store_attribute        = cma_configfs_attr_store,
++};
++#else /* HAVE_OLD_CONFIGFS_API */
++static struct configfs_item_operations cma_item_ops = {
++};
++#endif
++
++#ifdef CONFIG_GROUP_INIT_TYPE_NAME_PARAM_3_IS_CONST
+ static const struct config_item_type cma_port_group_type = {
++#else
++static struct config_item_type cma_port_group_type = {
++#endif
++      .ct_item_ops    = &cma_item_ops,
+       .ct_attrs       = cma_configfs_attributes,
+       .ct_owner       = THIS_MODULE
+ };
+@@ -217,6 +281,14 @@ static int make_cma_ports(struct cma_dev_group *cma_dev_group,
+               goto free;
+       }
++#ifndef HAVE_CONFIGFS_DEFAULT_GROUPS_LIST
++      cma_dev_group->ports_group.default_groups = kcalloc((ports_num + 1),
++                                                          sizeof(struct config_group *),
++                                                          GFP_KERNEL);
++      if (!cma_dev_group->ports_group.default_groups)
++              goto free;
++#endif
++
+       for (i = 0; i < ports_num; i++) {
+               char port_str[10];
+@@ -226,10 +298,17 @@ static int make_cma_ports(struct cma_dev_group *cma_dev_group,
+               config_group_init_type_name(&ports[i].group,
+                                           port_str,
+                                           &cma_port_group_type);
++#ifdef HAVE_CONFIGFS_DEFAULT_GROUPS_LIST
+               configfs_add_default_group(&ports[i].group,
+                               &cma_dev_group->ports_group);
++#else
++              cma_dev_group->ports_group.default_groups[i] = &ports[i].group;
++#endif
+       }
++#ifndef HAVE_CONFIGFS_DEFAULT_GROUPS_LIST
++      cma_dev_group->ports_group.default_groups[i] = NULL;
++#endif
+       cma_dev_group->ports = ports;
+       return 0;
+@@ -266,7 +345,11 @@ static struct configfs_item_operations cma_ports_item_ops = {
+       .release = release_cma_ports_group
+ };
++#ifdef CONFIG_GROUP_INIT_TYPE_NAME_PARAM_3_IS_CONST
+ static const struct config_item_type cma_ports_group_type = {
++#else
++static struct config_item_type cma_ports_group_type = {
++#endif
+       .ct_item_ops    = &cma_ports_item_ops,
+       .ct_owner       = THIS_MODULE
+ };
+@@ -275,7 +358,11 @@ static struct configfs_item_operations cma_device_item_ops = {
+       .release = release_cma_dev
+ };
++#ifdef CONFIG_GROUP_INIT_TYPE_NAME_PARAM_3_IS_CONST
+ static const struct config_item_type cma_device_group_type = {
++#else
++static struct config_item_type cma_device_group_type = {
++#endif
+       .ct_item_ops    = &cma_device_item_ops,
+       .ct_owner       = THIS_MODULE
+ };
+@@ -305,16 +392,29 @@ static struct config_group *make_cma_dev(struct config_group *group,
+       err = make_cma_ports(cma_dev_group, cma_dev);
+       if (err)
++#ifdef HAVE_CONFIGFS_DEFAULT_GROUPS_LIST
+               goto fail;
++#else
++              goto fail_free;
++#endif
+       config_group_init_type_name(&cma_dev_group->device_group, name,
+                                   &cma_device_group_type);
++#ifdef HAVE_CONFIGFS_DEFAULT_GROUPS_LIST
+       configfs_add_default_group(&cma_dev_group->ports_group,
+                       &cma_dev_group->device_group);
++#else
++      cma_dev_group->device_group.default_groups[0] = &cma_dev_group->ports_group;
++      cma_dev_group->device_group.default_groups[1] = NULL;
++#endif
+       cma_deref_dev(cma_dev);
+       return &cma_dev_group->device_group;
++#ifndef HAVE_CONFIGFS_DEFAULT_GROUPS_LIST
++fail_free:
++      kfree(cma_dev_group->device_group.default_groups);
++#endif
+ fail:
+       if (cma_dev)
+               cma_deref_dev(cma_dev);
+@@ -326,7 +426,11 @@ static struct configfs_group_operations cma_subsys_group_ops = {
+       .make_group     = make_cma_dev,
+ };
++#ifdef CONFIG_GROUP_INIT_TYPE_NAME_PARAM_3_IS_CONST
+ static const struct config_item_type cma_subsys_type = {
++#else
++static struct config_item_type cma_subsys_type = {
++#endif
+       .ct_group_ops   = &cma_subsys_group_ops,
+       .ct_owner       = THIS_MODULE,
+ };
+diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
+index xxxxxxx..xxxxxxx 100644
+--- a/drivers/infiniband/core/core_priv.h
++++ b/drivers/infiniband/core/core_priv.h
+@@ -130,6 +130,7 @@ int ib_cache_setup_one(struct ib_device *device);
+ void ib_cache_cleanup_one(struct ib_device *device);
+ void ib_cache_release_one(struct ib_device *device);
++#ifdef HAVE_CGROUP_RDMA_H
+ #ifdef CONFIG_CGROUP_RDMA
+ void ib_device_register_rdmacg(struct ib_device *device);
+ void ib_device_unregister_rdmacg(struct ib_device *device);
+@@ -163,11 +164,39 @@ static inline void ib_rdmacg_uncharge(struct ib_rdmacg_object *cg_obj,
+ {
+ }
+ #endif
++#endif /* HAVE_CGROUP_RDMA_H */
+ static inline bool rdma_is_upper_dev_rcu(struct net_device *dev,
+                                        struct net_device *upper)
+ {
++#if defined(HAVE_NETDEV_HAS_UPPER_DEV_ALL_RCU)
+       return netdev_has_upper_dev_all_rcu(dev, upper);
++#elif defined(HAVE_NETDEV_FOR_EACH_ALL_UPPER_DEV_RCU)
++      struct net_device *_upper = NULL;
++      struct list_head *iter;
++
++      netdev_for_each_all_upper_dev_rcu(dev, _upper, iter)
++              if (_upper == upper)
++                      break;
++
++      return _upper == upper;
++#else
++      struct net_device *rdev_upper;
++      struct net_device *master;
++      bool ret;
++
++      if (!upper || !dev)
++              ret = false;
++
++      rdev_upper = rdma_vlan_dev_real_dev(upper);
++      master = netdev_master_upper_dev_get_rcu(dev);
++
++      ret = (upper == master) ||
++            (rdev_upper && (rdev_upper == master)) ||
++            (rdev_upper == dev);
++
++      return ret;
++#endif
+ }
+ int addr_init(void);
+@@ -182,6 +211,7 @@ void ib_sa_cleanup(void);
+ int rdma_nl_init(void);
+ void rdma_nl_exit(void);
++#ifdef HAVE_NETLINK_EXT_ACK
+ int ib_nl_handle_resolve_resp(struct sk_buff *skb,
+                             struct nlmsghdr *nlh,
+                             struct netlink_ext_ack *extack);
+@@ -191,6 +221,14 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb,
+ int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
+                            struct nlmsghdr *nlh,
+                            struct netlink_ext_ack *extack);
++#else
++int ib_nl_handle_resolve_resp(struct sk_buff *skb,
++                            struct netlink_callback *cb);
++int ib_nl_handle_set_timeout(struct sk_buff *skb,
++                            struct netlink_callback *cb);
++int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
++                            struct netlink_callback *cb);
++#endif
+ int ib_get_cached_subnet_prefix(struct ib_device *device,
+                               u8                port_num,
+diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
+index xxxxxxx..xxxxxxx 100644
+--- a/drivers/infiniband/core/cq.c
++++ b/drivers/infiniband/core/cq.c
+@@ -122,6 +122,7 @@ static void ib_cq_completion_direct(struct ib_cq *cq, void *private)
+       WARN_ONCE(1, "got unsolicited completion for CQ 0x%p\n", cq);
+ }
++#if defined(HAVE_IRQ_POLL_H) && IS_ENABLED(CONFIG_IRQ_POLL)
+ static int ib_poll_handler(struct irq_poll *iop, int budget)
+ {
+       struct ib_cq *cq = container_of(iop, struct ib_cq, iop);
+@@ -145,6 +146,30 @@ static void ib_cq_completion_softirq(struct ib_cq *cq, void *private)
+ {
+       irq_poll_sched(&cq->iop);
+ }
++#else
++static int ib_poll_handler(struct blk_iopoll *iop, int budget)
++{
++      struct ib_cq *cq = container_of(iop, struct ib_cq, iop);
++      int completed;
++
++      completed = __ib_process_cq(cq, budget);
++      if (completed < budget) {
++              blk_iopoll_complete(&cq->iop);
++              if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) {
++                      if (!blk_iopoll_sched_prep(&cq->iop))
++                              blk_iopoll_sched(&cq->iop);
++              }
++      }
++
++      return completed;
++}
++
++static void ib_cq_completion_softirq(struct ib_cq *cq, void *private)
++{
++      if (!blk_iopoll_sched_prep(&cq->iop))
++              blk_iopoll_sched(&cq->iop);
++}
++#endif
+ static void ib_cq_poll_work(struct work_struct *work)
+ {
+@@ -222,8 +247,12 @@ struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
+               break;
+       case IB_POLL_SOFTIRQ:
+               cq->comp_handler = ib_cq_completion_softirq;
+-
++#if defined(HAVE_IRQ_POLL_H) && IS_ENABLED(CONFIG_IRQ_POLL)
+               irq_poll_init(&cq->iop, IB_POLL_BUDGET_IRQ, ib_poll_handler);
++#else
++              blk_iopoll_init(&cq->iop, IB_POLL_BUDGET_IRQ, ib_poll_handler);
++              blk_iopoll_enable(&cq->iop);
++#endif
+               ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+               break;
+       case IB_POLL_WORKQUEUE:
+@@ -266,7 +295,11 @@ void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
+       case IB_POLL_DIRECT:
+               break;
+       case IB_POLL_SOFTIRQ:
++#if defined(HAVE_IRQ_POLL_H) && IS_ENABLED(CONFIG_IRQ_POLL)
+               irq_poll_disable(&cq->iop);
++#else
++              blk_iopoll_disable(&cq->iop);
++#endif
+               break;
+       case IB_POLL_WORKQUEUE:
+       case IB_POLL_UNBOUND_WORKQUEUE:
+diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
+index xxxxxxx..xxxxxxx 100644
+--- a/drivers/infiniband/core/device.c
++++ b/drivers/infiniband/core/device.c
+@@ -199,6 +199,7 @@ static DECLARE_HASHTABLE(ndev_hash, 5);
+ static void free_netdevs(struct ib_device *ib_dev);
+ static void ib_unregister_work(struct work_struct *work);
+ static void __ib_unregister_device(struct ib_device *device);
++#if defined(HAVE_REGISTER_LSM_NOTIFIER) || defined(HAVE_REGISTER_BLOCKING_LSM_NOTIFIER)
+ static int ib_security_change(struct notifier_block *nb, unsigned long event,
+                             void *lsm_data);
+ static void ib_policy_change_task(struct work_struct *work);
+@@ -267,6 +268,7 @@ define_ibdev_printk_level(ibdev_info, KERN_INFO);
+ static struct notifier_block ibdev_lsm_nb = {
+       .notifier_call = ib_security_change,
+ };
++#endif
+ static int rdma_dev_change_netns(struct ib_device *device, struct net *cur_net,
+                                struct net *net);
+@@ -467,17 +469,29 @@ static int alloc_name(struct ib_device *ibdev, const char *name)
+ {
+       struct ib_device *device;
+       unsigned long index;
++#ifdef HAVE_IDA_ALLOC
+       struct ida inuse;
+       int rc;
+       int i;
++#else
++      unsigned long *inuse;
++      int i;
++
++      inuse = (unsigned long *) get_zeroed_page(GFP_KERNEL);
++      if (!inuse)
++              return -ENOMEM;
++#endif
+       lockdep_assert_held_write(&devices_rwsem);
++#ifdef HAVE_IDA_ALLOC
+       ida_init(&inuse);
++#endif
+       xa_for_each (&devices, index, device) {
+               char buf[IB_DEVICE_NAME_MAX];
+               if (sscanf(dev_name(&device->dev), name, &i) != 1)
+                       continue;
++#ifdef HAVE_IDA_ALLOC
+               if (i < 0 || i >= INT_MAX)
+                       continue;
+               snprintf(buf, sizeof buf, name, i);
+@@ -497,6 +511,17 @@ static int alloc_name(struct ib_device *ibdev, const char *name)
+ out:
+       ida_destroy(&inuse);
+       return rc;
++#else
++      if (i < 0 || i >= PAGE_SIZE * 8)
++              continue;
++      snprintf(buf, sizeof buf, name, i);
++      if (!strcmp(buf, dev_name(&device->dev)))
++              set_bit(i, inuse);
++      }
++      i = find_first_zero_bit(inuse, PAGE_SIZE * 8);
++      free_page((unsigned long) inuse);
++      return dev_set_name(&ibdev->dev, name, i);
++#endif
+ }
+ static void ib_device_release(struct device *device)
+@@ -819,6 +844,7 @@ void ib_get_device_fw_str(struct ib_device *dev, char *str)
+ }
+ EXPORT_SYMBOL(ib_get_device_fw_str);
++#if defined(HAVE_REGISTER_LSM_NOTIFIER) || defined(HAVE_REGISTER_BLOCKING_LSM_NOTIFIER)
+ static void ib_policy_change_task(struct work_struct *work)
+ {
+       struct ib_device *dev;
+@@ -843,7 +869,9 @@ static void ib_policy_change_task(struct work_struct *work)
+       }
+       up_read(&devices_rwsem);
+ }
++#endif /* HAVE_REGISTER_LSM_NOTIFIER */
++#if defined(HAVE_REGISTER_LSM_NOTIFIER) || defined(HAVE_REGISTER_BLOCKING_LSM_NOTIFIER)
+ static int ib_security_change(struct notifier_block *nb, unsigned long event,
+                             void *lsm_data)
+ {
+@@ -855,6 +883,7 @@ static int ib_security_change(struct notifier_block *nb, unsigned long event,
+       return NOTIFY_OK;
+ }
++#endif /* HAVE_REGISTER_LSM_NOTIFIER */
+ static void compatdev_release(struct device *dev)
+ {
+@@ -1169,6 +1198,7 @@ out:
+ static void setup_dma_device(struct ib_device *device)
+ {
++#ifdef HAVE_DEVICE_DMA_OPS
+       struct device *parent = device->dev.parent;
+       WARN_ON_ONCE(device->dma_device);
+@@ -1200,6 +1230,15 @@ static void setup_dma_device(struct ib_device *device)
+               WARN_ON_ONCE(!parent);
+               device->dma_device = parent;
+       }
++#else /* HAVE_DEVICE_DMA_OPS */
++      WARN_ON_ONCE(!device->dev.parent && !device->dma_device);
++      WARN_ON_ONCE(device->dev.parent && device->dma_device
++                   && device->dev.parent != device->dma_device);
++      if (!device->dev.parent)
++              device->dev.parent = device->dma_device;
++      if (!device->dma_device)
++              device->dma_device = device->dev.parent;
++#endif /* HAVE_DEVICE_DMA_OPS */
+       /* Setup default max segment size for all IB devices */
+       dma_set_max_seg_size(device->dma_device, SZ_2G);
+@@ -1348,7 +1387,9 @@ int ib_register_device(struct ib_device *device, const char *name)
+               return ret;
+       }
++#ifdef HAVE_CGROUP_RDMA_H
+       ib_device_register_rdmacg(device);
++#endif
+       rdma_counter_init(device);
+@@ -1401,7 +1442,9 @@ dev_cleanup:
+       device_del(&device->dev);
+ cg_cleanup:
+       dev_set_uevent_suppress(&device->dev, false);
++#ifdef HAVE_CGROUP_RDMA_H
+       ib_device_unregister_rdmacg(device);
++#endif
+       ib_cache_cleanup_one(device);
+       return ret;
+ }
+@@ -1428,7 +1471,9 @@ static void __ib_unregister_device(struct ib_device *ib_dev)
+       ib_device_unregister_sysfs(ib_dev);
+       device_del(&ib_dev->dev);
++#ifdef HAVE_CGROUP_RDMA_H
+       ib_device_unregister_rdmacg(ib_dev);
++#endif
+       ib_cache_cleanup_one(ib_dev);
+       /*
+@@ -2617,15 +2662,27 @@ EXPORT_SYMBOL(ib_set_device_ops);
+ static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
+       [RDMA_NL_LS_OP_RESOLVE] = {
++#ifdef HAVE_NETLINK_EXT_ACK
+               .doit = ib_nl_handle_resolve_resp,
++#else
++              .dump = ib_nl_handle_resolve_resp,
++#endif
+               .flags = RDMA_NL_ADMIN_PERM,
+       },
+       [RDMA_NL_LS_OP_SET_TIMEOUT] = {
++#ifdef HAVE_NETLINK_EXT_ACK
+               .doit = ib_nl_handle_set_timeout,
++#else
++              .dump = ib_nl_handle_set_timeout,
++#endif
+               .flags = RDMA_NL_ADMIN_PERM,
+       },
+       [RDMA_NL_LS_OP_IP_RESOLVE] = {
++#ifdef HAVE_NETLINK_EXT_ACK
+               .doit = ib_nl_handle_ip_res_resp,
++#else
++              .dump = ib_nl_handle_ip_res_resp,
++#endif
+               .flags = RDMA_NL_ADMIN_PERM,
+       },
+ };
+@@ -2684,11 +2741,17 @@ static int __init ib_core_init(void)
+               goto err_mad;
+       }
++#if defined(HAVE_REGISTER_LSM_NOTIFIER) || defined(HAVE_REGISTER_BLOCKING_LSM_NOTIFIER)
++#ifdef HAVE_REGISTER_BLOCKING_LSM_NOTIFIER
+       ret = register_blocking_lsm_notifier(&ibdev_lsm_nb);
++#else
++       ret = register_lsm_notifier(&ibdev_lsm_nb);
++#endif
+       if (ret) {
+               pr_warn("Couldn't register LSM notifier. ret %d\n", ret);
+               goto err_sa;
+       }
++#endif
+       ret = register_pernet_device(&rdma_dev_net_ops);
+       if (ret) {
+@@ -2703,9 +2766,15 @@ static int __init ib_core_init(void)
+       return 0;
+ err_compat:
++#if defined(HAVE_REGISTER_LSM_NOTIFIER) || defined(HAVE_REGISTER_BLOCKING_LSM_NOTIFIER)
++#ifdef HAVE_REGISTER_BLOCKING_LSM_NOTIFIER
+       unregister_blocking_lsm_notifier(&ibdev_lsm_nb);
++#else
++      unregister_lsm_notifier(&ibdev_lsm_nb);
++#endif
+ err_sa:
+       ib_sa_cleanup();
++#endif
+ err_mad:
+       ib_mad_cleanup();
+ err_addr:
+@@ -2729,7 +2798,13 @@ static void __exit ib_core_cleanup(void)
+       nldev_exit();
+       rdma_nl_unregister(RDMA_NL_LS);
+       unregister_pernet_device(&rdma_dev_net_ops);
++#if defined(HAVE_REGISTER_LSM_NOTIFIER) || defined(HAVE_REGISTER_BLOCKING_LSM_NOTIFIER)
++#ifdef HAVE_REGISTER_BLOCKING_LSM_NOTIFIER
+       unregister_blocking_lsm_notifier(&ibdev_lsm_nb);
++#else
++      unregister_lsm_notifier(&ibdev_lsm_nb);
++#endif
++#endif
+       ib_sa_cleanup();
+       ib_mad_cleanup();
+       addr_cleanup();
+diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
+index xxxxxxx..xxxxxxx 100644
+--- a/drivers/infiniband/core/fmr_pool.c
++++ b/drivers/infiniband/core/fmr_pool.c
+@@ -96,8 +96,12 @@ struct ib_fmr_pool {
+                                                  void *              arg);
+       void                     *flush_arg;
++#ifdef HAVE_KTHREAD_QUEUE_WORK
+       struct kthread_worker     *worker;
+       struct kthread_work       work;
++#else
++      struct task_struct       *thread;
++#endif
+       atomic_t                  req_ser;
+       atomic_t                  flush_ser;
+@@ -175,6 +179,7 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
+       spin_unlock_irq(&pool->pool_lock);
+ }
++#ifdef HAVE_KTHREAD_QUEUE_WORK
+ static void ib_fmr_cleanup_func(struct kthread_work *work)
+ {
+       struct ib_fmr_pool *pool = container_of(work, struct ib_fmr_pool, work);
+@@ -189,6 +194,32 @@ static void ib_fmr_cleanup_func(struct kthread_work *work)
+       if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0)
+               kthread_queue_work(pool->worker, &pool->work);
+ }
++#else /* HAVE_KTHREAD_QUEUE_WORK */
++static int ib_fmr_cleanup_thread(void *pool_ptr)
++{
++      struct ib_fmr_pool *pool = pool_ptr;
++
++      do {
++              if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
++                      ib_fmr_batch_release(pool);
++
++                      atomic_inc(&pool->flush_ser);
++                      wake_up_interruptible(&pool->force_wait);
++
++                      if (pool->flush_function)
++                              pool->flush_function(pool, pool->flush_arg);
++              }
++
++              set_current_state(TASK_INTERRUPTIBLE);
++              if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
++                  !kthread_should_stop())
++                      schedule();
++              __set_current_state(TASK_RUNNING);
++      } while (!kthread_should_stop());
++
++      return 0;
++}
++#endif /* HAVE_KTHREAD_QUEUE_WORK */
+ /**
+  * ib_create_fmr_pool - Create an FMR pool
+@@ -257,6 +288,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd             *pd,
+       atomic_set(&pool->flush_ser, 0);
+       init_waitqueue_head(&pool->force_wait);
++#ifdef HAVE_KTHREAD_QUEUE_WORK
+       pool->worker =
+               kthread_create_worker(0, "ib_fmr(%s)", dev_name(&device->dev));
+       if (IS_ERR(pool->worker)) {
+@@ -265,6 +297,17 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd             *pd,
+               goto out_free_pool;
+       }
+       kthread_init_work(&pool->work, ib_fmr_cleanup_func);
++#else
++      pool->thread = kthread_run(ib_fmr_cleanup_thread,
++                                 pool,
++                                 "ib_fmr(%s)",
++                                 device->name);
++      if (IS_ERR(pool->thread)) {
++              pr_warn(PFX "couldn't start cleanup thread\n");
++              ret = PTR_ERR(pool->thread);
++              goto out_free_pool;
++      }
++#endif
+       {
+               struct ib_pool_fmr *fmr;
+@@ -329,7 +372,11 @@ void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
+       LIST_HEAD(fmr_list);
+       int                 i;
++#ifdef HAVE_KTHREAD_QUEUE_WORK
+       kthread_destroy_worker(pool->worker);
++#else
++      kthread_stop(pool->thread);
++#endif
+       ib_fmr_batch_release(pool);
+       i = 0;
+@@ -379,7 +426,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
+       spin_unlock_irq(&pool->pool_lock);
+       serial = atomic_inc_return(&pool->req_ser);
++#ifdef HAVE_KTHREAD_QUEUE_WORK
+       kthread_queue_work(pool->worker, &pool->work);
++#else
++      wake_up_process(pool->thread);
++#endif
+       if (wait_event_interruptible(pool->force_wait,
+                                    atomic_read(&pool->flush_ser) - serial >= 0))
+@@ -491,7 +542,11 @@ void ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
+                       list_add_tail(&fmr->list, &pool->dirty_list);
+                       if (++pool->dirty_len >= pool->dirty_watermark) {
+                               atomic_inc(&pool->req_ser);
++#ifdef HAVE_KTHREAD_QUEUE_WORK
+                               kthread_queue_work(pool->worker, &pool->work);
++#else
++                              wake_up_process(pool->thread);
++#endif
+                       }
+               }
+       }
+diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
+index xxxxxxx..xxxxxxx 100644
+--- a/drivers/infiniband/core/iwpm_util.c
++++ b/drivers/infiniband/core/iwpm_util.c
+@@ -506,13 +506,21 @@ int iwpm_parse_nlmsg(struct netlink_callback *cb, int policy_max,
+       int ret;
+       const char *err_str = "";
++#ifdef HAVE_NLMSG_VALIDATE_DEPRECATED
+       ret = nlmsg_validate_deprecated(cb->nlh, nlh_len, policy_max - 1,
+                                       nlmsg_policy, NULL);
++#else
++       ret = nlmsg_validate(cb->nlh, nlh_len, policy_max - 1, nlmsg_policy, NULL);
++#endif
+       if (ret) {
+               err_str = "Invalid attribute";
+               goto parse_nlmsg_error;
+       }
++#ifdef HAVE_NLMSG_PARSE_DEPRECATED
+       ret = nlmsg_parse_deprecated(cb->nlh, nlh_len, nltb, policy_max - 1,
++#else
++      ret = nlmsg_parse(cb->nlh, nlh_len, nltb, policy_max - 1,
++#endif
+                                    nlmsg_policy, NULL);
+       if (ret) {
+               err_str = "Unable to parse the nlmsg";
+diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
+index xxxxxxx..xxxxxxx 100644
+--- a/drivers/infiniband/core/mad.c
++++ b/drivers/infiniband/core/mad.c
+@@ -35,6 +35,9 @@
+  *
+  */
++#ifdef pr_fmt
++#undef pr_fmt
++#endif
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+ #include <linux/dma-mapping.h>
+@@ -57,7 +60,11 @@
+ #ifdef CONFIG_TRACEPOINTS
+ static void create_mad_addr_info(struct ib_mad_send_wr_private *mad_send_wr,
+                         struct ib_mad_qp_info *qp_info,
++#ifdef HAVE_TRACE_EVENT_RAW_IB_MAD_SEND_TEMPLATE
+                         struct trace_event_raw_ib_mad_send_template *entry)
++#else
++                        struct ftrace_raw_ib_mad_send_template *entry)
++#endif
+ {
+       u16 pkey;
+       struct ib_device *dev = qp_info->port_priv->device;
+diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
+index xxxxxxx..xxxxxxx 100644
+--- a/drivers/infiniband/core/netlink.c
++++ b/drivers/infiniband/core/netlink.c
+@@ -31,6 +31,9 @@
+  * SOFTWARE.
+  */
++#ifdef pr_fmt
++#undef pr_fmt
++#endif
+ #define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__
+ #include <linux/export.h>
+@@ -153,8 +156,12 @@ int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh,
+ }
+ EXPORT_SYMBOL(ibnl_put_attr);
++#ifdef HAVE_NETLINK_EXT_ACK
+ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
+                          struct netlink_ext_ack *extack)
++#else
++static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
++#endif
+ {
+       int type = nlh->nlmsg_type;
+       unsigned int index = RDMA_NL_GET_CLIENT(type);
+@@ -176,7 +183,11 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
+        */
+       if (index == RDMA_NL_LS) {
+               if (cb_table[op].doit)
++#ifdef HAVE_NETLINK_EXT_ACK
+                       return cb_table[op].doit(skb, nlh, extack);
++#else
++                      return cb_table[op].doit(skb, nlh);
++#endif
+               return -EINVAL;
+       }
+       /* FIXME: Convert IWCM to properly handle doit callbacks */
+@@ -190,7 +201,11 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
+       }
+       if (cb_table[op].doit)
++#ifdef HAVE_NETLINK_EXT_ACK
+               return cb_table[op].doit(skb, nlh, extack);
++#else
++              return cb_table[op].doit(skb, nlh);
++#endif
+       return 0;
+ }
+@@ -202,10 +217,16 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
+  * for that consumer only.
+  */
+ static int rdma_nl_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
++#ifdef HAVE_NETLINK_EXT_ACK
+                                                  struct nlmsghdr *,
+                                                  struct netlink_ext_ack *))
++#else
++                                                 struct nlmsghdr *))
++#endif
+ {
++#ifdef HAVE_NETLINK_EXT_ACK
+       struct netlink_ext_ack extack = {};
++#endif
+       struct nlmsghdr *nlh;
+       int err;
+@@ -233,13 +254,21 @@ static int rdma_nl_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
+               if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
+                       goto ack;
++#ifdef HAVE_NETLINK_EXT_ACK
+               err = cb(skb, nlh, &extack);
++#else
++              err = cb(skb, nlh);
++#endif
+               if (err == -EINTR)
+                       goto skip;
+ ack:
+               if (nlh->nlmsg_flags & NLM_F_ACK || err)
++#ifdef HAVE_NETLINK_EXT_ACK
+                       netlink_ack(skb, nlh, err, &extack);
++#else
++                      netlink_ack(skb, nlh, err);
++#endif
+ skip:
+               msglen = NLMSG_ALIGN(nlh->nlmsg_len);
+diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
+index xxxxxxx..xxxxxxx 100644
+--- a/drivers/infiniband/core/nldev.c
++++ b/drivers/infiniband/core/nldev.c
+@@ -339,8 +339,12 @@ static int fill_res_info_entry(struct sk_buff *msg,
+ {
+       struct nlattr *entry_attr;
++#ifdef HAVE_NLA_NEST_START_NOFLAG
+       entry_attr = nla_nest_start_noflag(msg,
+                                          RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY);
++#else
++      entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY);
++#endif
+       if (!entry_attr)
+               return -EMSGSIZE;
+@@ -375,7 +379,11 @@ static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
+       if (fill_nldev_handle(msg, device))
+               return -EMSGSIZE;
++#ifdef HAVE_NLA_NEST_START_NOFLAG
+       table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_RES_SUMMARY);
++#else
++      table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY);
++#endif
+       if (!table_attr)
+               return -EMSGSIZE;
+@@ -795,8 +803,12 @@ static int fill_res_counter_entry(struct sk_buff *msg, bool has_cap_net_admin,
+       return 0;
+ }
++#ifdef HAVE_NETLINK_EXT_ACK
+ static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+                         struct netlink_ext_ack *extack)
++#else
++static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
++#endif
+ {
+       struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+       struct ib_device *device;
+@@ -804,8 +816,16 @@ static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+       u32 index;
+       int err;
++#ifdef HAVE_NLMSG_PARSE_DEPRECATED
+       err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
++#else
++      err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
++#endif/*HAVE_NLMSG_PARSE_DEPRECATED*/
++#ifdef HAVE_NETLINK_EXT_ACK
+                                    nldev_policy, extack);
++#else
++                                   nldev_policy, NULL);
++#endif
+       if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
+               return -EINVAL;
+@@ -821,7 +841,11 @@ static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+               goto err;
+       }
++#ifdef HAVE_NETLINK_SKB_PARMS_PORTID
+       nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
++#else
++      nlh = nlmsg_put(msg, NETLINK_CB(skb).pid, nlh->nlmsg_seq,
++#endif
+                       RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
+                       0, 0);
+@@ -832,7 +856,11 @@ static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+       nlmsg_end(msg, nlh);
+       ib_device_put(device);
++#ifdef HAVE_NETLINK_SKB_PARMS_PORTID
+       return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
++#else
++      return rdma_nl_unicast(msg, NETLINK_CB(skb).pid);
++#endif
+ err_free:
+       nlmsg_free(msg);
+@@ -841,16 +869,29 @@ err:
+       return err;
+ }
++#ifdef HAVE_NETLINK_EXT_ACK
+ static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+                         struct netlink_ext_ack *extack)
++#else
++static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
++#endif
+ {
+       struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+       struct ib_device *device;
+       u32 index;
+       int err;
++#ifdef HAVE_NETLINK_EXT_ACK
++#ifdef HAVE_NLMSG_PARSE_DEPRECATED
+       err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+                                    nldev_policy, extack);
++#else
++      err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
++                        nldev_policy, extack);
++#endif /*HAVE_NLMSG_PARSE_DEPRECATED*/
++#else
++      err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, NULL);
++#endif
+       if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
+               return -EINVAL;
+@@ -901,7 +942,11 @@ static int _nldev_get_dumpit(struct ib_device *device,
+       if (idx < start)
+               return 0;
++#ifdef HAVE_NETLINK_SKB_PARMS_PORTID
+       nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
++#else
++      nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
++#endif
+                       RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
+                       0, NLM_F_MULTI);
+@@ -927,8 +972,12 @@ static int nldev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+       return ib_enum_all_devs(_nldev_get_dumpit, skb, cb);
+ }
++#ifdef HAVE_NETLINK_EXT_ACK
+ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+                              struct netlink_ext_ack *extack)
++#else
++static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
++#endif
+ {
+       struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+       struct ib_device *device;
+@@ -937,8 +986,16 @@ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+       u32 port;
+       int err;
++#ifdef HAVE_NLMSG_PARSE_DEPRECATED
+       err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
++#else
++      err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
++#endif /*HAVE_NLMSG_PARSE_DEPRECATED*/
++#ifdef HAVE_NETLINK_EXT_ACK
+                                    nldev_policy, extack);
++#else
++                                   nldev_policy, NULL);
++#endif
+       if (err ||
+           !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
+           !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
+@@ -961,7 +1018,11 @@ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+               goto err;
+       }
++#ifdef HAVE_NETLINK_SKB_PARMS_PORTID
+       nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
++#else
++      nlh = nlmsg_put(msg, NETLINK_CB(skb).pid, nlh->nlmsg_seq,
++#endif
+                       RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
+                       0, 0);
+@@ -972,7 +1033,11 @@ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+       nlmsg_end(msg, nlh);
+       ib_device_put(device);
++#ifdef HAVE_NETLINK_SKB_PARMS_PORTID
+       return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
++#else
++      return rdma_nl_unicast(msg, NETLINK_CB(skb).pid);
++#endif
+ err_free:
+       nlmsg_free(msg);
+@@ -993,7 +1058,11 @@ static int nldev_port_get_dumpit(struct sk_buff *skb,
+       int err;
+       unsigned int p;
++#ifdef HAVE_NLMSG_PARSE_DEPRECATED
+       err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
++#else
++      err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
++#endif
+                                    nldev_policy, NULL);
+       if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
+               return -EINVAL;
+@@ -1019,7 +1088,11 @@ static int nldev_port_get_dumpit(struct sk_buff *skb,
+                       continue;
+               }
++#ifdef HAVE_NETLINK_SKB_PARMS_PORTID
+               nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
++#else
++              nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).pid,
++#endif
+                               cb->nlh->nlmsg_seq,
+                               RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
+                                                RDMA_NLDEV_CMD_PORT_GET),
+@@ -1039,8 +1112,12 @@ out:
+       return skb->len;
+ }
++#ifdef HAVE_NETLINK_EXT_ACK
+ static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+                             struct netlink_ext_ack *extack)
++#else
++static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
++#endif
+ {
+       struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+       struct ib_device *device;
+@@ -1048,8 +1125,16 @@ static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+       u32 index;
+       int ret;
++#ifdef HAVE_NLMSG_PARSE_DEPRECATED
+       ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
++#else
++      ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
++#endif /*HAVE_NLMSG_PARSE_DEPRECATED*/
++#ifdef HAVE_NETLINK_EXT_ACK
+                                    nldev_policy, extack);
++#else
++                                   nldev_policy, NULL);
++#endif
+       if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
+               return -EINVAL;
+@@ -1064,7 +1149,11 @@ static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+               goto err;
+       }
++#ifdef HAVE_NETLINK_SKB_PARMS_PORTID
+       nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
++#else
++      nlh = nlmsg_put(msg, NETLINK_CB(skb).pid, nlh->nlmsg_seq,
++#endif
+                       RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
+                       0, 0);
+@@ -1074,7 +1163,11 @@ static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+       nlmsg_end(msg, nlh);
+       ib_device_put(device);
++#ifdef HAVE_NETLINK_SKB_PARMS_PORTID
+       return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
++#else
++      return rdma_nl_unicast(msg, NETLINK_CB(skb).pid);
++#endif
+ err_free:
+       nlmsg_free(msg);
+@@ -1094,7 +1187,11 @@ static int _nldev_res_get_dumpit(struct ib_device *device,
+       if (idx < start)
+               return 0;
++#ifdef HAVE_NETLINK_SKB_PARMS_PORTID
+       nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
++#else
++      nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
++#endif
+                       RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
+                       0, NLM_F_MULTI);
+@@ -1180,7 +1277,9 @@ static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
+ };
+ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
++#ifdef HAVE_NETLINK_EXT_ACK
+                              struct netlink_ext_ack *extack,
++#endif
+                              enum rdma_restrack_type res_type)
+ {
+       const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
+@@ -1192,8 +1291,16 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+       struct sk_buff *msg;
+       int ret;
++#ifdef HAVE_NLMSG_PARSE_DEPRECATED
+       ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
++#else
++      ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
++#endif
++#ifdef HAVE_NETLINK_EXT_ACK
+                                    nldev_policy, extack);
++#else
++                                   nldev_policy, NULL);
++#endif
+       if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !fe->id || !tb[fe->id])
+               return -EINVAL;
+@@ -1243,7 +1350,11 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+               goto err_free;
+       }
++#ifdef HAVE_NETLINK_CAPABLE
+       has_cap_net_admin = netlink_capable(skb, CAP_NET_ADMIN);
++#else
++      has_cap_net_admin = (sock_net(skb->sk) == &init_net);
++#endif
+       ret = fe->fill_res_func(msg, has_cap_net_admin, res, port);
+       rdma_restrack_put(res);
+       if (ret)
+@@ -1269,19 +1380,30 @@ static int res_get_common_dumpit(struct sk_buff *skb,
+       const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
+       struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+       struct rdma_restrack_entry *res;
++#ifdef HAVE_XARRAY
+       struct rdma_restrack_root *rt;
++#endif
+       int err, ret = 0, idx = 0;
+       struct nlattr *table_attr;
++#ifdef HAVE_XARRAY
+       struct nlattr *entry_attr;
++#endif
+       struct ib_device *device;
+       int start = cb->args[0];
+       bool has_cap_net_admin;
+       struct nlmsghdr *nlh;
++#ifdef HAVE_XARRAY
+       unsigned long id;
++#endif
+       u32 index, port = 0;
+       bool filled = false;
++      COMPAT_HL_NODE
++#ifdef HAVE_NLMSG_PARSE_DEPRECATED
+       err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
++#else
++      err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
++#endif
+                                    nldev_policy, NULL);
+       /*
+        * Right now, we are expecting the device index to get res information,
+@@ -1310,7 +1432,11 @@ static int res_get_common_dumpit(struct sk_buff *skb,
+               }
+       }
++#ifdef HAVE_NETLINK_SKB_PARMS_PORTID
+       nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
++#else
++      nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
++#endif
+                       RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, fe->nldev_cmd),
+                       0, NLM_F_MULTI);
+@@ -1319,14 +1445,23 @@ static int res_get_common_dumpit(struct sk_buff *skb,
+               goto err;
+       }
++#ifdef HAVE_NLA_NEST_START_NOFLAG
+       table_attr = nla_nest_start_noflag(skb, fe->nldev_attr);
++#else
++      table_attr = nla_nest_start(skb, fe->nldev_attr);
++#endif
+       if (!table_attr) {
+               ret = -EMSGSIZE;
+               goto err;
+       }
++#ifdef HAVE_NETLINK_CAPABLE
+       has_cap_net_admin = netlink_capable(cb->skb, CAP_NET_ADMIN);
++#else
++      has_cap_net_admin = (sock_net(cb->skb->sk) == &init_net);
++#endif
++#ifdef HAVE_XARRAY
+       rt = &device->res[res_type];
+       xa_lock(&rt->xa);
+       /*
+@@ -1345,7 +1480,11 @@ static int res_get_common_dumpit(struct sk_buff *skb,
+               filled = true;
++#ifdef HAVE_NLA_NEST_START_NOFLAG
+               entry_attr = nla_nest_start_noflag(skb, fe->entry);
++#else
++              entry_attr = nla_nest_start(skb, fe->entry);
++#endif
+               if (!entry_attr) {
+                       ret = -EMSGSIZE;
+                       rdma_restrack_put(res);
+@@ -1386,7 +1525,79 @@ msg_full:
+ res_err:
+       nla_nest_cancel(skb, table_attr);
++#else /*HAVE_XARRAY */
++      down_read(&device->res->rwsem);
++#ifdef HAVE_HLIST_FOR_EACH_ENTRY_3_PARAMS
++      hash_for_each_possible(device->res->hash, res, node, res_type) {
++#else
++      hash_for_each_possible(device->res->hash, res, hlnode, node, res_type) {
++#endif
++              if (idx < start)
++                      goto next;
++
++              if ((rdma_is_kernel_res(res) &&
++                   task_active_pid_ns(current) != &init_pid_ns) ||
++                  (!rdma_is_kernel_res(res) && task_active_pid_ns(current) !=
++                   task_active_pid_ns(res->task)))
++                      /*
++                       * 1. Kernel resources should be visible in the
++                       *    init namespace only.
++                       * 2. Present only resources visible in the
++                       *    current namespace.
++                       */
++                      goto next;
++
++              if (!rdma_restrack_get(res))
++                      /*
++                      /*
++                       * Resource is being released now, but we are not
++                       * releasing the lock, so it will be freed on our
++                       * next pass, once we have picked up ->next.
++                       */
++
++              filled = true;
++              up_read(&device->res->rwsem);
++              ret = fe->fill_res_func(skb, cb, res, port);
++              down_read(&device->res->rwsem);
++              /*
++               * Return the resource back; it won't be released until
++               * device->res->rwsem is taken for write.
++               */
++              rdma_restrack_put(res);
++
++              if (ret == -EMSGSIZE)
++                      /*
++                       * There is a chance to optimize here.
++                       * It can be done by using list_prepare_entry
++                       * and list_for_each_entry_continue afterwards.
++                       */
++                      break;
++              if (ret)
++                      goto res_err;
++next:         idx++;
++      }
++      up_read(&device->res->rwsem);
++
++      nla_nest_end(skb, table_attr);
++      nlmsg_end(skb, nlh);
++      cb->args[0] = idx;
++
++      /*
++       * No more entries to fill, cancel the message and
++       * return 0 to mark end of dumpit.
++       */
++      if (!filled)
++              goto err;
++
++      ib_device_put(device);
++      return skb->len;
++
++res_err:
++      nla_nest_cancel(skb, table_attr);
++      up_read(&device->res->rwsem);
++
++#endif  /*HAVE_XARRAY */
+ err:
+       nlmsg_cancel(skb, nlh);
+@@ -1395,6 +1606,7 @@ err_index:
+       return ret;
+ }
++#ifdef HAVE_NETLINK_EXT_ACK
+ #define RES_GET_FUNCS(name, type)                                              \
+       static int nldev_res_get_##name##_dumpit(struct sk_buff *skb,          \
+                                                struct netlink_callback *cb)  \
+@@ -1407,6 +1619,19 @@ err_index:
+       {                                                                      \
+               return res_get_common_doit(skb, nlh, extack, type);            \
+       }
++#else
++#define RES_GET_FUNCS(name, type)                                              \
++      static int nldev_res_get_##name##_dumpit(struct sk_buff *skb,          \
++                                               struct netlink_callback *cb)  \
++      {                                                                      \
++              return res_get_common_dumpit(skb, cb, type);                   \
++      }                                                                      \
++      static int nldev_res_get_##name##_doit(struct sk_buff *skb,            \
++                                             struct nlmsghdr *nlh)           \
++      {                                                                      \
++              return res_get_common_doit(skb, nlh, type);            \
++      }
++#endif
+ RES_GET_FUNCS(qp, RDMA_RESTRACK_QP);
+ RES_GET_FUNCS(cm_id, RDMA_RESTRACK_CM_ID);
+@@ -1450,8 +1675,12 @@ void rdma_link_unregister(struct rdma_link_ops *ops)
+ }
+ EXPORT_SYMBOL(rdma_link_unregister);
++#ifdef HAVE_NETLINK_EXT_ACK
+ static int nldev_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
+                         struct netlink_ext_ack *extack)
++#else
++static int nldev_newlink(struct sk_buff *skb, struct nlmsghdr *nlh)
++#endif
+ {
+       struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+       char ibdev_name[IB_DEVICE_NAME_MAX];
+@@ -1461,8 +1690,16 @@ static int nldev_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
+       char type[IFNAMSIZ];
+       int err;
++#ifdef HAVE_NLMSG_PARSE_DEPRECATED
+       err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
++#else
++      err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
++#endif
++#ifdef HAVE_NETLINK_EXT_ACK
+                                    nldev_policy, extack);
++#else
++                                   nldev_policy, NULL);
++#endif
+       if (err || !tb[RDMA_NLDEV_ATTR_DEV_NAME] ||
+           !tb[RDMA_NLDEV_ATTR_LINK_TYPE] || !tb[RDMA_NLDEV_ATTR_NDEV_NAME])
+               return -EINVAL;
+@@ -1497,16 +1734,28 @@ static int nldev_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
+       return err;
+ }
++#ifdef HAVE_NETLINK_EXT_ACK
+ static int nldev_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
+                         struct netlink_ext_ack *extack)
++#else
++static int nldev_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
++#endif
+ {
+       struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+       struct ib_device *device;
+       u32 index;
+       int err;
++#ifdef HAVE_NLMSG_PARSE_DEPRECATED
+       err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
++#else
++      err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
++#endif
++#ifdef HAVE_NETLINK_EXT_ACK
+                                    nldev_policy, extack);
++#else
++                                   nldev_policy, NULL);
++#endif
+       if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
+               return -EINVAL;
+@@ -1524,8 +1773,12 @@ static int nldev_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
+       return 0;
+ }
++#ifdef HAVE_NETLINK_EXT_ACK
+ static int nldev_get_chardev(struct sk_buff *skb, struct nlmsghdr *nlh,
+                            struct netlink_ext_ack *extack)
++#else
++static int nldev_get_chardev(struct sk_buff *skb, struct nlmsghdr *nlh)
++#endif
+ {
+       struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+       char client_name[RDMA_NLDEV_ATTR_CHARDEV_TYPE_SIZE];
+@@ -1535,8 +1788,13 @@ static int nldev_get_chardev(struct sk_buff *skb, struct nlmsghdr *nlh,
+       u32 index;
+       int err;
++#ifdef HAVE_NETLINK_EXT_ACK
+       err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy,
+                         extack);
++#else
++      err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy,
++                        NULL);
++#endif
+       if (err || !tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE])
+               return -EINVAL;
+@@ -1608,15 +1866,23 @@ out_put:
+       return err;
+ }
++#ifdef HAVE_NETLINK_EXT_ACK
+ static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+                             struct netlink_ext_ack *extack)
++#else
++static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
++#endif
+ {
+       struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+       struct sk_buff *msg;
+       int err;
+       err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
++#ifdef HAVE_NETLINK_EXT_ACK
+                         nldev_policy, extack);
++#else
++                        nldev_policy, NULL);
++#endif
+       if (err)
+               return err;
+@@ -1639,15 +1905,23 @@ static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+       return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+ }
++#ifdef HAVE_NETLINK_EXT_ACK
+ static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+                                 struct netlink_ext_ack *extack)
++#else
++static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
++#endif
+ {
+       struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+       u8 enable;
+       int err;
+       err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
++#ifdef HAVE_NETLINK_EXT_ACK
+                         nldev_policy, extack);
++#else
++                        nldev_policy, NULL);
++#endif
+       if (err || !tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE])
+               return -EINVAL;
+@@ -1660,8 +1934,12 @@ static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+       return err;
+ }
++#ifdef HAVE_NETLINK_EXT_ACK
+ static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+                              struct netlink_ext_ack *extack)
++#else
++static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
++#endif
+ {
+       u32 index, port, mode, mask = 0, qpn, cntn = 0;
+       struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+@@ -1670,7 +1948,11 @@ static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+       int ret;
+       ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
++#ifdef HAVE_NETLINK_EXT_ACK
+                         nldev_policy, extack);
++#else
++                        nldev_policy, NULL);
++#endif
+       /* Currently only counter for QP is supported */
+       if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES] ||
+           !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
+@@ -1745,8 +2027,12 @@ err:
+       return ret;
+ }
++#ifdef HAVE_NETLINK_EXT_ACK
+ static int nldev_stat_del_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+                              struct netlink_ext_ack *extack)
++#else
++static int nldev_stat_del_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
++#endif
+ {
+       struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+       struct ib_device *device;
+@@ -1755,7 +2041,11 @@ static int nldev_stat_del_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+       int ret;
+       ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
++#ifdef HAVE_NETLINK_EXT_ACK
+                         nldev_policy, extack);
++#else
++                        nldev_policy, NULL);
++#endif
+       if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES] ||
+           !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX] ||
+           !tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID] ||
+@@ -1815,7 +2105,9 @@ err:
+ static int stat_get_doit_default_counter(struct sk_buff *skb,
+                                        struct nlmsghdr *nlh,
++#ifdef HAVE_NETLINK_EXT_ACK
+                                        struct netlink_ext_ack *extack,
++#endif
+                                        struct nlattr *tb[])
+ {
+       struct rdma_hw_stats *stats;
+@@ -1907,7 +2199,11 @@ err:
+ }
+ static int stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh,
++#ifdef HAVE_NETLINK_EXT_ACK
+                           struct netlink_ext_ack *extack, struct nlattr *tb[])
++#else
++                          struct nlattr *tb[])
++#endif
+ {
+       static enum rdma_nl_counter_mode mode;
+@@ -1918,7 +2214,11 @@ static int stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh,
+       int ret;
+       if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID])
++#ifdef HAVE_NETLINK_EXT_ACK
+               return nldev_res_get_counter_doit(skb, nlh, extack);
++#else
++              return nldev_res_get_counter_doit(skb, nlh);
++#endif
+       if (!tb[RDMA_NLDEV_ATTR_STAT_MODE] ||
+           !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
+@@ -1970,23 +2270,39 @@ err:
+       return ret;
+ }
++#ifdef HAVE_NETLINK_EXT_ACK
+ static int nldev_stat_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+                              struct netlink_ext_ack *extack)
++#else
++static int nldev_stat_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
++#endif
+ {
+       struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+       int ret;
+       ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
++#ifdef HAVE_NETLINK_EXT_ACK
+                         nldev_policy, extack);
++#else
++                        nldev_policy, NULL);
++#endif
+       if (ret)
+               return -EINVAL;
+       if (!tb[RDMA_NLDEV_ATTR_STAT_RES])
++#ifdef HAVE_NETLINK_EXT_ACK
+               return stat_get_doit_default_counter(skb, nlh, extack, tb);
++#else
++              return stat_get_doit_default_counter(skb, nlh, tb);
++#endif
+       switch (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES])) {
+       case RDMA_NLDEV_ATTR_RES_QP:
++#ifdef HAVE_NETLINK_EXT_ACK
+               ret = stat_get_doit_qp(skb, nlh, extack, tb);
++#else
++              ret = stat_get_doit_qp(skb, nlh, tb);
++#endif
+               break;
+       default:
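The nldev hunks above repeat two independent checks in every handler: HAVE_NETLINK_EXT_ACK picks the doit signature (struct netlink_ext_ack landed in v4.12), and HAVE_NLMSG_PARSE_DEPRECATED picks the parser name (the v5.2 rename). A minimal sketch of centralizing both in a compat header so each handler is written once; compat_nlmsg_parse, COMPAT_EXTACK_ARG, COMPAT_EXTACK and nldev_example_doit are illustrative names, not part of this patch:

#ifdef HAVE_NLMSG_PARSE_DEPRECATED
#define compat_nlmsg_parse nlmsg_parse_deprecated
#else
#define compat_nlmsg_parse nlmsg_parse
#endif

#ifdef HAVE_NETLINK_EXT_ACK
#define COMPAT_EXTACK_ARG , struct netlink_ext_ack *extack
#define COMPAT_EXTACK     extack
#else
#define COMPAT_EXTACK_ARG
#define COMPAT_EXTACK     NULL
#endif

/* one definition then serves every target kernel */
static int nldev_example_doit(struct sk_buff *skb,
			      struct nlmsghdr *nlh COMPAT_EXTACK_ARG)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];

	return compat_nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
				  nldev_policy, COMPAT_EXTACK);
}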
+diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
+index xxxxxxx..xxxxxxx 100644
+--- a/drivers/infiniband/core/rdma_core.c
++++ b/drivers/infiniband/core/rdma_core.c
+@@ -79,7 +79,13 @@ static int uverbs_try_lock_object(struct ib_uobject *uobj,
+        */
+       switch (mode) {
+       case UVERBS_LOOKUP_READ:
++#ifdef HAVE_ATOMIC_FETCH_ADD_UNLESS
+               return atomic_fetch_add_unless(&uobj->usecnt, 1, -1) == -1 ?
++#elif defined(HAVE___ATOMIC_ADD_UNLESS)
++              return __atomic_add_unless(&uobj->usecnt, 1, -1) == -1 ?
++#else
++              return atomic_add_unless(&uobj->usecnt, 1, -1) == -1 ?
++#endif
+                       -EBUSY : 0;
+       case UVERBS_LOOKUP_WRITE:
+               /* lock is exclusive */
+@@ -297,13 +303,50 @@ static struct ib_uobject *alloc_uobj(struct ib_uverbs_file *ufile,
+ static int idr_add_uobj(struct ib_uobject *uobj)
+ {
++      int ret;
+        /*
+         * We start with allocating an idr pointing to NULL. This represents an
+         * object which isn't initialized yet. We'll replace it later on with
+         * the real object once we commit.
+         */
++#ifdef HAVE_XARRAY
+       return xa_alloc(&uobj->ufile->idr, &uobj->id, NULL, xa_limit_32b,
+                       GFP_KERNEL);
++#elif defined(HAVE_IDR_ALLOC)
++      idr_preload(GFP_KERNEL);
++      spin_lock(&uobj->ufile->idr_lock);
++
++      /*
++      /*
++       * We start with allocating an idr pointing to NULL. This represents
++       * an object which isn't initialized yet. We'll replace it later on
++       * with the real object once we commit.
++       */
++      ret = idr_alloc(&uobj->ufile->idr, NULL, 0,
++                      min_t(unsigned long, U32_MAX - 1, INT_MAX), GFP_NOWAIT);
++      if (ret >= 0)
++              uobj->id = ret;
++
++      spin_unlock(&uobj->ufile->idr_lock);
++      idr_preload_end();
++
++      return ret < 0 ? ret : 0;
++#else
++retry:
++      if (!idr_pre_get(&uobj->ufile->idr, GFP_KERNEL))
++              return -ENOMEM;
++
++      spin_lock(&uobj->ufile->idr_lock);
++      ret = idr_get_new(&uobj->ufile->idr, NULL, &uobj->id);
++      spin_unlock(&uobj->ufile->idr_lock);
++
++      if (ret == -EAGAIN)
++              goto retry;
++
++      return ret;
++
++#endif
+ }
+ /* Returns the ib_uobject or an error. The caller should check for IS_ERR. */
+@@ -313,11 +356,13 @@ lookup_get_idr_uobject(const struct uverbs_api_object *obj,
+                      enum rdma_lookup_mode mode)
+ {
+       struct ib_uobject *uobj;
++      unsigned long idrno = id;
+       if (id < 0 || id > ULONG_MAX)
+               return ERR_PTR(-EINVAL);
+       rcu_read_lock();
++#ifdef HAVE_XARRAY
+       /*
+        * The idr_find is guaranteed to return a pointer to something that
+        * isn't freed yet, or NULL, as the free after idr_remove goes through
+@@ -329,6 +374,27 @@ lookup_get_idr_uobject(const struct uverbs_api_object *obj,
+               uobj = ERR_PTR(-ENOENT);
+       rcu_read_unlock();
+       return uobj;
++#else
++      /* object won't be released as we're protected in rcu */
++      uobj = idr_find(&ufile->idr, idrno);
++      if (!uobj) {
++              uobj = ERR_PTR(-ENOENT);
++              goto free;
++      }
++
++      /*
++       * The idr_find is guaranteed to return a pointer to something that
++       * isn't freed yet, or NULL, as the free after idr_remove goes through
++       * kfree_rcu(). However the object may still have been released and
++       * kfree() could be called at any time.
++       */
++      if (!kref_get_unless_zero(&uobj->ref))
++              uobj = ERR_PTR(-ENOENT);
++
++free:
++      rcu_read_unlock();
++      return uobj;
++#endif /* HAVE_XARRAY */
+ }
+ static struct ib_uobject *
+@@ -437,15 +503,25 @@ alloc_begin_idr_uobject(const struct uverbs_api_object *obj,
+       if (ret)
+               goto uobj_put;
++#ifdef HAVE_CGROUP_RDMA_H
+       ret = ib_rdmacg_try_charge(&uobj->cg_obj, uobj->context->device,
+                                  RDMACG_RESOURCE_HCA_OBJECT);
+       if (ret)
+               goto remove;
++#endif
+       return uobj;
++#ifdef HAVE_CGROUP_RDMA_H
+ remove:
++#ifdef HAVE_XARRAY
+       xa_erase(&ufile->idr, uobj->id);
++#else
++      spin_lock(&ufile->idr_lock);
++      idr_remove(&ufile->idr, uobj->id);
++      spin_unlock(&ufile->idr_lock);
++#endif
++#endif
+ uobj_put:
+       uverbs_uobject_put(uobj);
+       return ERR_PTR(ret);
+@@ -503,10 +579,17 @@ struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj,
+ static void alloc_abort_idr_uobject(struct ib_uobject *uobj)
+ {
++#ifdef HAVE_CGROUP_RDMA_H
+       ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device,
+                          RDMACG_RESOURCE_HCA_OBJECT);
+-
++#endif
++#ifdef HAVE_XARRAY
+       xa_erase(&uobj->ufile->idr, uobj->id);
++#else
++      spin_lock(&uobj->ufile->idr_lock);
++      idr_remove(&uobj->ufile->idr, uobj->id);
++      spin_unlock(&uobj->ufile->idr_lock);
++#endif
+ }
+ static int __must_check destroy_hw_idr_uobject(struct ib_uobject *uobj,
+@@ -529,15 +612,22 @@ static int __must_check destroy_hw_idr_uobject(struct ib_uobject *uobj,
+       if (why == RDMA_REMOVE_ABORT)
+               return 0;
++#ifdef HAVE_CGROUP_RDMA_H
+       ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device,
+                          RDMACG_RESOURCE_HCA_OBJECT);
+-
++#endif
+       return 0;
+ }
+ static void remove_handle_idr_uobject(struct ib_uobject *uobj)
+ {
++#ifdef HAVE_XARRAY
+       xa_erase(&uobj->ufile->idr, uobj->id);
++#else
++      spin_lock(&uobj->ufile->idr_lock);
++      idr_remove(&uobj->ufile->idr, uobj->id);
++      spin_unlock(&uobj->ufile->idr_lock);
++#endif
+       /* Matches the kref in alloc_commit_idr_uobject */
+       uverbs_uobject_put(uobj);
+ }
+@@ -568,6 +658,7 @@ static void remove_handle_fd_uobject(struct ib_uobject *uobj)
+ static int alloc_commit_idr_uobject(struct ib_uobject *uobj)
+ {
+       struct ib_uverbs_file *ufile = uobj->ufile;
++#ifdef HAVE_XARRAY
+       void *old;
+       /*
+@@ -579,7 +670,18 @@ static int alloc_commit_idr_uobject(struct ib_uobject *uobj)
+        */
+       old = xa_store(&ufile->idr, uobj->id, uobj, GFP_KERNEL);
+       WARN_ON(old != NULL);
+-
++#else
++      spin_lock(&ufile->idr_lock);
++      /*
++       * We already allocated this IDR with a NULL object, so
++       * this shouldn't fail.
++       *
++       * NOTE: Once we set the IDR we lose ownership of our kref on uobj.
++       * It will be put by remove_commit_idr_uobject().
++       */
++      WARN_ON(idr_replace(&ufile->idr, uobj, uobj->id));
++      spin_unlock(&ufile->idr_lock);
++#endif
+       return 0;
+ }
+@@ -712,13 +814,22 @@ void rdma_lookup_put_uobject(struct ib_uobject *uobj,
+ void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile)
+ {
++#ifdef HAVE_XARRAY
+       xa_init_flags(&ufile->idr, XA_FLAGS_ALLOC);
++#else
++      spin_lock_init(&ufile->idr_lock);
++      idr_init(&ufile->idr);
++#endif
+ }
+ void release_ufile_idr_uobject(struct ib_uverbs_file *ufile)
+ {
+       struct ib_uobject *entry;
+-      unsigned long id;
++#ifdef HAVE_XARRAY
++      unsigned long id;
++#else
++      int id;
++#endif
+       /*
+        * At this point uverbs_cleanup_ufile() is guaranteed to have run, and
+@@ -728,12 +839,22 @@ void release_ufile_idr_uobject(struct ib_uverbs_file *ufile)
+        *
+        * This is an optimized equivalent to remove_handle_idr_uobject
+        */
++
++#ifdef HAVE_XARRAY
+       xa_for_each(&ufile->idr, id, entry) {
+               WARN_ON(entry->object);
+               uverbs_uobject_put(entry);
+       }
+       xa_destroy(&ufile->idr);
++#else
++      compat_idr_for_each_entry(&ufile->idr, entry, id) {
++              WARN_ON(entry->object);
++              uverbs_uobject_put(entry);
++      }
++
++      idr_destroy(&ufile->idr);
++#endif
+ }
+ const struct uverbs_obj_type_class uverbs_idr_class = {
+@@ -811,8 +932,10 @@ static void ufile_destroy_ucontext(struct ib_uverbs_file *ufile,
+                       ib_dev->ops.disassociate_ucontext(ucontext);
+       }
++#ifdef HAVE_CGROUP_RDMA_H
+       ib_rdmacg_uncharge(&ucontext->cg_obj, ib_dev,
+                          RDMACG_RESOURCE_HCA_HANDLE);
++#endif
+       rdma_restrack_del(&ucontext->res);
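Every xa_alloc()/xa_erase() site in rdma_core.c above carries the same three-branch fallback. Assuming only XArray kernels (v4.20+) and idr_alloc() kernels (v3.9+) matter, the allocation side reduces to one helper; compat_table and compat_table_alloc() are illustrative names, and the sketch mirrors the idr_preload()/GFP_NOWAIT idiom used in idr_add_uobj() above:

#ifdef HAVE_XARRAY
static DEFINE_XARRAY_ALLOC(compat_table);

static int compat_table_alloc(void *entry, u32 *id)
{
	return xa_alloc(&compat_table, id, entry, xa_limit_32b, GFP_KERNEL);
}
#else
static DEFINE_IDR(compat_table);
static DEFINE_SPINLOCK(compat_table_lock);

static int compat_table_alloc(void *entry, u32 *id)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(&compat_table_lock);
	ret = idr_alloc(&compat_table, entry, 0, 0, GFP_NOWAIT);
	spin_unlock(&compat_table_lock);
	idr_preload_end();

	if (ret < 0)
		return ret;
	*id = ret;
	return 0;
}
#endif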
+diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
+index xxxxxxx..xxxxxxx 100644
+--- a/drivers/infiniband/core/restrack.c
++++ b/drivers/infiniband/core/restrack.c
+@@ -14,6 +14,10 @@
+ #include "cma_priv.h"
+ #include "restrack.h"
++#ifndef CUT_HERE
++#define CUT_HERE               "------------[ cut here ]------------\n"
++#endif
++
+ /**
+  * rdma_restrack_init() - initialize and allocate resource tracking
+  * @dev:  IB device
+diff --git a/drivers/infiniband/core/restrack.h b/drivers/infiniband/core/restrack.h
+index xxxxxxx..xxxxxxx 100644
+--- a/drivers/infiniband/core/restrack.h
++++ b/drivers/infiniband/core/restrack.h
+@@ -8,11 +8,26 @@
+ #include <linux/mutex.h>
++#ifndef HAVE_XARRAY
++#define RDMA_RESTRACK_HASH_BITS 8
++#endif
++
+ /**
+  * struct rdma_restrack_root - main resource tracking management
+  * entity, per-device
+  */
+ struct rdma_restrack_root {
++#ifndef HAVE_XARRAY
++      /**
++       * @rwsem: Read/write lock to protect the lists
++       */
++      struct rw_semaphore     rwsem;
++      /**
++       * @hash: global database for all resources per-device
++       */
++      DECLARE_HASHTABLE(hash, RDMA_RESTRACK_HASH_BITS);
++#endif
+       /**
+        * @xa: Array of XArray structure to hold restrack entries.
+        */
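On pre-XArray kernels the per-type databases above collapse into a single hashtable keyed by the rdma_restrack_type value, guarded by the rwsem; this is what the hash_for_each_possible() walk in the nldev.c dumpit earlier iterates. A sketch of the matching insertion, assuming the entry embeds a struct hlist_node named node as in this backport; compat_restrack_add() is an illustrative name:

static void compat_restrack_add(struct rdma_restrack_root *res,
				struct rdma_restrack_entry *e,
				enum rdma_restrack_type type)
{
	down_write(&res->rwsem);
	hash_add(res->hash, &e->node, type);	/* key == resource type */
	up_write(&res->rwsem);
}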
+diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
+index xxxxxxx..xxxxxxx 100644
+--- a/drivers/infiniband/core/roce_gid_mgmt.c
++++ b/drivers/infiniband/core/roce_gid_mgmt.c
+@@ -130,12 +130,17 @@ static enum bonding_slave_state is_eth_active_slave_of_bonding_rcu(struct net_de
+                                                                  struct net_device *upper)
+ {
+       if (upper && netif_is_bond_master(upper)) {
++#ifdef HAVE_BONDING_H
+               struct net_device *pdev =
+                       bond_option_active_slave_get_rcu(netdev_priv(upper));
+               if (pdev)
+                       return dev == pdev ? BONDING_SLAVE_STATE_ACTIVE :
+                               BONDING_SLAVE_STATE_INACTIVE;
++#else
++      return memcmp(upper->dev_addr, dev->dev_addr, ETH_ALEN) ?
++              BONDING_SLAVE_STATE_INACTIVE : BONDING_SLAVE_STATE_ACTIVE;
++#endif
+       }
+       return BONDING_SLAVE_STATE_NA;
+@@ -481,7 +486,9 @@ static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev,
+        * our feet
+        */
+       rtnl_lock();
++#ifdef HAVE_NET_RWSEM
+       down_read(&net_rwsem);
++#endif
+       for_each_net(net)
+               for_each_netdev(net, ndev) {
+                       /*
+@@ -497,7 +504,9 @@ static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev,
+                                                        rdma_ndev, ndev))
+                               _add_netdev_ips(ib_dev, port, ndev);
+               }
++#ifdef HAVE_NET_RWSEM
+       up_read(&net_rwsem);
++#endif
+       rtnl_unlock();
+ }
+@@ -526,6 +535,7 @@ static void callback_for_addr_gid_device_scan(struct ib_device *device,
+                         &parsed->gid_attr);
+ }
++#ifdef HAVE_NETDEV_WALK_ALL_UPPER_DEV_RCU
+ struct upper_list {
+       struct list_head list;
+       struct net_device *upper;
+@@ -545,7 +555,9 @@ static int netdev_upper_walk(struct net_device *upper, void *data)
+       return 0;
+ }
++#endif /* HAVE_NETDEV_WALK_ALL_UPPER_DEV_RCU */
++#ifdef HAVE_NETDEV_NOTIFIER_CHANGEUPPER_INFO
+ static void handle_netdev_upper(struct ib_device *ib_dev, u8 port,
+                               void *cookie,
+                               void (*handle_netdev)(struct ib_device *ib_dev,
+@@ -553,12 +565,38 @@ static void handle_netdev_upper(struct ib_device *ib_dev, u8 port,
+                                                     struct net_device *ndev))
+ {
+       struct net_device *ndev = cookie;
++#ifndef HAVE_NETDEV_WALK_ALL_UPPER_DEV_RCU
++      struct upper_list {
++              struct list_head list;
++              struct net_device *upper;
++      };
++      struct net_device *upper;
++#endif /* HAVE_NETDEV_WALK_ALL_UPPER_DEV_RCU */
+       struct upper_list *upper_iter;
+       struct upper_list *upper_temp;
+       LIST_HEAD(upper_list);
+       rcu_read_lock();
++#ifndef HAVE_NETDEV_WALK_ALL_UPPER_DEV_RCU
++      for_each_netdev(dev_net(ndev), upper) {
++              struct upper_list *entry;
++
++              if (!rdma_is_upper_dev_rcu(ndev, upper))
++                      continue;
++
++              entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
++              if (!entry) {
++                      pr_info("roce_gid_mgmt: couldn't allocate entry for upper netdev\n");
++                      continue;
++              }
++
++              list_add_tail(&entry->list, &upper_list);
++              dev_hold(upper);
++              entry->upper = upper;
++      }
++#else /* HAVE_NETDEV_WALK_ALL_UPPER_DEV_RCU */
+       netdev_walk_all_upper_dev_rcu(ndev, netdev_upper_walk, &upper_list);
++#endif /* HAVE_NETDEV_WALK_ALL_UPPER_DEV_RCU */
+       rcu_read_unlock();
+       handle_netdev(ib_dev, port, ndev);
+@@ -588,6 +626,7 @@ static void add_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
+ {
+       handle_netdev_upper(ib_dev, port, cookie, _add_netdev_ips);
+ }
++#endif
+ static void del_netdev_default_ips_join(struct ib_device *ib_dev, u8 port,
+                                       struct net_device *rdma_ndev,
+@@ -662,6 +701,7 @@ static const struct netdev_event_work_cmd add_cmd = {
+       .filter = is_eth_port_of_netdev_filter
+ };
++#ifdef HAVE_NETDEV_NOTIFIER_CHANGEUPPER_INFO
+ static const struct netdev_event_work_cmd add_cmd_upper_ips = {
+       .cb     = add_netdev_upper_ips,
+       .filter = is_eth_port_of_netdev_filter
+@@ -681,6 +721,7 @@ ndev_event_unlink(struct netdev_notifier_changeupper_info *changeupper_info,
+       cmds[0].ndev = changeupper_info->upper_dev;
+       cmds[1] = add_cmd;
+ }
++#endif
+ static const struct netdev_event_work_cmd bonding_default_add_cmd = {
+       .cb     = add_default_gids,
+@@ -734,6 +775,10 @@ static const struct netdev_event_work_cmd add_default_gid_cmd = {
+ static int netdevice_event(struct notifier_block *this, unsigned long event,
+                          void *ptr)
+ {
++#ifndef HAVE_NETDEV_NOTIFIER_CHANGEUPPER_INFO
++      static const struct netdev_event_work_cmd add_cmd = {
++              .cb = add_netdev_ips, .filter = is_eth_port_of_netdev};
++#endif
+       static const struct netdev_event_work_cmd del_cmd = {
+               .cb = del_netdev_ips, .filter = pass_all_filter};
+       static const struct netdev_event_work_cmd
+@@ -747,7 +792,11 @@ static int netdevice_event(struct notifier_block *this, unsigned long event,
+                               .filter = is_eth_port_of_netdev_filter
+                       };
+       static const struct netdev_event_work_cmd bonding_event_ips_del_cmd = {
++#ifdef HAVE_NETDEV_NOTIFIER_CHANGEUPPER_INFO
+               .cb = del_netdev_upper_ips, .filter = upper_device_filter};
++#else
++              .cb = del_netdev_ips, .filter = upper_device_filter};
++#endif
+       struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+       struct netdev_event_work_cmd cmds[ROCE_NETDEV_CALLBACK_SZ] = { {NULL} };
+@@ -757,6 +806,9 @@ static int netdevice_event(struct notifier_block *this, unsigned long event,
+       switch (event) {
+       case NETDEV_REGISTER:
+       case NETDEV_UP:
++#ifndef HAVE_NETDEV_NOTIFIER_CHANGEUPPER_INFO
++      case NETDEV_JOIN:
++#endif
+               cmds[0] = bonding_default_del_cmd_join;
+               cmds[1] = add_default_gid_cmd;
+               cmds[2] = add_cmd;
+@@ -777,18 +829,24 @@ static int netdevice_event(struct notifier_block *this, unsigned long event,
+               }
+               break;
++#ifdef HAVE_NETDEV_NOTIFIER_CHANGEUPPER_INFO
+       case NETDEV_CHANGEUPPER:
+               netdevice_event_changeupper(ndev,
+                       container_of(ptr, struct netdev_notifier_changeupper_info, info),
+                       cmds);
+               break;
++#endif
+       case NETDEV_BONDING_FAILOVER:
+               cmds[0] = bonding_event_ips_del_cmd;
+               /* Add default GIDs of the bond device */
+               cmds[1] = bonding_default_add_cmd;
+               /* Add IP based GIDs of the bond device */
++#ifdef HAVE_NETDEV_NOTIFIER_CHANGEUPPER_INFO
+               cmds[2] = add_cmd_upper_ips;
++#else
++              cmds[2] = add_cmd;
++#endif
+               break;
+       default:
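Without netdev_walk_all_upper_dev_rcu() (v4.9) the fallback above scans every netdev in the namespace and keeps those for which rdma_is_upper_dev_rcu() holds. A condensed sketch of that collection step, reusing the patch's upper_list node type; collect_upper_devs() is an illustrative name, and the caller holds rcu_read_lock() as handle_netdev_upper() does:

static void collect_upper_devs(struct net_device *ndev,
			       struct list_head *head)
{
	struct net_device *upper;

	for_each_netdev(dev_net(ndev), upper) {
		struct upper_list *entry;

		if (!rdma_is_upper_dev_rcu(ndev, upper))
			continue;

		entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
		if (!entry)
			continue;	/* best effort, as in the patch */

		dev_hold(upper);	/* keep it alive past the RCU section */
		entry->upper = upper;
		list_add_tail(&entry->list, head);
	}
}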
+diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
+index xxxxxxx..xxxxxxx 100644
+--- a/drivers/infiniband/core/rw.c
++++ b/drivers/infiniband/core/rw.c
+@@ -4,7 +4,9 @@
+  */
+ #include <linux/moduleparam.h>
+ #include <linux/slab.h>
++#ifdef HAVE_PCI_P2PDMA_H
+ #include <linux/pci-p2pdma.h>
++#endif
+ #include <rdma/mr_pool.h>
+ #include <rdma/rw.h>
+@@ -290,9 +292,11 @@ int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
+       struct ib_device *dev = qp->pd->device;
+       int ret;
++#ifdef HAVE_PCI_P2PDMA_H
+       if (is_pci_p2pdma_page(sg_page(sg)))
+               ret = pci_p2pdma_map_sg(dev->dma_device, sg, sg_cnt, dir);
+       else
++#endif
+               ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);
+       if (!ret)
+@@ -584,7 +588,9 @@ void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
+       }
+       /* P2PDMA contexts do not need to be unmapped */
++#ifdef HAVE_PCI_P2PDMA_H
+       if (!is_pci_p2pdma_page(sg_page(sg)))
++#endif
+               ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
+ }
+ EXPORT_SYMBOL(rdma_rw_ctx_destroy);
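The two is_pci_p2pdma_page() guards above are the whole P2PDMA story for rw.c: when linux/pci-p2pdma.h is absent, every SGL is treated as ordinary host memory. Folded into one helper it reads as below; compat_rw_map_sg() is an illustrative name:

static int compat_rw_map_sg(struct ib_device *dev, struct scatterlist *sg,
			    u32 sg_cnt, enum dma_data_direction dir)
{
#ifdef HAVE_PCI_P2PDMA_H
	/* peer-to-peer pages take the PCI P2PDMA mapping path */
	if (is_pci_p2pdma_page(sg_page(sg)))
		return pci_p2pdma_map_sg(dev->dma_device, sg, sg_cnt, dir);
#endif
	return ib_dma_map_sg(dev, sg, sg_cnt, dir);
}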
+diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
+index xxxxxxx..xxxxxxx 100644
+--- a/drivers/infiniband/core/sa_query.c
++++ b/drivers/infiniband/core/sa_query.c
+@@ -186,6 +186,12 @@ static struct ib_client sa_client = {
+ static DEFINE_XARRAY_FLAGS(queries, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
+ static DEFINE_SPINLOCK(tid_lock);
++#ifndef HAVE_XARRAY
++#ifdef HAVE_IDR_ALLOC
++static DEFINE_SPINLOCK(idr_lock);
++static DEFINE_IDR(query_idr);
++#endif
++#endif
+ static u32 tid;
+ #define PATH_REC_FIELD(field) \
+@@ -1012,9 +1018,15 @@ static void ib_nl_request_timeout(struct work_struct *work)
+ }
+ int ib_nl_handle_set_timeout(struct sk_buff *skb,
++#ifdef HAVE_NETLINK_EXT_ACK
+                            struct nlmsghdr *nlh,
+                            struct netlink_ext_ack *extack)
+ {
++#else
++                           struct netlink_callback *cb)
++{
++      const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
++#endif
+       int timeout, delta, abs_delta;
+       const struct nlattr *attr;
+       unsigned long flags;
+@@ -1024,11 +1036,24 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb,
+       int ret;
+       if (!(nlh->nlmsg_flags & NLM_F_REQUEST) ||
++#ifdef HAVE_NETLINK_CAPABLE
++#ifdef HAVE_NETLINK_SKB_PARMS_SK
+           !(NETLINK_CB(skb).sk))
++#else
++          !(NETLINK_CB(skb).ssk))
++#endif
++#else
++          sock_net(skb->sk) != &init_net)
++#endif
+               return -EPERM;
++#ifdef HAVE_NLA_PARSE_DEPRECATED
+       ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
+                                  nlmsg_len(nlh), ib_nl_policy, NULL);
++#else
++      ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
++                              nlmsg_len(nlh), ib_nl_policy, NULL);
++#endif /*HAVE_NLA_PARSE_DEPRECATED*/
+       attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT];
+       if (ret || !attr)
+               goto settimeout_out;
+@@ -1079,8 +1104,13 @@ static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
+       if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
+               return 0;
++#ifdef HAVE_NLA_PARSE_DEPRECATED
+       ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
+                                  nlmsg_len(nlh), ib_nl_policy, NULL);
++#else
++      ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
++                              nlmsg_len(nlh), ib_nl_policy, NULL);
++#endif
+       if (ret)
+               return 0;
+@@ -1088,9 +1118,15 @@ static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
+ }
+ int ib_nl_handle_resolve_resp(struct sk_buff *skb,
++#ifdef HAVE_NETLINK_EXT_ACK
+                             struct nlmsghdr *nlh,
+                             struct netlink_ext_ack *extack)
+ {
++#else
++                            struct netlink_callback *cb)
++{
++      const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
++#endif
+       unsigned long flags;
+       struct ib_sa_query *query;
+       struct ib_mad_send_buf *send_buf;
+@@ -1099,7 +1135,15 @@ int ib_nl_handle_resolve_resp(struct sk_buff *skb,
+       int ret;
+       if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
++#ifdef HAVE_NETLINK_CAPABLE
++#ifdef HAVE_NETLINK_SKB_PARMS_SK
+           !(NETLINK_CB(skb).sk))
++#else
++          !(NETLINK_CB(skb).ssk))
++#endif
++#else
++          sock_net(skb->sk) != &init_net)
++#endif
+               return -EPERM;
+       spin_lock_irqsave(&ib_nl_request_lock, flags);
+@@ -1365,12 +1409,47 @@ static int send_mad(struct ib_sa_query *query, unsigned long timeout_ms,
+       unsigned long flags;
+       int ret, id;
++#ifdef HAVE_XARRAY
+       xa_lock_irqsave(&queries, flags);
+       ret = __xa_alloc(&queries, &id, query, xa_limit_32b, gfp_mask);
+       xa_unlock_irqrestore(&queries, flags);
+       if (ret < 0)
+               return ret;
+-
++#else /* HAVE_XARRAY */
++
++#ifdef HAVE_IDR_ALLOC
++#ifdef __GFP_WAIT
++      bool preload = !!(gfp_mask & __GFP_WAIT);
++#else
++      bool preload = gfpflags_allow_blocking(gfp_mask);
++#endif
++#endif
++
++#ifdef HAVE_IDR_ALLOC
++      if (preload)
++              idr_preload(gfp_mask);
++      spin_lock_irqsave(&idr_lock, flags);
++
++      id = idr_alloc(&query_idr, query, 0, 0, GFP_NOWAIT);
++
++      spin_unlock_irqrestore(&idr_lock, flags);
++      if (preload)
++              idr_preload_end();
++      if (id < 0)
++              return id;
++#else
++retry:
++      if (!idr_pre_get(&query_idr, gfp_mask))
++              return -ENOMEM;
++      spin_lock_irqsave(&idr_lock, flags);
++      ret = idr_get_new(&query_idr, query, &id);
++      spin_unlock_irqrestore(&idr_lock, flags);
++      if (ret == -EAGAIN)
++              goto retry;
++      if (ret)
++              return ret;
++#endif /*HAVE_IDR_ALLOC*/
++#endif /*HAVE_XARRAY*/
+       query->mad_buf->timeout_ms  = timeout_ms;
+       query->mad_buf->context[0] = query;
+       query->id = id;
+@@ -1386,9 +1465,15 @@ static int send_mad(struct ib_sa_query *query, unsigned long timeout_ms,
+       ret = ib_post_send_mad(query->mad_buf, NULL);
+       if (ret) {
++#ifdef HAVE_XARRAY
+               xa_lock_irqsave(&queries, flags);
+               __xa_erase(&queries, id);
+               xa_unlock_irqrestore(&queries, flags);
++#else
++              spin_lock_irqsave(&idr_lock, flags);
++              idr_remove(&query_idr, id);
++              spin_unlock_irqrestore(&idr_lock, flags);
++#endif /*HAVE_XARRAY*/
+       }
+       /*
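Both SA netlink handlers above open-code the same three-way origin test before doing any work. Collapsed into one predicate it reads as below; compat_nl_request_allowed() is an illustrative name, and the pre-netlink_capable() fallback simply restricts the interface to the initial network namespace, exactly as the patch does:

static bool compat_nl_request_allowed(struct sk_buff *skb)
{
#ifdef HAVE_NETLINK_CAPABLE
#ifdef HAVE_NETLINK_SKB_PARMS_SK
	return NETLINK_CB(skb).sk != NULL;
#else
	return NETLINK_CB(skb).ssk != NULL;
#endif
#else
	return sock_net(skb->sk) == &init_net;
#endif
}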
+diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
+index xxxxxxx..xxxxxxx 100644
+--- a/drivers/infiniband/core/ucma.c
++++ b/drivers/infiniband/core/ucma.c
+@@ -1688,6 +1688,7 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
+       return ret;
+ }
++#ifdef EPOLLIN
+ static __poll_t ucma_poll(struct file *filp, struct poll_table_struct *wait)
+ {
+       struct ucma_file *file = filp->private_data;
+@@ -1700,6 +1701,20 @@ static __poll_t ucma_poll(struct file *filp, struct poll_table_struct *wait)
+       return mask;
+ }
++#else
++static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
++{
++      struct ucma_file *file = filp->private_data;
++      unsigned int mask = 0;
++
++      poll_wait(filp, &file->poll_wait, wait);
++
++      if (!list_empty(&file->event_list))
++              mask = POLLIN | POLLRDNORM;
++
++      return mask;
++}
++#endif
+ /*
+  * ucma_open() does not need the BKL:
+@@ -1732,7 +1747,11 @@ static int ucma_open(struct inode *inode, struct file *filp)
+       filp->private_data = file;
+       file->filp = filp;
++#ifdef HAVE_STREAM_OPEN
+       return stream_open(inode, filp);
++#else
++      return nonseekable_open(inode, filp);
++#endif
+ }
+ static int ucma_close(struct inode *inode, struct file *filp)
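The duplicated poll methods here (and again in user_mad.c and uverbs_main.c below) differ only in return type and flag spelling; #ifdef EPOLLIN doubles as the feature test because __poll_t and the EPOLL* names arrived together in v4.16. A sketch of a typedef-based variant that avoids duplicating the body; compat_poll_t, COMPAT_POLLIN and ucma_poll_common() are illustrative:

#ifdef EPOLLIN
typedef __poll_t compat_poll_t;
#define COMPAT_POLLIN	(EPOLLIN | EPOLLRDNORM)
#else
typedef unsigned int compat_poll_t;
#define COMPAT_POLLIN	(POLLIN | POLLRDNORM)
#endif

static compat_poll_t ucma_poll_common(struct file *filp,
				      struct poll_table_struct *wait)
{
	struct ucma_file *file = filp->private_data;
	compat_poll_t mask = 0;

	poll_wait(filp, &file->poll_wait, wait);

	if (!list_empty(&file->event_list))
		mask = COMPAT_POLLIN;

	return mask;
}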
+diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
+index xxxxxxx..xxxxxxx 100644
+--- a/drivers/infiniband/core/umem.c
++++ b/drivers/infiniband/core/umem.c
+@@ -200,14 +200,22 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
+       struct ib_umem *umem;
+       struct page **page_list;
+       unsigned long lock_limit;
++#ifdef HAVE_PINNED_VM
+       unsigned long new_pinned;
++#endif
+       unsigned long cur_base;
+       struct mm_struct *mm;
+       unsigned long npages;
+       int ret;
++#ifdef HAVE_STRUCT_DMA_ATTRS
++      DEFINE_DMA_ATTRS(dma_attrs);
++#else
+       unsigned long dma_attrs = 0;
++#endif
+       struct scatterlist *sg;
++#ifdef HAVE_GET_USER_PAGES_GUP_FLAGS
+       unsigned int gup_flags = FOLL_WRITE;
++#endif
+       if (!udata)
+               return ERR_PTR(-EIO);
+@@ -218,7 +226,11 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
+               return ERR_PTR(-EIO);
+       if (dmasync)
++#ifdef HAVE_STRUCT_DMA_ATTRS
++              dma_set_attr(DMA_ATTR_WRITE_BARRIER, &dma_attrs);
++#else
+               dma_attrs |= DMA_ATTR_WRITE_BARRIER;
++#endif
+       /*
+        * If the combination of the addr and size requested for this memory
+@@ -275,9 +287,27 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
+       lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
++#ifdef HAVE_PINNED_VM
++#ifdef HAVE_ATOMIC_PINNED_VM
+       new_pinned = atomic64_add_return(npages, &mm->pinned_vm);
+       if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
++#else
++      if (check_add_overflow(mm->pinned_vm, npages, &new_pinned) ||
++          (new_pinned > lock_limit && !capable(CAP_IPC_LOCK))) {
++#endif
++#else
++      current->mm->locked_vm += npages;
++      if ((current->mm->locked_vm > lock_limit) && !capable(CAP_IPC_LOCK)) {
++#endif
++#ifdef HAVE_PINNED_VM
++#ifdef HAVE_ATOMIC_PINNED_VM
+               atomic64_sub(npages, &mm->pinned_vm);
++#else
++              mm->pinned_vm -= npages;
++#endif
++              pr_debug("%s: requested to lock(%lu) while limit is(%lu)\n",
++                     __func__, new_pinned, lock_limit);
++#endif
+               ret = -ENOMEM;
+               goto out;
+       }
+@@ -288,18 +318,46 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
+       if (ret)
+               goto vma;
++#ifdef HAVE_GET_USER_PAGES_GUP_FLAGS
+       if (!umem->writable)
+               gup_flags |= FOLL_FORCE;
++#endif
+       sg = umem->sg_head.sgl;
+       while (npages) {
+               down_read(&mm->mmap_sem);
++#ifdef HAVE_FOLL_LONGTERM
+               ret = get_user_pages(cur_base,
+                                    min_t(unsigned long, npages,
+                                          PAGE_SIZE / sizeof (struct page *)),
+                                    gup_flags | FOLL_LONGTERM,
+                                    page_list, NULL);
++#elif defined(HAVE_GET_USER_PAGES_LONGTERM)
++              ret = get_user_pages_longterm(cur_base,
++                      min_t(unsigned long, npages,
++                      PAGE_SIZE / sizeof (struct page *)),
++                      gup_flags, page_list, NULL);
++#elif defined(HAVE_GET_USER_PAGES_8_PARAMS)
++              ret = get_user_pages(current, current->mm, cur_base,
++                                   min_t(unsigned long, npages,
++                                         PAGE_SIZE / sizeof (struct page *)),
++                                   1, !umem->writable, page_list, NULL);
++#else
++#ifdef HAVE_GET_USER_PAGES_7_PARAMS
++              ret = get_user_pages(current, current->mm, cur_base,
++#else
++              ret = get_user_pages(cur_base,
++#endif
++                                   min_t(unsigned long, npages,
++                                         PAGE_SIZE / sizeof (struct page *)),
++#ifdef HAVE_GET_USER_PAGES_GUP_FLAGS
++                                   gup_flags, page_list, NULL);
++#else
++                                   1, !umem->writable, page_list, NULL);
++#endif
++#endif
++
+               if (ret < 0) {
+                       up_read(&mm->mmap_sem);
+                       goto umem_release;
+@@ -321,7 +379,11 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
+                                 umem->sg_head.sgl,
+                                 umem->sg_nents,
+                                 DMA_BIDIRECTIONAL,
++#ifdef HAVE_STRUCT_DMA_ATTRS
++                                &dma_attrs);
++#else
+                                 dma_attrs);
++#endif
+       if (!umem->nmap) {
+               ret = -ENOMEM;
+@@ -334,7 +396,15 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
+ umem_release:
+       __ib_umem_release(context->device, umem, 0);
+ vma:
++#ifdef HAVE_PINNED_VM
++#ifdef HAVE_ATOMIC_PINNED_VM
+       atomic64_sub(ib_umem_num_pages(umem), &mm->pinned_vm);
++#else
++      mm->pinned_vm -= ib_umem_num_pages(umem);
++#endif
++#else
++      mm->locked_vm -= ib_umem_num_pages(umem);
++#endif
+ out:
+       free_page((unsigned long) page_list);
+ umem_kfree:
+@@ -372,7 +442,15 @@ void ib_umem_release(struct ib_umem *umem)
+       __ib_umem_release(umem->context->device, umem, 1);
++#ifdef HAVE_PINNED_VM
++#ifdef HAVE_ATOMIC_PINNED_VM
+       atomic64_sub(ib_umem_num_pages(umem), &umem->owning_mm->pinned_vm);
++#else
++      umem->owning_mm->pinned_vm -= ib_umem_num_pages(umem);
++#endif /*HAVE_ATOMIC_PINNED_VM*/
++#else
++      umem->owning_mm->locked_vm -= ib_umem_num_pages(umem);
++#endif
+       __ib_umem_release_tail(umem);
+ }
+ EXPORT_SYMBOL(ib_umem_release);
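The get_user_pages() ladder above is the densest part of the umem backport: the call changed arity and flag conventions several times between v3.x and v5.2. A condensed sketch, newest rung first and omitting the distro-specific 7-parameter variant; compat_gup() is an illustrative wrapper name:

static long compat_gup(unsigned long start, unsigned long nr_pages,
		       unsigned int gup_flags, struct page **pages)
{
#if defined(HAVE_FOLL_LONGTERM)
	/* v5.2+: FOLL_LONGTERM folded into plain get_user_pages() */
	return get_user_pages(start, nr_pages, gup_flags | FOLL_LONGTERM,
			      pages, NULL);
#elif defined(HAVE_GET_USER_PAGES_LONGTERM)
	/* v4.15..v5.1 */
	return get_user_pages_longterm(start, nr_pages, gup_flags,
				       pages, NULL);
#elif defined(HAVE_GET_USER_PAGES_8_PARAMS)
	/* older kernels: explicit task/mm plus write and force booleans */
	return get_user_pages(current, current->mm, start, nr_pages,
			      !!(gup_flags & FOLL_WRITE),
			      !!(gup_flags & FOLL_FORCE), pages, NULL);
#else
	return get_user_pages(start, nr_pages, gup_flags, pages, NULL);
#endif
}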
+diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
+index xxxxxxx..xxxxxxx 100644
+--- a/drivers/infiniband/core/user_mad.c
++++ b/drivers/infiniband/core/user_mad.c
+@@ -33,6 +33,9 @@
+  * SOFTWARE.
+  */
++#ifdef pr_fmt
++#undef pr_fmt
++#endif
+ #define pr_fmt(fmt) "user_mad: " fmt
+ #include <linux/module.h>
+@@ -140,7 +143,11 @@ static const dev_t base_issm_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE) +
+ static dev_t dynamic_umad_dev;
+ static dev_t dynamic_issm_dev;
++#ifdef HAVE_IDA_ALLOC_MAX
+ static DEFINE_IDA(umad_ida);
++#else
++static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS);
++#endif
+ static void ib_umad_add_one(struct ib_device *device);
+ static void ib_umad_remove_one(struct ib_device *device, void *client_data);
+@@ -646,6 +653,7 @@ err:
+       return ret;
+ }
++#ifdef EPOLLIN
+ static __poll_t ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
+ {
+       struct ib_umad_file *file = filp->private_data;
+@@ -660,6 +668,22 @@ static __poll_t ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
+       return mask;
+ }
++#else
++static unsigned int ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
++{
++      struct ib_umad_file *file = filp->private_data;
++
++      /* we will always be able to post a MAD send */
++      unsigned int mask = POLLOUT | POLLWRNORM;
++
++      poll_wait(filp, &file->recv_wait, wait);
++
++      if (!list_empty(&file->recv_list))
++              mask |= POLLIN | POLLRDNORM;
++
++      return mask;
++}
++#endif
+ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
+                            int compat_method_mask)
+@@ -1007,7 +1031,11 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
+       list_add_tail(&file->port_list, &port->file_list);
++#ifdef HAVE_STREAM_OPEN
+       stream_open(inode, filp);
++#else
++      nonseekable_open(inode, filp);
++#endif
+ out:
+       mutex_unlock(&port->file_mutex);
+       return ret;
+@@ -1203,11 +1231,17 @@ static struct attribute *umad_class_dev_attrs[] = {
+ };
+ ATTRIBUTE_GROUPS(umad_class_dev);
++#ifndef HAVE_CLASS_GROUPS
++static CLASS_ATTR_STRING(abi_version, S_IRUGO,
++                       __stringify(IB_USER_MAD_ABI_VERSION));
++#endif
++
+ static char *umad_devnode(struct device *dev, umode_t *mode)
+ {
+       return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
+ }
++#ifdef HAVE_CLASS_GROUPS
+ static ssize_t abi_version_show(struct class *class,
+                               struct class_attribute *attr, char *buf)
+ {
+@@ -1220,11 +1254,14 @@ static struct attribute *umad_class_attrs[] = {
+       NULL,
+ };
+ ATTRIBUTE_GROUPS(umad_class);
++#endif
+ static struct class umad_class = {
+       .name           = "infiniband_mad",
+       .devnode        = umad_devnode,
++#ifdef HAVE_CLASS_GROUPS
+       .class_groups   = umad_class_groups,
++#endif
+       .dev_groups     = umad_class_dev_groups,
+ };
+@@ -1257,10 +1294,18 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
+       dev_t base_issm;
+       int ret;
++#ifdef HAVE_IDA_ALLOC_MAX
+       devnum = ida_alloc_max(&umad_ida, IB_UMAD_MAX_PORTS - 1, GFP_KERNEL);
+       if (devnum < 0)
+               return -1;
++#else
++      devnum = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS);
++      if (devnum >= IB_UMAD_MAX_PORTS)
++              return -1;
++#endif
+       port->dev_num = devnum;
++#ifndef HAVE_IDA_ALLOC_MAX
++      set_bit(devnum, dev_map);
++#endif
+       if (devnum >= IB_UMAD_NUM_FIXED_MINOR) {
+               base_umad = dynamic_umad_dev + devnum - IB_UMAD_NUM_FIXED_MINOR;
+               base_issm = dynamic_issm_dev + devnum - IB_UMAD_NUM_FIXED_MINOR;
+@@ -1303,7 +1348,11 @@ err_dev:
+       cdev_device_del(&port->cdev, &port->dev);
+ err_cdev:
+       put_device(&port->dev);
++#ifndef HAVE_IDA_ALLOC_MAX
++      clear_bit(devnum, dev_map);
++#else
+       ida_free(&umad_ida, devnum);
++#endif
+       return ret;
+ }
+@@ -1333,7 +1382,11 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
+       cdev_device_del(&port->sm_cdev, &port->sm_dev);
+       cdev_device_del(&port->cdev, &port->dev);
++#ifndef HAVE_IDA_ALLOC_MAX
++      clear_bit(port->dev_num, dev_map);
++#else
+       ida_free(&umad_ida, port->dev_num);
++#endif
+       /* balances device_initialize() */
+       put_device(&port->sm_dev);
+@@ -1428,6 +1481,14 @@ static int __init ib_umad_init(void)
+               goto out_chrdev;
+       }
++#ifndef HAVE_CLASS_GROUPS
++      ret = class_create_file(&umad_class, &class_attr_abi_version.attr);
++      if (ret) {
++              pr_err("couldn't create abi_version attribute\n");
++              goto out_class;
++      }
++#endif
++
+       ret = ib_register_client(&umad_client);
+       if (ret)
+               goto out_class;
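The devnum allocation above pairs ida_alloc_max() (v4.19) with a static-bitmap fallback; in both branches exhaustion of the minor-number space must abort port setup. A sketch folding the two into one helper; compat_get_devnum() is an illustrative name reusing the patch's umad_ida/dev_map definitions:

static int compat_get_devnum(void)
{
#ifdef HAVE_IDA_ALLOC_MAX
	return ida_alloc_max(&umad_ida, IB_UMAD_MAX_PORTS - 1, GFP_KERNEL);
#else
	int devnum = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS);

	if (devnum >= IB_UMAD_MAX_PORTS)
		return -1;
	set_bit(devnum, dev_map);
	return devnum;
#endif
}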
+diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
+index xxxxxxx..xxxxxxx 100644
+--- a/drivers/infiniband/core/uverbs.h
++++ b/drivers/infiniband/core/uverbs.h
+@@ -162,7 +162,13 @@ struct ib_uverbs_file {
+       struct list_head umaps;
+       struct page *disassociate_page;
++#ifdef HAVE_XARRAY
+       struct xarray           idr;
++#else
++      struct idr              idr;
++      /* spinlock protects write access to idr */
++      spinlock_t              idr_lock;
++#endif
+ };
+ struct ib_uverbs_event {
+diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
+index xxxxxxx..xxxxxxx 100644
+--- a/drivers/infiniband/core/uverbs_cmd.c
++++ b/drivers/infiniband/core/uverbs_cmd.c
+@@ -210,7 +210,9 @@ static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
+       struct ib_uverbs_get_context_resp resp;
+       struct ib_ucontext               *ucontext;
+       struct file                      *filp;
++#ifdef HAVE_CGROUP_RDMA_H
+       struct ib_rdmacg_object          cg_obj;
++#endif
+       struct ib_device *ib_dev;
+       int ret;
+@@ -231,9 +233,11 @@ static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
+               goto err;
+       }
++#ifdef HAVE_CGROUP_RDMA_H
+       ret = ib_rdmacg_try_charge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);
+       if (ret)
+               goto err;
++#endif
+       ucontext = rdma_zalloc_drv_obj(ib_dev, ib_ucontext);
+       if (!ucontext) {
+@@ -245,7 +249,9 @@ static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
+       ucontext->res.type = RDMA_RESTRACK_CTX;
+       ucontext->device = ib_dev;
++#ifdef HAVE_CGROUP_RDMA_H
+       ucontext->cg_obj = cg_obj;
++#endif
+       /* ufile is required when some objects are released */
+       ucontext->ufile = file;
+@@ -286,7 +292,12 @@ static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
+        * Make sure that ib_uverbs_get_ucontext() sees the pointer update
+        * only after all writes to setup the ucontext have completed
+        */
++#ifdef HAVE_SMP_LOAD_ACQUIRE
+       smp_store_release(&file->ucontext, ucontext);
++#else
++      smp_wmb();
++      file->ucontext = ucontext;
++#endif
+       mutex_unlock(&file->ucontext_lock);
+@@ -303,7 +314,9 @@ err_free:
+       kfree(ucontext);
+ err_alloc:
++#ifdef HAVE_CGROUP_RDMA_H
+       ib_rdmacg_uncharge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);
++#endif
+ err:
+       mutex_unlock(&file->ucontext_lock);
+diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
+index xxxxxxx..xxxxxxx 100644
+--- a/drivers/infiniband/core/uverbs_ioctl.c
++++ b/drivers/infiniband/core/uverbs_ioctl.c
+@@ -568,10 +568,16 @@ static int ib_uverbs_cmd_verbs(struct ib_uverbs_file *ufile,
+       if (unlikely(hdr->driver_id != uapi->driver_id))
+               return -EINVAL;
++#ifdef HAVE_RADIX_TREE_ITER_LOOKUP
+       slot = radix_tree_iter_lookup(
+               &uapi->radix, &attrs_iter,
+               uapi_key_obj(hdr->object_id) |
+                       uapi_key_ioctl_method(hdr->method_id));
++#else
++      radix_tree_iter_init(&attrs_iter, uapi_key_obj(hdr->object_id) |
++                                        uapi_key_ioctl_method(hdr->method_id));
++      slot = radix_tree_next_chunk(&uapi->radix, &attrs_iter, RADIX_TREE_ITER_CONTIG);
++#endif
+       if (unlikely(!slot))
+               return -EPROTONOSUPPORT;
+       method_elm = rcu_dereference_protected(*slot, true);
+diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
+index xxxxxxx..xxxxxxx 100644
+--- a/drivers/infiniband/core/uverbs_main.c
++++ b/drivers/infiniband/core/uverbs_main.c
+@@ -74,7 +74,11 @@ enum {
+ static dev_t dynamic_uverbs_dev;
+ static struct class *uverbs_class;
++#ifndef HAVE_IDA_ALLOC_MAX
++static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);
++#else
+ static DEFINE_IDA(uverbs_ida);
++#endif
+ static void ib_uverbs_add_one(struct ib_device *device);
+ static void ib_uverbs_remove_one(struct ib_device *device, void *client_data);
+@@ -298,6 +302,7 @@ static ssize_t ib_uverbs_comp_event_read(struct file *filp, char __user *buf,
+                                   sizeof(struct ib_uverbs_comp_event_desc));
+ }
++#ifdef EPOLLIN
+ static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
+                                        struct file *filp,
+                                        struct poll_table_struct *wait)
+@@ -328,6 +333,38 @@ static __poll_t ib_uverbs_comp_event_poll(struct file *filp,
+       return ib_uverbs_event_poll(&comp_ev_file->ev_queue, filp, wait);
+ }
++#else
++static unsigned int ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
++                                       struct file *filp,
++                                       struct poll_table_struct *wait)
++{
++      unsigned int pollflags = 0;
++
++      poll_wait(filp, &ev_queue->poll_wait, wait);
++
++      spin_lock_irq(&ev_queue->lock);
++      if (!list_empty(&ev_queue->event_list))
++              pollflags = POLLIN | POLLRDNORM;
++      spin_unlock_irq(&ev_queue->lock);
++
++      return pollflags;
++}
++
++static unsigned int ib_uverbs_async_event_poll(struct file *filp,
++                                             struct poll_table_struct *wait)
++{
++      return ib_uverbs_event_poll(filp->private_data, filp, wait);
++}
++
++static unsigned int ib_uverbs_comp_event_poll(struct file *filp,
++                                            struct poll_table_struct *wait)
++{
++      struct ib_uverbs_completion_event_file *comp_ev_file =
++              filp->private_data;
++
++      return ib_uverbs_event_poll(&comp_ev_file->ev_queue, filp, wait);
++}
++#endif
+ static int ib_uverbs_async_event_fasync(int fd, struct file *filp, int on)
+ {
+@@ -618,8 +655,14 @@ static ssize_t verify_hdr(struct ib_uverbs_cmd_hdr *hdr,
+                       if (hdr->out_words * 8 < method_elm->resp_size)
+                               return -ENOSPC;
++#ifdef HAVE_ACCESS_OK_HAS_3_PARAMS
++                      if (!access_ok(VERIFY_WRITE,
++                                     u64_to_user_ptr(ex_hdr->response),
++                                     (hdr->out_words + ex_hdr->provider_out_words) * 8))
++#else
+                       if (!access_ok(u64_to_user_ptr(ex_hdr->response),
+                                      (hdr->out_words + ex_hdr->provider_out_words) * 8))
++#endif
+                               return -EFAULT;
+               } else {
+                       if (hdr->out_words || ex_hdr->provider_out_words)
+@@ -885,11 +928,20 @@ static void rdma_umap_close(struct vm_area_struct *vma)
+  * Once the zap_vma_ptes has been called touches to the VMA will come here and
+  * we return a dummy writable zero page for all the pfns.
+  */
++#ifdef HAVE_VM_FAULT_T
++#ifdef HAVE_VM_OPERATIONS_STRUCT_HAS_FAULT
+ static vm_fault_t rdma_umap_fault(struct vm_fault *vmf)
++#else
++static int rdma_umap_fault(struct vm_fault *vmf)
++#endif /* HAVE_VM_OPERATIONS_STRUCT_HAS_FAULT */
+ {
+       struct ib_uverbs_file *ufile = vmf->vma->vm_file->private_data;
+       struct rdma_umap_priv *priv = vmf->vma->vm_private_data;
++#ifdef HAVE_VM_FAULT_T
+       vm_fault_t ret = 0;
++#else
++      int ret = 0;
++#endif
+       if (!priv)
+               return VM_FAULT_SIGBUS;
+@@ -920,11 +972,14 @@ static vm_fault_t rdma_umap_fault(struct vm_fault *vmf)
+       return ret;
+ }
++#endif
+ static const struct vm_operations_struct rdma_umap_ops = {
+       .open = rdma_umap_open,
+       .close = rdma_umap_close,
++#ifdef HAVE_VM_FAULT_T
+       .fault = rdma_umap_fault,
++#endif
+ };
+ /*
+@@ -1098,7 +1153,11 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
+       setup_ufile_idr_uobject(file);
++#ifdef HAVE_STREAM_OPEN
+       return stream_open(inode, filp);
++#else
++      return nonseekable_open(inode, filp);
++#endif
+ err_module:
+       module_put(ib_dev->ops.owner);
+@@ -1285,11 +1344,19 @@ static void ib_uverbs_add_one(struct ib_device *device)
+       rcu_assign_pointer(uverbs_dev->ib_dev, device);
+       uverbs_dev->num_comp_vectors = device->num_comp_vectors;
++#ifdef HAVE_IDA_ALLOC_MAX
+       devnum = ida_alloc_max(&uverbs_ida, IB_UVERBS_MAX_DEVICES - 1,
+                              GFP_KERNEL);
+       if (devnum < 0)
++#else
++      devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES);
++      if (devnum >= IB_UVERBS_MAX_DEVICES)
++#endif
+               goto err;
+       uverbs_dev->devnum = devnum;
++#ifndef HAVE_IDA_ALLOC_MAX
++      set_bit(devnum, dev_map);
++#endif
+       if (devnum >= IB_UVERBS_NUM_FIXED_MINOR)
+               base = dynamic_uverbs_dev + devnum - IB_UVERBS_NUM_FIXED_MINOR;
+       else
+@@ -1313,7 +1380,11 @@ static void ib_uverbs_add_one(struct ib_device *device)
+       return;
+ err_uapi:
++#ifndef HAVE_IDA_ALLOC_MAX
++      clear_bit(devnum, dev_map);
++#else
+       ida_free(&uverbs_ida, devnum);
++#endif
+ err:
+       if (atomic_dec_and_test(&uverbs_dev->refcount))
+               ib_uverbs_comp_dev(uverbs_dev);
+@@ -1388,7 +1459,11 @@ static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
+               return;
+       cdev_device_del(&uverbs_dev->cdev, &uverbs_dev->dev);
++#ifndef HAVE_IDA_ALLOC_MAX
++      clear_bit(uverbs_dev->devnum, dev_map);
++#else
+       ida_free(&uverbs_ida, uverbs_dev->devnum);
++#endif
+       if (device->ops.disassociate_ucontext) {
+               /* We disassociate HW resources and immediately return.
+diff --git a/drivers/infiniband/core/uverbs_uapi.c b/drivers/infiniband/core/uverbs_uapi.c
+index xxxxxxx..xxxxxxx 100644
+--- a/drivers/infiniband/core/uverbs_uapi.c
++++ b/drivers/infiniband/core/uverbs_uapi.c
+@@ -478,7 +478,11 @@ static void uapi_remove_range(struct uverbs_api *uapi, u32 start, u32 last)
+               if (iter.index > last)
+                       return;
+               kfree(rcu_dereference_protected(*slot, true));
++#if defined(HAVE_RADIX_TREE_ITER_DELETE) && defined(HAVE_RADIX_TREE_ITER_DELETE_EXPORTED)
+               radix_tree_iter_delete(&uapi->radix, &iter, slot);
++#else
++              radix_tree_delete(&uapi->radix, iter.index);
++#endif
+       }
+ }
+@@ -567,7 +571,11 @@ again:
+                       if (method_elm->disabled) {
+                               kfree(method_elm);
++#if defined(HAVE_RADIX_TREE_ITER_DELETE) && defined(HAVE_RADIX_TREE_ITER_DELETE_EXPORTED)
+                               radix_tree_iter_delete(&uapi->radix, &iter, slot);
++#else
++                              radix_tree_delete(&uapi->radix, iter.index);
++#endif
+                       }
+                       continue;
+               }
+diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
+index xxxxxxx..xxxxxxx 100644
+--- a/drivers/infiniband/core/verbs.c
++++ b/drivers/infiniband/core/verbs.c
+@@ -1743,7 +1743,11 @@ int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width)
+       int rc;
+       u32 netdev_speed;
+       struct net_device *netdev;
++#ifdef HAVE___ETHTOOL_GET_LINK_KSETTINGS
+       struct ethtool_link_ksettings lksettings;
++#else
++      struct ethtool_cmd lksettings;
++#endif
+       if (rdma_port_get_link_layer(dev, port_num) != IB_LINK_LAYER_ETHERNET)
+               return -EINVAL;
+@@ -1753,13 +1757,21 @@ int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width)
+               return -ENODEV;
+       rtnl_lock();
++#ifdef HAVE___ETHTOOL_GET_LINK_KSETTINGS
+       rc = __ethtool_get_link_ksettings(netdev, &lksettings);
++#else
++      rc = __ethtool_get_settings(netdev, &lksettings);
++#endif
+       rtnl_unlock();
+       dev_put(netdev);
+       if (!rc) {
++#ifdef HAVE___ETHTOOL_GET_LINK_KSETTINGS
+               netdev_speed = lksettings.base.speed;
++#else
++              netdev_speed = ethtool_cmd_speed(&lksettings);
++#endif
+       } else {
+               netdev_speed = SPEED_1000;
+               pr_warn("%s speed is unknown, defaulting to %d\n", netdev->name,
+@@ -2417,6 +2429,7 @@ int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
+ }
+ EXPORT_SYMBOL(ib_check_mr_status);
++#ifdef HAVE_NDO_SET_VF_MAC
+ int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
+                        int state)
+ {
+@@ -2456,6 +2469,7 @@ int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
+       return device->ops.set_vf_guid(device, vf, port, guid, type);
+ }
+ EXPORT_SYMBOL(ib_set_vf_guid);
++#endif /* HAVE_NDO_SET_VF_MAC */
+ /**
+  * ib_map_mr_sg_pi() - Map the dma mapped SG lists for PI (protection
+diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
+index xxxxxxx..xxxxxxx 100644
+--- a/include/rdma/ib_addr.h
++++ b/include/rdma/ib_addr.h
+@@ -132,7 +132,11 @@ static inline int rdma_addr_gid_offset(struct rdma_dev_addr *dev_addr)
+       return dev_addr->dev_type == ARPHRD_INFINIBAND ? 4 : 0;
+ }
++#ifdef HAVE_IS_VLAN_DEV_CONST
+ static inline u16 rdma_vlan_dev_vlan_id(const struct net_device *dev)
++#else
++static inline u16 rdma_vlan_dev_vlan_id(struct net_device *dev)
++#endif
+ {
+       return is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 0xffff;
+ }
+@@ -222,15 +226,25 @@ static inline enum ib_mtu iboe_get_mtu(int mtu)
+ static inline int iboe_get_rate(struct net_device *dev)
+ {
++#ifdef HAVE___ETHTOOL_GET_LINK_KSETTINGS
+       struct ethtool_link_ksettings cmd;
++#else
++      struct ethtool_cmd cmd;
++      u32 speed;
++#endif
+       int err;
+       rtnl_lock();
++#ifdef HAVE___ETHTOOL_GET_LINK_KSETTINGS
+       err = __ethtool_get_link_ksettings(dev, &cmd);
++#else
++      err = __ethtool_get_settings(dev, &cmd);
++#endif
+       rtnl_unlock();
+       if (err)
+               return IB_RATE_PORT_CURRENT;
++#ifdef HAVE___ETHTOOL_GET_LINK_KSETTINGS
+       if (cmd.base.speed >= 40000)
+               return IB_RATE_40_GBPS;
+       else if (cmd.base.speed >= 30000)
+@@ -241,6 +255,19 @@ static inline int iboe_get_rate(struct net_device *dev)
+               return IB_RATE_10_GBPS;
+       else
+               return IB_RATE_PORT_CURRENT;
++#else
++      speed = ethtool_cmd_speed(&cmd);
++      if (speed >= 40000)
++              return IB_RATE_40_GBPS;
++      else if (speed >= 30000)
++              return IB_RATE_30_GBPS;
++      else if (speed >= 20000)
++              return IB_RATE_20_GBPS;
++      else if (speed >= 10000)
++              return IB_RATE_10_GBPS;
++      else
++              return IB_RATE_PORT_CURRENT;
++#endif
+ }
+ static inline int rdma_link_local_addr(struct in6_addr *addr)
+@@ -288,7 +315,11 @@ static inline u16 rdma_get_vlan_id(union ib_gid *dgid)
+       return vid < 0x1000 ? vid : 0xffff;
+ }
++#ifdef HAVE_IS_VLAN_DEV_CONST
+ static inline struct net_device *rdma_vlan_dev_real_dev(const struct net_device *dev)
++#else
++static inline struct net_device *rdma_vlan_dev_real_dev(struct net_device *dev)
++#endif
+ {
+       return is_vlan_dev(dev) ? vlan_dev_real_dev(dev) : NULL;
+ }
+diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
+index xxxxxxx..xxxxxxx 100644
+--- a/include/rdma/ib_verbs.h
++++ b/include/rdma/ib_verbs.h
+@@ -46,7 +46,11 @@
+ #include <linux/list.h>
+ #include <linux/rwsem.h>
+ #include <linux/workqueue.h>
++#if defined(HAVE_IRQ_POLL_H) && IS_ENABLED(CONFIG_IRQ_POLL)
+ #include <linux/irq_poll.h>
++#else
++#include <linux/blk-iopoll.h>
++#endif
+ #include <uapi/linux/if_ether.h>
+ #include <net/ipv6.h>
+ #include <net/ip.h>
+@@ -1399,11 +1403,13 @@ enum rdma_remove_reason {
+       RDMA_REMOVE_ABORT,
+ };
++#ifdef HAVE_CGROUP_RDMA_H
+ struct ib_rdmacg_object {
+ #ifdef CONFIG_CGROUP_RDMA
+       struct rdma_cgroup      *cg;            /* owner rdma cgroup */
+ #endif
+ };
++#endif
+ struct ib_ucontext {
+       struct ib_device       *device;
+@@ -1422,7 +1428,9 @@ struct ib_ucontext {
+       struct mutex per_mm_list_lock;
+       struct list_head per_mm_list;
++#ifdef HAVE_CGROUP_RDMA_H
+       struct ib_rdmacg_object cg_obj;
++#endif
+       /*
+        * Implementation details of the RDMA core, don't use in drivers:
+        */
+@@ -1437,7 +1445,9 @@ struct ib_uobject {
+       struct ib_ucontext     *context;        /* associated user context */
+       void                   *object;         /* containing object */
+       struct list_head        list;           /* link to context's list */
++#ifdef HAVE_CGROUP_RDMA_H
+       struct ib_rdmacg_object cg_obj;         /* rdmacg object */
++#endif
+       int                     id;             /* index into kernel idr */
+       struct kref             ref;
+       atomic_t                usecnt;         /* protects exclusive access */
+@@ -1506,7 +1516,11 @@ struct ib_cq {
+       enum ib_poll_context    poll_ctx;
+       struct ib_wc            *wc;
+       union {
++#if defined(HAVE_IRQ_POLL_H) && IS_ENABLED(CONFIG_IRQ_POLL)
+               struct irq_poll         iop;
++#else
++              struct blk_iopoll       iop;
++#endif
+               struct work_struct      work;
+       };
+       struct workqueue_struct *comp_wq;
+@@ -2105,6 +2119,63 @@ struct ib_cache {
+       struct ib_event_handler event_handler;
+ };
++#ifndef HAVE_DEVICE_DMA_OPS
++struct ib_dma_mapping_ops {
++      int             (*mapping_error)(struct ib_device *dev,
++                                       u64 dma_addr);
++      u64             (*map_single)(struct ib_device *dev,
++                                    void *ptr, size_t size,
++                                    enum dma_data_direction direction);
++      void            (*unmap_single)(struct ib_device *dev,
++                                      u64 addr, size_t size,
++                                      enum dma_data_direction direction);
++      u64             (*map_page)(struct ib_device *dev,
++                                  struct page *page, unsigned long offset,
++                                  size_t size,
++                                  enum dma_data_direction direction);
++      void            (*unmap_page)(struct ib_device *dev,
++                                    u64 addr, size_t size,
++                                    enum dma_data_direction direction);
++      int             (*map_sg)(struct ib_device *dev,
++                                struct scatterlist *sg, int nents,
++                                enum dma_data_direction direction);
++      void            (*unmap_sg)(struct ib_device *dev,
++                                  struct scatterlist *sg, int nents,
++                                  enum dma_data_direction direction);
++      int             (*map_sg_attrs)(struct ib_device *dev,
++                                      struct scatterlist *sg, int nents,
++                                      enum dma_data_direction direction,
++#ifdef HAVE_STRUCT_DMA_ATTRS
++                                      struct dma_attrs *attrs);
++#else
++                                      unsigned long attrs);
++#endif
++      void            (*unmap_sg_attrs)(struct ib_device *dev,
++                                        struct scatterlist *sg, int nents,
++                                        enum dma_data_direction direction,
++#ifdef HAVE_STRUCT_DMA_ATTRS
++                                        struct dma_attrs *attrs);
++#else
++                                        unsigned long attrs);
++#endif
++      void            (*sync_single_for_cpu)(struct ib_device *dev,
++                                             u64 dma_handle,
++                                             size_t size,
++                                             enum dma_data_direction dir);
++      void            (*sync_single_for_device)(struct ib_device *dev,
++                                                u64 dma_handle,
++                                                size_t size,
++                                                enum dma_data_direction dir);
++      void            *(*alloc_coherent)(struct ib_device *dev,
++                                         size_t size,
++                                         u64 *dma_handle,
++                                         gfp_t flag);
++      void            (*free_coherent)(struct ib_device *dev,
++                                       size_t size, void *cpu_addr,
++                                       u64 dma_handle);
++};
++#endif
++
+ struct ib_port_immutable {
+       int                           pkey_tbl_len;
+       int                           gid_tbl_len;
+@@ -2396,6 +2467,7 @@ struct ib_device_ops {
+               struct ib_flow_action *action,
+               const struct ib_flow_action_attrs_esp *attr,
+               struct uverbs_attr_bundle *attrs);
++#ifdef HAVE_NDO_SET_VF_MAC
+       int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
+                                int state);
+       int (*get_vf_config)(struct ib_device *device, int vf, u8 port,
+@@ -2404,6 +2476,7 @@ struct ib_device_ops {
+                           struct ifla_vf_stats *stats);
+       int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
+                          int type);
++#endif
+       struct ib_wq *(*create_wq)(struct ib_pd *pd,
+                                  struct ib_wq_init_attr *init_attr,
+                                  struct ib_udata *udata);
+@@ -2594,6 +2667,9 @@ struct ib_device {
+       struct rdma_restrack_root *res;
+       const struct uapi_definition   *driver_def;
++#ifndef HAVE_DEVICE_DMA_OPS
++      struct ib_dma_mapping_ops   *dma_ops;
++#endif
+       /*
+        * Positive refcount indicates that the device is currently
+@@ -3251,14 +3327,18 @@ static inline unsigned int rdma_find_pg_bit(unsigned long addr,
+       return __fls(pgsz);
+ }
++#ifdef HAVE_NDO_SET_VF_MAC
+ int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
+                        int state);
+ int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
+                    struct ifla_vf_info *info);
++#ifdef HAVE_NDO_GET_VF_STATS
+ int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
+                   struct ifla_vf_stats *stats);
++#endif
+ int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
+                  int type);
++#endif
+ int ib_query_pkey(struct ib_device *device,
+                 u8 port_num, u16 index, u16 *pkey);
+@@ -3863,6 +3943,10 @@ static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
+  */
+ static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
+ {
++#ifndef HAVE_DEVICE_DMA_OPS
++      if (dev->dma_ops)
++              return dev->dma_ops->mapping_error(dev, dma_addr);
++#endif
+       return dma_mapping_error(dev->dma_device, dma_addr);
+ }
+@@ -3877,6 +3961,10 @@ static inline u64 ib_dma_map_single(struct ib_device *dev,
+                                   void *cpu_addr, size_t size,
+                                   enum dma_data_direction direction)
+ {
++#ifndef HAVE_DEVICE_DMA_OPS
++      if (dev->dma_ops)
++              return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
++#endif
+       return dma_map_single(dev->dma_device, cpu_addr, size, direction);
+ }
+@@ -3891,6 +3979,11 @@ static inline void ib_dma_unmap_single(struct ib_device *dev,
+                                      u64 addr, size_t size,
+                                      enum dma_data_direction direction)
+ {
++#ifndef HAVE_DEVICE_DMA_OPS
++      if (dev->dma_ops)
++              dev->dma_ops->unmap_single(dev, addr, size, direction);
++      else
++#endif
+       dma_unmap_single(dev->dma_device, addr, size, direction);
+ }
+@@ -3908,6 +4001,10 @@ static inline u64 ib_dma_map_page(struct ib_device *dev,
+                                 size_t size,
+                                        enum dma_data_direction direction)
+ {
++#ifndef HAVE_DEVICE_DMA_OPS
++      if (dev->dma_ops)
++              return dev->dma_ops->map_page(dev, page, offset, size, direction);
++#endif
+       return dma_map_page(dev->dma_device, page, offset, size, direction);
+ }
+@@ -3922,6 +4019,11 @@ static inline void ib_dma_unmap_page(struct ib_device *dev,
+                                    u64 addr, size_t size,
+                                    enum dma_data_direction direction)
+ {
++#ifndef HAVE_DEVICE_DMA_OPS
++      if (dev->dma_ops)
++              dev->dma_ops->unmap_page(dev, addr, size, direction);
++      else
++#endif
+       dma_unmap_page(dev->dma_device, addr, size, direction);
+ }
+@@ -3936,6 +4038,10 @@ static inline int ib_dma_map_sg(struct ib_device *dev,
+                               struct scatterlist *sg, int nents,
+                               enum dma_data_direction direction)
+ {
++#ifndef HAVE_DEVICE_DMA_OPS
++      if (dev->dma_ops)
++              return dev->dma_ops->map_sg(dev, sg, nents, direction);
++#endif
+       return dma_map_sg(dev->dma_device, sg, nents, direction);
+ }
+@@ -3950,14 +4056,28 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev,
+                                  struct scatterlist *sg, int nents,
+                                  enum dma_data_direction direction)
+ {
++#ifndef HAVE_DEVICE_DMA_OPS
++      if (dev->dma_ops)
++              dev->dma_ops->unmap_sg(dev, sg, nents, direction);
++      else
++#endif
+       dma_unmap_sg(dev->dma_device, sg, nents, direction);
+ }
+ static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
+                                     struct scatterlist *sg, int nents,
+                                     enum dma_data_direction direction,
++#ifdef HAVE_STRUCT_DMA_ATTRS
++                                    struct dma_attrs *dma_attrs)
++#else
+                                     unsigned long dma_attrs)
++#endif
+ {
++#ifndef HAVE_DEVICE_DMA_OPS
++      if (dev->dma_ops)
++              return dev->dma_ops->map_sg_attrs(dev, sg, nents, direction,
++                                                dma_attrs);
++#endif
+       return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
+                               dma_attrs);
+ }
+@@ -3965,8 +4085,18 @@ static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
+ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
+                                        struct scatterlist *sg, int nents,
+                                        enum dma_data_direction direction,
++#ifdef HAVE_STRUCT_DMA_ATTRS
++                                      struct dma_attrs *dma_attrs)
++#else
+                                        unsigned long dma_attrs)
++#endif
+ {
++#ifndef HAVE_DEVICE_DMA_OPS
++      if (dev->dma_ops)
++              dev->dma_ops->unmap_sg_attrs(dev, sg, nents, direction,
++                                           dma_attrs);
++      else
++#endif
+       dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
+ }
+@@ -3995,6 +4125,11 @@ static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
+                                             size_t size,
+                                             enum dma_data_direction dir)
+ {
++#ifndef HAVE_DEVICE_DMA_OPS
++      if (dev->dma_ops)
++              dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
++      else
++#endif
+       dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
+ }
+@@ -4010,6 +4145,11 @@ static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
+                                                size_t size,
+                                                enum dma_data_direction dir)
+ {
++#ifndef HAVE_DEVICE_DMA_OPS
++      if (dev->dma_ops)
++              dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
++      else
++#endif
+       dma_sync_single_for_device(dev->dma_device, addr, size, dir);
+ }
+@@ -4025,6 +4165,16 @@ static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
+                                          dma_addr_t *dma_handle,
+                                          gfp_t flag)
+ {
++#ifndef HAVE_DEVICE_DMA_OPS
++      if (dev->dma_ops) {
++              u64 handle;
++              void *ret;
++
++              ret = dev->dma_ops->alloc_coherent(dev, size, &handle, flag);
++              *dma_handle = handle;
++              return ret;
++      }
++#endif
+       return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
+ }
+@@ -4039,6 +4189,11 @@ static inline void ib_dma_free_coherent(struct ib_device *dev,
+                                       size_t size, void *cpu_addr,
+                                       dma_addr_t dma_handle)
+ {
++#ifndef HAVE_DEVICE_DMA_OPS
++      if (dev->dma_ops)
++              dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
++      else
++#endif
+       dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
+ }
+diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h
+index xxxxxxx..xxxxxxx 100644
+--- a/include/rdma/rdma_netlink.h
++++ b/include/rdma/rdma_netlink.h
+@@ -13,8 +13,12 @@ enum {
+ };
+ struct rdma_nl_cbs {
++#ifdef HAVE_NETLINK_EXT_ACK
+       int (*doit)(struct sk_buff *skb, struct nlmsghdr *nlh,
+                   struct netlink_ext_ack *extack);
++#else
++      int (*doit)(struct sk_buff *skb, struct nlmsghdr *nlh);
++#endif
+       int (*dump)(struct sk_buff *skb, struct netlink_callback *nlcb);
+       u8 flags;
+ };
+diff --git a/include/rdma/restrack.h b/include/rdma/restrack.h
+index xxxxxxx..xxxxxxx 100644
+--- a/include/rdma/restrack.h
++++ b/include/rdma/restrack.h
+@@ -10,7 +10,10 @@
+ #include <linux/sched.h>
+ #include <linux/kref.h>
+ #include <linux/completion.h>
++#ifdef HAVE_LINUX_SCHED_TASK_H
+ #include <linux/sched/task.h>
++#endif
++#include <linux/hashtable.h>
+ #include <uapi/rdma/rdma_netlink.h>
+ #include <linux/xarray.h>
+@@ -90,6 +93,12 @@ struct rdma_restrack_entry {
+        * @kern_name: name of owner for the kernel created entities.
+        */
+       const char              *kern_name;
++#ifndef HAVE_XARRAY
++      /**
++       * @node: hash table entry
++       */
++      struct hlist_node       node;
++#endif
+       /**
+        * @type: various objects in restrack database
+        */
+diff --git a/include/rdma/uverbs_ioctl.h b/include/rdma/uverbs_ioctl.h
+index xxxxxxx..xxxxxxx 100644
+--- a/include/rdma/uverbs_ioctl.h
++++ b/include/rdma/uverbs_ioctl.h
+@@ -872,6 +872,9 @@ int uverbs_get_flags32(u32 *to, const struct uverbs_attr_bundle *attrs_bundle,
+                      size_t idx, u64 allowed_bits);
+ int uverbs_copy_to(const struct uverbs_attr_bundle *attrs_bundle, size_t idx,
+                  const void *from, size_t size);
++#ifndef __malloc
++#define __malloc
++#endif
+ __malloc void *_uverbs_alloc(struct uverbs_attr_bundle *bundle, size_t size,
+                            gfp_t flags);
+diff --git a/include/trace/events/ib_mad.h b/include/trace/events/ib_mad.h
+index xxxxxxx..xxxxxxx 100644
+--- a/include/trace/events/ib_mad.h
++++ b/include/trace/events/ib_mad.h
+@@ -14,10 +14,17 @@
+ #include <rdma/ib_mad.h>
+ #ifdef CONFIG_TRACEPOINTS
++#ifdef HAVE_TRACE_EVENT_RAW_IB_MAD_SEND_TEMPLATE
+ struct trace_event_raw_ib_mad_send_template;
+ static void create_mad_addr_info(struct ib_mad_send_wr_private *mad_send_wr,
+                         struct ib_mad_qp_info *qp_info,
+                         struct trace_event_raw_ib_mad_send_template *entry);
++#else
++struct ftrace_raw_ib_mad_send_template;
++static void create_mad_addr_info(struct ib_mad_send_wr_private *mad_send_wr,
++                        struct ib_mad_qp_info *qp_info,
++                        struct ftrace_raw_ib_mad_send_template *entry);
++#endif
+ #endif
+ DECLARE_EVENT_CLASS(ib_mad_send_template,
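
A note on the HAVE_* guards used throughout the backport above: symbols such
as HAVE_IDA_ALLOC_MAX, HAVE_XARRAY and HAVE_STREAM_OPEN are not upstream
kernel macros; they are typically emitted into a generated config header by
the compat build after probing the target kernel. As a rough sketch of the
"macro magic" alternative that the README below recommends over patch hunks
-- the header name and the ida_simple_*() mapping are illustrative
assumptions, not part of this commit -- an ida_alloc_max()/ida_free() shim
for old kernels could look like:

    /* Hypothetical compat shim -- illustrative only, not from this commit. */
    #ifndef COMPAT_LINUX_IDR_H
    #define COMPAT_LINUX_IDR_H

    #include <linux/idr.h>

    #ifndef HAVE_IDA_ALLOC_MAX
    /*
     * Older kernels lack ida_alloc_max()/ida_free(); map them onto the
     * legacy ida_simple_*() helpers so every caller keeps the upstream
     * API instead of carrying a DECLARE_BITMAP() fallback at each call
     * site, as the hunks above do.
     */
    static inline int ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp)
    {
            /* ida_simple_get() takes an exclusive end, hence max + 1 */
            return ida_simple_get(ida, 0, max + 1, gfp);
    }

    static inline void ida_free(struct ida *ida, unsigned int id)
    {
            ida_simple_remove(ida, id);
    }
    #endif /* HAVE_IDA_ALLOC_MAX */

    #endif /* COMPAT_LINUX_IDR_H */

With such a header force-included at build time, the driver sources stay
identical to upstream and no patch hunk is needed for the IDA call sites.
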
diff --git a/patches/README b/patches/README
new file mode 100644 (file)
index 0000000..e11b4d4
--- /dev/null
@@ -0,0 +1,16 @@
+
+compat-rdma patches
+===================
+
+You must have a really good reason to be adding files
+in this directory. Your reasoning should either match the
+explanation already present at the top of each patch file,
+or you should add your own.
+
+We try to avoid having patch files because:
+
+  * It's a pain in the ass to maintain them.
+
+  * Most backport changes can be pulled off through
+    some macro magic or new files which implement
+    the new functionality on old kernels (see the sketch below).
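
As an example of that second bullet -- a hedged sketch, not code from this
tree -- the stream_open() fallback that the uverbs_main.c hunk above
open-codes at its call site could instead be supplied once, compat-header
style, for kernels that predate stream_open():

    /* Hypothetical compat shim -- illustrative only, not from this commit. */
    #include <linux/fs.h>

    #ifndef HAVE_STREAM_OPEN
    /*
     * stream_open() is missing on older kernels; fall back to
     * nonseekable_open(), exactly as the #else branch in the
     * ib_uverbs_open() hunk above does.
     */
    static inline int stream_open(struct inode *inode, struct file *filp)
    {
            return nonseekable_open(inode, filp);
    }
    #endif /* HAVE_STREAM_OPEN */

Force-include such a header (for example with the compiler's -include
option) and the #ifdef HAVE_STREAM_OPEN hunk above becomes unnecessary --
which is exactly why this README discourages adding patch files.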