]> git.openfabrics.org - ~aditr/compat-rdma.git/commitdiff
Added RHEL7.5 support for ib_core and IPoIB
authorVladimir Sokolovsky <vlad@mellanox.com>
Fri, 4 May 2018 21:17:24 +0000 (16:17 -0500)
committerVladimir Sokolovsky <vlad@mellanox.com>
Fri, 8 Jun 2018 21:30:11 +0000 (16:30 -0500)
Signed-off-by: Vladimir Sokolovsky <vlad@mellanox.com>
configure
patches/0002-BACKPORT-ib_core.patch [new file with mode: 0644]
patches/0003-BACKPORT-ipoib.patch [new file with mode: 0644]

index 1e9287e73cd036a88e246b711e2fd75ea63f0f7b..8d0d954fe5647d29ea5336454cdf8a461b58e90d 100755 (executable)
--- a/configure
+++ b/configure
@@ -492,6 +492,7 @@ main()
                                CONFIG_MLX5_INFINIBAND="m"
                                CONFIG_BACKPORT_LRO="m"
                                CONFIG_MLX5_DEBUG="y"
+                               CONFIG_MLX5_ESWITCH="y"
                                add_conf "# Load MLX5 modules" "MLX5_LOAD=yes"
                                ;;
                        --with-mlx4_core-mod)
@@ -504,11 +505,13 @@ main()
                                CONFIG_MLX5_CORE="m"
                                CONFIG_MLX5_CORE_EN="y"
                                CONFIG_MLX5_CORE_EN_DCB="y"
+                               CONFIG_MLX5_ESWITCH="y"
                                ;;
                        --without-mlx5_core-mod)
                                CONFIG_MLX5_CORE=
                                CONFIG_MLX5_CORE_EN=
                                CONFIG_MLX5_CORE_EN_DCB=
+                               CONFIG_MLX5_ESWITCH=
                                ;;
                        --without-mlx4-mod)
                                CONFIG_MLX4_CORE=
@@ -521,6 +524,7 @@ main()
                                CONFIG_MLX5_CORE_EN_DCB=
                                CONFIG_MLX5_INFINIBAND=
                                CONFIG_MLX5_DEBUG=
+                               CONFIG_MLX5_ESWITCH=
                                ;;
                        --with-mlx4_en-mod)
                                CONFIG_MLX4_CORE="m"
diff --git a/patches/0002-BACKPORT-ib_core.patch b/patches/0002-BACKPORT-ib_core.patch
new file mode 100644 (file)
index 0000000..55606dc
--- /dev/null
@@ -0,0 +1,1674 @@
+From: Vladimir Sokolovsky <vlad@mellanox.com>
+Subject: [PATCH] BACKPORT: ib_core
+
+Signed-off-by: Vladimir Sokolovsky <vlad@mellanox.com>
+---
+ drivers/infiniband/core/addr.c          |  15 ++++
+ drivers/infiniband/core/cma.c           |  27 ++++++
+ drivers/infiniband/core/cma_configfs.c  |  93 ++++++++++++++++++++
+ drivers/infiniband/core/core_priv.h     |   9 ++
+ drivers/infiniband/core/cq.c            |  35 +++++++-
+ drivers/infiniband/core/device.c        |  22 +++++
+ drivers/infiniband/core/fmr_pool.c      |  55 ++++++++++++
+ drivers/infiniband/core/mad.c           |   3 +
+ drivers/infiniband/core/netlink.c       |  29 +++++++
+ drivers/infiniband/core/nldev.c         |  25 ++++++
+ drivers/infiniband/core/restrack.c      |   4 +
+ drivers/infiniband/core/roce_gid_mgmt.c |  31 +++++++
+ drivers/infiniband/core/sa_query.c      |  47 ++++++++++
+ drivers/infiniband/core/ucm.c           |  16 ++++
+ drivers/infiniband/core/ucma.c          |  15 ++++
+ drivers/infiniband/core/umem.c          |  31 +++++++
+ drivers/infiniband/core/user_mad.c      |  20 +++++
+ drivers/infiniband/core/uverbs_main.c   |  33 +++++++
+ drivers/infiniband/core/verbs.c         |   4 +
+ include/rdma/ib_addr.h                  |  23 +++++
+ include/rdma/ib_verbs.h                 | 147 ++++++++++++++++++++++++++++++++
+ include/rdma/rdma_netlink.h             |   4 +
+ include/rdma/restrack.h                 |   2 +
+ 23 files changed, 689 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/infiniband/core/addr.c
++++ b/drivers/infiniband/core/addr.c
+@@ -130,11 +130,26 @@ static void ib_nl_process_good_ip_rsep(const struct nlmsghdr *nlh)
+ }
+ int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
++#ifdef HAVE_NETLINK_EXT_ACK
+                            struct nlmsghdr *nlh,
+                            struct netlink_ext_ack *extack)
+ {
++#else
++                           struct netlink_callback *cb)
++{
++      const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
++
++#endif
+       if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
++#ifdef HAVE_NETLINK_CAPABLE
++#ifdef HAVE_NETLINK_SKB_PARMS_SK
+           !(NETLINK_CB(skb).sk))
++#else
++          !(NETLINK_CB(skb).ssk))
++#endif
++#else
++          sock_net(skb->sk) != &init_net)
++#endif
+               return -EPERM;
+       if (ib_nl_is_good_ip_resp(nlh))
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -1280,7 +1280,11 @@ static bool validate_ipv4_net_dev(struct net_device *net_dev,
+       fl4.saddr = saddr;
+       rcu_read_lock();
++#ifdef HAVE_FIB_LOOKUP_4_PARAMS
+       err = fib_lookup(dev_net(net_dev), &fl4, &res, 0);
++#else
++      err = fib_lookup(dev_net(net_dev), &fl4, &res);
++#endif
+       ret = err == 0 && FIB_RES_DEV(res) == net_dev;
+       rcu_read_unlock();
+@@ -1296,7 +1300,11 @@ static bool validate_ipv6_net_dev(struct net_device *net_dev,
+                          IPV6_ADDR_LINKLOCAL;
+       struct rt6_info *rt = rt6_lookup(dev_net(net_dev), &dst_addr->sin6_addr,
+                                        &src_addr->sin6_addr, net_dev->ifindex,
++#ifdef HAVE_RT6_LOOKUP_6_PARAMS
+                                        NULL, strict);
++#else
++                                       strict);
++#endif
+       bool ret;
+       if (!rt)
+@@ -2593,6 +2601,7 @@ static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
+       return 0;
+ }
++#if defined(HAVE_VLAN_DEV_GET_EGRESS_QOS_MASK) && defined(HAVE_NETDEV_GET_PRIO_TC_MAP)
+ static int iboe_tos_to_sl(struct net_device *ndev, int tos)
+ {
+       int prio;
+@@ -2610,6 +2619,7 @@ static int iboe_tos_to_sl(struct net_device *ndev, int tos)
+ #endif
+       return 0;
+ }
++#endif
+ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
+ {
+@@ -2655,7 +2665,16 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
+       route->path_rec->reversible = 1;
+       route->path_rec->pkey = cpu_to_be16(0xffff);
+       route->path_rec->mtu_selector = IB_SA_EQ;
++#if defined(HAVE_VLAN_DEV_GET_EGRESS_QOS_MASK) && defined(HAVE_NETDEV_GET_PRIO_TC_MAP)
+       route->path_rec->sl = iboe_tos_to_sl(ndev, tos);
++#elif defined(HAVE_NETDEV_GET_PRIO_TC_MAP)
++      route->path_rec->sl = netdev_get_prio_tc_map(
++                      ndev->priv_flags & IFF_802_1Q_VLAN ?
++                              vlan_dev_real_dev(ndev) : ndev,
++                      rt_tos2priority(tos));
++#else
++      route->path_rec->sl = tos >> 5;
++#endif
+       route->path_rec->traffic_class = tos;
+       route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
+       route->path_rec->rate_selector = IB_SA_EQ;
+@@ -3092,7 +3111,11 @@ static int cma_alloc_any_port(enum rdma_ucm_port_space ps,
+       unsigned int rover;
+       struct net *net = id_priv->id.route.addr.dev_addr.net;
++#ifdef HAVE_INET_GET_LOCAL_PORT_RANGE_3_PARAMS
+       inet_get_local_port_range(net, &low, &high);
++#else
++      inet_get_local_port_range(&low, &high);
++#endif
+       remaining = (high - low) + 1;
+       rover = prandom_u32() % remaining + low;
+ retry:
+@@ -4047,7 +4070,11 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
+                                               id_priv->id.port_num, &rec,
+                                               comp_mask, GFP_KERNEL,
+                                               cma_ib_mc_handler, mc);
++#ifdef HAVE_PTR_ERR_OR_ZERO
+       return PTR_ERR_OR_ZERO(mc->multicast.ib);
++#else
++      return PTR_RET(mc->multicast.ib);
++#endif
+ }
+ static void iboe_mcast_work_handler(struct work_struct *work)
+diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/infiniband/core/cma_configfs.c
++++ b/drivers/infiniband/core/cma_configfs.c
+@@ -35,6 +35,10 @@
+ #include <rdma/ib_verbs.h>
+ #include "core_priv.h"
++#ifndef CONFIGFS_ATTR
++#define HAVE_OLD_CONFIGFS_API
++#endif
++
+ struct cma_device;
+ struct cma_dev_group;
+@@ -52,6 +56,23 @@ struct cma_dev_group {
+       struct cma_dev_port_group       *ports;
+ };
++#ifdef HAVE_OLD_CONFIGFS_API
++struct cma_configfs_attr {
++      struct configfs_attribute       attr;
++      ssize_t                         (*show)(struct config_item *item,
++                                              char *buf);
++      ssize_t                         (*store)(struct config_item *item,
++                                               const char *buf, size_t count);
++};
++#define CONFIGFS_ATTR(dummy, _name)                           \
++static struct cma_configfs_attr attr_##_name =        \
++      __CONFIGFS_ATTR(_name, S_IRUGO | S_IWUSR, _name##_show, _name##_store)
++
++#define CONFIGFS_ATTR_ADD(name) &name.attr
++#else
++#define CONFIGFS_ATTR_ADD(name) &name
++#endif /* HAVE_OLD_CONFIGFS_API */
++
+ static struct cma_dev_port_group *to_dev_port_group(struct config_item *item)
+ {
+       struct config_group *group;
+@@ -68,6 +89,34 @@ static bool filter_by_name(struct ib_device *ib_dev, void *cookie)
+       return !strcmp(ib_dev->name, cookie);
+ }
++#ifdef HAVE_OLD_CONFIGFS_API
++static ssize_t cma_configfs_attr_show(struct config_item *item,
++                                    struct configfs_attribute *attr,
++                                    char *buf)
++{
++      struct cma_configfs_attr *ca =
++              container_of(attr, struct cma_configfs_attr, attr);
++
++      if (ca->show)
++              return ca->show(item, buf);
++
++      return -EINVAL;
++}
++
++static ssize_t cma_configfs_attr_store(struct config_item *item,
++                                     struct configfs_attribute *attr,
++                                     const char *buf, size_t count)
++{
++      struct cma_configfs_attr *ca =
++              container_of(attr, struct cma_configfs_attr, attr);
++
++      if (ca->store)
++              return ca->store(item, buf, count);
++
++      return -EINVAL;
++}
++#endif /* HAVE_OLD_CONFIGFS_API */
++
+ static int cma_configfs_params_get(struct config_item *item,
+                                  struct cma_device **pcma_dev,
+                                  struct cma_dev_port_group **pgroup)
+@@ -186,7 +235,11 @@ static struct configfs_attribute *cma_configfs_attributes[] = {
+       NULL,
+ };
++#ifdef CONFIG_GROUP_INIT_TYPE_NAME_PARAM_3_IS_CONST
+ static const struct config_item_type cma_port_group_type = {
++#else
++static struct config_item_type cma_port_group_type = {
++#endif
+       .ct_attrs       = cma_configfs_attributes,
+       .ct_owner       = THIS_MODULE
+ };
+@@ -214,6 +267,14 @@ static int make_cma_ports(struct cma_dev_group *cma_dev_group,
+               goto free;
+       }
++#ifndef HAVE_CONFIGFS_DEFAULT_GROUPS_LIST
++      cma_dev_group->ports_group.default_groups = kcalloc((ports_num + 1),
++                                                          sizeof(struct config_group *),
++                                                          GFP_KERNEL);
++      if (!cma_dev_group->ports_group.default_groups)
++              goto free;
++#endif
++
+       for (i = 0; i < ports_num; i++) {
+               char port_str[10];
+@@ -223,10 +284,17 @@ static int make_cma_ports(struct cma_dev_group *cma_dev_group,
+               config_group_init_type_name(&ports[i].group,
+                                           port_str,
+                                           &cma_port_group_type);
++#ifdef HAVE_CONFIGFS_DEFAULT_GROUPS_LIST
+               configfs_add_default_group(&ports[i].group,
+                               &cma_dev_group->ports_group);
++#else
++              cma_dev_group->ports_group.default_groups[i] = &ports[i].group;
++#endif
+       }
++#ifndef HAVE_CONFIGFS_DEFAULT_GROUPS_LIST
++      cma_dev_group->ports_group.default_groups[i] = NULL;
++#endif
+       cma_dev_group->ports = ports;
+       return 0;
+@@ -263,7 +331,11 @@ static struct configfs_item_operations cma_ports_item_ops = {
+       .release = release_cma_ports_group
+ };
++#ifdef CONFIG_GROUP_INIT_TYPE_NAME_PARAM_3_IS_CONST
+ static const struct config_item_type cma_ports_group_type = {
++#else
++static struct config_item_type cma_ports_group_type = {
++#endif
+       .ct_item_ops    = &cma_ports_item_ops,
+       .ct_owner       = THIS_MODULE
+ };
+@@ -272,7 +344,11 @@ static struct configfs_item_operations cma_device_item_ops = {
+       .release = release_cma_dev
+ };
++#ifdef CONFIG_GROUP_INIT_TYPE_NAME_PARAM_3_IS_CONST
+ static const struct config_item_type cma_device_group_type = {
++#else
++static struct config_item_type cma_device_group_type = {
++#endif
+       .ct_item_ops    = &cma_device_item_ops,
+       .ct_owner       = THIS_MODULE
+ };
+@@ -302,16 +378,29 @@ static struct config_group *make_cma_dev(struct config_group *group,
+       err = make_cma_ports(cma_dev_group, cma_dev);
+       if (err)
++#ifdef HAVE_CONFIGFS_DEFAULT_GROUPS_LIST
+               goto fail;
++#else
++              goto fail_free;
++#endif
+       config_group_init_type_name(&cma_dev_group->device_group, name,
+                                   &cma_device_group_type);
++#ifdef HAVE_CONFIGFS_DEFAULT_GROUPS_LIST
+       configfs_add_default_group(&cma_dev_group->ports_group,
+                       &cma_dev_group->device_group);
++#else
++      cma_dev_group->device_group.default_groups[0] = &cma_dev_group->ports_group;
++      cma_dev_group->device_group.default_groups[1] = NULL;
++#endif
+       cma_deref_dev(cma_dev);
+       return &cma_dev_group->device_group;
++#ifndef HAVE_CONFIGFS_DEFAULT_GROUPS_LIST
++fail_free:
++      kfree(cma_dev_group->device_group.default_groups);
++#endif
+ fail:
+       if (cma_dev)
+               cma_deref_dev(cma_dev);
+@@ -323,7 +412,11 @@ static struct configfs_group_operations cma_subsys_group_ops = {
+       .make_group     = make_cma_dev,
+ };
++#ifdef CONFIG_GROUP_INIT_TYPE_NAME_PARAM_3_IS_CONST
+ static const struct config_item_type cma_subsys_type = {
++#else
++static struct config_item_type cma_subsys_type = {
++#endif
+       .ct_group_ops   = &cma_subsys_group_ops,
+       .ct_owner       = THIS_MODULE,
+ };
+diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/infiniband/core/core_priv.h
++++ b/drivers/infiniband/core/core_priv.h
+@@ -194,6 +194,7 @@ void ib_sa_cleanup(void);
+ int rdma_nl_init(void);
+ void rdma_nl_exit(void);
++#ifdef HAVE_NETLINK_EXT_ACK
+ int ib_nl_handle_resolve_resp(struct sk_buff *skb,
+                             struct nlmsghdr *nlh,
+                             struct netlink_ext_ack *extack);
+@@ -203,6 +204,14 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb,
+ int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
+                            struct nlmsghdr *nlh,
+                            struct netlink_ext_ack *extack);
++#else
++int ib_nl_handle_resolve_resp(struct sk_buff *skb,
++                            struct netlink_callback *cb);
++int ib_nl_handle_set_timeout(struct sk_buff *skb,
++                            struct netlink_callback *cb);
++int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
++                            struct netlink_callback *cb);
++#endif
+ int ib_get_cached_subnet_prefix(struct ib_device *device,
+                               u8                port_num,
+diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/infiniband/core/cq.c
++++ b/drivers/infiniband/core/cq.c
+@@ -83,6 +83,7 @@ static void ib_cq_completion_direct(struct ib_cq *cq, void *private)
+       WARN_ONCE(1, "got unsolicited completion for CQ 0x%p\n", cq);
+ }
++#if defined(HAVE_IRQ_POLL_H) && IS_ENABLED(CONFIG_IRQ_POLL)
+ static int ib_poll_handler(struct irq_poll *iop, int budget)
+ {
+       struct ib_cq *cq = container_of(iop, struct ib_cq, iop);
+@@ -102,6 +103,30 @@ static void ib_cq_completion_softirq(struct ib_cq *cq, void *private)
+ {
+       irq_poll_sched(&cq->iop);
+ }
++#else
++static int ib_poll_handler(struct blk_iopoll *iop, int budget)
++{
++      struct ib_cq *cq = container_of(iop, struct ib_cq, iop);
++      int completed;
++
++      completed = __ib_process_cq(cq, budget);
++      if (completed < budget) {
++              blk_iopoll_complete(&cq->iop);
++              if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) {
++                      if (!blk_iopoll_sched_prep(&cq->iop))
++                              blk_iopoll_sched(&cq->iop);
++              }
++      }
++
++      return completed;
++}
++
++static void ib_cq_completion_softirq(struct ib_cq *cq, void *private)
++{
++      if (!blk_iopoll_sched_prep(&cq->iop))
++              blk_iopoll_sched(&cq->iop);
++}
++#endif
+ static void ib_cq_poll_work(struct work_struct *work)
+ {
+@@ -170,8 +195,12 @@ struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
+               break;
+       case IB_POLL_SOFTIRQ:
+               cq->comp_handler = ib_cq_completion_softirq;
+-
++#if defined(HAVE_IRQ_POLL_H) && IS_ENABLED(CONFIG_IRQ_POLL)
+               irq_poll_init(&cq->iop, IB_POLL_BUDGET_IRQ, ib_poll_handler);
++#else
++              blk_iopoll_init(&cq->iop, IB_POLL_BUDGET_IRQ, ib_poll_handler);
++              blk_iopoll_enable(&cq->iop);
++#endif
+               ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+               break;
+       case IB_POLL_WORKQUEUE:
+@@ -210,7 +239,11 @@ void ib_free_cq(struct ib_cq *cq)
+       case IB_POLL_DIRECT:
+               break;
+       case IB_POLL_SOFTIRQ:
++#if defined(HAVE_IRQ_POLL_H) && IS_ENABLED(CONFIG_IRQ_POLL)
+               irq_poll_disable(&cq->iop);
++#else
++              blk_iopoll_disable(&cq->iop);
++#endif
+               break;
+       case IB_POLL_WORKQUEUE:
+               cancel_work_sync(&cq->work);
+diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/infiniband/core/device.c
++++ b/drivers/infiniband/core/device.c
+@@ -462,6 +462,7 @@ int ib_register_device(struct ib_device *device,
+       int ret;
+       struct ib_client *client;
+       struct ib_udata uhw = {.outlen = 0, .inlen = 0};
++#ifdef HAVE_DEVICE_DMA_OPS
+       struct device *parent = device->dev.parent;
+       WARN_ON_ONCE(device->dma_device);
+@@ -493,6 +494,15 @@ int ib_register_device(struct ib_device *device,
+               WARN_ON_ONCE(!parent);
+               device->dma_device = parent;
+       }
++#else /* HAVE_DEVICE_DMA_OPS */
++      WARN_ON_ONCE(!device->dev.parent && !device->dma_device);
++      WARN_ON_ONCE(device->dev.parent && device->dma_device
++                   && device->dev.parent != device->dma_device);
++      if (!device->dev.parent)
++              device->dev.parent = device->dma_device;
++      if (!device->dma_device)
++              device->dma_device = device->dev.parent;
++#endif /* HAVE_DEVICE_DMA_OPS */
+       mutex_lock(&device_mutex);
+@@ -1159,15 +1169,27 @@ EXPORT_SYMBOL(ib_get_net_dev_by_params);
+ static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
+       [RDMA_NL_LS_OP_RESOLVE] = {
++#ifdef HAVE_NETLINK_EXT_ACK
+               .doit = ib_nl_handle_resolve_resp,
++#else
++              .dump = ib_nl_handle_resolve_resp,
++#endif
+               .flags = RDMA_NL_ADMIN_PERM,
+       },
+       [RDMA_NL_LS_OP_SET_TIMEOUT] = {
++#ifdef HAVE_NETLINK_EXT_ACK
+               .doit = ib_nl_handle_set_timeout,
++#else
++              .dump = ib_nl_handle_set_timeout,
++#endif
+               .flags = RDMA_NL_ADMIN_PERM,
+       },
+       [RDMA_NL_LS_OP_IP_RESOLVE] = {
++#ifdef HAVE_NETLINK_EXT_ACK
+               .doit = ib_nl_handle_ip_res_resp,
++#else
++              .dump = ib_nl_handle_ip_res_resp,
++#endif
+               .flags = RDMA_NL_ADMIN_PERM,
+       },
+ };
+diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/infiniband/core/fmr_pool.c
++++ b/drivers/infiniband/core/fmr_pool.c
+@@ -96,8 +96,12 @@ struct ib_fmr_pool {
+                                                  void *              arg);
+       void                     *flush_arg;
++#ifdef HAVE_KTHREAD_QUEUE_WORK
+       struct kthread_worker     *worker;
+       struct kthread_work       work;
++#else
++      struct task_struct       *thread;
++#endif
+       atomic_t                  req_ser;
+       atomic_t                  flush_ser;
+@@ -175,6 +179,7 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
+       spin_unlock_irq(&pool->pool_lock);
+ }
++#ifdef HAVE_KTHREAD_QUEUE_WORK
+ static void ib_fmr_cleanup_func(struct kthread_work *work)
+ {
+       struct ib_fmr_pool *pool = container_of(work, struct ib_fmr_pool, work);
+@@ -189,6 +194,32 @@ static void ib_fmr_cleanup_func(struct kthread_work *work)
+       if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0)
+               kthread_queue_work(pool->worker, &pool->work);
+ }
++#else /* HAVE_KTHREAD_QUEUE_WORK */
++static int ib_fmr_cleanup_thread(void *pool_ptr)
++{
++      struct ib_fmr_pool *pool = pool_ptr;
++
++      do {
++              if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
++                      ib_fmr_batch_release(pool);
++
++                      atomic_inc(&pool->flush_ser);
++                      wake_up_interruptible(&pool->force_wait);
++
++                      if (pool->flush_function)
++                              pool->flush_function(pool, pool->flush_arg);
++              }
++
++              set_current_state(TASK_INTERRUPTIBLE);
++              if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
++                  !kthread_should_stop())
++                      schedule();
++              __set_current_state(TASK_RUNNING);
++      } while (!kthread_should_stop());
++
++      return 0;
++}
++#endif /* HAVE_KTHREAD_QUEUE_WORK */
+ /**
+  * ib_create_fmr_pool - Create an FMR pool
+@@ -256,6 +287,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd             *pd,
+       atomic_set(&pool->flush_ser, 0);
+       init_waitqueue_head(&pool->force_wait);
++#ifdef HAVE_KTHREAD_QUEUE_WORK
+       pool->worker = kthread_create_worker(0, "ib_fmr(%s)", device->name);
+       if (IS_ERR(pool->worker)) {
+               pr_warn(PFX "couldn't start cleanup kthread worker\n");
+@@ -263,6 +295,17 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd             *pd,
+               goto out_free_pool;
+       }
+       kthread_init_work(&pool->work, ib_fmr_cleanup_func);
++#else
++      pool->thread = kthread_run(ib_fmr_cleanup_thread,
++                                 pool,
++                                 "ib_fmr(%s)",
++                                 device->name);
++      if (IS_ERR(pool->thread)) {
++              pr_warn(PFX "couldn't start cleanup thread\n");
++              ret = PTR_ERR(pool->thread);
++              goto out_free_pool;
++      }
++#endif
+       {
+               struct ib_pool_fmr *fmr;
+@@ -327,7 +370,11 @@ void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
+       LIST_HEAD(fmr_list);
+       int                 i;
++#ifdef HAVE_KTHREAD_QUEUE_WORK
+       kthread_destroy_worker(pool->worker);
++#else
++      kthread_stop(pool->thread);
++#endif
+       ib_fmr_batch_release(pool);
+       i = 0;
+@@ -377,7 +424,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
+       spin_unlock_irq(&pool->pool_lock);
+       serial = atomic_inc_return(&pool->req_ser);
++#ifdef HAVE_KTHREAD_QUEUE_WORK
+       kthread_queue_work(pool->worker, &pool->work);
++#else
++      wake_up_process(pool->thread);
++#endif
+       if (wait_event_interruptible(pool->force_wait,
+                                    atomic_read(&pool->flush_ser) - serial >= 0))
+@@ -489,7 +540,11 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
+                       list_add_tail(&fmr->list, &pool->dirty_list);
+                       if (++pool->dirty_len >= pool->dirty_watermark) {
+                               atomic_inc(&pool->req_ser);
++#ifdef HAVE_KTHREAD_QUEUE_WORK
+                               kthread_queue_work(pool->worker, &pool->work);
++#else
++                              wake_up_process(pool->thread);
++#endif
+                       }
+               }
+       }
+diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/infiniband/core/mad.c
++++ b/drivers/infiniband/core/mad.c
+@@ -35,6 +35,9 @@
+  *
+  */
++#ifdef pr_fmt
++#undef pr_fmt
++#endif
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+ #include <linux/dma-mapping.h>
+diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/infiniband/core/netlink.c
++++ b/drivers/infiniband/core/netlink.c
+@@ -31,6 +31,9 @@
+  * SOFTWARE.
+  */
++#ifdef pr_fmt
++#undef pr_fmt
++#endif
+ #define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__
+ #include <linux/export.h>
+@@ -154,8 +157,12 @@ int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh,
+ }
+ EXPORT_SYMBOL(ibnl_put_attr);
++#ifdef HAVE_NETLINK_EXT_ACK
+ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
+                          struct netlink_ext_ack *extack)
++#else
++static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
++#endif
+ {
+       int type = nlh->nlmsg_type;
+       unsigned int index = RDMA_NL_GET_CLIENT(type);
+@@ -177,7 +184,11 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
+        */
+       if (index == RDMA_NL_LS) {
+               if (cb_table[op].doit)
++#ifdef HAVE_NETLINK_EXT_ACK
+                       return cb_table[op].doit(skb, nlh, extack);
++#else
++                      return cb_table[op].doit(skb, nlh);
++#endif
+               return -EINVAL;
+       }
+       /* FIXME: Convert IWCM to properly handle doit callbacks */
+@@ -192,7 +203,11 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
+       }
+       if (cb_table[op].doit)
++#ifdef HAVE_NETLINK_EXT_ACK
+               return cb_table[op].doit(skb, nlh, extack);
++#else
++              return cb_table[op].doit(skb, nlh);
++#endif
+       return 0;
+ }
+@@ -204,10 +219,16 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
+  * for that consumer only.
+  */
+ static int rdma_nl_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
++#ifdef HAVE_NETLINK_EXT_ACK
+                                                  struct nlmsghdr *,
+                                                  struct netlink_ext_ack *))
++#else
++                                                 struct nlmsghdr *))
++#endif
+ {
++#ifdef HAVE_NETLINK_EXT_ACK
+       struct netlink_ext_ack extack = {};
++#endif
+       struct nlmsghdr *nlh;
+       int err;
+@@ -235,13 +256,21 @@ static int rdma_nl_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
+               if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
+                       goto ack;
++#ifdef HAVE_NETLINK_EXT_ACK
+               err = cb(skb, nlh, &extack);
++#else
++              err = cb(skb, nlh);
++#endif
+               if (err == -EINTR)
+                       goto skip;
+ ack:
+               if (nlh->nlmsg_flags & NLM_F_ACK || err)
++#ifdef HAVE_NETLINK_EXT_ACK
+                       netlink_ack(skb, nlh, err, &extack);
++#else
++                      netlink_ack(skb, nlh, err);
++#endif
+ skip:
+               msglen = NLMSG_ALIGN(nlh->nlmsg_len);
+diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/infiniband/core/nldev.c
++++ b/drivers/infiniband/core/nldev.c
+@@ -503,8 +503,12 @@ out:
+       return -EMSGSIZE;
+ }
++#ifdef HAVE_NETLINK_EXT_ACK
+ static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+                         struct netlink_ext_ack *extack)
++#else
++static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
++#endif
+ {
+       struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+       struct ib_device *device;
+@@ -513,7 +517,11 @@ static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+       int err;
+       err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
++#ifdef HAVE_NETLINK_EXT_ACK
+                         nldev_policy, extack);
++#else
++                        nldev_policy, NULL);
++#endif
+       if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
+               return -EINVAL;
+@@ -586,8 +594,12 @@ static int nldev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+       return ib_enum_all_devs(_nldev_get_dumpit, skb, cb);
+ }
++#ifdef HAVE_NETLINK_EXT_ACK
+ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+                              struct netlink_ext_ack *extack)
++#else
++static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
++#endif
+ {
+       struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+       struct ib_device *device;
+@@ -597,7 +609,11 @@ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+       int err;
+       err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
++#ifdef HAVE_NETLINK_EXT_ACK
+                         nldev_policy, extack);
++#else
++                        nldev_policy, NULL);
++#endif
+       if (err ||
+           !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
+           !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
+@@ -698,8 +714,12 @@ out:
+       return skb->len;
+ }
++#ifdef HAVE_NETLINK_EXT_ACK
+ static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+                             struct netlink_ext_ack *extack)
++#else
++static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
++#endif
+ {
+       struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+       struct ib_device *device;
+@@ -708,7 +728,12 @@ static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+       int ret;
+       ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
++#ifdef HAVE_NETLINK_EXT_ACK
+                         nldev_policy, extack);
++#else
++                          nldev_policy, NULL);
++#endif
++
+       if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
+               return -EINVAL;
+diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/infiniband/core/restrack.c
++++ b/drivers/infiniband/core/restrack.c
+@@ -12,6 +12,10 @@
+ #include "cma_priv.h"
++#ifndef CUT_HERE
++#define CUT_HERE               "------------[ cut here ]------------\n"
++#endif
++
+ void rdma_restrack_init(struct rdma_restrack_root *res)
+ {
+       init_rwsem(&res->rwsem);
+diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/infiniband/core/roce_gid_mgmt.c
++++ b/drivers/infiniband/core/roce_gid_mgmt.c
+@@ -132,12 +132,17 @@ static enum bonding_slave_state is_eth_active_slave_of_bonding_rcu(struct net_de
+                                                                  struct net_device *upper)
+ {
+       if (upper && netif_is_bond_master(upper)) {
++#ifdef HAVE_BONDING_H
+               struct net_device *pdev =
+                       bond_option_active_slave_get_rcu(netdev_priv(upper));
+               if (pdev)
+                       return dev == pdev ? BONDING_SLAVE_STATE_ACTIVE :
+                               BONDING_SLAVE_STATE_INACTIVE;
++#else
++      return memcmp(upper->dev_addr, dev->dev_addr, ETH_ALEN) ?
++              BONDING_SLAVE_STATE_INACTIVE : BONDING_SLAVE_STATE_ACTIVE;
++#endif
+       }
+       return BONDING_SLAVE_STATE_NA;
+@@ -405,12 +410,16 @@ static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev,
+        * our feet
+        */
+       rtnl_lock();
++#ifdef HAVE_NET_RWSEM
+       down_read(&net_rwsem);
++#endif
+       for_each_net(net)
+               for_each_netdev(net, ndev)
+                       if (is_eth_port_of_netdev(ib_dev, port, rdma_ndev, ndev))
+                               add_netdev_ips(ib_dev, port, rdma_ndev, ndev);
++#ifdef HAVE_NET_RWSEM
+       up_read(&net_rwsem);
++#endif
+       rtnl_unlock();
+ }
+@@ -459,6 +468,7 @@ static int netdev_upper_walk(struct net_device *upper, void *data)
+       return 0;
+ }
++#ifdef HAVE_NETDEV_NOTIFIER_CHANGEUPPER_INFO
+ static void handle_netdev_upper(struct ib_device *ib_dev, u8 port,
+                               void *cookie,
+                               void (*handle_netdev)(struct ib_device *ib_dev,
+@@ -501,6 +511,7 @@ static void add_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
+ {
+       handle_netdev_upper(ib_dev, port, cookie, _add_netdev_ips);
+ }
++#endif
+ static void del_netdev_default_ips_join(struct ib_device *ib_dev, u8 port,
+                                       struct net_device *rdma_ndev,
+@@ -578,6 +589,8 @@ static int netdevice_queue_work(struct netdev_event_work_cmd *cmds,
+ static const struct netdev_event_work_cmd add_cmd = {
+       .cb = add_netdev_ips, .filter = is_eth_port_of_netdev};
++
++#ifdef HAVE_NETDEV_NOTIFIER_CHANGEUPPER_INFO
+ static const struct netdev_event_work_cmd add_cmd_upper_ips = {
+       .cb = add_netdev_upper_ips, .filter = is_eth_port_of_netdev};
+@@ -601,10 +614,15 @@ static void netdevice_event_changeupper(struct netdev_notifier_changeupper_info
+               cmds[1].filter_ndev = changeupper_info->upper_dev;
+       }
+ }
++#endif
+ static int netdevice_event(struct notifier_block *this, unsigned long event,
+                          void *ptr)
+ {
++#ifndef HAVE_NETDEV_NOTIFIER_CHANGEUPPER_INFO
++      static const struct netdev_event_work_cmd add_cmd = {
++              .cb = add_netdev_ips, .filter = is_eth_port_of_netdev};
++#endif
+       static const struct netdev_event_work_cmd del_cmd = {
+               .cb = del_netdev_ips, .filter = pass_all_filter};
+       static const struct netdev_event_work_cmd bonding_default_del_cmd_join = {
+@@ -612,7 +630,11 @@ static int netdevice_event(struct notifier_block *this, unsigned long event,
+       static const struct netdev_event_work_cmd default_del_cmd = {
+               .cb = del_netdev_default_ips, .filter = pass_all_filter};
+       static const struct netdev_event_work_cmd bonding_event_ips_del_cmd = {
++#ifdef HAVE_NETDEV_NOTIFIER_CHANGEUPPER_INFO
+               .cb = del_netdev_upper_ips, .filter = upper_device_filter};
++#else
++              .cb = del_netdev_ips, .filter = upper_device_filter};
++#endif
+       struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+       struct netdev_event_work_cmd cmds[ROCE_NETDEV_CALLBACK_SZ] = { {NULL} };
+@@ -622,6 +644,9 @@ static int netdevice_event(struct notifier_block *this, unsigned long event,
+       switch (event) {
+       case NETDEV_REGISTER:
+       case NETDEV_UP:
++#ifndef HAVE_NETDEV_NOTIFIER_CHANGEUPPER_INFO
++      case NETDEV_JOIN:
++#endif
+               cmds[0] = bonding_default_del_cmd_join;
+               cmds[1] = add_cmd;
+               break;
+@@ -638,16 +663,22 @@ static int netdevice_event(struct notifier_block *this, unsigned long event,
+               cmds[1] = add_cmd;
+               break;
++#ifdef HAVE_NETDEV_NOTIFIER_CHANGEUPPER_INFO
+       case NETDEV_CHANGEUPPER:
+               netdevice_event_changeupper(
+                       container_of(ptr, struct netdev_notifier_changeupper_info, info),
+                       cmds);
+               break;
++#endif
+       case NETDEV_BONDING_FAILOVER:
+               cmds[0] = bonding_event_ips_del_cmd;
+               cmds[1] = bonding_default_del_cmd_join;
++#ifdef HAVE_NETDEV_NOTIFIER_CHANGEUPPER_INFO
+               cmds[2] = add_cmd_upper_ips;
++#else
++              cmds[2] = add_cmd;
++#endif
+               break;
+       default:
+diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/infiniband/core/sa_query.c
++++ b/drivers/infiniband/core/sa_query.c
+@@ -1022,9 +1022,15 @@ static void ib_nl_request_timeout(struct work_struct *work)
+ }
+ int ib_nl_handle_set_timeout(struct sk_buff *skb,
++#ifdef HAVE_NETLINK_EXT_ACK
+                            struct nlmsghdr *nlh,
+                            struct netlink_ext_ack *extack)
+ {
++#else
++                           struct netlink_callback *cb)
++{
++      const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
++#endif
+       int timeout, delta, abs_delta;
+       const struct nlattr *attr;
+       unsigned long flags;
+@@ -1034,7 +1040,15 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb,
+       int ret;
+       if (!(nlh->nlmsg_flags & NLM_F_REQUEST) ||
++#ifdef HAVE_NETLINK_CAPABLE
++#ifdef HAVE_NETLINK_SKB_PARMS_SK
+           !(NETLINK_CB(skb).sk))
++#else
++          !(NETLINK_CB(skb).ssk))
++#endif
++#else
++          sock_net(skb->sk) != &init_net)
++#endif
+               return -EPERM;
+       ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
+@@ -1098,9 +1112,15 @@ static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
+ }
+ int ib_nl_handle_resolve_resp(struct sk_buff *skb,
++#ifdef HAVE_NETLINK_EXT_ACK
+                             struct nlmsghdr *nlh,
+                             struct netlink_ext_ack *extack)
+ {
++#else
++                            struct netlink_callback *cb)
++{
++      const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
++#endif
+       unsigned long flags;
+       struct ib_sa_query *query;
+       struct ib_mad_send_buf *send_buf;
+@@ -1109,7 +1129,15 @@ int ib_nl_handle_resolve_resp(struct sk_buff *skb,
+       int ret;
+       if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
++#ifdef HAVE_NETLINK_CAPABLE
++#ifdef HAVE_NETLINK_SKB_PARMS_SK
+           !(NETLINK_CB(skb).sk))
++#else
++          !(NETLINK_CB(skb).ssk))
++#endif
++#else
++          sock_net(skb->sk) != &init_net)
++#endif
+               return -EPERM;
+       spin_lock_irqsave(&ib_nl_request_lock, flags);
+@@ -1423,10 +1451,17 @@ static void init_mad(struct ib_sa_query *query, struct ib_mad_agent *agent)
+ static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
+ {
++#ifdef HAVE_IDR_ALLOC
++#ifdef __GFP_WAIT
++      bool preload = !!(gfp_mask & __GFP_WAIT);
++#else
+       bool preload = gfpflags_allow_blocking(gfp_mask);
++#endif
++#endif
+       unsigned long flags;
+       int ret, id;
++#ifdef HAVE_IDR_ALLOC
+       if (preload)
+               idr_preload(gfp_mask);
+       spin_lock_irqsave(&idr_lock, flags);
+@@ -1438,6 +1473,18 @@ static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
+               idr_preload_end();
+       if (id < 0)
+               return id;
++#else
++retry:
++      if (!idr_pre_get(&query_idr, gfp_mask))
++              return -ENOMEM;
++      spin_lock_irqsave(&idr_lock, flags);
++      ret = idr_get_new(&query_idr, query, &id);
++      spin_unlock_irqrestore(&idr_lock, flags);
++      if (ret == -EAGAIN)
++              goto retry;
++      if (ret)
++              return ret;
++#endif
+       query->mad_buf->timeout_ms  = timeout_ms;
+       query->mad_buf->context[0] = query;
+diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/infiniband/core/ucm.c
++++ b/drivers/infiniband/core/ucm.c
+@@ -1135,6 +1135,7 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
+       return result;
+ }
++#ifdef EPOLLIN
+ static __poll_t ib_ucm_poll(struct file *filp,
+                               struct poll_table_struct *wait)
+ {
+@@ -1148,6 +1149,21 @@ static __poll_t ib_ucm_poll(struct file *filp,
+       return mask;
+ }
++#else
++static unsigned int ib_ucm_poll(struct file *filp,
++                              struct poll_table_struct *wait)
++{
++      struct ib_ucm_file *file = filp->private_data;
++      unsigned int mask = 0;
++
++      poll_wait(filp, &file->poll_wait, wait);
++
++      if (!list_empty(&file->events))
++              mask = POLLIN | POLLRDNORM;
++
++      return mask;
++}
++#endif
+ /*
+  * ib_ucm_open() does not need the BKL:
+diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/infiniband/core/ucma.c
++++ b/drivers/infiniband/core/ucma.c
+@@ -1680,6 +1680,7 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
+       return ret;
+ }
++#ifdef EPOLLIN
+ static __poll_t ucma_poll(struct file *filp, struct poll_table_struct *wait)
+ {
+       struct ucma_file *file = filp->private_data;
+@@ -1692,6 +1693,20 @@ static __poll_t ucma_poll(struct file *filp, struct poll_table_struct *wait)
+       return mask;
+ }
++#else
++static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
++{
++      struct ucma_file *file = filp->private_data;
++      unsigned int mask = 0;
++
++      poll_wait(filp, &file->poll_wait, wait);
++
++      if (!list_empty(&file->event_list))
++              mask = POLLIN | POLLRDNORM;
++
++      return mask;
++}
++#endif
+ /*
+  * ucma_open() does not need the BKL:
+diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/infiniband/core/umem.c
++++ b/drivers/infiniband/core/umem.c
+@@ -92,13 +92,23 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
+       unsigned long npages;
+       int ret;
+       int i;
++#ifdef HAVE_STRUCT_DMA_ATTRS
++      DEFINE_DMA_ATTRS(dma_attrs);
++#else
+       unsigned long dma_attrs = 0;
++#endif
+       struct scatterlist *sg, *sg_list_start;
+       int need_release = 0;
++#ifdef HAVE_GET_USER_PAGES_GUP_FLAGS
+       unsigned int gup_flags = FOLL_WRITE;
++#endif
+       if (dmasync)
++#ifdef HAVE_STRUCT_DMA_ATTRS
++              dma_set_attr(DMA_ATTR_WRITE_BARRIER, &dma_attrs);
++#else
+               dma_attrs |= DMA_ATTR_WRITE_BARRIER;
++#endif
+       /*
+        * If the combination of the addr and size requested for this memory
+@@ -184,17 +194,34 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
+       if (ret)
+               goto out;
++#ifdef HAVE_GET_USER_PAGES_GUP_FLAGS
+       if (!umem->writable)
+               gup_flags |= FOLL_FORCE;
++#endif
+       need_release = 1;
+       sg_list_start = umem->sg_head.sgl;
+       while (npages) {
++#ifdef HAVE_GET_USER_PAGES_8_PARAMS
++              ret = get_user_pages(current, current->mm, cur_base,
++                                   min_t(unsigned long, npages,
++                                         PAGE_SIZE / sizeof (struct page *)),
++                                   1, !umem->writable, page_list, vma_list);
++#else
++#ifdef HAVE_GET_USER_PAGES_LONGTERM
+               ret = get_user_pages_longterm(cur_base,
++#else
++              ret = get_user_pages(cur_base,
++#endif
+                                    min_t(unsigned long, npages,
+                                          PAGE_SIZE / sizeof (struct page *)),
++#ifdef HAVE_GET_USER_PAGES_GUP_FLAGS
+                                    gup_flags, page_list, vma_list);
++#else
++                                   1, !umem->writable, page_list, vma_list);
++#endif
++#endif
+               if (ret < 0)
+                       goto out;
+@@ -218,7 +245,11 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
+                                 umem->sg_head.sgl,
+                                 umem->npages,
+                                 DMA_BIDIRECTIONAL,
++#ifdef HAVE_STRUCT_DMA_ATTRS
++                                &dma_attrs);
++#else
+                                 dma_attrs);
++#endif
+       if (umem->nmap <= 0) {
+               ret = -ENOMEM;
+diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/infiniband/core/user_mad.c
++++ b/drivers/infiniband/core/user_mad.c
+@@ -33,6 +33,9 @@
+  * SOFTWARE.
+  */
++#ifdef pr_fmt
++#undef pr_fmt
++#endif
+ #define pr_fmt(fmt) "user_mad: " fmt
+ #include <linux/module.h>
+@@ -628,6 +631,7 @@ err:
+       return ret;
+ }
++#ifdef EPOLLIN
+ static __poll_t ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
+ {
+       struct ib_umad_file *file = filp->private_data;
+@@ -642,6 +646,22 @@ static __poll_t ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
+       return mask;
+ }
++#else
++static unsigned int ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
++{
++      struct ib_umad_file *file = filp->private_data;
++
++      /* we will always be able to post a MAD send */
++      unsigned int mask = POLLOUT | POLLWRNORM;
++
++      poll_wait(filp, &file->recv_wait, wait);
++
++      if (!list_empty(&file->recv_list))
++              mask |= POLLIN | POLLRDNORM;
++
++      return mask;
++}
++#endif
+ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
+                            int compat_method_mask)
+diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/infiniband/core/uverbs_main.c
++++ b/drivers/infiniband/core/uverbs_main.c
+@@ -341,6 +341,7 @@ static ssize_t ib_uverbs_comp_event_read(struct file *filp, char __user *buf,
+                                   sizeof(struct ib_uverbs_comp_event_desc));
+ }
++#ifdef EPOLLIN
+ static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
+                                        struct file *filp,
+                                        struct poll_table_struct *wait)
+@@ -371,6 +372,38 @@ static __poll_t ib_uverbs_comp_event_poll(struct file *filp,
+       return ib_uverbs_event_poll(&comp_ev_file->ev_queue, filp, wait);
+ }
++#else
++static unsigned int ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
++                                       struct file *filp,
++                                       struct poll_table_struct *wait)
++{
++      unsigned int pollflags = 0;
++
++      poll_wait(filp, &ev_queue->poll_wait, wait);
++
++      spin_lock_irq(&ev_queue->lock);
++      if (!list_empty(&ev_queue->event_list))
++              pollflags = POLLIN | POLLRDNORM;
++      spin_unlock_irq(&ev_queue->lock);
++
++      return pollflags;
++}
++
++static unsigned int ib_uverbs_async_event_poll(struct file *filp,
++                                             struct poll_table_struct *wait)
++{
++      return ib_uverbs_event_poll(filp->private_data, filp, wait);
++}
++
++static unsigned int ib_uverbs_comp_event_poll(struct file *filp,
++                                            struct poll_table_struct *wait)
++{
++      struct ib_uverbs_completion_event_file *comp_ev_file =
++              filp->private_data;
++
++      return ib_uverbs_event_poll(&comp_ev_file->ev_queue, filp, wait);
++}
++#endif
+ static int ib_uverbs_async_event_fasync(int fd, struct file *filp, int on)
+ {
+diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/infiniband/core/verbs.c
++++ b/drivers/infiniband/core/verbs.c
+@@ -2032,6 +2032,7 @@ int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
+ }
+ EXPORT_SYMBOL(ib_get_vf_config);
++#ifdef HAVE_NDO_GET_VF_STATS
+ int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
+                   struct ifla_vf_stats *stats)
+ {
+@@ -2041,7 +2042,9 @@ int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
+       return device->get_vf_stats(device, vf, port, stats);
+ }
+ EXPORT_SYMBOL(ib_get_vf_stats);
++#endif
++#ifdef HAVE_NDO_SET_VF_GUID
+ int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
+                  int type)
+ {
+@@ -2051,6 +2054,7 @@ int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
+       return device->set_vf_guid(device, vf, port, guid, type);
+ }
+ EXPORT_SYMBOL(ib_set_vf_guid);
++#endif
+ /**
+  * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
+diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/include/rdma/ib_addr.h
++++ b/include/rdma/ib_addr.h
+@@ -241,15 +241,25 @@ static inline enum ib_mtu iboe_get_mtu(int mtu)
+ static inline int iboe_get_rate(struct net_device *dev)
+ {
++#ifdef HAVE___ETHTOOL_GET_LINK_KSETTINGS
+       struct ethtool_link_ksettings cmd;
++#else
++      struct ethtool_cmd cmd;
++      u32 speed;
++#endif
+       int err;
+       rtnl_lock();
++#ifdef HAVE___ETHTOOL_GET_LINK_KSETTINGS
+       err = __ethtool_get_link_ksettings(dev, &cmd);
++#else
++      err = __ethtool_get_settings(dev, &cmd);
++#endif
+       rtnl_unlock();
+       if (err)
+               return IB_RATE_PORT_CURRENT;
++#ifdef HAVE___ETHTOOL_GET_LINK_KSETTINGS
+       if (cmd.base.speed >= 40000)
+               return IB_RATE_40_GBPS;
+       else if (cmd.base.speed >= 30000)
+@@ -260,6 +270,19 @@ static inline int iboe_get_rate(struct net_device *dev)
+               return IB_RATE_10_GBPS;
+       else
+               return IB_RATE_PORT_CURRENT;
++#else
++      speed = ethtool_cmd_speed(&cmd);
++      if (speed >= 40000)
++              return IB_RATE_40_GBPS;
++      else if (speed >= 30000)
++              return IB_RATE_30_GBPS;
++      else if (speed >= 20000)
++              return IB_RATE_20_GBPS;
++      else if (speed >= 10000)
++              return IB_RATE_10_GBPS;
++      else
++              return IB_RATE_PORT_CURRENT;
++#endif
+ }
+ static inline int rdma_link_local_addr(struct in6_addr *addr)
+diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/include/rdma/ib_verbs.h
++++ b/include/rdma/ib_verbs.h
+@@ -49,7 +49,11 @@
+ #include <linux/scatterlist.h>
+ #include <linux/workqueue.h>
+ #include <linux/socket.h>
++#if defined(HAVE_IRQ_POLL_H) && IS_ENABLED(CONFIG_IRQ_POLL)
+ #include <linux/irq_poll.h>
++#else
++#include <linux/blk-iopoll.h>
++#endif
+ #include <uapi/linux/if_ether.h>
+ #include <net/ipv6.h>
+ #include <net/ip.h>
+@@ -1587,7 +1591,11 @@ struct ib_cq {
+       enum ib_poll_context    poll_ctx;
+       struct ib_wc            *wc;
+       union {
++#if defined(HAVE_IRQ_POLL_H) && IS_ENABLED(CONFIG_IRQ_POLL)
+               struct irq_poll         iop;
++#else
++              struct blk_iopoll       iop;
++#endif
+               struct work_struct      work;
+       };
+       /*
+@@ -2134,6 +2142,63 @@ struct ib_cache {
+       struct ib_port_cache   *ports;
+ };
++#ifndef HAVE_DEVICE_DMA_OPS
++struct ib_dma_mapping_ops {
++      int             (*mapping_error)(struct ib_device *dev,
++                                       u64 dma_addr);
++      u64             (*map_single)(struct ib_device *dev,
++                                    void *ptr, size_t size,
++                                    enum dma_data_direction direction);
++      void            (*unmap_single)(struct ib_device *dev,
++                                      u64 addr, size_t size,
++                                      enum dma_data_direction direction);
++      u64             (*map_page)(struct ib_device *dev,
++                                  struct page *page, unsigned long offset,
++                                  size_t size,
++                                  enum dma_data_direction direction);
++      void            (*unmap_page)(struct ib_device *dev,
++                                    u64 addr, size_t size,
++                                    enum dma_data_direction direction);
++      int             (*map_sg)(struct ib_device *dev,
++                                struct scatterlist *sg, int nents,
++                                enum dma_data_direction direction);
++      void            (*unmap_sg)(struct ib_device *dev,
++                                  struct scatterlist *sg, int nents,
++                                  enum dma_data_direction direction);
++      int             (*map_sg_attrs)(struct ib_device *dev,
++                                      struct scatterlist *sg, int nents,
++                                      enum dma_data_direction direction,
++#ifdef HAVE_STRUCT_DMA_ATTRS
++                                      struct dma_attrs *attrs);
++#else
++                                      unsigned long attrs);
++#endif
++      void            (*unmap_sg_attrs)(struct ib_device *dev,
++                                        struct scatterlist *sg, int nents,
++                                        enum dma_data_direction direction,
++#ifdef HAVE_STRUCT_DMA_ATTRS
++                                        struct dma_attrs *attrs);
++#else
++                                        unsigned long attrs);
++#endif
++      void            (*sync_single_for_cpu)(struct ib_device *dev,
++                                             u64 dma_handle,
++                                             size_t size,
++                                             enum dma_data_direction dir);
++      void            (*sync_single_for_device)(struct ib_device *dev,
++                                                u64 dma_handle,
++                                                size_t size,
++                                                enum dma_data_direction dir);
++      void            *(*alloc_coherent)(struct ib_device *dev,
++                                         size_t size,
++                                         u64 *dma_handle,
++                                         gfp_t flag);
++      void            (*free_coherent)(struct ib_device *dev,
++                                       size_t size, void *cpu_addr,
++                                       u64 dma_handle);
++};
++#endif
++
+ struct iw_cm_verbs;
+ struct ib_port_immutable {
+@@ -2420,8 +2485,10 @@ struct ib_device {
+                                                       int state);
+       int                        (*get_vf_config)(struct ib_device *device, int vf, u8 port,
+                                                  struct ifla_vf_info *ivf);
++#ifdef HAVE_NDO_GET_VF_STATS
+       int                        (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
+                                                  struct ifla_vf_stats *stats);
++#endif
+       int                        (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
+                                                 int type);
+       struct ib_wq *             (*create_wq)(struct ib_pd *pd,
+@@ -2451,6 +2518,9 @@ struct ib_device {
+       struct ib_mr *             (*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
+                                               struct ib_dm_mr_attr *attr,
+                                               struct uverbs_attr_bundle *attrs);
++#ifndef HAVE_DEVICE_DMA_OPS
++      struct ib_dma_mapping_ops   *dma_ops;
++#endif
+       /**
+        * rdma netdev operation
+        *
+@@ -2973,8 +3043,10 @@ int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
+                        int state);
+ int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
+                    struct ifla_vf_info *info);
++#ifdef HAVE_NDO_GET_VF_STATS
+ int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
+                   struct ifla_vf_stats *stats);
++#endif
+ int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
+                  int type);
+@@ -3412,6 +3484,10 @@ static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
+  */
+ static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
+ {
++#ifndef HAVE_DEVICE_DMA_OPS
++      if (dev->dma_ops)
++              return dev->dma_ops->mapping_error(dev, dma_addr);
++#endif
+       return dma_mapping_error(dev->dma_device, dma_addr);
+ }
+@@ -3426,6 +3502,10 @@ static inline u64 ib_dma_map_single(struct ib_device *dev,
+                                   void *cpu_addr, size_t size,
+                                   enum dma_data_direction direction)
+ {
++#ifndef HAVE_DEVICE_DMA_OPS
++      if (dev->dma_ops)
++              return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
++#endif
+       return dma_map_single(dev->dma_device, cpu_addr, size, direction);
+ }
+@@ -3440,6 +3520,11 @@ static inline void ib_dma_unmap_single(struct ib_device *dev,
+                                      u64 addr, size_t size,
+                                      enum dma_data_direction direction)
+ {
++#ifndef HAVE_DEVICE_DMA_OPS
++      if (dev->dma_ops)
++              dev->dma_ops->unmap_single(dev, addr, size, direction);
++      else
++#endif
+       dma_unmap_single(dev->dma_device, addr, size, direction);
+ }
+@@ -3457,6 +3542,10 @@ static inline u64 ib_dma_map_page(struct ib_device *dev,
+                                 size_t size,
+                                        enum dma_data_direction direction)
+ {
++#ifndef HAVE_DEVICE_DMA_OPS
++      if (dev->dma_ops)
++              return dev->dma_ops->map_page(dev, page, offset, size, direction);
++#endif
+       return dma_map_page(dev->dma_device, page, offset, size, direction);
+ }
+@@ -3471,6 +3560,11 @@ static inline void ib_dma_unmap_page(struct ib_device *dev,
+                                    u64 addr, size_t size,
+                                    enum dma_data_direction direction)
+ {
++#ifndef HAVE_DEVICE_DMA_OPS
++      if (dev->dma_ops)
++              dev->dma_ops->unmap_page(dev, addr, size, direction);
++      else
++#endif
+       dma_unmap_page(dev->dma_device, addr, size, direction);
+ }
+@@ -3485,6 +3579,10 @@ static inline int ib_dma_map_sg(struct ib_device *dev,
+                               struct scatterlist *sg, int nents,
+                               enum dma_data_direction direction)
+ {
++#ifndef HAVE_DEVICE_DMA_OPS
++      if (dev->dma_ops)
++              return dev->dma_ops->map_sg(dev, sg, nents, direction);
++#endif
+       return dma_map_sg(dev->dma_device, sg, nents, direction);
+ }
+@@ -3499,14 +3597,28 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev,
+                                  struct scatterlist *sg, int nents,
+                                  enum dma_data_direction direction)
+ {
++#ifndef HAVE_DEVICE_DMA_OPS
++      if (dev->dma_ops)
++              dev->dma_ops->unmap_sg(dev, sg, nents, direction);
++      else
++#endif
+       dma_unmap_sg(dev->dma_device, sg, nents, direction);
+ }
+ static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
+                                     struct scatterlist *sg, int nents,
+                                     enum dma_data_direction direction,
++#ifdef HAVE_STRUCT_DMA_ATTRS
++                                    struct dma_attrs *dma_attrs)
++#else
+                                     unsigned long dma_attrs)
++#endif
+ {
++#ifndef HAVE_DEVICE_DMA_OPS
++      if (dev->dma_ops)
++              return dev->dma_ops->map_sg_attrs(dev, sg, nents, direction,
++                                                dma_attrs);
++#endif
+       return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
+                               dma_attrs);
+ }
+@@ -3514,8 +3626,18 @@ static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
+ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
+                                        struct scatterlist *sg, int nents,
+                                        enum dma_data_direction direction,
++#ifdef HAVE_STRUCT_DMA_ATTRS
++                                       struct dma_attrs *dma_attrs)
++#else
+                                        unsigned long dma_attrs)
++#endif
+ {
++#ifndef HAVE_DEVICE_DMA_OPS
++      if (dev->dma_ops)
++              dev->dma_ops->unmap_sg_attrs(dev, sg, nents, direction,
++                                           dma_attrs);
++      else
++#endif
+       dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
+ }
+ /**
+@@ -3558,6 +3680,11 @@ static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
+                                             size_t size,
+                                             enum dma_data_direction dir)
+ {
++#ifndef HAVE_DEVICE_DMA_OPS
++      if (dev->dma_ops)
++              dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
++      else
++#endif
+       dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
+ }
+@@ -3573,6 +3700,11 @@ static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
+                                                size_t size,
+                                                enum dma_data_direction dir)
+ {
++#ifndef HAVE_DEVICE_DMA_OPS
++      if (dev->dma_ops)
++              dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
++      else
++#endif
+       dma_sync_single_for_device(dev->dma_device, addr, size, dir);
+ }
+@@ -3588,6 +3720,16 @@ static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
+                                          dma_addr_t *dma_handle,
+                                          gfp_t flag)
+ {
++#ifndef HAVE_DEVICE_DMA_OPS
++      if (dev->dma_ops) {
++              u64 handle;
++              void *ret;
++
++              ret = dev->dma_ops->alloc_coherent(dev, size, &handle, flag);
++              *dma_handle = handle;
++              return ret;
++      }
++#endif
+       return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
+ }
+@@ -3602,6 +3744,11 @@ static inline void ib_dma_free_coherent(struct ib_device *dev,
+                                       size_t size, void *cpu_addr,
+                                       dma_addr_t dma_handle)
+ {
++#ifndef HAVE_DEVICE_DMA_OPS
++      if (dev->dma_ops)
++              dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
++      else
++#endif
+       dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
+ }
+diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/include/rdma/rdma_netlink.h
++++ b/include/rdma/rdma_netlink.h
+@@ -7,8 +7,12 @@
+ #include <uapi/rdma/rdma_netlink.h>
+ struct rdma_nl_cbs {
++#ifdef HAVE_NETLINK_EXT_ACK
+       int (*doit)(struct sk_buff *skb, struct nlmsghdr *nlh,
+                   struct netlink_ext_ack *extack);
++#else
++      int (*doit)(struct sk_buff *skb, struct nlmsghdr *nlh);
++#endif
+       int (*dump)(struct sk_buff *skb, struct netlink_callback *nlcb);
+       u8 flags;
+ };
+diff --git a/include/rdma/restrack.h b/include/rdma/restrack.h
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/include/rdma/restrack.h
++++ b/include/rdma/restrack.h
+@@ -11,7 +11,9 @@
+ #include <linux/sched.h>
+ #include <linux/kref.h>
+ #include <linux/completion.h>
++#ifdef HAVE_LINUX_SCHED_TASK_H
+ #include <linux/sched/task.h>
++#endif
+ /**
+  * enum rdma_restrack_type - HW objects to track
diff --git a/patches/0003-BACKPORT-ipoib.patch b/patches/0003-BACKPORT-ipoib.patch
new file mode 100644 (file)
index 0000000..0c9a3dc
--- /dev/null
@@ -0,0 +1,241 @@
+From: Vladimir Sokolovsky <vlad@mellanox.com>
+Subject: [PATCH] BACKPORT: ipoib
+
+Signed-off-by: Vladimir Sokolovsky <vlad@mellanox.com>
+---
+ drivers/infiniband/ulp/ipoib/ipoib_main.c    | 50 +++++++++++++++++++++++++++-
+ drivers/infiniband/ulp/ipoib/ipoib_netlink.c | 31 ++++++++++++++++-
+ 2 files changed, 79 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+@@ -258,14 +258,21 @@ static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
+                               "link layer MTU - 4 (%u)\n", priv->mcast_mtu);
+       new_mtu = min(priv->mcast_mtu, priv->admin_mtu);
+-
++#ifdef HAVE_NDO_CHANGE_MTU_EXTENDED
++      if (priv->rn_ops->extended.ndo_change_mtu) {
++#else
+       if (priv->rn_ops->ndo_change_mtu) {
++#endif
+               bool carrier_status = netif_carrier_ok(dev);
+               netif_carrier_off(dev);
+               /* notify lower level on the real mtu */
++#ifdef HAVE_NDO_CHANGE_MTU_EXTENDED
++              ret = priv->rn_ops->extended.ndo_change_mtu(dev, new_mtu);
++#else
+               ret = priv->rn_ops->ndo_change_mtu(dev, new_mtu);
++#endif
+               if (carrier_status)
+                       netif_carrier_on(dev);
+@@ -303,9 +310,21 @@ static bool ipoib_is_dev_match_addr_rcu(const struct sockaddr *addr,
+               if (!in_dev)
+                       return false;
++#ifdef HAVE_INET_CONFIRM_ADDR_EXPORTED
++#ifdef HAVE_INET_CONFIRM_ADDR_5_PARAMS
+               ret_addr = inet_confirm_addr(net, in_dev, 0,
+                                            addr_in->sin_addr.s_addr,
+                                            RT_SCOPE_HOST);
++#else
++              ret_addr = inet_confirm_addr(in_dev, 0,
++                                           addr_in->sin_addr.s_addr,
++                                           RT_SCOPE_HOST);
++#endif
++#else
++              ret_addr = confirm_addr_indev(in_dev, 0,
++                                            addr_in->sin_addr.s_addr,
++                                            RT_SCOPE_HOST);
++#endif
+               in_dev_put(in_dev);
+               if (ret_addr)
+                       return true;
+@@ -1868,6 +1887,7 @@ static int ipoib_get_vf_config(struct net_device *dev, int vf,
+       return 0;
+ }
++#ifdef HAVE_NDO_SET_VF_GUID
+ static int ipoib_set_vf_guid(struct net_device *dev, int vf, u64 guid, int type)
+ {
+       struct ipoib_dev_priv *priv = ipoib_priv(dev);
+@@ -1877,6 +1897,7 @@ static int ipoib_set_vf_guid(struct net_device *dev, int vf, u64 guid, int type)
+       return ib_set_vf_guid(priv->ca, vf, priv->port, guid, type);
+ }
++#endif
+ static int ipoib_get_vf_stats(struct net_device *dev, int vf,
+                             struct ifla_vf_stats *vf_stats)
+@@ -1894,7 +1915,11 @@ static const struct net_device_ops ipoib_netdev_ops_pf = {
+       .ndo_uninit              = ipoib_uninit,
+       .ndo_open                = ipoib_open,
+       .ndo_stop                = ipoib_stop,
++#ifdef HAVE_NDO_CHANGE_MTU_EXTENDED
++      .extended.ndo_change_mtu = ipoib_change_mtu,
++#else
+       .ndo_change_mtu          = ipoib_change_mtu,
++#endif
+       .ndo_fix_features        = ipoib_fix_features,
+       .ndo_start_xmit          = ipoib_start_xmit,
+       .ndo_tx_timeout          = ipoib_timeout,
+@@ -1903,7 +1928,9 @@ static const struct net_device_ops ipoib_netdev_ops_pf = {
+       .ndo_set_vf_link_state   = ipoib_set_vf_link_state,
+       .ndo_get_vf_config       = ipoib_get_vf_config,
+       .ndo_get_vf_stats        = ipoib_get_vf_stats,
++#ifdef HAVE_NDO_SET_VF_GUID
+       .ndo_set_vf_guid         = ipoib_set_vf_guid,
++#endif
+       .ndo_set_mac_address     = ipoib_set_mac,
+       .ndo_get_stats64         = ipoib_get_stats,
+       .ndo_do_ioctl            = ipoib_ioctl,
+@@ -1913,7 +1940,11 @@ static const struct net_device_ops ipoib_netdev_ops_vf = {
+       .ndo_uninit              = ipoib_uninit,
+       .ndo_open                = ipoib_open,
+       .ndo_stop                = ipoib_stop,
++#ifdef HAVE_NDO_CHANGE_MTU_EXTENDED
++      .extended.ndo_change_mtu = ipoib_change_mtu,
++#else
+       .ndo_change_mtu          = ipoib_change_mtu,
++#endif
+       .ndo_fix_features        = ipoib_fix_features,
+       .ndo_start_xmit          = ipoib_start_xmit,
+       .ndo_tx_timeout          = ipoib_timeout,
+@@ -1985,9 +2016,16 @@ static struct net_device
+       struct net_device *dev;
+       struct rdma_netdev *rn;
++#ifdef HAVE_ALLOC_NETDEV_4P
+       dev = alloc_netdev((int)sizeof(struct rdma_netdev),
+                          name,
+                          name_assign_type, setup);
++#else
++      dev = alloc_netdev((int)sizeof(struct rdma_netdev),
++                         name,
++                         setup);
++#endif
++
+       if (!dev)
+               return NULL;
+@@ -2012,14 +2050,22 @@ static struct net_device *ipoib_get_netdev(struct ib_device *hca, u8 port,
+       if (hca->alloc_rdma_netdev) {
+               dev = hca->alloc_rdma_netdev(hca, port,
+                                            RDMA_NETDEV_IPOIB, name,
++#ifdef NET_NAME_UNKNOWN
+                                            NET_NAME_UNKNOWN,
++#else
++                                           0,         /* NET_NAME_UNKNOWN */
++#endif
+                                            ipoib_setup_common);
+               if (IS_ERR_OR_NULL(dev) && PTR_ERR(dev) != -EOPNOTSUPP)
+                       return NULL;
+       }
+       if (!hca->alloc_rdma_netdev || PTR_ERR(dev) == -EOPNOTSUPP)
++#ifdef NET_NAME_UNKNOWN
+               dev = ipoib_create_netdev_default(hca, name, NET_NAME_UNKNOWN,
++#else
++              dev = ipoib_create_netdev_default(hca, name, 0, /* NET_NAME_UNKNOWN, */
++#endif
+                                                 ipoib_setup_common);
+       return dev;
+@@ -2262,7 +2308,9 @@ static struct net_device *ipoib_add_port(const char *format,
+       /* MTU will be reset when mcast join happens */
+       priv->dev->mtu  = IPOIB_UD_MTU(priv->max_ib_mtu);
+       priv->mcast_mtu  = priv->admin_mtu = priv->dev->mtu;
++#ifdef HAVE_NET_DEVICE_MIN_MAX_MTU
+       priv->dev->max_mtu = IPOIB_CM_MTU;
++#endif
+       priv->dev->neigh_priv_len = sizeof(struct ipoib_neigh);
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+@@ -64,9 +64,14 @@ nla_put_failure:
+       return -EMSGSIZE;
+ }
++#if defined(HAVE_RTNL_LINK_OPS_NEWLINK_5_PARAMS)
+ static int ipoib_changelink(struct net_device *dev, struct nlattr *tb[],
+                           struct nlattr *data[],
+                           struct netlink_ext_ack *extack)
++#else
++static int ipoib_changelink(struct net_device *dev, struct nlattr *tb[],
++                          struct nlattr *data[])
++#endif
+ {
+       u16 mode, umcast;
+       int ret = 0;
+@@ -93,9 +98,17 @@ out_err:
+       return ret;
+ }
++#if defined(HAVE_RTNL_LINK_OPS_NEWLINK_5_PARAMS)
+ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
+                               struct nlattr *tb[], struct nlattr *data[],
+                               struct netlink_ext_ack *extack)
++#elif defined(HAVE_RTNL_LINK_OPS_NEWLINK_4_PARAMS)
++static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
++                              struct nlattr *tb[], struct nlattr *data[])
++#else
++static int ipoib_new_child_link(struct net_device *dev,
++                              struct nlattr *tb[], struct nlattr *data[])
++#endif
+ {
+       struct net_device *pdev;
+       struct ipoib_dev_priv *ppriv;
+@@ -104,8 +117,12 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
+       if (!tb[IFLA_LINK])
+               return -EINVAL;
+-
+#if defined(HAVE_RTNL_LINK_OPS_NEWLINK_5_PARAMS) || defined(HAVE_RTNL_LINK_OPS_NEWLINK_4_PARAMS)
+       pdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
++#else
++      pdev = __dev_get_by_index(dev_net(dev), nla_get_u32(tb[IFLA_LINK]));
++#endif
++
+       if (!pdev || pdev->type != ARPHRD_INFINIBAND)
+               return -ENODEV;
+@@ -135,11 +152,19 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
+                              child_pkey, IPOIB_RTNL_CHILD);
+       if (!err && data)
++#if defined(HAVE_RTNL_LINK_OPS_NEWLINK_5_PARAMS)
+               err = ipoib_changelink(dev, tb, data, extack);
++#else
++              err = ipoib_changelink(dev, tb, data);
++#endif
+       return err;
+ }
++#ifdef HAVE_RTNL_LINK_OPS_DELLINK_2_PARAMS
+ static void ipoib_unregister_child_dev(struct net_device *dev, struct list_head *head)
++#else
++static void ipoib_unregister_child_dev(struct net_device *dev)
++#endif
+ {
+       struct ipoib_dev_priv *priv, *ppriv;
+@@ -147,7 +172,11 @@ static void ipoib_unregister_child_dev(struct net_device *dev, struct list_head
+       ppriv = ipoib_priv(priv->parent);
+       down_write(&ppriv->vlan_rwsem);
++#ifdef HAVE_RTNL_LINK_OPS_DELLINK_2_PARAMS
+       unregister_netdevice_queue(dev, head);
++#else
++      unregister_netdevice(dev);
++#endif
+       list_del(&priv->list);
+       up_write(&ppriv->vlan_rwsem);
+ }