--- /dev/null
+From: Vladimir Sokolovsky <vlad@mellanox.com>
+Subject: [PATCH] BACKPORT: ib_core
+
+Signed-off-by: Vladimir Sokolovsky <vlad@mellanox.com>
+---
+ drivers/infiniband/core/cma.c | 23 +++++++++++++++++
+ drivers/infiniband/core/core_priv.h | 17 ++++++++++++
+ drivers/infiniband/core/cq.c | 6 +++++
+ drivers/infiniband/core/mad.c | 3 +++
+ drivers/infiniband/core/netlink.c | 3 +++
+ drivers/infiniband/core/roce_gid_mgmt.c | 46 +++++++++++++++++++++++++++++++++
+ drivers/infiniband/core/sa_query.c | 19 ++++++++++++++
+ drivers/infiniband/core/umem.c | 19 ++++++++++++++
+ drivers/infiniband/core/user_mad.c | 3 +++
+ include/rdma/ib_addr.h | 23 +++++++++++++++++
+ include/rdma/ib_verbs.h | 18 +++++++++++++
+ 11 files changed, 180 insertions(+)
+
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -1252,7 +1252,11 @@ static bool validate_ipv4_net_dev(struct net_device *net_dev,
+ fl4.saddr = saddr;
+
+ rcu_read_lock();
++#ifdef HAVE_FIB_LOOKUP_4_PARAMS
+ err = fib_lookup(dev_net(net_dev), &fl4, &res, 0);
++#else
++ err = fib_lookup(dev_net(net_dev), &fl4, &res);
++#endif
+ ret = err == 0 && FIB_RES_DEV(res) == net_dev;
+ rcu_read_unlock();
+
+@@ -2416,6 +2420,7 @@ static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
+ return 0;
+ }
+
++#if defined(HAVE_VLAN_DEV_GET_EGRESS_QOS_MASK) && defined(HAVE_NETDEV_GET_PRIO_TC_MAP)
+ static int iboe_tos_to_sl(struct net_device *ndev, int tos)
+ {
+ int prio;
+@@ -2435,6 +2440,7 @@ static int iboe_tos_to_sl(struct net_device *ndev, int tos)
+ #endif
+ return 0;
+ }
++#endif
+
+ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
+ {
+@@ -2509,7 +2515,16 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
+ route->path_rec->reversible = 1;
+ route->path_rec->pkey = cpu_to_be16(0xffff);
+ route->path_rec->mtu_selector = IB_SA_EQ;
++#if defined(HAVE_VLAN_DEV_GET_EGRESS_QOS_MASK) && defined(HAVE_NETDEV_GET_PRIO_TC_MAP)
+ route->path_rec->sl = iboe_tos_to_sl(ndev, id_priv->tos);
++#elif defined(HAVE_NETDEV_GET_PRIO_TC_MAP)
++ route->path_rec->sl = netdev_get_prio_tc_map(
++ ndev->priv_flags & IFF_802_1Q_VLAN ?
++ vlan_dev_real_dev(ndev) : ndev,
++ rt_tos2priority(id_priv->tos));
++#else
++ route->path_rec->sl = id_priv->tos >> 5;
++#endif
+ route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
+ route->path_rec->rate_selector = IB_SA_EQ;
+ route->path_rec->rate = iboe_get_rate(ndev);
+@@ -2905,7 +2920,11 @@ static int cma_alloc_any_port(enum rdma_port_space ps,
+ unsigned int rover;
+ struct net *net = id_priv->id.route.addr.dev_addr.net;
+
++#ifdef HAVE_INET_GET_LOCAL_PORT_RANGE_3_PARAMS
+ inet_get_local_port_range(net, &low, &high);
++#else
++ inet_get_local_port_range(&low, &high);
++#endif
+ remaining = (high - low) + 1;
+ rover = prandom_u32() % remaining + low;
+ retry:
+@@ -3879,7 +3898,11 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
+ id_priv->id.port_num, &rec,
+ comp_mask, GFP_KERNEL,
+ cma_ib_mc_handler, mc);
++#ifdef HAVE_PTR_ERR_OR_ZERO
+ return PTR_ERR_OR_ZERO(mc->multicast.ib);
++#else
++ return PTR_RET(mc->multicast.ib);
++#endif
+ }
+
+ static void iboe_mcast_work_handler(struct work_struct *work)
+diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/infiniband/core/core_priv.h
++++ b/drivers/infiniband/core/core_priv.h
+@@ -37,6 +37,7 @@
+ #include <linux/spinlock.h>
+
+ #include <rdma/ib_verbs.h>
++#include <rdma/ib_addr.h>
+
+ #if IS_ENABLED(CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS)
+ int cma_configfs_init(void);
+@@ -127,6 +128,7 @@ void ib_cache_release_one(struct ib_device *device);
+ static inline bool rdma_is_upper_dev_rcu(struct net_device *dev,
+ struct net_device *upper)
+ {
++#ifdef HAVE_NETDEV_FOR_EACH_ALL_UPPER_DEV_RCU
+ struct net_device *_upper = NULL;
+ struct list_head *iter;
+
+@@ -135,6 +137,21 @@ static inline bool rdma_is_upper_dev_rcu(struct net_device *dev,
+ break;
+
+ return _upper == upper;
++#else
++ struct net_device *rdev_upper = rdma_vlan_dev_real_dev(upper);
++ struct net_device *master;
++ bool ret;
++
++ master = netdev_master_upper_dev_get_rcu(dev);
++ if (!upper || !dev)
++ ret = false;
++ else
++ ret = (upper == master) ||
++ (rdev_upper && (rdev_upper == master)) ||
++ (rdev_upper == dev);
++
++ return ret;
++#endif
+ }
+
+ int addr_init(void);
+diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/infiniband/core/cq.c
++++ b/drivers/infiniband/core/cq.c
+@@ -74,6 +74,7 @@ static void ib_cq_completion_direct(struct ib_cq *cq, void *private)
+ WARN_ONCE(1, "got unsolicited completion for CQ 0x%p\n", cq);
+ }
+
++#if defined(HAVE_IRQ_POLL_H) && IS_ENABLED(CONFIG_IRQ_POLL)
+ static int ib_poll_handler(struct irq_poll *iop, int budget)
+ {
+ struct ib_cq *cq = container_of(iop, struct ib_cq, iop);
+@@ -93,6 +94,7 @@ static void ib_cq_completion_softirq(struct ib_cq *cq, void *private)
+ {
+ irq_poll_sched(&cq->iop);
+ }
++#endif
+
+ static void ib_cq_poll_work(struct work_struct *work)
+ {
+@@ -152,12 +154,14 @@ struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
+ case IB_POLL_DIRECT:
+ cq->comp_handler = ib_cq_completion_direct;
+ break;
++#if defined(HAVE_IRQ_POLL_H) && IS_ENABLED(CONFIG_IRQ_POLL)
+ case IB_POLL_SOFTIRQ:
+ cq->comp_handler = ib_cq_completion_softirq;
+
+ irq_poll_init(&cq->iop, IB_POLL_BUDGET_IRQ, ib_poll_handler);
+ ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+ break;
++#endif
+ case IB_POLL_WORKQUEUE:
+ cq->comp_handler = ib_cq_completion_workqueue;
+ INIT_WORK(&cq->work, ib_cq_poll_work);
+@@ -192,9 +196,11 @@ void ib_free_cq(struct ib_cq *cq)
+ switch (cq->poll_ctx) {
+ case IB_POLL_DIRECT:
+ break;
++#if defined(HAVE_IRQ_POLL_H) && IS_ENABLED(CONFIG_IRQ_POLL)
+ case IB_POLL_SOFTIRQ:
+ irq_poll_disable(&cq->iop);
+ break;
++#endif
+ case IB_POLL_WORKQUEUE:
+ flush_work(&cq->work);
+ break;
+diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/infiniband/core/mad.c
++++ b/drivers/infiniband/core/mad.c
+@@ -35,6 +35,9 @@
+ *
+ */
+
++#ifdef pr_fmt
++#undef pr_fmt
++#endif
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+ #include <linux/dma-mapping.h>
+diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/infiniband/core/netlink.c
++++ b/drivers/infiniband/core/netlink.c
+@@ -30,6 +30,9 @@
+ * SOFTWARE.
+ */
+
++#ifdef pr_fmt
++#undef pr_fmt
++#endif
+ #define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__
+
+ #include <linux/export.h>
+diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/infiniband/core/roce_gid_mgmt.c
++++ b/drivers/infiniband/core/roce_gid_mgmt.c
+@@ -189,6 +189,33 @@ static int pass_all_filter(struct ib_device *ib_dev, u8 port,
+ return 1;
+ }
+
++#ifndef HAVE_NETDEV_CHANGEUPPER
++#define IS_NETDEV_BONDING_MASTER(ndev) \
++ (((ndev)->priv_flags & IFF_BONDING) && \
++ ((ndev)->flags & IFF_MASTER))
++
++static int bonding_slaves_filter(struct ib_device *ib_dev, u8 port,
++ struct net_device *idev, void *cookie)
++{
++ struct net_device *rdev;
++ struct net_device *ndev = (struct net_device *)cookie;
++ int res;
++
++ rdev = rdma_vlan_dev_real_dev(ndev);
++
++ ndev = rdev ? rdev : ndev;
++ if (!idev || !IS_NETDEV_BONDING_MASTER(ndev))
++ return 0;
++
++ rcu_read_lock();
++ res = rdma_is_upper_dev_rcu(idev, ndev);
++ rcu_read_unlock();
++
++ return res;
++}
++#endif
++
++#ifdef HAVE_NETDEV_CHANGEUPPER
+ static int upper_device_filter(struct ib_device *ib_dev, u8 port,
+ struct net_device *rdma_ndev, void *cookie)
+ {
+@@ -207,6 +234,7 @@ static int upper_device_filter(struct ib_device *ib_dev, u8 port,
+
+ return res;
+ }
++#endif
+
+ static void update_gid_ip(enum gid_op_type gid_op,
+ struct ib_device *ib_dev,
+@@ -437,6 +465,7 @@ static void callback_for_addr_gid_device_scan(struct ib_device *device,
+ &parsed->gid_attr);
+ }
+
++#ifdef HAVE_NETDEV_CHANGEUPPER
+ static void handle_netdev_upper(struct ib_device *ib_dev, u8 port,
+ void *cookie,
+ void (*handle_netdev)(struct ib_device *ib_dev,
+@@ -497,6 +526,7 @@ static void add_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
+ {
+ handle_netdev_upper(ib_dev, port, cookie, _add_netdev_ips);
+ }
++#endif
+
+ static void del_netdev_default_ips_join(struct ib_device *ib_dev, u8 port,
+ struct net_device *rdma_ndev,
+@@ -578,6 +608,7 @@ static int netdevice_queue_work(struct netdev_event_work_cmd *cmds,
+
+ static const struct netdev_event_work_cmd add_cmd = {
+ .cb = add_netdev_ips, .filter = is_eth_port_of_netdev};
++#ifdef HAVE_NETDEV_CHANGEUPPER
+ static const struct netdev_event_work_cmd add_cmd_upper_ips = {
+ .cb = add_netdev_upper_ips, .filter = is_eth_port_of_netdev};
+
+@@ -601,10 +632,15 @@ static void netdevice_event_changeupper(struct netdev_notifier_changeupper_info
+ cmds[1].filter_ndev = changeupper_info->upper_dev;
+ }
+ }
++#endif
+
+ static int netdevice_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
+ {
++#ifndef HAVE_NETDEV_CHANGEUPPER
++ static const struct netdev_event_work_cmd add_cmd = {
++ .cb = add_netdev_ips, .filter = is_eth_port_of_netdev};
++#endif
+ static const struct netdev_event_work_cmd del_cmd = {
+ .cb = del_netdev_ips, .filter = pass_all_filter};
+ static const struct netdev_event_work_cmd bonding_default_del_cmd_join = {
+@@ -612,7 +648,11 @@ static int netdevice_event(struct notifier_block *this, unsigned long event,
+ static const struct netdev_event_work_cmd default_del_cmd = {
+ .cb = del_netdev_default_ips, .filter = pass_all_filter};
+ static const struct netdev_event_work_cmd bonding_event_ips_del_cmd = {
++#ifdef HAVE_NETDEV_CHANGEUPPER
+ .cb = del_netdev_upper_ips, .filter = upper_device_filter};
++#else
++ .cb = del_netdev_ips, .filter = bonding_slaves_filter};
++#endif
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_event_work_cmd cmds[ROCE_NETDEV_CALLBACK_SZ] = { {NULL} };
+
+@@ -638,16 +678,22 @@ static int netdevice_event(struct notifier_block *this, unsigned long event,
+ cmds[1] = add_cmd;
+ break;
+
++#ifdef HAVE_NETDEV_CHANGEUPPER
+ case NETDEV_CHANGEUPPER:
+ netdevice_event_changeupper(
+ container_of(ptr, struct netdev_notifier_changeupper_info, info),
+ cmds);
+ break;
++#endif
+
+ case NETDEV_BONDING_FAILOVER:
+ cmds[0] = bonding_event_ips_del_cmd;
+ cmds[1] = bonding_default_del_cmd_join;
++#ifdef HAVE_NETDEV_CHANGEUPPER
+ cmds[2] = add_cmd_upper_ips;
++#else
++ cmds[2] = add_cmd;
++#endif
+ break;
+
+ default:
+diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/infiniband/core/sa_query.c
++++ b/drivers/infiniband/core/sa_query.c
+@@ -1238,10 +1238,17 @@ static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
+
+ static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
+ {
++#ifdef HAVE_IDR_ALLOC
++#ifdef __GFP_WAIT
++ bool preload = !!(gfp_mask & __GFP_WAIT);
++#else
+ bool preload = gfpflags_allow_blocking(gfp_mask);
++#endif
++#endif
+ unsigned long flags;
+ int ret, id;
+
++#ifdef HAVE_IDR_ALLOC
+ if (preload)
+ idr_preload(gfp_mask);
+ spin_lock_irqsave(&idr_lock, flags);
+@@ -1253,6 +1260,18 @@ static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
+ idr_preload_end();
+ if (id < 0)
+ return id;
++#else
++retry:
++ if (!idr_pre_get(&query_idr, gfp_mask))
++ return -ENOMEM;
++ spin_lock_irqsave(&idr_lock, flags);
++ ret = idr_get_new(&query_idr, query, &id);
++ spin_unlock_irqrestore(&idr_lock, flags);
++ if (ret == -EAGAIN)
++ goto retry;
++ if (ret)
++ return ret;
++#endif
+
+ query->mad_buf->timeout_ms = timeout_ms;
+ query->mad_buf->context[0] = query;
+diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/infiniband/core/umem.c
++++ b/drivers/infiniband/core/umem.c
+@@ -37,6 +37,9 @@
+ #include <linux/sched.h>
+ #include <linux/export.h>
+ #include <linux/hugetlb.h>
++#ifdef HAVE_STRUCT_DMA_ATTRS
++#include <linux/dma-attrs.h>
++#endif
+ #include <linux/slab.h>
+ #include <rdma/ib_umem_odp.h>
+
+@@ -91,12 +94,20 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
+ unsigned long npages;
+ int ret;
+ int i;
++#ifdef HAVE_STRUCT_DMA_ATTRS
++ DEFINE_DMA_ATTRS(dma_attrs);
++#else
+ unsigned long dma_attrs = 0;
++#endif
+ struct scatterlist *sg, *sg_list_start;
+ int need_release = 0;
+
+ if (dmasync)
++#ifdef HAVE_STRUCT_DMA_ATTRS
++ dma_set_attr(DMA_ATTR_WRITE_BARRIER, &dma_attrs);
++#else
+ dma_attrs |= DMA_ATTR_WRITE_BARRIER;
++#endif
+
+ if (!size)
+ return ERR_PTR(-EINVAL);
+@@ -187,7 +198,11 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
+ sg_list_start = umem->sg_head.sgl;
+
+ while (npages) {
++#ifdef HAVE_GET_USER_PAGES_6_PARAMS
+ ret = get_user_pages(cur_base,
++#else
++ ret = get_user_pages(current, current->mm, cur_base,
++#endif
+ min_t(unsigned long, npages,
+ PAGE_SIZE / sizeof (struct page *)),
+ 1, !umem->writable, page_list, vma_list);
+@@ -214,7 +229,11 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
+ umem->sg_head.sgl,
+ umem->npages,
+ DMA_BIDIRECTIONAL,
++#ifdef HAVE_STRUCT_DMA_ATTRS
++ &dma_attrs);
++#else
+ dma_attrs);
++#endif
+
+ if (umem->nmap <= 0) {
+ ret = -ENOMEM;
+diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/infiniband/core/user_mad.c
++++ b/drivers/infiniband/core/user_mad.c
+@@ -33,6 +33,9 @@
+ * SOFTWARE.
+ */
+
++#ifdef pr_fmt
++#undef pr_fmt
++#endif
+ #define pr_fmt(fmt) "user_mad: " fmt
+
+ #include <linux/module.h>
+diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/include/rdma/ib_addr.h
++++ b/include/rdma/ib_addr.h
+@@ -262,15 +262,25 @@ static inline enum ib_mtu iboe_get_mtu(int mtu)
+
+ static inline int iboe_get_rate(struct net_device *dev)
+ {
++#ifdef HAVE___ETHTOOL_GET_LINK_KSETTINGS
+ struct ethtool_link_ksettings cmd;
++#else
++ struct ethtool_cmd cmd;
++ u32 speed;
++#endif
+ int err;
+
+ rtnl_lock();
++#ifdef HAVE___ETHTOOL_GET_LINK_KSETTINGS
+ err = __ethtool_get_link_ksettings(dev, &cmd);
++#else
++ err = __ethtool_get_settings(dev, &cmd);
++#endif
+ rtnl_unlock();
+ if (err)
+ return IB_RATE_PORT_CURRENT;
+
++#ifdef HAVE___ETHTOOL_GET_LINK_KSETTINGS
+ if (cmd.base.speed >= 40000)
+ return IB_RATE_40_GBPS;
+ else if (cmd.base.speed >= 30000)
+@@ -281,6 +291,19 @@ static inline int iboe_get_rate(struct net_device *dev)
+ return IB_RATE_10_GBPS;
+ else
+ return IB_RATE_PORT_CURRENT;
++#else
++ speed = ethtool_cmd_speed(&cmd);
++ if (speed >= 40000)
++ return IB_RATE_40_GBPS;
++ else if (speed >= 30000)
++ return IB_RATE_30_GBPS;
++ else if (speed >= 20000)
++ return IB_RATE_20_GBPS;
++ else if (speed >= 10000)
++ return IB_RATE_10_GBPS;
++ else
++ return IB_RATE_PORT_CURRENT;
++#endif
+ }
+
+ static inline int rdma_link_local_addr(struct in6_addr *addr)
+diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/include/rdma/ib_verbs.h
++++ b/include/rdma/ib_verbs.h
+@@ -1410,7 +1410,9 @@ struct ib_cq {
+ enum ib_poll_context poll_ctx;
+ struct ib_wc *wc;
+ union {
++#if defined(HAVE_IRQ_POLL_H) && IS_ENABLED(CONFIG_IRQ_POLL)
+ struct irq_poll iop;
++#endif
+ struct work_struct work;
+ };
+ };
+@@ -2911,7 +2913,11 @@ static inline void ib_dma_unmap_single(struct ib_device *dev,
+ static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
+ void *cpu_addr, size_t size,
+ enum dma_data_direction direction,
++#ifdef HAVE_STRUCT_DMA_ATTRS
++ struct dma_attrs *dma_attrs)
++#else
+ unsigned long dma_attrs)
++#endif
+ {
+ return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
+ direction, dma_attrs);
+@@ -2920,7 +2926,11 @@ static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
+ static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
+ u64 addr, size_t size,
+ enum dma_data_direction direction,
++#ifdef HAVE_STRUCT_DMA_ATTRS
++ struct dma_attrs *dma_attrs)
++#else
+ unsigned long dma_attrs)
++#endif
+ {
+ return dma_unmap_single_attrs(dev->dma_device, addr, size,
+ direction, dma_attrs);
+@@ -2998,7 +3008,11 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev,
+ static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction direction,
++#ifdef HAVE_STRUCT_DMA_ATTRS
++ struct dma_attrs *dma_attrs)
++#else
+ unsigned long dma_attrs)
++#endif
+ {
+ return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
+ dma_attrs);
+@@ -3007,7 +3021,11 @@ static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
+ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction direction,
++#ifdef HAVE_STRUCT_DMA_ATTRS
++ struct dma_attrs *dma_attrs)
++#else
+ unsigned long dma_attrs)
++#endif
+ {
+ dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
+ }
+++ /dev/null
-From: Vladimir Sokolovsky <vlad@mellanox.com>
-Subject: [PATCH] BACKPORT: ib_core
-
-Signed-off-by: Vladimir Sokolovsky <vlad@mellanox.com>
----
- drivers/infiniband/core/cma.c | 23 +++++++++++++++++
- drivers/infiniband/core/core_priv.h | 17 ++++++++++++
- drivers/infiniband/core/cq.c | 6 +++++
- drivers/infiniband/core/mad.c | 3 +++
- drivers/infiniband/core/netlink.c | 3 +++
- drivers/infiniband/core/roce_gid_mgmt.c | 46 +++++++++++++++++++++++++++++++++
- drivers/infiniband/core/sa_query.c | 19 ++++++++++++++
- drivers/infiniband/core/umem.c | 19 ++++++++++++++
- drivers/infiniband/core/user_mad.c | 3 +++
- include/rdma/ib_addr.h | 23 +++++++++++++++++
- include/rdma/ib_verbs.h | 18 +++++++++++++
- 11 files changed, 180 insertions(+)
-
-diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
-index xxxxxxx..xxxxxxx xxxxxx
---- a/drivers/infiniband/core/cma.c
-+++ b/drivers/infiniband/core/cma.c
-@@ -1252,7 +1252,11 @@ static bool validate_ipv4_net_dev(struct net_device *net_dev,
- fl4.saddr = saddr;
-
- rcu_read_lock();
-+#ifdef HAVE_FIB_LOOKUP_4_PARAMS
- err = fib_lookup(dev_net(net_dev), &fl4, &res, 0);
-+#else
-+ err = fib_lookup(dev_net(net_dev), &fl4, &res);
-+#endif
- ret = err == 0 && FIB_RES_DEV(res) == net_dev;
- rcu_read_unlock();
-
-@@ -2416,6 +2420,7 @@ static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
- return 0;
- }
-
-+#if defined(HAVE_VLAN_DEV_GET_EGRESS_QOS_MASK) && defined(HAVE_NETDEV_GET_PRIO_TC_MAP)
- static int iboe_tos_to_sl(struct net_device *ndev, int tos)
- {
- int prio;
-@@ -2435,6 +2440,7 @@ static int iboe_tos_to_sl(struct net_device *ndev, int tos)
- #endif
- return 0;
- }
-+#endif
-
- static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
- {
-@@ -2509,7 +2515,16 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
- route->path_rec->reversible = 1;
- route->path_rec->pkey = cpu_to_be16(0xffff);
- route->path_rec->mtu_selector = IB_SA_EQ;
-+#if defined(HAVE_VLAN_DEV_GET_EGRESS_QOS_MASK) && defined(HAVE_NETDEV_GET_PRIO_TC_MAP)
- route->path_rec->sl = iboe_tos_to_sl(ndev, id_priv->tos);
-+#elif defined(HAVE_NETDEV_GET_PRIO_TC_MAP)
-+ route->path_rec->sl = netdev_get_prio_tc_map(
-+ ndev->priv_flags & IFF_802_1Q_VLAN ?
-+ vlan_dev_real_dev(ndev) : ndev,
-+ rt_tos2priority(id_priv->tos));
-+#else
-+ route->path_rec->sl = id_priv->tos >> 5;
-+#endif
- route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
- route->path_rec->rate_selector = IB_SA_EQ;
- route->path_rec->rate = iboe_get_rate(ndev);
-@@ -2905,7 +2920,11 @@ static int cma_alloc_any_port(enum rdma_port_space ps,
- unsigned int rover;
- struct net *net = id_priv->id.route.addr.dev_addr.net;
-
-+#ifdef HAVE_INET_GET_LOCAL_PORT_RANGE_3_PARAMS
- inet_get_local_port_range(net, &low, &high);
-+#else
-+ inet_get_local_port_range(&low, &high);
-+#endif
- remaining = (high - low) + 1;
- rover = prandom_u32() % remaining + low;
- retry:
-@@ -3879,7 +3898,11 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
- id_priv->id.port_num, &rec,
- comp_mask, GFP_KERNEL,
- cma_ib_mc_handler, mc);
-+#ifdef HAVE_PTR_ERR_OR_ZERO
- return PTR_ERR_OR_ZERO(mc->multicast.ib);
-+#else
-+ return PTR_RET(mc->multicast.ib);
-+#endif
- }
-
- static void iboe_mcast_work_handler(struct work_struct *work)
-diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
-index xxxxxxx..xxxxxxx xxxxxx
---- a/drivers/infiniband/core/core_priv.h
-+++ b/drivers/infiniband/core/core_priv.h
-@@ -37,6 +37,7 @@
- #include <linux/spinlock.h>
-
- #include <rdma/ib_verbs.h>
-+#include <rdma/ib_addr.h>
-
- #if IS_ENABLED(CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS)
- int cma_configfs_init(void);
-@@ -127,6 +128,7 @@ void ib_cache_release_one(struct ib_device *device);
- static inline bool rdma_is_upper_dev_rcu(struct net_device *dev,
- struct net_device *upper)
- {
-+#ifdef HAVE_NETDEV_FOR_EACH_ALL_UPPER_DEV_RCU
- struct net_device *_upper = NULL;
- struct list_head *iter;
-
-@@ -135,6 +137,21 @@ static inline bool rdma_is_upper_dev_rcu(struct net_device *dev,
- break;
-
- return _upper == upper;
-+#else
-+ struct net_device *rdev_upper = rdma_vlan_dev_real_dev(upper);
-+ struct net_device *master;
-+ bool ret;
-+
-+ master = netdev_master_upper_dev_get_rcu(dev);
-+ if (!upper || !dev)
-+ ret = false;
-+ else
-+ ret = (upper == master) ||
-+ (rdev_upper && (rdev_upper == master)) ||
-+ (rdev_upper == dev);
-+
-+ return ret;
-+#endif
- }
-
- int addr_init(void);
-diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
-index xxxxxxx..xxxxxxx xxxxxx
---- a/drivers/infiniband/core/cq.c
-+++ b/drivers/infiniband/core/cq.c
-@@ -74,6 +74,7 @@ static void ib_cq_completion_direct(struct ib_cq *cq, void *private)
- WARN_ONCE(1, "got unsolicited completion for CQ 0x%p\n", cq);
- }
-
-+#if defined(HAVE_IRQ_POLL_H) && IS_ENABLED(CONFIG_IRQ_POLL)
- static int ib_poll_handler(struct irq_poll *iop, int budget)
- {
- struct ib_cq *cq = container_of(iop, struct ib_cq, iop);
-@@ -93,6 +94,7 @@ static void ib_cq_completion_softirq(struct ib_cq *cq, void *private)
- {
- irq_poll_sched(&cq->iop);
- }
-+#endif
-
- static void ib_cq_poll_work(struct work_struct *work)
- {
-@@ -152,12 +154,14 @@ struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
- case IB_POLL_DIRECT:
- cq->comp_handler = ib_cq_completion_direct;
- break;
-+#if defined(HAVE_IRQ_POLL_H) && IS_ENABLED(CONFIG_IRQ_POLL)
- case IB_POLL_SOFTIRQ:
- cq->comp_handler = ib_cq_completion_softirq;
-
- irq_poll_init(&cq->iop, IB_POLL_BUDGET_IRQ, ib_poll_handler);
- ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
- break;
-+#endif
- case IB_POLL_WORKQUEUE:
- cq->comp_handler = ib_cq_completion_workqueue;
- INIT_WORK(&cq->work, ib_cq_poll_work);
-@@ -192,9 +196,11 @@ void ib_free_cq(struct ib_cq *cq)
- switch (cq->poll_ctx) {
- case IB_POLL_DIRECT:
- break;
-+#if defined(HAVE_IRQ_POLL_H) && IS_ENABLED(CONFIG_IRQ_POLL)
- case IB_POLL_SOFTIRQ:
- irq_poll_disable(&cq->iop);
- break;
-+#endif
- case IB_POLL_WORKQUEUE:
- flush_work(&cq->work);
- break;
-diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
-index xxxxxxx..xxxxxxx xxxxxx
---- a/drivers/infiniband/core/mad.c
-+++ b/drivers/infiniband/core/mad.c
-@@ -35,6 +35,9 @@
- *
- */
-
-+#ifdef pr_fmt
-+#undef pr_fmt
-+#endif
- #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
- #include <linux/dma-mapping.h>
-diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
-index xxxxxxx..xxxxxxx xxxxxx
---- a/drivers/infiniband/core/netlink.c
-+++ b/drivers/infiniband/core/netlink.c
-@@ -30,6 +30,9 @@
- * SOFTWARE.
- */
-
-+#ifdef pr_fmt
-+#undef pr_fmt
-+#endif
- #define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__
-
- #include <linux/export.h>
-diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
-index xxxxxxx..xxxxxxx xxxxxx
---- a/drivers/infiniband/core/roce_gid_mgmt.c
-+++ b/drivers/infiniband/core/roce_gid_mgmt.c
-@@ -189,6 +189,33 @@ static int pass_all_filter(struct ib_device *ib_dev, u8 port,
- return 1;
- }
-
-+#ifndef HAVE_NETDEV_CHANGEUPPER
-+#define IS_NETDEV_BONDING_MASTER(ndev) \
-+ (((ndev)->priv_flags & IFF_BONDING) && \
-+ ((ndev)->flags & IFF_MASTER))
-+
-+static int bonding_slaves_filter(struct ib_device *ib_dev, u8 port,
-+ struct net_device *idev, void *cookie)
-+{
-+ struct net_device *rdev;
-+ struct net_device *ndev = (struct net_device *)cookie;
-+ int res;
-+
-+ rdev = rdma_vlan_dev_real_dev(ndev);
-+
-+ ndev = rdev ? rdev : ndev;
-+ if (!idev || !IS_NETDEV_BONDING_MASTER(ndev))
-+ return 0;
-+
-+ rcu_read_lock();
-+ res = rdma_is_upper_dev_rcu(idev, ndev);
-+ rcu_read_unlock();
-+
-+ return res;
-+}
-+#endif
-+
-+#ifdef HAVE_NETDEV_CHANGEUPPER
- static int upper_device_filter(struct ib_device *ib_dev, u8 port,
- struct net_device *rdma_ndev, void *cookie)
- {
-@@ -207,6 +234,7 @@ static int upper_device_filter(struct ib_device *ib_dev, u8 port,
-
- return res;
- }
-+#endif
-
- static void update_gid_ip(enum gid_op_type gid_op,
- struct ib_device *ib_dev,
-@@ -437,6 +465,7 @@ static void callback_for_addr_gid_device_scan(struct ib_device *device,
- &parsed->gid_attr);
- }
-
-+#ifdef HAVE_NETDEV_CHANGEUPPER
- static void handle_netdev_upper(struct ib_device *ib_dev, u8 port,
- void *cookie,
- void (*handle_netdev)(struct ib_device *ib_dev,
-@@ -497,6 +526,7 @@ static void add_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
- {
- handle_netdev_upper(ib_dev, port, cookie, _add_netdev_ips);
- }
-+#endif
-
- static void del_netdev_default_ips_join(struct ib_device *ib_dev, u8 port,
- struct net_device *rdma_ndev,
-@@ -578,6 +608,7 @@ static int netdevice_queue_work(struct netdev_event_work_cmd *cmds,
-
- static const struct netdev_event_work_cmd add_cmd = {
- .cb = add_netdev_ips, .filter = is_eth_port_of_netdev};
-+#ifdef HAVE_NETDEV_CHANGEUPPER
- static const struct netdev_event_work_cmd add_cmd_upper_ips = {
- .cb = add_netdev_upper_ips, .filter = is_eth_port_of_netdev};
-
-@@ -601,10 +632,15 @@ static void netdevice_event_changeupper(struct netdev_notifier_changeupper_info
- cmds[1].filter_ndev = changeupper_info->upper_dev;
- }
- }
-+#endif
-
- static int netdevice_event(struct notifier_block *this, unsigned long event,
- void *ptr)
- {
-+#ifndef HAVE_NETDEV_CHANGEUPPER
-+ static const struct netdev_event_work_cmd add_cmd = {
-+ .cb = add_netdev_ips, .filter = is_eth_port_of_netdev};
-+#endif
- static const struct netdev_event_work_cmd del_cmd = {
- .cb = del_netdev_ips, .filter = pass_all_filter};
- static const struct netdev_event_work_cmd bonding_default_del_cmd_join = {
-@@ -612,7 +648,11 @@ static int netdevice_event(struct notifier_block *this, unsigned long event,
- static const struct netdev_event_work_cmd default_del_cmd = {
- .cb = del_netdev_default_ips, .filter = pass_all_filter};
- static const struct netdev_event_work_cmd bonding_event_ips_del_cmd = {
-+#ifdef HAVE_NETDEV_CHANGEUPPER
- .cb = del_netdev_upper_ips, .filter = upper_device_filter};
-+#else
-+ .cb = del_netdev_ips, .filter = bonding_slaves_filter};
-+#endif
- struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
- struct netdev_event_work_cmd cmds[ROCE_NETDEV_CALLBACK_SZ] = { {NULL} };
-
-@@ -638,16 +678,22 @@ static int netdevice_event(struct notifier_block *this, unsigned long event,
- cmds[1] = add_cmd;
- break;
-
-+#ifdef HAVE_NETDEV_CHANGEUPPER
- case NETDEV_CHANGEUPPER:
- netdevice_event_changeupper(
- container_of(ptr, struct netdev_notifier_changeupper_info, info),
- cmds);
- break;
-+#endif
-
- case NETDEV_BONDING_FAILOVER:
- cmds[0] = bonding_event_ips_del_cmd;
- cmds[1] = bonding_default_del_cmd_join;
-+#ifdef HAVE_NETDEV_CHANGEUPPER
- cmds[2] = add_cmd_upper_ips;
-+#else
-+ cmds[2] = add_cmd;
-+#endif
- break;
-
- default:
-diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
-index xxxxxxx..xxxxxxx xxxxxx
---- a/drivers/infiniband/core/sa_query.c
-+++ b/drivers/infiniband/core/sa_query.c
-@@ -1238,10 +1238,17 @@ static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
-
- static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
- {
-+#ifdef HAVE_IDR_ALLOC
-+#ifdef __GFP_WAIT
-+ bool preload = !!(gfp_mask & __GFP_WAIT);
-+#else
- bool preload = gfpflags_allow_blocking(gfp_mask);
-+#endif
-+#endif
- unsigned long flags;
- int ret, id;
-
-+#ifdef HAVE_IDR_ALLOC
- if (preload)
- idr_preload(gfp_mask);
- spin_lock_irqsave(&idr_lock, flags);
-@@ -1253,6 +1260,18 @@ static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
- idr_preload_end();
- if (id < 0)
- return id;
-+#else
-+retry:
-+ if (!idr_pre_get(&query_idr, gfp_mask))
-+ return -ENOMEM;
-+ spin_lock_irqsave(&idr_lock, flags);
-+ ret = idr_get_new(&query_idr, query, &id);
-+ spin_unlock_irqrestore(&idr_lock, flags);
-+ if (ret == -EAGAIN)
-+ goto retry;
-+ if (ret)
-+ return ret;
-+#endif
-
- query->mad_buf->timeout_ms = timeout_ms;
- query->mad_buf->context[0] = query;
-diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
-index xxxxxxx..xxxxxxx xxxxxx
---- a/drivers/infiniband/core/umem.c
-+++ b/drivers/infiniband/core/umem.c
-@@ -37,6 +37,9 @@
- #include <linux/sched.h>
- #include <linux/export.h>
- #include <linux/hugetlb.h>
-+#ifdef HAVE_STRUCT_DMA_ATTRS
-+#include <linux/dma-attrs.h>
-+#endif
- #include <linux/slab.h>
- #include <rdma/ib_umem_odp.h>
-
-@@ -91,12 +94,20 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
- unsigned long npages;
- int ret;
- int i;
-+#ifdef HAVE_STRUCT_DMA_ATTRS
-+ DEFINE_DMA_ATTRS(dma_attrs);
-+#else
- unsigned long dma_attrs = 0;
-+#endif
- struct scatterlist *sg, *sg_list_start;
- int need_release = 0;
-
- if (dmasync)
-+#ifdef HAVE_STRUCT_DMA_ATTRS
-+ dma_set_attr(DMA_ATTR_WRITE_BARRIER, &dma_attrs);
-+#else
- dma_attrs |= DMA_ATTR_WRITE_BARRIER;
-+#endif
-
- if (!size)
- return ERR_PTR(-EINVAL);
-@@ -187,7 +198,11 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
- sg_list_start = umem->sg_head.sgl;
-
- while (npages) {
-+#ifdef HAVE_GET_USER_PAGES_6_PARAMS
- ret = get_user_pages(cur_base,
-+#else
-+ ret = get_user_pages(current, current->mm, cur_base,
-+#endif
- min_t(unsigned long, npages,
- PAGE_SIZE / sizeof (struct page *)),
- 1, !umem->writable, page_list, vma_list);
-@@ -214,7 +229,11 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
- umem->sg_head.sgl,
- umem->npages,
- DMA_BIDIRECTIONAL,
-+#ifdef HAVE_STRUCT_DMA_ATTRS
-+ &dma_attrs);
-+#else
- dma_attrs);
-+#endif
-
- if (umem->nmap <= 0) {
- ret = -ENOMEM;
-diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
-index xxxxxxx..xxxxxxx xxxxxx
---- a/drivers/infiniband/core/user_mad.c
-+++ b/drivers/infiniband/core/user_mad.c
-@@ -33,6 +33,9 @@
- * SOFTWARE.
- */
-
-+#ifdef pr_fmt
-+#undef pr_fmt
-+#endif
- #define pr_fmt(fmt) "user_mad: " fmt
-
- #include <linux/module.h>
-diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
-index xxxxxxx..xxxxxxx xxxxxx
---- a/include/rdma/ib_addr.h
-+++ b/include/rdma/ib_addr.h
-@@ -262,15 +262,25 @@ static inline enum ib_mtu iboe_get_mtu(int mtu)
-
- static inline int iboe_get_rate(struct net_device *dev)
- {
-+#ifdef HAVE___ETHTOOL_GET_LINK_KSETTINGS
- struct ethtool_link_ksettings cmd;
-+#else
-+ struct ethtool_cmd cmd;
-+ u32 speed;
-+#endif
- int err;
-
- rtnl_lock();
-+#ifdef HAVE___ETHTOOL_GET_LINK_KSETTINGS
- err = __ethtool_get_link_ksettings(dev, &cmd);
-+#else
-+ err = __ethtool_get_settings(dev, &cmd);
-+#endif
- rtnl_unlock();
- if (err)
- return IB_RATE_PORT_CURRENT;
-
-+#ifdef HAVE___ETHTOOL_GET_LINK_KSETTINGS
- if (cmd.base.speed >= 40000)
- return IB_RATE_40_GBPS;
- else if (cmd.base.speed >= 30000)
-@@ -281,6 +291,19 @@ static inline int iboe_get_rate(struct net_device *dev)
- return IB_RATE_10_GBPS;
- else
- return IB_RATE_PORT_CURRENT;
-+#else
-+ speed = ethtool_cmd_speed(&cmd);
-+ if (speed >= 40000)
-+ return IB_RATE_40_GBPS;
-+ else if (speed >= 30000)
-+ return IB_RATE_30_GBPS;
-+ else if (speed >= 20000)
-+ return IB_RATE_20_GBPS;
-+ else if (speed >= 10000)
-+ return IB_RATE_10_GBPS;
-+ else
-+ return IB_RATE_PORT_CURRENT;
-+#endif
- }
-
- static inline int rdma_link_local_addr(struct in6_addr *addr)
-diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
-index xxxxxxx..xxxxxxx xxxxxx
---- a/include/rdma/ib_verbs.h
-+++ b/include/rdma/ib_verbs.h
-@@ -1410,7 +1410,9 @@ struct ib_cq {
- enum ib_poll_context poll_ctx;
- struct ib_wc *wc;
- union {
-+#if defined(HAVE_IRQ_POLL_H) && IS_ENABLED(CONFIG_IRQ_POLL)
- struct irq_poll iop;
-+#endif
- struct work_struct work;
- };
- };
-@@ -2911,7 +2913,11 @@ static inline void ib_dma_unmap_single(struct ib_device *dev,
- static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
- void *cpu_addr, size_t size,
- enum dma_data_direction direction,
-+#ifdef HAVE_STRUCT_DMA_ATTRS
-+ struct dma_attrs *dma_attrs)
-+#else
- unsigned long dma_attrs)
-+#endif
- {
- return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
- direction, dma_attrs);
-@@ -2920,7 +2926,11 @@ static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
- static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
- u64 addr, size_t size,
- enum dma_data_direction direction,
-+#ifdef HAVE_STRUCT_DMA_ATTRS
-+ struct dma_attrs *dma_attrs)
-+#else
- unsigned long dma_attrs)
-+#endif
- {
- return dma_unmap_single_attrs(dev->dma_device, addr, size,
- direction, dma_attrs);
-@@ -2998,7 +3008,11 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev,
- static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction direction,
-+#ifdef HAVE_STRUCT_DMA_ATTRS
-+ struct dma_attrs *dma_attrs)
-+#else
- unsigned long dma_attrs)
-+#endif
- {
- return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
- dma_attrs);
-@@ -3007,7 +3021,11 @@ static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
- static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction direction,
-+#ifdef HAVE_STRUCT_DMA_ATTRS
-+ struct dma_attrs *dma_attrs)
-+#else
- unsigned long dma_attrs)
-+#endif
- {
- dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
- }
--- /dev/null
+From: Vladimir Sokolovsky <vlad@mellanox.com>
+Subject: [PATCH] BACKPORT: mlx4
+
+Signed-off-by: Vladimir Sokolovsky <vlad@mellanox.com>
+---
+ drivers/infiniband/hw/mlx4/main.c | 15 +
+ drivers/net/ethernet/mellanox/mlx4/en_cq.c | 5 +
+ drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c | 19 ++
+ drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 365 +++++++++++++++++++++++-
+ drivers/net/ethernet/mellanox/mlx4/en_main.c | 4 +
+ drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 169 +++++++++++
+ drivers/net/ethernet/mellanox/mlx4/en_rx.c | 43 +++
+ drivers/net/ethernet/mellanox/mlx4/en_tx.c | 46 +++
+ drivers/net/ethernet/mellanox/mlx4/intf.c | 4 +
+ drivers/net/ethernet/mellanox/mlx4/main.c | 43 +++
+ drivers/net/ethernet/mellanox/mlx4/mlx4.h | 4 +
+ drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 30 ++
+ drivers/net/ethernet/mellanox/mlx4/pd.c | 4 +
+ 13 files changed, 747 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/infiniband/hw/mlx4/main.c
++++ b/drivers/infiniband/hw/mlx4/main.c
+@@ -41,7 +41,9 @@
+ #include <linux/if_vlan.h>
+ #include <net/ipv6.h>
+ #include <net/addrconf.h>
++#ifdef HAVE_NET_DEVLINK_H
+ #include <net/devlink.h>
++#endif
+
+ #include <rdma/ib_smi.h>
+ #include <rdma/ib_user_verbs.h>
+@@ -57,8 +59,17 @@
+ #include "mlx4_ib.h"
+ #include "user.h"
+
++#ifdef DRV_NAME
++#undef DRV_NAME
++#endif
+ #define DRV_NAME MLX4_IB_DRV_NAME
++#ifdef DRV_VERSION
++#undef DRV_VERSION
++#endif
+ #define DRV_VERSION "2.2-1"
++#ifdef DRV_RELDATE
++#undef DRV_RELDATE
++#endif
+ #define DRV_RELDATE "Feb 2014"
+
+ #define MLX4_IB_FLOW_MAX_PRIO 0xFFF
+@@ -2781,9 +2792,11 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
+ }
+
+ ibdev->ib_active = true;
++#ifdef HAVE_NET_DEVLINK_H
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+ devlink_port_type_ib_set(mlx4_get_devlink_port(dev, i),
+ &ibdev->ib_dev);
++#endif
+
+ if (mlx4_is_mfunc(ibdev->dev))
+ init_pkeys(ibdev);
+@@ -2911,10 +2924,12 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
+ {
+ struct mlx4_ib_dev *ibdev = ibdev_ptr;
+ int p;
++#ifdef HAVE_NET_DEVLINK_H
+ int i;
+
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+ devlink_port_type_clear(mlx4_get_devlink_port(dev, i));
++#endif
+ ibdev->ib_active = false;
+ flush_workqueue(wq);
+
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+@@ -150,8 +150,13 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
+ cq->mcq.event = mlx4_en_cq_event;
+
+ if (cq->is_tx)
++#ifdef HAVE_NETIF_TX_NAPI_ADD
+ netif_tx_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
+ NAPI_POLL_WEIGHT);
++#else
++ netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
++ NAPI_POLL_WEIGHT);
++#endif
+ else
+ netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
+
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+@@ -138,7 +138,11 @@ static void mlx4_en_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
+ priv->cee_params.dcb_cfg.pfc_state = true;
+ }
+
++#ifdef NDO_GETNUMTCS_RETURNS_INT
+ static int mlx4_en_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
++#else
++static u8 mlx4_en_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
++#endif
+ {
+ struct mlx4_en_priv *priv = netdev_priv(netdev);
+
+@@ -245,7 +249,11 @@ static u8 mlx4_en_dcbnl_set_state(struct net_device *dev, u8 state)
+ * otherwise returns 0 as the invalid user priority bitmap to
+ * indicate an error.
+ */
++#ifdef NDO_GETAPP_RETURNS_INT
+ static int mlx4_en_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
++#else
++static u8 mlx4_en_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
++#endif
+ {
+ struct mlx4_en_priv *priv = netdev_priv(netdev);
+ struct dcb_app app = {
+@@ -258,8 +266,13 @@ static int mlx4_en_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
+ return dcb_getapp(netdev, &app);
+ }
+
++#ifdef NDO_SETAPP_RETURNS_INT
+ static int mlx4_en_dcbnl_setapp(struct net_device *netdev, u8 idtype,
+ u16 id, u8 up)
++#else
++static u8 mlx4_en_dcbnl_setapp(struct net_device *netdev, u8 idtype,
++ u16 id, u8 up)
++#endif
+ {
+ struct mlx4_en_priv *priv = netdev_priv(netdev);
+ struct dcb_app app;
+@@ -520,6 +533,7 @@ static int mlx4_en_dcbnl_ieee_setmaxrate(struct net_device *dev,
+ #define RPG_ENABLE_BIT 31
+ #define CN_TAG_BIT 30
+
++#ifdef HAVE_IEEE_GETQCN
+ static int mlx4_en_dcbnl_ieee_getqcn(struct net_device *dev,
+ struct ieee_qcn *qcn)
+ {
+@@ -688,15 +702,20 @@ static int mlx4_en_dcbnl_ieee_getqcnstats(struct net_device *dev,
+ mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
+ return 0;
+ }
++#endif
+
+ const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = {
+ .ieee_getets = mlx4_en_dcbnl_ieee_getets,
+ .ieee_setets = mlx4_en_dcbnl_ieee_setets,
++#ifdef HAVE_IEEE_GET_SET_MAXRATE
+ .ieee_getmaxrate = mlx4_en_dcbnl_ieee_getmaxrate,
+ .ieee_setmaxrate = mlx4_en_dcbnl_ieee_setmaxrate,
++#endif
++#ifdef HAVE_IEEE_GETQCN
+ .ieee_getqcn = mlx4_en_dcbnl_ieee_getqcn,
+ .ieee_setqcn = mlx4_en_dcbnl_ieee_setqcn,
+ .ieee_getqcnstats = mlx4_en_dcbnl_ieee_getqcnstats,
++#endif
+ .ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc,
+ .ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc,
+
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+@@ -30,7 +30,6 @@
+ * SOFTWARE.
+ *
+ */
+-
+ #include <linux/kernel.h>
+ #include <linux/ethtool.h>
+ #include <linux/netdevice.h>
+@@ -504,30 +503,54 @@ static u32 mlx4_en_autoneg_get(struct net_device *dev)
+ return autoneg;
+ }
+
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ static void ptys2ethtool_update_supported_port(unsigned long *mask,
+ struct mlx4_ptys_reg *ptys_reg)
++#else
++static u32 ptys_get_supported_port(struct mlx4_ptys_reg *ptys_reg)
++#endif
+ {
+ u32 eth_proto = be32_to_cpu(ptys_reg->eth_proto_cap);
+
+ if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_T)
+ | MLX4_PROT_MASK(MLX4_1000BASE_T)
+ | MLX4_PROT_MASK(MLX4_100BASE_TX))) {
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ __set_bit(ETHTOOL_LINK_MODE_TP_BIT, mask);
+ } else if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR)
++#else
++ return SUPPORTED_TP;
++ }
++
++ if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR)
++#endif
+ | MLX4_PROT_MASK(MLX4_10GBASE_SR)
+ | MLX4_PROT_MASK(MLX4_56GBASE_SR4)
+ | MLX4_PROT_MASK(MLX4_40GBASE_CR4)
+ | MLX4_PROT_MASK(MLX4_40GBASE_SR4)
+ | MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII))) {
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ __set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mask);
+ } else if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4)
++#else
++ return SUPPORTED_FIBRE;
++ }
++
++ if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4)
++#endif
+ | MLX4_PROT_MASK(MLX4_40GBASE_KR4)
+ | MLX4_PROT_MASK(MLX4_20GBASE_KR2)
+ | MLX4_PROT_MASK(MLX4_10GBASE_KR)
+ | MLX4_PROT_MASK(MLX4_10GBASE_KX4)
+ | MLX4_PROT_MASK(MLX4_1000BASE_KX))) {
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ __set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mask);
+ }
++#else
++ return SUPPORTED_Backplane;
++ }
++ return 0;
++#endif
+ }
+
+ static u32 ptys_get_active_port(struct mlx4_ptys_reg *ptys_reg)
+@@ -573,8 +596,12 @@ static u32 ptys_get_active_port(struct mlx4_ptys_reg *ptys_reg)
+ enum ethtool_report {
+ SUPPORTED = 0,
+ ADVERTISED = 1,
++#ifndef HAVE_ETHTOOL_xLINKSETTINGS
++ SPEED = 2
++#endif
+ };
+
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ struct ptys2ethtool_config {
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertised);
+@@ -651,7 +678,102 @@ void __init mlx4_en_init_ptys2ethtool_map(void)
+ MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_56GBASE_SR4, SPEED_56000,
+ ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT);
+ };
++#else
++static u32 ptys2ethtool_map[MLX4_LINK_MODES_SZ][3] = {
++ [MLX4_100BASE_TX] = {
++ SUPPORTED_100baseT_Full,
++ ADVERTISED_100baseT_Full,
++ SPEED_100
++ },
++
++ [MLX4_1000BASE_T] = {
++ SUPPORTED_1000baseT_Full,
++ ADVERTISED_1000baseT_Full,
++ SPEED_1000
++ },
++ [MLX4_1000BASE_CX_SGMII] = {
++ SUPPORTED_1000baseKX_Full,
++ ADVERTISED_1000baseKX_Full,
++ SPEED_1000
++ },
++ [MLX4_1000BASE_KX] = {
++ SUPPORTED_1000baseKX_Full,
++ ADVERTISED_1000baseKX_Full,
++ SPEED_1000
++ },
++
++ [MLX4_10GBASE_T] = {
++ SUPPORTED_10000baseT_Full,
++ ADVERTISED_10000baseT_Full,
++ SPEED_10000
++ },
++ [MLX4_10GBASE_CX4] = {
++ SUPPORTED_10000baseKX4_Full,
++ ADVERTISED_10000baseKX4_Full,
++ SPEED_10000
++ },
++ [MLX4_10GBASE_KX4] = {
++ SUPPORTED_10000baseKX4_Full,
++ ADVERTISED_10000baseKX4_Full,
++ SPEED_10000
++ },
++ [MLX4_10GBASE_KR] = {
++ SUPPORTED_10000baseKR_Full,
++ ADVERTISED_10000baseKR_Full,
++ SPEED_10000
++ },
++ [MLX4_10GBASE_CR] = {
++ SUPPORTED_10000baseKR_Full,
++ ADVERTISED_10000baseKR_Full,
++ SPEED_10000
++ },
++ [MLX4_10GBASE_SR] = {
++ SUPPORTED_10000baseKR_Full,
++ ADVERTISED_10000baseKR_Full,
++ SPEED_10000
++ },
++
++ [MLX4_20GBASE_KR2] = {
++ SUPPORTED_20000baseMLD2_Full | SUPPORTED_20000baseKR2_Full,
++ ADVERTISED_20000baseMLD2_Full | ADVERTISED_20000baseKR2_Full,
++ SPEED_20000
++ },
++
++ [MLX4_40GBASE_CR4] = {
++ SUPPORTED_40000baseCR4_Full,
++ ADVERTISED_40000baseCR4_Full,
++ SPEED_40000
++ },
++ [MLX4_40GBASE_KR4] = {
++ SUPPORTED_40000baseKR4_Full,
++ ADVERTISED_40000baseKR4_Full,
++ SPEED_40000
++ },
++ [MLX4_40GBASE_SR4] = {
++ SUPPORTED_40000baseSR4_Full,
++ ADVERTISED_40000baseSR4_Full,
++ SPEED_40000
++ },
++
++ [MLX4_56GBASE_KR4] = {
++ SUPPORTED_56000baseKR4_Full,
++ ADVERTISED_56000baseKR4_Full,
++ SPEED_56000
++ },
++ [MLX4_56GBASE_CR4] = {
++ SUPPORTED_56000baseCR4_Full,
++ ADVERTISED_56000baseCR4_Full,
++ SPEED_56000
++ },
++ [MLX4_56GBASE_SR4] = {
++ SUPPORTED_56000baseSR4_Full,
++ ADVERTISED_56000baseSR4_Full,
++ SPEED_56000
++ },
++};
++#endif
+
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ static void ptys2ethtool_update_link_modes(unsigned long *link_modes,
+ u32 eth_proto,
+ enum ethtool_report report)
+@@ -665,19 +787,40 @@ static void ptys2ethtool_update_link_modes(unsigned long *link_modes,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ }
+ }
++#else
++static u32 ptys2ethtool_link_modes(u32 eth_proto, enum ethtool_report report)
++{
++ int i;
++ u32 link_modes = 0;
++
++ for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
++ if (eth_proto & MLX4_PROT_MASK(i))
++ link_modes |= ptys2ethtool_map[i][report];
++ }
++ return link_modes;
++}
++#endif
+
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ static u32 ethtool2ptys_link_modes(const unsigned long *link_modes,
+ enum ethtool_report report)
++#else
++static u32 ethtool2ptys_link_modes(u32 link_modes, enum ethtool_report report)
++#endif
+ {
+ int i;
+ u32 ptys_modes = 0;
+
+ for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ if (bitmap_intersects(
+ ptys2ethtool_link_mode(&ptys2ethtool_map[i],
+ report),
+ link_modes,
+ __ETHTOOL_LINK_MODE_MASK_NBITS))
++#else
++ if (ptys2ethtool_map[i][report] & link_modes)
++#endif
+ ptys_modes |= 1 << i;
+ }
+ return ptys_modes;
+@@ -690,15 +833,24 @@ static u32 speed2ptys_link_modes(u32 speed)
+ u32 ptys_modes = 0;
+
+ for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ if (ptys2ethtool_map[i].speed == speed)
++#else
++ if (ptys2ethtool_map[i][SPEED] == speed)
++#endif
+ ptys_modes |= 1 << i;
+ }
+ return ptys_modes;
+ }
+
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ static int
+ ethtool_get_ptys_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *link_ksettings)
++#else
++static int ethtool_get_ptys_settings(struct net_device *dev,
++ struct ethtool_cmd *cmd)
++#endif
+ {
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_ptys_reg ptys_reg;
+@@ -726,6 +878,7 @@ ethtool_get_ptys_link_ksettings(struct net_device *dev,
+ en_dbg(DRV, priv, "ptys_reg.eth_proto_lp_adv %x\n",
+ be32_to_cpu(ptys_reg.eth_proto_lp_adv));
+
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ /* reset supported/advertising masks */
+ ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
+ ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
+@@ -780,10 +933,54 @@ ethtool_get_ptys_link_ksettings(struct net_device *dev,
+ link_ksettings->base.mdio_support = 0;
+ link_ksettings->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
+ link_ksettings->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
++#else
++ cmd->supported = 0;
++ cmd->advertising = 0;
++
++ cmd->supported |= ptys_get_supported_port(&ptys_reg);
++
++ eth_proto = be32_to_cpu(ptys_reg.eth_proto_cap);
++ cmd->supported |= ptys2ethtool_link_modes(eth_proto, SUPPORTED);
++
++ eth_proto = be32_to_cpu(ptys_reg.eth_proto_admin);
++ cmd->advertising |= ptys2ethtool_link_modes(eth_proto, ADVERTISED);
++
++ cmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
++ cmd->advertising |= (priv->prof->tx_pause) ? ADVERTISED_Pause : 0;
++
++ cmd->advertising |= (priv->prof->tx_pause ^ priv->prof->rx_pause) ?
++ ADVERTISED_Asym_Pause : 0;
++
++ cmd->port = ptys_get_active_port(&ptys_reg);
++ cmd->transceiver = (SUPPORTED_TP & cmd->supported) ?
++ XCVR_EXTERNAL : XCVR_INTERNAL;
++
++ if (mlx4_en_autoneg_get(dev)) {
++ cmd->supported |= SUPPORTED_Autoneg;
++ cmd->advertising |= ADVERTISED_Autoneg;
++ }
++
++ cmd->autoneg = (priv->port_state.flags & MLX4_EN_PORT_ANC) ?
++ AUTONEG_ENABLE : AUTONEG_DISABLE;
++
++ eth_proto = be32_to_cpu(ptys_reg.eth_proto_lp_adv);
++ cmd->lp_advertising = ptys2ethtool_link_modes(eth_proto, ADVERTISED);
++
++ cmd->lp_advertising |= (priv->port_state.flags & MLX4_EN_PORT_ANC) ?
++ ADVERTISED_Autoneg : 0;
++
++ cmd->phy_address = 0;
++ cmd->mdio_support = 0;
++ cmd->maxtxpkt = 0;
++ cmd->maxrxpkt = 0;
++ cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
++ cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
++#endif
+
+ return ret;
+ }
+
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ static void
+ ethtool_get_default_link_ksettings(
+ struct net_device *dev, struct ethtool_link_ksettings *link_ksettings)
+@@ -818,10 +1015,42 @@ ethtool_get_default_link_ksettings(
+ link_ksettings->base.port = -1;
+ }
+ }
++#else
++static void ethtool_get_default_settings(struct net_device *dev,
++ struct ethtool_cmd *cmd)
++{
++ struct mlx4_en_priv *priv = netdev_priv(dev);
++ int trans_type;
++
++ cmd->autoneg = AUTONEG_DISABLE;
++ cmd->supported = SUPPORTED_10000baseT_Full;
++ cmd->advertising = ADVERTISED_10000baseT_Full;
++ trans_type = priv->port_state.transceiver;
++
++ if (trans_type > 0 && trans_type <= 0xC) {
++ cmd->port = PORT_FIBRE;
++ cmd->transceiver = XCVR_EXTERNAL;
++ cmd->supported |= SUPPORTED_FIBRE;
++ cmd->advertising |= ADVERTISED_FIBRE;
++ } else if (trans_type == 0x80 || trans_type == 0) {
++ cmd->port = PORT_TP;
++ cmd->transceiver = XCVR_INTERNAL;
++ cmd->supported |= SUPPORTED_TP;
++ cmd->advertising |= ADVERTISED_TP;
++ } else {
++ cmd->port = -1;
++ cmd->transceiver = -1;
++ }
++}
++#endif
+
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ static int
+ mlx4_en_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *link_ksettings)
++#else
++static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
++#endif
+ {
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ int ret = -EINVAL;
+@@ -833,6 +1062,7 @@ mlx4_en_get_link_ksettings(struct net_device *dev,
+ priv->port_state.flags & MLX4_EN_PORT_ANC,
+ priv->port_state.flags & MLX4_EN_PORT_ANE);
+
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ if (priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL)
+ ret = ethtool_get_ptys_link_ksettings(dev, link_ksettings);
+ if (ret) /* ETH PROT CRTL is not supported or PTYS CMD failed */
+@@ -845,6 +1075,20 @@ mlx4_en_get_link_ksettings(struct net_device *dev,
+ link_ksettings->base.speed = SPEED_UNKNOWN;
+ link_ksettings->base.duplex = DUPLEX_UNKNOWN;
+ }
++#else
++ if (priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL)
++ ret = ethtool_get_ptys_settings(dev, cmd);
++	if (ret) /* ETH PROT CTRL is not supported or PTYS CMD failed */
++ ethtool_get_default_settings(dev, cmd);
++
++ if (netif_carrier_ok(dev)) {
++ ethtool_cmd_speed_set(cmd, priv->port_state.link_speed);
++ cmd->duplex = DUPLEX_FULL;
++ } else {
++ ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
++ cmd->duplex = DUPLEX_UNKNOWN;
++ }
++#endif
+ return 0;
+ }
+
+@@ -867,15 +1111,20 @@ static __be32 speed_set_ptys_admin(struct mlx4_en_priv *priv, u32 speed,
+ return proto_admin;
+ }
+
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ static int
+ mlx4_en_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *link_ksettings)
++#else
++static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
++#endif
+ {
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_ptys_reg ptys_reg;
+ __be32 proto_admin;
+ int ret;
+
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ u32 ptys_adv = ethtool2ptys_link_modes(
+ link_ksettings->link_modes.advertising, ADVERTISED);
+ const int speed = link_ksettings->base.speed;
+@@ -891,6 +1140,17 @@ mlx4_en_set_link_ksettings(struct net_device *dev,
+ MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL) ||
+ (link_ksettings->base.duplex == DUPLEX_HALF))
+ return -EINVAL;
++#else
++ u32 ptys_adv = ethtool2ptys_link_modes(cmd->advertising, ADVERTISED);
++ int speed = ethtool_cmd_speed(cmd);
++
++ en_dbg(DRV, priv, "Set Speed=%d adv=0x%x autoneg=%d duplex=%d\n",
++ speed, cmd->advertising, cmd->autoneg, cmd->duplex);
++
++ if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL) ||
++ (cmd->duplex == DUPLEX_HALF))
++ return -EINVAL;
++#endif
+
+ memset(&ptys_reg, 0, sizeof(ptys_reg));
+ ptys_reg.local_port = priv->port;
+@@ -903,7 +1163,11 @@ mlx4_en_set_link_ksettings(struct net_device *dev,
+ return 0;
+ }
+
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ proto_admin = link_ksettings->base.autoneg == AUTONEG_ENABLE ?
++#else
++ proto_admin = cmd->autoneg == AUTONEG_ENABLE ?
++#endif
+ cpu_to_be32(ptys_adv) :
+ speed_set_ptys_admin(priv, speed,
+ ptys_reg.eth_proto_cap);
+@@ -1042,8 +1306,10 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
+ {
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ struct mlx4_en_port_profile new_prof;
+ struct mlx4_en_priv *tmp;
++#endif
+ u32 rx_size, tx_size;
+ int port_up = 0;
+ int err = 0;
+@@ -1063,6 +1329,7 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
+ tx_size == priv->tx_ring[0]->size)
+ return 0;
+
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+@@ -1074,13 +1341,28 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
+ err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+ if (err)
+ goto out;
+-
++#else
++ mutex_lock(&mdev->state_lock);
++#endif
+ if (priv->port_up) {
+ port_up = 1;
+ mlx4_en_stop_port(dev, 1);
+ }
+
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ mlx4_en_safe_replace_resources(priv, tmp);
++#else
++ mlx4_en_free_resources(priv);
++
++ priv->prof->tx_ring_size = tx_size;
++ priv->prof->rx_ring_size = rx_size;
++
++ err = mlx4_en_alloc_resources(priv);
++ if (err) {
++ en_err(priv, "Failed reallocating port resources\n");
++ goto out;
++ }
++#endif
+
+ if (port_up) {
+ err = mlx4_en_start_port(dev);
+@@ -1090,7 +1372,9 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
+
+ err = mlx4_en_moderation_update(priv);
+ out:
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ kfree(tmp);
++#endif
+ mutex_unlock(&mdev->state_lock);
+ return err;
+ }
+@@ -1112,7 +1396,11 @@ static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
+ {
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ return rounddown_pow_of_two(priv->rx_ring_num);
++#else
++ return priv->rx_ring_num;
++#endif
+ }
+
+ static u32 mlx4_en_get_rxfh_key_size(struct net_device *netdev)
+@@ -1146,6 +1434,7 @@ static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
+ u8 *hfunc)
+ {
+ struct mlx4_en_priv *priv = netdev_priv(dev);
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ u32 n = mlx4_en_get_rxfh_indir_size(dev);
+ u32 i, rss_rings;
+ int err = 0;
+@@ -1158,6 +1447,22 @@ static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
+ break;
+ ring_index[i] = i % rss_rings;
+ }
++#else
++ struct mlx4_en_rss_map *rss_map = &priv->rss_map;
++ int rss_rings;
++ size_t n = priv->rx_ring_num;
++ int err = 0;
++
++ rss_rings = priv->prof->rss_rings ?: priv->rx_ring_num;
++ rss_rings = 1 << ilog2(rss_rings);
++
++ while (n--) {
++ if (!ring_index)
++ break;
++ ring_index[n] = rss_map->qps[n % rss_rings].qpn -
++ rss_map->base_qpn;
++ }
++#endif
+ if (key)
+ memcpy(key, priv->rss_key, MLX4_EN_RSS_KEY_SIZE);
+ if (hfunc)
+@@ -1169,7 +1474,9 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
+ const u8 *key, const u8 hfunc)
+ {
+ struct mlx4_en_priv *priv = netdev_priv(dev);
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ u32 n = mlx4_en_get_rxfh_indir_size(dev);
++#endif
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int port_up = 0;
+ int err = 0;
+@@ -1179,6 +1486,7 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
+ /* Calculate RSS table size and make sure flows are spread evenly
+ * between rings
+ */
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ for (i = 0; i < n; i++) {
+ if (!ring_index)
+ break;
+@@ -1191,6 +1499,20 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
+
+ if (!rss_rings)
+ rss_rings = n;
++#else
++ for (i = 0; i < priv->rx_ring_num; i++) {
++ if (!ring_index)
++ continue;
++ if (i > 0 && !ring_index[i] && !rss_rings)
++ rss_rings = i;
++
++ if (ring_index[i] != (i % (rss_rings ?: priv->rx_ring_num)))
++ return -EINVAL;
++ }
++
++ if (!rss_rings)
++ rss_rings = priv->rx_ring_num;
++#endif
+
+ /* RSS table size must be an order of 2 */
+ if (!is_power_of_2(rss_rings))
+@@ -1718,8 +2040,10 @@ static int mlx4_en_set_channels(struct net_device *dev,
+ {
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ struct mlx4_en_port_profile new_prof;
+ struct mlx4_en_priv *tmp;
++#endif
+ int port_up = 0;
+ int err = 0;
+
+@@ -1729,17 +2053,22 @@ static int mlx4_en_set_channels(struct net_device *dev,
+ !channel->tx_count || !channel->rx_count)
+ return -EINVAL;
+
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
++#ifdef HAVE_LINUX_BPF_H
+ if (channel->tx_count * MLX4_EN_NUM_UP <= priv->xdp_ring_num) {
+ en_err(priv, "Minimum %d tx channels required with XDP on\n",
+ priv->xdp_ring_num / MLX4_EN_NUM_UP + 1);
+ return -EINVAL;
+ }
++#endif
+
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
++#endif
+
+ mutex_lock(&mdev->state_lock);
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+ new_prof.num_tx_rings_p_up = channel->tx_count;
+ new_prof.tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP;
+@@ -1748,16 +2077,35 @@ static int mlx4_en_set_channels(struct net_device *dev,
+ err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+ if (err)
+ goto out;
++#endif
+
+ if (priv->port_up) {
+ port_up = 1;
+ mlx4_en_stop_port(dev, 1);
+ }
+
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ mlx4_en_safe_replace_resources(priv, tmp);
++#else
++ mlx4_en_free_resources(priv);
++
++ priv->num_tx_rings_p_up = channel->tx_count;
++ priv->tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP;
++ priv->rx_ring_num = channel->rx_count;
++
++ err = mlx4_en_alloc_resources(priv);
++ if (err) {
++ en_err(priv, "Failed reallocating port resources\n");
++ goto out;
++ }
++#endif
+
++#ifdef HAVE_LINUX_BPF_H
+ netif_set_real_num_tx_queues(dev, priv->tx_ring_num -
+ priv->xdp_ring_num);
++#else
++ netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
++#endif
+ netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
+
+ if (dev->num_tc)
+@@ -1774,7 +2122,9 @@ static int mlx4_en_set_channels(struct net_device *dev,
+
+ err = mlx4_en_moderation_update(priv);
+ out:
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ kfree(tmp);
++#endif
+ mutex_unlock(&mdev->state_lock);
+ return err;
+ }
+@@ -1867,6 +2217,7 @@ static u32 mlx4_en_get_priv_flags(struct net_device *dev)
+ return priv->pflags;
+ }
+
++#ifdef HAVE_GET_SET_TUNABLE
+ static int mlx4_en_get_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna,
+ void *data)
+@@ -1908,6 +2259,7 @@ static int mlx4_en_set_tunable(struct net_device *dev,
+
+ return ret;
+ }
++#endif
+
+ static int mlx4_en_get_module_info(struct net_device *dev,
+ struct ethtool_modinfo *modinfo)
+@@ -2018,8 +2370,13 @@ static int mlx4_en_set_phys_id(struct net_device *dev,
+
+ const struct ethtool_ops mlx4_en_ethtool_ops = {
+ .get_drvinfo = mlx4_en_get_drvinfo,
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ .get_link_ksettings = mlx4_en_get_link_ksettings,
+ .set_link_ksettings = mlx4_en_set_link_ksettings,
++#else
++ .get_settings = mlx4_en_get_settings,
++ .set_settings = mlx4_en_set_settings,
++#endif
+ .get_link = ethtool_op_get_link,
+ .get_strings = mlx4_en_get_strings,
+ .get_sset_count = mlx4_en_get_sset_count,
+@@ -2047,13 +2404,13 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
+ .get_ts_info = mlx4_en_get_ts_info,
+ .set_priv_flags = mlx4_en_set_priv_flags,
+ .get_priv_flags = mlx4_en_get_priv_flags,
++#ifdef HAVE_GET_SET_TUNABLE
+ .get_tunable = mlx4_en_get_tunable,
+ .set_tunable = mlx4_en_set_tunable,
++#endif
+ .get_module_info = mlx4_en_get_module_info,
+ .get_module_eeprom = mlx4_en_get_module_eeprom
+ };
+
+
+
+-
+-
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
+@@ -255,12 +255,14 @@ static void mlx4_en_activate(struct mlx4_dev *dev, void *ctx)
+ mdev->pndev[i] = NULL;
+ }
+
++#ifdef HAVE_NETDEV_BONDING_INFO
+ /* register notifier */
+ mdev->nb.notifier_call = mlx4_en_netdev_event;
+ if (register_netdevice_notifier(&mdev->nb)) {
+ mdev->nb.notifier_call = NULL;
+ mlx4_err(mdev, "Failed to create notifier\n");
+ }
++#endif
+ }
+
+ static void *mlx4_en_add(struct mlx4_dev *dev)
+@@ -382,7 +384,9 @@ static void mlx4_en_verify_params(void)
+ static int __init mlx4_en_init(void)
+ {
+ mlx4_en_verify_params();
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ mlx4_en_init_ptys2ethtool_map();
++#endif
+
+ return mlx4_register_interface(&mlx4_en_interface);
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+@@ -31,17 +31,22 @@
+ *
+ */
+
++#ifdef HAVE_LINUX_BPF_H
+ #include <linux/bpf.h>
++#endif
+ #include <linux/etherdevice.h>
+ #include <linux/tcp.h>
+ #include <linux/if_vlan.h>
+ #include <linux/delay.h>
+ #include <linux/slab.h>
+ #include <linux/hash.h>
++#include <linux/if_bonding.h>
+ #include <net/ip.h>
+ #include <net/busy_poll.h>
+ #include <net/vxlan.h>
++#ifdef HAVE_NET_DEVLINK_H
+ #include <net/devlink.h>
++#endif
+
+ #include <linux/mlx4/driver.h>
+ #include <linux/mlx4/device.h>
+@@ -51,6 +56,7 @@
+ #include "mlx4_en.h"
+ #include "en_port.h"
+
++#ifdef HAVE_NEW_TX_RING_SCHEME
+ int mlx4_en_setup_tc(struct net_device *dev, u8 up)
+ {
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+@@ -82,6 +88,7 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up)
+ return 0;
+ }
+
++#ifdef HAVE_NDO_SETUP_TC_4_PARAMS
+ static int __mlx4_en_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
+ struct tc_to_netdev *tc)
+ {
+@@ -90,6 +97,8 @@ static int __mlx4_en_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
+
+ return mlx4_en_setup_tc(dev, tc->tc);
+ }
++#endif /* HAVE_NDO_SETUP_TC_4_PARAMS */
++#endif /* HAVE_NEW_TX_RING_SCHEME */
+
+ #ifdef CONFIG_RFS_ACCEL
+
+@@ -1522,6 +1531,7 @@ static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
+ free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask);
+ }
+
++#ifdef HAVE_LINUX_BPF_H
+ static void mlx4_en_init_recycle_ring(struct mlx4_en_priv *priv,
+ int tx_ring_idx)
+ {
+@@ -1539,6 +1549,7 @@ static void mlx4_en_init_recycle_ring(struct mlx4_en_priv *priv,
+ tx_ring->recycle_ring = NULL;
+ }
+ }
++#endif
+
+ int mlx4_en_start_port(struct net_device *dev)
+ {
+@@ -1662,7 +1673,9 @@ int mlx4_en_start_port(struct net_device *dev)
+ }
+ tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
+
++#ifdef HAVE_LINUX_BPF_H
+ mlx4_en_init_recycle_ring(priv, i);
++#endif
+
+ /* Arm CQ for TX completions */
+ mlx4_en_arm_cq(priv, cq);
+@@ -1728,8 +1741,13 @@ int mlx4_en_start_port(struct net_device *dev)
+ /* Schedule multicast task to populate multicast list */
+ queue_work(mdev->workqueue, &priv->rx_mode_task);
+
++#ifdef HAVE_UDP_TUNNEL_GET_RX_INFO
+ if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+ udp_tunnel_get_rx_info(dev);
++#elif defined(CONFIG_MLX4_EN_VXLAN)
++ if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
++ vxlan_get_rx_port(dev);
++#endif
+
+ priv->port_up = true;
+ netif_tx_start_all_queues(dev);
+@@ -1985,7 +2003,11 @@ static int mlx4_en_close(struct net_device *dev)
+ return 0;
+ }
+
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
++#else
++void mlx4_en_free_resources(struct mlx4_en_priv *priv)
++#endif
+ {
+ int i;
+
+@@ -2010,7 +2032,11 @@ static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
+
+ }
+
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
++#else
++int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
++#endif
+ {
+ struct mlx4_en_port_profile *prof = priv->prof;
+ int i;
+@@ -2157,8 +2183,10 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
+
+ /* Unregister device - this will close the port if it was up */
+ if (priv->registered) {
++#ifdef HAVE_NET_DEVLINK_H
+ devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev,
+ priv->port));
++#endif
+ if (shutdown)
+ mlx4_en_shutdown(dev);
+ else
+@@ -2208,11 +2236,13 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
+ en_err(priv, "Bad MTU size:%d.\n", new_mtu);
+ return -EPERM;
+ }
++#ifdef HAVE_LINUX_BPF_H
+ if (priv->xdp_ring_num && MLX4_EN_EFF_MTU(new_mtu) > FRAG_SZ0) {
+ en_err(priv, "MTU size:%d requires frags but XDP running\n",
+ new_mtu);
+ return -EOPNOTSUPP;
+ }
++#endif
+ dev->mtu = new_mtu;
+
+ if (netif_running(dev)) {
+@@ -2417,6 +2447,7 @@ static int mlx4_en_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
+ max_tx_rate);
+ }
+
++#ifdef HAVE_NETDEV_OPS_EXT_NDO_SET_VF_SPOOFCHK
+ static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
+ {
+ struct mlx4_en_priv *en_priv = netdev_priv(dev);
+@@ -2424,7 +2455,9 @@ static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
+
+ return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting);
+ }
++#endif
+
++#ifdef HAVE_NDO_SET_VF_MAC
+ static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivf)
+ {
+ struct mlx4_en_priv *en_priv = netdev_priv(dev);
+@@ -2432,7 +2465,9 @@ static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_
+
+ return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf);
+ }
++#endif
+
++#ifdef HAVE_NETDEV_OPS_EXT_NDO_SET_VF_LINK_STATE
+ static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_state)
+ {
+ struct mlx4_en_priv *en_priv = netdev_priv(dev);
+@@ -2440,6 +2475,7 @@ static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_st
+
+ return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
+ }
++#endif
+
+ static int mlx4_en_get_vf_stats(struct net_device *dev, int vf,
+ struct ifla_vf_stats *vf_stats)
+@@ -2450,9 +2486,14 @@ static int mlx4_en_get_vf_stats(struct net_device *dev, int vf,
+ return mlx4_get_vf_stats(mdev->dev, en_priv->port, vf, vf_stats);
+ }
+
++#if defined(HAVE_NETDEV_NDO_GET_PHYS_PORT_ID) || defined(HAVE_NETDEV_EXT_NDO_GET_PHYS_PORT_ID)
+ #define PORT_ID_BYTE_LEN 8
+ static int mlx4_en_get_phys_port_id(struct net_device *dev,
++#ifdef HAVE_NETDEV_PHYS_ITEM_ID
+ struct netdev_phys_item_id *ppid)
++#else
++ struct netdev_phys_port_id *ppid)
++#endif
+ {
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_dev *mdev = priv->mdev->dev;
+@@ -2469,6 +2510,7 @@ static int mlx4_en_get_phys_port_id(struct net_device *dev,
+ }
+ return 0;
+ }
++#endif
+
+ static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
+ {
+@@ -2489,12 +2531,20 @@ out:
+ }
+
+ /* set offloads */
++#ifdef HAVE_NET_DEVICE_GSO_PARTIAL_FEATURES
+ priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM |
+ NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL;
++#else
++ priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
++ NETIF_F_RXCSUM |
++ NETIF_F_TSO | NETIF_F_TSO6 |
++ NETIF_F_GSO_UDP_TUNNEL |
++ NETIF_F_GSO_UDP_TUNNEL_CSUM;
++#endif
+ }
+
+ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
+@@ -2503,12 +2553,20 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
+ struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
+ vxlan_del_task);
+ /* unset offloads */
++#ifdef HAVE_NET_DEVICE_GSO_PARTIAL_FEATURES
+ priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM |
+ NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL);
++#else
++ priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
++ NETIF_F_RXCSUM |
++ NETIF_F_TSO | NETIF_F_TSO6 |
++ NETIF_F_GSO_UDP_TUNNEL |
++ NETIF_F_GSO_UDP_TUNNEL_CSUM);
++#endif
+
+ ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
+ VXLAN_STEER_BY_OUTER_MAC, 0);
+@@ -2518,6 +2576,7 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
+ priv->vxlan_port = 0;
+ }
+
++#ifdef HAVE_NDO_UDP_TUNNEL_ADD
+ static void mlx4_en_add_vxlan_port(struct net_device *dev,
+ struct udp_tunnel_info *ti)
+ {
+@@ -2569,6 +2628,51 @@ static void mlx4_en_del_vxlan_port(struct net_device *dev,
+
+ queue_work(priv->mdev->workqueue, &priv->vxlan_del_task);
+ }
++#elif defined(HAVE_VXLAN_DYNAMIC_PORT)
++static void mlx4_en_add_vxlan_port(struct net_device *dev,
++ sa_family_t sa_family, __be16 port)
++{
++ struct mlx4_en_priv *priv = netdev_priv(dev);
++ __be16 current_port;
++
++ if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
++ return;
++
++ if (sa_family == AF_INET6)
++ return;
++
++ current_port = priv->vxlan_port;
++ if (current_port && current_port != port) {
++ en_warn(priv, "vxlan port %d configured, can't add port %d\n",
++ ntohs(current_port), ntohs(port));
++ return;
++ }
++
++ priv->vxlan_port = port;
++ queue_work(priv->mdev->workqueue, &priv->vxlan_add_task);
++}
++
++static void mlx4_en_del_vxlan_port(struct net_device *dev,
++ sa_family_t sa_family, __be16 port)
++{
++ struct mlx4_en_priv *priv = netdev_priv(dev);
++ __be16 current_port;
++
++ if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
++ return;
++
++ if (sa_family == AF_INET6)
++ return;
++
++ current_port = priv->vxlan_port;
++ if (current_port != port) {
++ en_dbg(DRV, priv, "vxlan port %d isn't configured, ignoring\n", ntohs(port));
++ return;
++ }
++
++ queue_work(priv->mdev->workqueue, &priv->vxlan_del_task);
++}
++#endif
+
+ static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+@@ -2594,6 +2698,7 @@ static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
+ return features;
+ }
+
++#ifdef HAVE_NDO_SET_TX_MAXRATE
+ static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate)
+ {
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+@@ -2620,7 +2725,9 @@ static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 m
+ &params);
+ return err;
+ }
++#endif
+
++#ifdef HAVE_LINUX_BPF_H
+ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
+ {
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+@@ -2717,6 +2824,7 @@ static int mlx4_xdp(struct net_device *dev, struct netdev_xdp *xdp)
+ return -EINVAL;
+ }
+ }
++#endif
+
+ static const struct net_device_ops mlx4_netdev_ops = {
+ .ndo_open = mlx4_en_open,
+@@ -2737,16 +2845,33 @@ static const struct net_device_ops mlx4_netdev_ops = {
+ #endif
+ .ndo_set_features = mlx4_en_set_features,
+ .ndo_fix_features = mlx4_en_fix_features,
++#ifdef HAVE_NDO_SETUP_TC
++#ifdef HAVE_NDO_SETUP_TC_4_PARAMS
+ .ndo_setup_tc = __mlx4_en_setup_tc,
++#else /* HAVE_NDO_SETUP_TC_4_PARAMS */
++ .ndo_setup_tc = mlx4_en_setup_tc,
++#endif /* HAVE_NDO_SETUP_TC_4_PARAMS */
++#endif /* HAVE_NDO_SETUP_TC */
+ #ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = mlx4_en_filter_rfs,
+ #endif
++#ifdef HAVE_NETDEV_NDO_GET_PHYS_PORT_ID
+ .ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
++#endif
++#ifdef HAVE_NDO_UDP_TUNNEL_ADD
+ .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port,
+ .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port,
++#elif defined(HAVE_VXLAN_DYNAMIC_PORT)
++ .ndo_add_vxlan_port = mlx4_en_add_vxlan_port,
++ .ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
++#endif
+ .ndo_features_check = mlx4_en_features_check,
++#ifdef HAVE_NDO_SET_TX_MAXRATE
+ .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
++#endif
++#ifdef HAVE_LINUX_BPF_H
+ .ndo_xdp = mlx4_xdp,
++#endif
+ };
+
+ static const struct net_device_ops mlx4_netdev_ops_master = {
+@@ -2765,25 +2890,46 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
+ .ndo_set_vf_mac = mlx4_en_set_vf_mac,
+ .ndo_set_vf_vlan = mlx4_en_set_vf_vlan,
+ .ndo_set_vf_rate = mlx4_en_set_vf_rate,
++#ifdef HAVE_NETDEV_OPS_EXT_NDO_SET_VF_SPOOFCHK
+ .ndo_set_vf_spoofchk = mlx4_en_set_vf_spoofchk,
++#endif
++#ifdef HAVE_NETDEV_OPS_EXT_NDO_SET_VF_LINK_STATE
+ .ndo_set_vf_link_state = mlx4_en_set_vf_link_state,
++#endif
+ .ndo_get_vf_stats = mlx4_en_get_vf_stats,
++#ifdef HAVE_NDO_SET_VF_MAC
+ .ndo_get_vf_config = mlx4_en_get_vf_config,
++#endif
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = mlx4_en_netpoll,
+ #endif
+ .ndo_set_features = mlx4_en_set_features,
+ .ndo_fix_features = mlx4_en_fix_features,
++#ifdef HAVE_NDO_SETUP_TC_4_PARAMS
+ .ndo_setup_tc = __mlx4_en_setup_tc,
++#else /* HAVE_NDO_SETUP_TC_4_PARAMS */
++ .ndo_setup_tc = mlx4_en_setup_tc,
++#endif /* HAVE_NDO_SETUP_TC_4_PARAMS */
+ #ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = mlx4_en_filter_rfs,
+ #endif
++#ifdef HAVE_NETDEV_EXT_NDO_GET_PHYS_PORT_ID
+ .ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
++#endif
++#ifdef HAVE_NDO_UDP_TUNNEL_ADD
+ .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port,
+ .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port,
++#elif defined(HAVE_VXLAN_DYNAMIC_PORT)
++ .ndo_add_vxlan_port = mlx4_en_add_vxlan_port,
++ .ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
++#endif
+ .ndo_features_check = mlx4_en_features_check,
++#ifdef HAVE_NDO_SET_TX_MAXRATE
+ .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
++#endif
++#ifdef HAVE_LINUX_BPF_H
+ .ndo_xdp = mlx4_xdp,
++#endif
+ };
+
+ struct mlx4_en_bond {
+@@ -2793,6 +2939,7 @@ struct mlx4_en_bond {
+ struct mlx4_port_map port_map;
+ };
+
++#ifdef HAVE_NETDEV_BONDING_INFO
+ static void mlx4_en_bond_work(struct work_struct *work)
+ {
+ struct mlx4_en_bond *bond = container_of(work,
+@@ -2959,6 +3106,7 @@ int mlx4_en_netdev_event(struct notifier_block *this,
+
+ return NOTIFY_DONE;
+ }
++#endif
+
+ void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev,
+ struct mlx4_en_stats_bitmap *stats_bitmap,
+@@ -3052,12 +3200,20 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ struct tc_configuration *tc;
+ #endif
+
++#ifdef HAVE_NEW_TX_RING_SCHEME
+ dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
+ MAX_TX_RINGS, MAX_RX_RINGS);
++#else
++ dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), MAX_TX_RINGS);
++#endif
+ if (dev == NULL)
+ return -ENOMEM;
+
++#ifdef HAVE_NEW_TX_RING_SCHEME
+ netif_set_real_num_tx_queues(dev, prof->tx_ring_num);
++#else
++ dev->real_num_tx_queues = prof->tx_ring_num;
++#endif
+ netif_set_real_num_rx_queues(dev, prof->rx_ring_num);
+
+ SET_NETDEV_DEV(dev, &mdev->dev->persist->pdev->dev);
+@@ -3194,7 +3350,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ else
+ dev->netdev_ops = &mlx4_netdev_ops;
+ dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
++#ifdef HAVE_NEW_TX_RING_SCHEME
+ netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
++#else
++ dev->real_num_tx_queues = priv->tx_ring_num;
++#endif
+ netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
+
+ dev->ethtool_ops = &mlx4_en_ethtool_ops;
+@@ -3262,6 +3422,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ }
+
+ if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
++#ifdef HAVE_NET_DEVICE_GSO_PARTIAL_FEATURES
+ dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL;
+@@ -3269,6 +3430,12 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL;
+ dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
++#else
++ dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
++ NETIF_F_GSO_UDP_TUNNEL_CSUM;
++ dev->features |= NETIF_F_GSO_UDP_TUNNEL |
++ NETIF_F_GSO_UDP_TUNNEL_CSUM;
++#endif
+ }
+
+ mdev->pndev[port] = dev;
+@@ -3332,8 +3499,10 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ }
+
+ priv->registered = 1;
++#ifdef HAVE_NET_DEVLINK_H
+ devlink_port_type_eth_set(mlx4_get_devlink_port(mdev->dev, priv->port),
+ dev);
++#endif
+
+ return 0;
+
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+@@ -32,7 +32,9 @@
+ */
+
+ #include <net/busy_poll.h>
++#ifdef HAVE_LINUX_BPF_H
+ #include <linux/bpf.h>
++#endif
+ #include <linux/mlx4/cq.h>
+ #include <linux/slab.h>
+ #include <linux/mlx4/qp.h>
+@@ -83,7 +85,12 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
+ /* Not doing get_page() for each frag is a big win
+ * on asymetric workloads. Note we can not use atomic_set().
+ */
++#ifdef HAVE_LINUX_PAGE_REF_H
+ page_ref_add(page, page_alloc->page_size / frag_info->frag_stride - 1);
++#else
++ atomic_add(page_alloc->page_size / frag_info->frag_stride - 1,
++ &page->_count);
++#endif
+ return 0;
+ }
+
+@@ -128,9 +135,13 @@ out:
+ page_alloc[i].page_size,
+ priv->frag_info[i].dma_dir);
+ page = page_alloc[i].page;
++#ifdef HAVE_LINUX_PAGE_REF_H
+ /* Revert changes done by mlx4_alloc_pages */
+ page_ref_sub(page, page_alloc[i].page_size /
+ priv->frag_info[i].frag_stride - 1);
++#else
++ atomic_set(&page->_count, 1);
++#endif
+ put_page(page);
+ }
+ }
+@@ -168,7 +179,11 @@ static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
+
+ en_dbg(DRV, priv, " frag %d allocator: - size:%d frags:%d\n",
+ i, ring->page_alloc[i].page_size,
++#ifdef HAVE_LINUX_PAGE_REF_H
+ page_ref_count(ring->page_alloc[i].page));
++#else
++ atomic_read(&ring->page_alloc[i].page->_count));
++#endif
+ }
+ return 0;
+
+@@ -181,9 +196,13 @@ out:
+ page_alloc->page_size,
+ priv->frag_info[i].dma_dir);
+ page = page_alloc->page;
++#ifdef HAVE_LINUX_PAGE_REF_H
+ /* Revert changes done by mlx4_alloc_pages */
+ page_ref_sub(page, page_alloc->page_size /
+ priv->frag_info[i].frag_stride - 1);
++#else
++ atomic_set(&page->_count, 1);
++#endif
+ put_page(page);
+ page_alloc->page = NULL;
+ }
+@@ -511,6 +530,7 @@ void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
+ }
+ }
+
++#ifdef HAVE_LINUX_BPF_H
+ /* When the rx ring is running in page-per-packet mode, a released frame can go
+ * directly into a small cache, to avoid unmapping or touching the page
+ * allocator. In bpf prog performance scenarios, buffers are either forwarded
+@@ -528,6 +548,7 @@ bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
+ cache->buf[cache->index++] = *frame;
+ return true;
+ }
++#endif
+
+ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring **pring,
+@@ -535,11 +556,13 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
+ {
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_rx_ring *ring = *pring;
++#ifdef HAVE_LINUX_BPF_H
+ struct bpf_prog *old_prog;
+
+ old_prog = READ_ONCE(ring->xdp_prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
++#endif
+ mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
+ vfree(ring->rx_info);
+ ring->rx_info = NULL;
+@@ -781,10 +804,14 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
+ struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
+ struct mlx4_en_rx_alloc *frags;
+ struct mlx4_en_rx_desc *rx_desc;
++#ifdef HAVE_LINUX_BPF_H
+ struct bpf_prog *xdp_prog;
+ int doorbell_pending;
+ struct sk_buff *skb;
+ int tx_index;
++#else
++ struct sk_buff *skb;
++#endif
+ int index;
+ int nr;
+ unsigned int length;
+@@ -800,9 +827,11 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
+ if (budget <= 0)
+ return polled;
+
++#ifdef HAVE_LINUX_BPF_H
+ xdp_prog = READ_ONCE(ring->xdp_prog);
+ doorbell_pending = 0;
+ tx_index = (priv->tx_ring_num - priv->xdp_ring_num) + cq->ring;
++#endif
+
+ /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
+ * descriptor offset can be deduced from the CQE index instead of
+@@ -880,6 +909,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
+ l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
+ (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
+
++#ifdef HAVE_LINUX_BPF_H
+ /* A bpf program gets first chance to drop the packet. It may
+ * read bytes but not past the end of the frag.
+ */
+@@ -916,6 +946,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
+ goto next;
+ }
+ }
++#endif
+
+ if (likely(dev->features & NETIF_F_RXCSUM)) {
+ if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
+@@ -1068,7 +1099,9 @@ next:
+ for (nr = 0; nr < priv->num_frags; nr++)
+ mlx4_en_free_frag(priv, frags, nr);
+
++#ifdef HAVE_LINUX_BPF_H
+ consumed:
++#endif
+ ++cq->mcq.cons_index;
+ index = (cq->mcq.cons_index) & ring->size_mask;
+ cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;
+@@ -1077,8 +1110,10 @@ consumed:
+ }
+
+ out:
++#ifdef HAVE_LINUX_BPF_H
+ if (doorbell_pending)
+ mlx4_en_xmit_doorbell(priv->tx_ring[tx_index]);
++#endif
+
+ AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
+ mlx4_cq_set_ci(&cq->mcq);
+@@ -1114,14 +1149,20 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
+ /* If we used up all the quota - we're probably not done yet... */
+ if (done == budget) {
+ const struct cpumask *aff;
++#ifdef HAVE_IRQ_DATA_GET_AFFINITY_MASK
+ struct irq_data *idata;
++#endif
+ int cpu_curr;
+
+ INC_PERF_COUNTER(priv->pstats.napi_quota);
+
+ cpu_curr = smp_processor_id();
++#ifdef HAVE_IRQ_DATA_GET_AFFINITY_MASK
+ idata = irq_desc_get_irq_data(cq->irq_desc);
+ aff = irq_data_get_affinity_mask(idata);
++#else
++ aff = irq_desc_get_irq_data(cq->irq_desc)->affinity;
++#endif
+
+ if (likely(cpumask_test_cpu(cpu_curr, aff)))
+ return budget;
+@@ -1155,6 +1196,7 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
+ int buf_size = 0;
+ int i = 0;
+
++#ifdef HAVE_LINUX_BPF_H
+ /* bpf requires buffers to be set up as 1 packet per page.
+ * This only works when num_frags == 1.
+ */
+@@ -1166,6 +1208,7 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
+ align = PAGE_SIZE;
+ order = 0;
+ }
++#endif
+
+ while (buf_size < eff_mtu) {
+ priv->frag_info[i].order = order;
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+@@ -196,7 +196,9 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
+ ring->last_nr_txbb = 1;
+ memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info));
+ memset(ring->buf, 0, ring->buf_size);
++#ifdef HAVE_LINUX_BPF_H
+ ring->free_tx_desc = mlx4_en_free_tx_desc;
++#endif
+
+ ring->qp_state = MLX4_QP_STATE_RST;
+ ring->doorbell_qpn = cpu_to_be32(ring->qp.qpn << 8);
+@@ -340,11 +342,18 @@ u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
+ }
+ }
+ }
++#ifdef HAVE_NAPI_CONSUME_SKB
+ napi_consume_skb(skb, napi_mode);
++#elif defined(HAVE_DEV_CONSUME_SKB_ANY)
++ dev_consume_skb_any(skb);
++#else
++ dev_kfree_skb_any(skb);
++#endif
+
+ return tx_info->nr_txbb;
+ }
+
++#ifdef HAVE_LINUX_BPF_H
+ u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring,
+ int index, u8 owner, u64 timestamp,
+@@ -366,6 +375,7 @@ u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
+
+ return tx_info->nr_txbb;
+ }
++#endif
+
+ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
+ {
+@@ -384,7 +394,11 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
+ }
+
+ while (ring->cons != ring->prod) {
++#ifdef HAVE_LINUX_BPF_H
+ ring->last_nr_txbb = ring->free_tx_desc(priv, ring,
++#else
++ ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
++#endif
+ ring->cons & ring->size_mask,
+ !!(ring->cons & ring->size), 0,
+ 0 /* Non-NAPI caller */);
+@@ -426,7 +440,13 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
+ if (!priv->port_up)
+ return true;
+
++#ifdef HAVE_NETDEV_TXQ_BQL_PREFETCHW
+ netdev_txq_bql_complete_prefetchw(ring->tx_queue);
++#else
++#ifdef CONFIG_BQL
++ prefetchw(&ring->tx_queue->dql.limit);
++#endif
++#endif
+
+ index = cons_index & size_mask;
+ cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
+@@ -466,7 +486,11 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
+ timestamp = mlx4_en_get_cqe_ts(cqe);
+
+ /* free next descriptor */
++#ifdef HAVE_LINUX_BPF_H
+ last_nr_txbb = ring->free_tx_desc(
++#else
++ last_nr_txbb = mlx4_en_free_tx_desc(
++#endif
+ priv, ring, ring_index,
+ !!((ring_cons + txbbs_skipped) &
+ ring->size), timestamp, napi_budget);
+@@ -498,8 +522,10 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
+ ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb;
+ ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped;
+
++#ifdef HAVE_LINUX_BPF_H
+ if (ring->free_tx_desc == mlx4_en_recycle_tx_desc)
+ return done < budget;
++#endif
+
+ netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
+
+@@ -702,8 +728,16 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc,
+ }
+ }
+
++#if defined(NDO_SELECT_QUEUE_HAS_ACCEL_PRIV) || defined(HAVE_SELECT_QUEUE_FALLBACK_T)
+ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
++#ifdef HAVE_SELECT_QUEUE_FALLBACK_T
+ void *accel_priv, select_queue_fallback_t fallback)
++#else
++ void *accel_priv)
++#endif
++#else /* NDO_SELECT_QUEUE_HAS_ACCEL_PRIV || HAVE_SELECT_QUEUE_FALLBACK_T */
++u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
++#endif
+ {
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ u16 rings_p_up = priv->num_tx_rings_p_up;
+@@ -715,7 +749,11 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
+ if (skb_vlan_tag_present(skb))
+ up = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT;
+
++#ifdef HAVE_SELECT_QUEUE_FALLBACK_T
+ return fallback(dev, skb) % rings_p_up + up * rings_p_up;
++#else
++ return __netdev_pick_tx(dev, skb) % rings_p_up + up * rings_p_up;
++#endif
+ }
+
+ static void mlx4_bf_copy(void __iomem *dst, const void *src,
+@@ -842,7 +880,13 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ bf_ok = false;
+ }
+
++#ifdef HAVE_NETDEV_TXQ_BQL_PREFETCHW
+ netdev_txq_bql_enqueue_prefetchw(ring->tx_queue);
++#else
++#ifdef CONFIG_BQL
++ prefetchw(&ring->tx_queue->dql);
++#endif
++#endif
+
+ /* Track current inflight packets for performance analysis */
+ AVG_PERF_COUNTER(priv->pstats.inflight_avg,
+@@ -1077,6 +1121,7 @@ tx_drop:
+ return NETDEV_TX_OK;
+ }
+
++#ifdef HAVE_LINUX_BPF_H
+ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame,
+ struct net_device *dev, unsigned int length,
+ int tx_ind, int *doorbell_pending)
+@@ -1180,3 +1225,4 @@ tx_drop:
+ ring->tx_dropped++;
+ return NETDEV_TX_BUSY;
+ }
++#endif
+diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
++++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
+@@ -34,7 +34,9 @@
+ #include <linux/slab.h>
+ #include <linux/export.h>
+ #include <linux/errno.h>
++#ifdef HAVE_NET_DEVLINK_H
+ #include <net/devlink.h>
++#endif
+
+ #include "mlx4.h"
+
+@@ -254,6 +256,7 @@ void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int
+ }
+ EXPORT_SYMBOL_GPL(mlx4_get_protocol_dev);
+
++#ifdef HAVE_NET_DEVLINK_H
+ struct devlink_port *mlx4_get_devlink_port(struct mlx4_dev *dev, int port)
+ {
+ struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+@@ -261,3 +264,4 @@ struct devlink_port *mlx4_get_devlink_port(struct mlx4_dev *dev, int port)
+ return &info->devlink_port;
+ }
+ EXPORT_SYMBOL_GPL(mlx4_get_devlink_port);
++#endif
+diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx4/main.c
++++ b/drivers/net/ethernet/mellanox/mlx4/main.c
+@@ -42,7 +42,9 @@
+ #include <linux/io-mapping.h>
+ #include <linux/delay.h>
+ #include <linux/kmod.h>
++#ifdef HAVE_NET_DEVLINK_H
+ #include <net/devlink.h>
++#endif
+
+ #include <linux/mlx4/device.h>
+ #include <linux/mlx4/doorbell.h>
+@@ -2918,13 +2920,17 @@ no_msi:
+
+ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
+ {
++#ifdef HAVE_NET_DEVLINK_H
+ struct devlink *devlink = priv_to_devlink(mlx4_priv(dev));
++#endif
+ struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+ int err;
+
++#ifdef HAVE_NET_DEVLINK_H
+ err = devlink_port_register(devlink, &info->devlink_port, port);
+ if (err)
+ return err;
++#endif
+
+ info->dev = dev;
+ info->port = port;
+@@ -2949,7 +2955,9 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
+ err = device_create_file(&dev->persist->pdev->dev, &info->port_attr);
+ if (err) {
+ mlx4_err(dev, "Failed to create file for port %d\n", port);
++#ifdef HAVE_NET_DEVLINK_H
+ devlink_port_unregister(&info->devlink_port);
++#endif
+ info->port = -1;
+ }
+
+@@ -3750,6 +3758,7 @@ err_disable_pdev:
+ return err;
+ }
+
++#ifdef HAVE_NET_DEVLINK_H
+ static int mlx4_devlink_port_type_set(struct devlink_port *devlink_port,
+ enum devlink_port_type port_type)
+ {
+@@ -3778,26 +3787,40 @@ static int mlx4_devlink_port_type_set(struct devlink_port *devlink_port,
+ static const struct devlink_ops mlx4_devlink_ops = {
+ .port_type_set = mlx4_devlink_port_type_set,
+ };
++#endif
+
+ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ {
++#ifdef HAVE_NET_DEVLINK_H
+ struct devlink *devlink;
++#endif
+ struct mlx4_priv *priv;
+ struct mlx4_dev *dev;
+ int ret;
+
+ printk_once(KERN_INFO "%s", mlx4_version);
+
++#ifdef HAVE_NET_DEVLINK_H
+ devlink = devlink_alloc(&mlx4_devlink_ops, sizeof(*priv));
+ if (!devlink)
+ return -ENOMEM;
+ priv = devlink_priv(devlink);
++#else
++ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
++ if (!priv)
++ return -ENOMEM;
++#endif
+
+ dev = &priv->dev;
+ dev->persist = kzalloc(sizeof(*dev->persist), GFP_KERNEL);
+ if (!dev->persist) {
+ ret = -ENOMEM;
++#ifdef HAVE_NET_DEVLINK_H
+ goto err_devlink_free;
++#else
++ kfree(priv);
++ return ret;
++#endif
+ }
+ dev->persist->pdev = pdev;
+ dev->persist->dev = dev;
+@@ -3807,23 +3830,36 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ mutex_init(&dev->persist->interface_state_mutex);
+ mutex_init(&dev->persist->pci_status_mutex);
+
++#ifdef HAVE_NET_DEVLINK_H
+ ret = devlink_register(devlink, &pdev->dev);
+ if (ret)
+ goto err_persist_free;
++#endif
+
+ ret = __mlx4_init_one(pdev, id->driver_data, priv);
+ if (ret)
++#ifdef HAVE_NET_DEVLINK_H
+ goto err_devlink_unregister;
++#else
++ goto err_persist_free;
++#endif
+
+ pci_save_state(pdev);
+ return 0;
+
++#ifdef HAVE_NET_DEVLINK_H
+ err_devlink_unregister:
+ devlink_unregister(devlink);
++#endif
+ err_persist_free:
+ kfree(dev->persist);
++#ifndef HAVE_NET_DEVLINK_H
++ kfree(priv);
++#endif
++#ifdef HAVE_NET_DEVLINK_H
+ err_devlink_free:
+ devlink_free(devlink);
++#endif
+ return ret;
+ }
+
+@@ -3924,7 +3960,9 @@ static void mlx4_remove_one(struct pci_dev *pdev)
+ struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+ struct mlx4_dev *dev = persist->dev;
+ struct mlx4_priv *priv = mlx4_priv(dev);
++#ifdef HAVE_NET_DEVLINK_H
+ struct devlink *devlink = priv_to_devlink(priv);
++#endif
+ int active_vfs = 0;
+
+ mutex_lock(&persist->interface_state_mutex);
+@@ -3955,9 +3993,14 @@ static void mlx4_remove_one(struct pci_dev *pdev)
+
+ pci_release_regions(pdev);
+ mlx4_pci_disable_device(dev);
++#ifdef HAVE_NET_DEVLINK_H
+ devlink_unregister(devlink);
+ kfree(dev->persist);
+ devlink_free(devlink);
++#else
++ kfree(dev->persist);
++ kfree(priv);
++#endif
+ pci_set_drvdata(pdev, NULL);
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
++++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+@@ -45,7 +45,9 @@
+ #include <linux/workqueue.h>
+ #include <linux/interrupt.h>
+ #include <linux/spinlock.h>
++#ifdef HAVE_NET_DEVLINK_H
+ #include <net/devlink.h>
++#endif
+
+ #include <linux/mlx4/device.h>
+ #include <linux/mlx4/driver.h>
+@@ -831,7 +833,9 @@ struct mlx4_port_info {
+ struct mlx4_roce_gid_table gid_table;
+ int base_qpn;
+ struct cpu_rmap *rmap;
++#ifdef HAVE_NET_DEVLINK_H
+ struct devlink_port devlink_port;
++#endif
+ };
+
+ struct mlx4_sense {
+diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
++++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+@@ -132,7 +132,9 @@ enum {
+ MLX4_EN_NUM_UP)
+
+ #define MLX4_EN_DEFAULT_TX_WORK 256
++#ifdef HAVE_LINUX_BPF_H
+ #define MLX4_EN_DOORBELL_BUDGET 8
++#endif
+
+ /* Target number of packets to coalesce with interrupt moderation */
+ #define MLX4_EN_RX_COAL_TARGET 44
+@@ -304,11 +306,13 @@ struct mlx4_en_tx_ring {
+ __be32 mr_key;
+ void *buf;
+ struct mlx4_en_tx_info *tx_info;
++#ifdef HAVE_LINUX_BPF_H
+ struct mlx4_en_rx_ring *recycle_ring;
+ u32 (*free_tx_desc)(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring,
+ int index, u8 owner,
+ u64 timestamp, int napi_mode);
++#endif
+ u8 *bounce_buf;
+ struct mlx4_qp_context context;
+ int qpn;
+@@ -340,7 +344,9 @@ struct mlx4_en_rx_ring {
+ u8 fcs_del;
+ void *buf;
+ void *rx_info;
++#ifdef HAVE_LINUX_BPF_H
+ struct bpf_prog *xdp_prog;
++#endif
+ struct mlx4_en_page_cache page_cache;
+ unsigned long bytes;
+ unsigned long packets;
+@@ -585,7 +591,9 @@ struct mlx4_en_priv {
+ struct mlx4_en_frag_info frag_info[MLX4_EN_MAX_RX_FRAGS];
+ u16 num_frags;
+ u16 log_rx_info;
++#ifdef HAVE_LINUX_BPF_H
+ int xdp_ring_num;
++#endif
+
+ struct mlx4_en_tx_ring **tx_ring;
+ struct mlx4_en_rx_ring *rx_ring[MAX_RX_RINGS];
+@@ -659,7 +667,9 @@ static inline struct mlx4_cqe *mlx4_en_get_cqe(void *buf, int idx, int cqe_sz)
+
+ #define MLX4_EN_WOL_DO_MODIFY (1ULL << 63)
+
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ void mlx4_en_init_ptys2ethtool_map(void);
++#endif
+ void mlx4_en_update_loopback_state(struct net_device *dev,
+ netdev_features_t features);
+
+@@ -675,11 +685,16 @@ void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
+ u8 rx_ppp, u8 rx_pause,
+ u8 tx_ppp, u8 tx_pause);
+
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
+ struct mlx4_en_priv *tmp,
+ struct mlx4_en_port_profile *prof);
+ void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
+ struct mlx4_en_priv *tmp);
++#else
++void mlx4_en_free_resources(struct mlx4_en_priv *priv);
++int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
++#endif
+
+ int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq,
+ int entries, int ring, enum cq_type mode, int node);
+@@ -691,15 +706,25 @@ int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
+ int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
+
+ void mlx4_en_tx_irq(struct mlx4_cq *mcq);
++#if defined(NDO_SELECT_QUEUE_HAS_ACCEL_PRIV) || defined(HAVE_SELECT_QUEUE_FALLBACK_T)
+ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
++#ifdef HAVE_SELECT_QUEUE_FALLBACK_T
+ void *accel_priv, select_queue_fallback_t fallback);
++#else
++ void *accel_priv);
++#endif
++#else /* NDO_SELECT_QUEUE_HAS_ACCEL_PRIV */
++u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
++#endif
+ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
++#ifdef HAVE_LINUX_BPF_H
+ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame,
+ struct net_device *dev, unsigned int length,
+ int tx_ind, int *doorbell_pending);
+ void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring);
+ bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
+ struct mlx4_en_rx_alloc *frame);
++#endif
+
+ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring **pring,
+@@ -728,6 +753,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev,
+ int budget);
+ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
+ int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget);
++#ifdef HAVE_LINUX_BPF_H
+ u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring,
+ int index, u8 owner, u64 timestamp,
+@@ -736,6 +762,7 @@ u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring,
+ int index, u8 owner, u64 timestamp,
+ int napi_mode);
++#endif
+ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
+ int is_tx, int rss, int qpn, int cqn, int user_prio,
+ struct mlx4_qp_context *context);
+@@ -781,8 +808,11 @@ void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev,
+ struct mlx4_en_stats_bitmap *stats_bitmap,
+ u8 rx_ppp, u8 rx_pause,
+ u8 tx_ppp, u8 tx_pause);
++
++#ifdef HAVE_NETDEV_BONDING_INFO
+ int mlx4_en_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr);
++#endif
+
+ /*
+ * Functions for time stamping
+diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
++++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
+@@ -205,9 +205,13 @@ int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node)
+ goto free_uar;
+ }
+
++#ifdef HAVE_IO_MAPPING_MAP_WC_3_PARAMS
+ uar->bf_map = io_mapping_map_wc(priv->bf_mapping,
+ uar->index << PAGE_SHIFT,
+ PAGE_SIZE);
++#else
++ uar->bf_map = io_mapping_map_wc(priv->bf_mapping, uar->index << PAGE_SHIFT);
++#endif
+ if (!uar->bf_map) {
+ err = -ENOMEM;
+ goto unamp_uar;