git.openfabrics.org - compat-rdma/compat-rdma.git/commitdiff
mlx4: Added RHEL7.5 support
author: Vladimir Sokolovsky <vlad@mellanox.com>
Tue, 17 Jul 2018 18:58:26 +0000 (13:58 -0500)
committer: Vladimir Sokolovsky <vlad@mellanox.com>
Fri, 20 Jul 2018 20:56:23 +0000 (15:56 -0500)
Signed-off-by: Vladimir Sokolovsky <vlad@mellanox.com>
patches/0005-BACKPORT-mlx4.patch [new file with mode: 0644]

diff --git a/patches/0005-BACKPORT-mlx4.patch b/patches/0005-BACKPORT-mlx4.patch
new file mode 100644 (file)
index 0000000..cc76c2d
--- /dev/null
@@ -0,0 +1,2029 @@
+From: Vladimir Sokolovsky <vlad@mellanox.com>
+Subject: [PATCH] BACKPORT: mlx4
+
+Signed-off-by: Vladimir Sokolovsky <vlad@mellanox.com>
+---
+ drivers/net/ethernet/mellanox/mlx4/catas.c         |  17 +
+ drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c     |  29 ++
+ drivers/net/ethernet/mellanox/mlx4/en_ethtool.c    |   8 +
+ drivers/net/ethernet/mellanox/mlx4/en_netdev.c     | 378 ++++++++++++++++++++-
+ drivers/net/ethernet/mellanox/mlx4/en_rx.c         |  52 ++-
+ drivers/net/ethernet/mellanox/mlx4/main.c          |   2 +
+ drivers/net/ethernet/mellanox/mlx4/mlx4_en.h       |  12 +
+ drivers/net/ethernet/mellanox/mlx5/core/en.h       |   2 +-
+ .../net/ethernet/mellanox/mlx5/core/en_ethtool.c   |   8 -
+ drivers/net/ethernet/mellanox/mlx5/core/en_main.c  |  24 +-
+ drivers/net/ethernet/mellanox/mlx5/core/en_rep.c   | 102 ++++++
+ drivers/net/ethernet/mellanox/mlx5/core/en_rx.c    |  39 +++
+ drivers/net/ethernet/mellanox/mlx5/core/en_tc.c    |  16 +
+ drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c  |   4 +
+ drivers/net/ethernet/mellanox/mlx5/core/eswitch.c  |   2 +
+ 15 files changed, 676 insertions(+), 19 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
++++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
+@@ -231,10 +231,19 @@ static void dump_err_buf(struct mlx4_dev *dev)
+                        i, swab32(readl(priv->catas_err.map + i)));
+ }
++#ifdef HAVE_TIMER_SETUP
+ static void poll_catas(struct timer_list *t)
++#else
++static void poll_catas(unsigned long dev_ptr)
++#endif
+ {
++#ifdef HAVE_TIMER_SETUP
+       struct mlx4_priv *priv = from_timer(priv, t, catas_err.timer);
+       struct mlx4_dev *dev = &priv->dev;
++#else
++      struct mlx4_dev *dev = (struct mlx4_dev *) dev_ptr;
++      struct mlx4_priv *priv = mlx4_priv(dev);
++#endif
+       u32 slave_read;
+       if (mlx4_is_slave(dev)) {
+@@ -277,7 +286,11 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev)
+       phys_addr_t addr;
+       INIT_LIST_HEAD(&priv->catas_err.list);
++#ifdef HAVE_TIMER_SETUP
+       timer_setup(&priv->catas_err.timer, poll_catas, 0);
++#else
++      init_timer(&priv->catas_err.timer);
++#endif
+       priv->catas_err.map = NULL;
+       if (!mlx4_is_slave(dev)) {
+@@ -293,6 +306,10 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev)
+               }
+       }
++#ifndef HAVE_TIMER_SETUP
++      priv->catas_err.timer.data     = (unsigned long) dev;
++      priv->catas_err.timer.function = poll_catas;
++#endif
+       priv->catas_err.timer.expires  =
+               round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL);
+       add_timer(&priv->catas_err.timer);
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+@@ -138,7 +138,11 @@ static void mlx4_en_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
+       priv->cee_config.pfc_state = true;
+ }
++#ifdef NDO_GETNUMTCS_RETURNS_INT
+ static int mlx4_en_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
++#else
++static u8 mlx4_en_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
++#endif
+ {
+       struct mlx4_en_priv *priv = netdev_priv(netdev);
+@@ -254,7 +258,11 @@ static u8 mlx4_en_dcbnl_set_state(struct net_device *dev, u8 state)
+  * otherwise returns 0 as the invalid user priority bitmap to
+  * indicate an error.
+  */
++#ifdef NDO_GETAPP_RETURNS_INT
+ static int mlx4_en_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
++#else
++static u8 mlx4_en_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
++#endif
+ {
+       struct mlx4_en_priv *priv = netdev_priv(netdev);
+       struct dcb_app app = {
+@@ -267,8 +275,13 @@ static int mlx4_en_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
+       return dcb_getapp(netdev, &app);
+ }
++#ifdef NDO_SETAPP_RETURNS_INT
+ static int mlx4_en_dcbnl_setapp(struct net_device *netdev, u8 idtype,
+                               u16 id, u8 up)
++#else
++static u8 mlx4_en_dcbnl_setapp(struct net_device *netdev, u8 idtype,
++                             u16 id, u8 up)
++#endif
+ {
+       struct mlx4_en_priv *priv = netdev_priv(netdev);
+       struct dcb_app app;
+@@ -538,7 +551,12 @@ static int mlx4_en_dcbnl_ieee_setmaxrate(struct net_device *dev,
+ #define RPG_ENABLE_BIT        31
+ #define CN_TAG_BIT    30
++#ifdef HAVE_IEEE_GETQCN
++#ifndef CONFIG_SYSFS_QCN
+ static int mlx4_en_dcbnl_ieee_getqcn(struct net_device *dev,
++#else
++int mlx4_en_dcbnl_ieee_getqcn(struct net_device *dev,
++#endif
+                                    struct ieee_qcn *qcn)
+ {
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+@@ -601,7 +619,11 @@ static int mlx4_en_dcbnl_ieee_getqcn(struct net_device *dev,
+       return 0;
+ }
++#ifndef CONFIG_SYSFS_QCN
+ static int mlx4_en_dcbnl_ieee_setqcn(struct net_device *dev,
++#else
++int mlx4_en_dcbnl_ieee_setqcn(struct net_device *dev,
++#endif
+                                    struct ieee_qcn *qcn)
+ {
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+@@ -664,7 +686,11 @@ static int mlx4_en_dcbnl_ieee_setqcn(struct net_device *dev,
+       return 0;
+ }
++#ifndef CONFIG_SYSFS_QCN
+ static int mlx4_en_dcbnl_ieee_getqcnstats(struct net_device *dev,
++#else
++int mlx4_en_dcbnl_ieee_getqcnstats(struct net_device *dev,
++#endif
+                                         struct ieee_qcn_stats *qcn_stats)
+ {
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+@@ -706,15 +732,18 @@ static int mlx4_en_dcbnl_ieee_getqcnstats(struct net_device *dev,
+       mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
+       return 0;
+ }
++#endif /* HAVE_IEEE_GETQCN */
+ const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = {
+       .ieee_getets            = mlx4_en_dcbnl_ieee_getets,
+       .ieee_setets            = mlx4_en_dcbnl_ieee_setets,
+       .ieee_getmaxrate        = mlx4_en_dcbnl_ieee_getmaxrate,
+       .ieee_setmaxrate        = mlx4_en_dcbnl_ieee_setmaxrate,
++#ifdef HAVE_IEEE_GETQCN
+       .ieee_getqcn            = mlx4_en_dcbnl_ieee_getqcn,
+       .ieee_setqcn            = mlx4_en_dcbnl_ieee_setqcn,
+       .ieee_getqcnstats       = mlx4_en_dcbnl_ieee_getqcnstats,
++#endif
+       .ieee_getpfc            = mlx4_en_dcbnl_ieee_getpfc,
+       .ieee_setpfc            = mlx4_en_dcbnl_ieee_setpfc,
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+@@ -1158,7 +1158,11 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
+       memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+       new_prof.tx_ring_size = tx_size;
+       new_prof.rx_ring_size = rx_size;
++#ifdef HAVE_XDP_BUFF
+       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
++#else
++      err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
++#endif
+       if (err)
+               goto out;
+@@ -1841,7 +1845,11 @@ static int mlx4_en_set_channels(struct net_device *dev,
+       new_prof.tx_ring_num[TX_XDP] = xdp_count;
+       new_prof.rx_ring_num = channel->rx_count;
++#ifdef HAVE_XDP_BUFF
+       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
++#else
++      err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
++#endif
+       if (err)
+               goto out;
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+@@ -31,7 +31,9 @@
+  *
+  */
++#ifdef HAVE_XDP_BUFF
+ #include <linux/bpf.h>
++#endif
+ #include <linux/etherdevice.h>
+ #include <linux/tcp.h>
+ #include <linux/if_vlan.h>
+@@ -40,8 +42,12 @@
+ #include <linux/hash.h>
+ #include <net/ip.h>
+ #include <net/busy_poll.h>
++#ifdef HAVE_KERNEL_WITH_VXLAN_SUPPORT_ON
+ #include <net/vxlan.h>
++#endif
++#ifdef HAVE_DEVLINK_H
+ #include <net/devlink.h>
++#endif
+ #include <linux/mlx4/driver.h>
+ #include <linux/mlx4/device.h>
+@@ -105,7 +111,11 @@ int mlx4_en_alloc_tx_queue_per_tc(struct net_device *dev, u8 tc)
+                                     MLX4_EN_NUM_UP_HIGH;
+       new_prof.tx_ring_num[TX] = new_prof.num_tx_rings_p_up *
+                                  new_prof.num_up;
++#ifdef HAVE_XDP_BUFF
+       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
++#else
++      err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
++#endif
+       if (err)
+               goto out;
+@@ -130,6 +140,8 @@ out:
+       return err;
+ }
++#if defined(HAVE_NDO_SETUP_TC) || defined(HAVE_NDO_SETUP_TC_RH_EXTENDED)
++#if defined(HAVE_NDO_SETUP_TC_TAKES_TC_SETUP_TYPE) || defined(HAVE_NDO_SETUP_TC_RH_EXTENDED)
+ static int __mlx4_en_setup_tc(struct net_device *dev, enum tc_setup_type type,
+                             void *type_data)
+ {
+@@ -145,6 +157,38 @@ static int __mlx4_en_setup_tc(struct net_device *dev, enum tc_setup_type type,
+       return mlx4_en_alloc_tx_queue_per_tc(dev, mqprio->num_tc);
+ }
++#else /* before 4.14-15 TC changes */
++#if defined(HAVE_NDO_SETUP_TC_4_PARAMS) || defined(HAVE_NDO_SETUP_TC_TAKES_CHAIN_INDEX)
++static int __mlx4_en_setup_tc(struct net_device *dev, u32 handle,
++#ifdef HAVE_NDO_SETUP_TC_TAKES_CHAIN_INDEX
++                            u32 chain_index, __be16 proto,
++#else
++                            __be16 proto,
++#endif
++                            struct tc_to_netdev *tc)
++{
++      if (tc->type != TC_SETUP_MQPRIO)
++              return -EINVAL;
++
++#ifdef HAVE_TC_TO_NETDEV_TC
++      return mlx4_en_setup_tc(dev, tc->tc);
++#else
++      if (tc->mqprio->num_tc && tc->mqprio->num_tc != MLX4_EN_NUM_UP_HIGH)
++              return -EINVAL;
++
++#ifdef HAVE_NDO_SETUP_TC_TAKES_CHAIN_INDEX
++      if (chain_index)
++              return -EOPNOTSUPP;
++#endif
++
++      tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
++
++      return mlx4_en_alloc_tx_queue_per_tc(dev, tc->mqprio->num_tc);
++#endif
++}
++#endif
++#endif
++#endif
+ #ifdef CONFIG_RFS_ACCEL
+@@ -460,8 +504,14 @@ static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
+ }
+ #endif
++#if defined(HAVE_NDO_RX_ADD_VID_HAS_3_PARAMS)
+ static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
+                                  __be16 proto, u16 vid)
++#elif defined(HAVE_NDO_RX_ADD_VID_HAS_2_PARAMS_RET_INT)
++static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
++#else
++static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
++#endif
+ {
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_en_dev *mdev = priv->mdev;
+@@ -487,11 +537,20 @@ static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
+ out:
+       mutex_unlock(&mdev->state_lock);
++#if (defined(HAVE_NDO_RX_ADD_VID_HAS_3_PARAMS) || \
++     defined(HAVE_NDO_RX_ADD_VID_HAS_2_PARAMS_RET_INT))
+       return err;
++#endif
+ }
++#if defined(HAVE_NDO_RX_ADD_VID_HAS_3_PARAMS)
+ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
+                                   __be16 proto, u16 vid)
++#elif defined(HAVE_NDO_RX_ADD_VID_HAS_2_PARAMS_RET_INT)
++static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
++#else
++static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
++#endif
+ {
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_en_dev *mdev = priv->mdev;
+@@ -1267,7 +1326,11 @@ static void mlx4_en_do_set_rx_mode(struct work_struct *work)
+               }
+       }
++#ifdef HAVE_NETDEV_IFF_UNICAST_FLT
+       if (dev->priv_flags & IFF_UNICAST_FLT)
++#else
++      if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
++#endif
+               mlx4_en_do_uc_filter(priv, dev, mdev);
+       /* Promsicuous mode: disable all filters */
+@@ -1393,16 +1456,28 @@ static void mlx4_en_tx_timeout(struct net_device *dev)
+       queue_work(mdev->workqueue, &priv->watchdog_task);
+ }
+-
+-static void
+-mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
++#ifdef HAVE_NDO_GET_STATS64_RET_VOID
++static void mlx4_en_get_stats64(struct net_device *dev,
++                              struct rtnl_link_stats64 *stats)
++#elif defined(HAVE_NDO_GET_STATS64)
++struct rtnl_link_stats64 *mlx4_en_get_stats64(struct net_device *dev,
++                                            struct rtnl_link_stats64 *stats)
++#else
++static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
++#endif
+ {
+       struct mlx4_en_priv *priv = netdev_priv(dev);
++#if !defined(HAVE_NDO_GET_STATS64) && !defined(HAVE_NDO_GET_STATS64_RET_VOID)
++      struct net_device_stats *stats = &priv->ret_stats;
++#endif
+       spin_lock_bh(&priv->stats_lock);
+       mlx4_en_fold_software_stats(dev);
+       netdev_stats_to_stats64(stats, &dev->stats);
+       spin_unlock_bh(&priv->stats_lock);
++#ifndef HAVE_NDO_GET_STATS64_RET_VOID
++      return stats;
++#endif
+ }
+ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
+@@ -1607,6 +1682,7 @@ static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
+       free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask);
+ }
++#ifdef HAVE_XDP_BUFF
+ static void mlx4_en_init_recycle_ring(struct mlx4_en_priv *priv,
+                                     int tx_ring_idx)
+ {
+@@ -1618,6 +1694,7 @@ static void mlx4_en_init_recycle_ring(struct mlx4_en_priv *priv,
+       en_dbg(DRV, priv, "Set tx_ring[%d][%d]->recycle_ring = rx_ring[%d]\n",
+              TX_XDP, tx_ring_idx, rr_index);
+ }
++#endif
+ int mlx4_en_start_port(struct net_device *dev)
+ {
+@@ -1753,7 +1830,9 @@ int mlx4_en_start_port(struct net_device *dev)
+                       } else {
+                               mlx4_en_init_tx_xdp_ring_descs(priv, tx_ring);
++#ifdef HAVE_XDP_BUFF
+                               mlx4_en_init_recycle_ring(priv, i);
++#endif
+                               /* XDP TX CQ should never be armed */
+                       }
+@@ -1826,8 +1905,14 @@ int mlx4_en_start_port(struct net_device *dev)
+       /* Schedule multicast task to populate multicast list */
+       queue_work(mdev->workqueue, &priv->rx_mode_task);
++#ifdef HAVE_KERNEL_WITH_VXLAN_SUPPORT_ON
+       if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
++#if defined(HAVE_NDO_UDP_TUNNEL_ADD) || defined(HAVE_NDO_UDP_TUNNEL_ADD_EXTENDED)
+               udp_tunnel_get_rx_info(dev);
++#elif defined(HAVE_NDO_ADD_VXLAN_PORT)
++              vxlan_get_rx_port(dev);
++#endif
++#endif
+       priv->port_up = true;
+@@ -2116,9 +2201,11 @@ static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
+ {
+       int i, t;
++#ifdef HAVE_NETDEV_RX_CPU_RMAP
+ #ifdef CONFIG_RFS_ACCEL
+       priv->dev->rx_cpu_rmap = NULL;
+ #endif
++#endif
+       for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+               for (i = 0; i < priv->tx_ring_num[t]; i++) {
+@@ -2274,11 +2361,19 @@ static void mlx4_en_update_priv(struct mlx4_en_priv *dst,
+ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
+                               struct mlx4_en_priv *tmp,
++#ifdef HAVE_XDP_BUFF
+                               struct mlx4_en_port_profile *prof,
+                               bool carry_xdp_prog)
++#else
++                              struct mlx4_en_port_profile *prof)
++#endif
+ {
++#ifdef HAVE_XDP_BUFF
+       struct bpf_prog *xdp_prog;
+       int i, t;
++#else
++      int t;
++#endif
+       mlx4_en_copy_priv(tmp, priv, prof);
+@@ -2293,6 +2388,7 @@ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
+               return -ENOMEM;
+       }
++#ifdef HAVE_XDP_BUFF
+       /* All rx_rings has the same xdp_prog.  Pick the first one. */
+       xdp_prog = rcu_dereference_protected(
+               priv->rx_ring[0]->xdp_prog,
+@@ -2308,6 +2404,7 @@ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
+                       rcu_assign_pointer(tmp->rx_ring[i]->xdp_prog,
+                                          xdp_prog);
+       }
++#endif
+       return 0;
+ }
+@@ -2328,8 +2425,10 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
+       /* Unregister device - this will close the port if it was up */
+       if (priv->registered) {
++#ifdef HAVE_DEVLINK_H
+               devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev,
+                                                             priv->port));
++#endif
+               unregister_netdev(dev);
+       }
+@@ -2359,6 +2458,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
+       free_netdev(dev);
+ }
++#ifdef HAVE_XDP_BUFF
+ static bool mlx4_en_check_xdp_mtu(struct net_device *dev, int mtu)
+ {
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+@@ -2371,6 +2471,7 @@ static bool mlx4_en_check_xdp_mtu(struct net_device *dev, int mtu)
+       return true;
+ }
++#endif
+ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
+ {
+@@ -2381,9 +2482,11 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
+       en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
+                dev->mtu, new_mtu);
++#ifdef HAVE_XDP_BUFF
+       if (priv->tx_ring_num[TX_XDP] &&
+           !mlx4_en_check_xdp_mtu(dev, new_mtu))
+               return -EOPNOTSUPP;
++#endif
+       dev->mtu = new_mtu;
+@@ -2467,6 +2570,7 @@ static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+                           sizeof(config)) ? -EFAULT : 0;
+ }
++#ifdef SIOCGHWTSTAMP
+ static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+ {
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+@@ -2474,19 +2578,23 @@ static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+       return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config,
+                           sizeof(priv->hwtstamp_config)) ? -EFAULT : 0;
+ }
++#endif
+ static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+ {
+       switch (cmd) {
+       case SIOCSHWTSTAMP:
+               return mlx4_en_hwtstamp_set(dev, ifr);
++#ifdef SIOCGHWTSTAMP
+       case SIOCGHWTSTAMP:
+               return mlx4_en_hwtstamp_get(dev, ifr);
++#endif
+       default:
+               return -EOPNOTSUPP;
+       }
+ }
++#ifdef HAVE_NETIF_F_HW_VLAN_STAG_RX
+ static netdev_features_t mlx4_en_fix_features(struct net_device *netdev,
+                                             netdev_features_t features)
+ {
+@@ -2505,20 +2613,31 @@ static netdev_features_t mlx4_en_fix_features(struct net_device *netdev,
+       return features;
+ }
++#endif
+-static int mlx4_en_set_features(struct net_device *netdev,
+-              netdev_features_t features)
++#ifndef CONFIG_SYSFS_LOOPBACK
++static
++#endif
++int mlx4_en_set_features(struct net_device *netdev,
++#ifdef HAVE_NET_DEVICE_OPS_EXT
++                       u32 features)
++#else
++                       netdev_features_t features)
++#endif
+ {
+       struct mlx4_en_priv *priv = netdev_priv(netdev);
+       bool reset = false;
+       int ret = 0;
++#ifdef HAVE_NETIF_F_RXFCS
+       if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXFCS)) {
+               en_info(priv, "Turn %s RX-FCS\n",
+                       (features & NETIF_F_RXFCS) ? "ON" : "OFF");
+               reset = true;
+       }
++#endif
++#ifdef HAVE_NETIF_F_RXALL
+       if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXALL)) {
+               u8 ignore_fcs_value = (features & NETIF_F_RXALL) ? 1 : 0;
+@@ -2529,6 +2648,7 @@ static int mlx4_en_set_features(struct net_device *netdev,
+               if (ret)
+                       return ret;
+       }
++#endif
+       if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
+               en_info(priv, "Turn %s RX vlan strip offload\n",
+@@ -2540,9 +2660,11 @@ static int mlx4_en_set_features(struct net_device *netdev,
+               en_info(priv, "Turn %s TX vlan strip offload\n",
+                       (features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF");
++#ifdef HAVE_NETIF_F_HW_VLAN_STAG_RX
+       if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_STAG_TX))
+               en_info(priv, "Turn %s TX S-VLAN strip offload\n",
+                       (features & NETIF_F_HW_VLAN_STAG_TX) ? "ON" : "OFF");
++#endif
+       if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) {
+               en_info(priv, "Turn %s loopback\n",
+@@ -2560,6 +2682,7 @@ static int mlx4_en_set_features(struct net_device *netdev,
+       return 0;
+ }
++#ifdef HAVE_NDO_SET_VF_MAC
+ static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
+ {
+       struct mlx4_en_priv *en_priv = netdev_priv(dev);
+@@ -2567,17 +2690,28 @@ static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
+       return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac);
+ }
++#endif
++#if defined(HAVE_NDO_SET_VF_VLAN) || defined(HAVE_NDO_SET_VF_VLAN_EXTENDED)
++#ifdef HAVE_VF_VLAN_PROTO
+ static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
+                              __be16 vlan_proto)
++#else
++static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
++#endif
+ {
+       struct mlx4_en_priv *en_priv = netdev_priv(dev);
+       struct mlx4_en_dev *mdev = en_priv->mdev;
++#ifndef HAVE_VF_VLAN_PROTO
++      __be16 vlan_proto = htons(ETH_P_8021Q);
++#endif
+       return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos,
+                               vlan_proto);
+ }
++#endif /* HAVE_NDO_SET_VF_VLAN */
++#ifdef HAVE_TX_RATE_LIMIT
+ static int mlx4_en_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
+                              int max_tx_rate)
+ {
+@@ -2587,7 +2721,9 @@ static int mlx4_en_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
+       return mlx4_set_vf_rate(mdev->dev, en_priv->port, vf, min_tx_rate,
+                               max_tx_rate);
+ }
++#endif
++#if defined(HAVE_NETDEV_OPS_NDO_SET_VF_SPOOFCHK) || defined(HAVE_NETDEV_OPS_EXT_NDO_SET_VF_SPOOFCHK)
+ static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
+ {
+       struct mlx4_en_priv *en_priv = netdev_priv(dev);
+@@ -2595,6 +2731,7 @@ static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
+       return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting);
+ }
++#endif
+ static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivf)
+ {
+@@ -2604,6 +2741,7 @@ static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_
+       return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf);
+ }
++#if defined(HAVE_NETDEV_OPS_NDO_SET_VF_LINK_STATE) || defined(HAVE_NETDEV_OPS_EXT_NDO_SET_VF_LINK_STATE)
+ static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_state)
+ {
+       struct mlx4_en_priv *en_priv = netdev_priv(dev);
+@@ -2611,7 +2749,9 @@ static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_st
+       return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
+ }
++#endif
++#ifdef HAVE_NDO_GET_VF_STATS
+ static int mlx4_en_get_vf_stats(struct net_device *dev, int vf,
+                               struct ifla_vf_stats *vf_stats)
+ {
+@@ -2620,10 +2760,16 @@ static int mlx4_en_get_vf_stats(struct net_device *dev, int vf,
+       return mlx4_get_vf_stats(mdev->dev, en_priv->port, vf, vf_stats);
+ }
++#endif
++#if defined(HAVE_NETDEV_NDO_GET_PHYS_PORT_ID) || defined(HAVE_NETDEV_EXT_NDO_GET_PHYS_PORT_ID)
+ #define PORT_ID_BYTE_LEN 8
+ static int mlx4_en_get_phys_port_id(struct net_device *dev,
++#ifdef HAVE_NETDEV_PHYS_ITEM_ID
+                                   struct netdev_phys_item_id *ppid)
++#else
++                                  struct netdev_phys_port_id *ppid)
++#endif
+ {
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_dev *mdev = priv->mdev->dev;
+@@ -2640,6 +2786,7 @@ static int mlx4_en_get_phys_port_id(struct net_device *dev,
+       }
+       return 0;
+ }
++#endif
+ static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
+ {
+@@ -2659,13 +2806,21 @@ out:
+               return;
+       }
++#ifdef HAVE_NETDEV_HW_ENC_FEATURES
+       /* set offloads */
+       priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+                                     NETIF_F_RXCSUM |
+                                     NETIF_F_TSO | NETIF_F_TSO6 |
+                                     NETIF_F_GSO_UDP_TUNNEL |
++#ifdef NETIF_F_GSO_UDP_TUNNEL_CSUM
+                                     NETIF_F_GSO_UDP_TUNNEL_CSUM |
++#endif
++#ifdef NETIF_F_GSO_PARTIAL
+                                     NETIF_F_GSO_PARTIAL;
++#else
++                                    0;
++#endif
++#endif
+ }
+ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
+@@ -2673,13 +2828,21 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
+       int ret;
+       struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
+                                                vxlan_del_task);
++#ifdef HAVE_NETDEV_HW_ENC_FEATURES
+       /* unset offloads */
+       priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+                                       NETIF_F_RXCSUM |
+                                       NETIF_F_TSO | NETIF_F_TSO6 |
+                                       NETIF_F_GSO_UDP_TUNNEL |
++#ifdef NETIF_F_GSO_UDP_TUNNEL_CSUM
+                                       NETIF_F_GSO_UDP_TUNNEL_CSUM |
++#endif
++#ifdef NETIF_F_GSO_PARTIAL
+                                       NETIF_F_GSO_PARTIAL);
++#else
++                                      0);
++#endif
++#endif
+       ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
+                                 VXLAN_STEER_BY_OUTER_MAC, 0);
+@@ -2741,12 +2904,19 @@ static void mlx4_en_del_vxlan_port(struct  net_device *dev,
+       queue_work(priv->mdev->workqueue, &priv->vxlan_del_task);
+ }
++#ifdef HAVE_NETDEV_FEATURES_T
+ static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
+                                               struct net_device *dev,
+                                               netdev_features_t features)
+ {
++#ifdef HAVE_VLAN_FEATURES_CHECK
+       features = vlan_features_check(skb, features);
++#endif
++#ifdef HAVE_KERNEL_WITH_VXLAN_SUPPORT_ON
++#ifdef HAVE_VXLAN_FEATURES_CHECK
+       features = vxlan_features_check(skb, features);
++#endif
++#endif
+       /* The ConnectX-3 doesn't support outer IPv6 checksums but it does
+        * support inner IPv6 checksums and segmentation so  we need to
+@@ -2765,6 +2935,7 @@ static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
+       return features;
+ }
++#if defined(HAVE_NDO_SET_TX_MAXRATE) || defined(HAVE_NDO_SET_TX_MAXRATE_EXTENDED)
+ static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate)
+ {
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+@@ -2791,7 +2962,9 @@ static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 m
+                            &params);
+       return err;
+ }
++#endif
++#ifdef HAVE_XDP_BUFF
+ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
+ {
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+@@ -2855,7 +3028,11 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
+               en_warn(priv, "Reducing the number of TX rings, to not exceed the max total rings number.\n");
+       }
++#ifdef HAVE_XDP_BUFF
+       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, false);
++#else
++      err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
++#endif
+       if (err) {
+               if (prog)
+                       bpf_prog_sub(prog, priv->rx_ring_num - 1);
+@@ -2896,6 +3073,7 @@ out:
+       return err;
+ }
++#ifdef HAVE_BPF_PROG_AUX_FEILD_ID
+ static u32 mlx4_xdp_query(struct net_device *dev)
+ {
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+@@ -2916,6 +3094,14 @@ static u32 mlx4_xdp_query(struct net_device *dev)
+       return prog_id;
+ }
++#else
++static bool mlx4_xdp_attached(struct net_device *dev)
++{
++      struct mlx4_en_priv *priv = netdev_priv(dev);
++
++      return !!priv->tx_ring_num[TX_XDP];
++}
++#endif
+ static int mlx4_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+ {
+@@ -2923,24 +3109,40 @@ static int mlx4_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+       case XDP_SETUP_PROG:
+               return mlx4_xdp_set(dev, xdp->prog);
+       case XDP_QUERY_PROG:
++#ifdef HAVE_BPF_PROG_AUX_FEILD_ID
+               xdp->prog_id = mlx4_xdp_query(dev);
+               xdp->prog_attached = !!xdp->prog_id;
++#else
++              xdp->prog_attached = mlx4_xdp_attached(dev);
++#endif
+               return 0;
+       default:
+               return -EINVAL;
+       }
+ }
++#endif
+ static const struct net_device_ops mlx4_netdev_ops = {
++#ifdef HAVE_NET_DEVICE_OPS_EXTENDED
++      .ndo_size = sizeof(struct net_device_ops),
++#endif
+       .ndo_open               = mlx4_en_open,
+       .ndo_stop               = mlx4_en_close,
+       .ndo_start_xmit         = mlx4_en_xmit,
+       .ndo_select_queue       = mlx4_en_select_queue,
++#if defined(HAVE_NDO_GET_STATS64_RET_VOID) || defined(HAVE_NDO_GET_STATS64)
+       .ndo_get_stats64        = mlx4_en_get_stats64,
++#else
++      .ndo_get_stats          = mlx4_en_get_stats,
++#endif
+       .ndo_set_rx_mode        = mlx4_en_set_rx_mode,
+       .ndo_set_mac_address    = mlx4_en_set_mac,
+       .ndo_validate_addr      = eth_validate_addr,
++#ifdef HAVE_NDO_CHANGE_MTU_EXTENDED
++      .extended.ndo_change_mtu = mlx4_en_change_mtu,
++#else
+       .ndo_change_mtu         = mlx4_en_change_mtu,
++#endif
+       .ndo_do_ioctl           = mlx4_en_ioctl,
+       .ndo_tx_timeout         = mlx4_en_tx_timeout,
+       .ndo_vlan_rx_add_vid    = mlx4_en_vlan_rx_add_vid,
+@@ -2948,21 +3150,74 @@ static const struct net_device_ops mlx4_netdev_ops = {
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = mlx4_en_netpoll,
+ #endif
++#if (defined(HAVE_NDO_SET_FEATURES) && !defined(HAVE_NET_DEVICE_OPS_EXT))
+       .ndo_set_features       = mlx4_en_set_features,
++#endif
++#ifdef HAVE_NETIF_F_HW_VLAN_STAG_RX
+       .ndo_fix_features       = mlx4_en_fix_features,
++#endif
++#ifdef HAVE_NDO_SETUP_TC_RH_EXTENDED
++      .extended.ndo_setup_tc_rh = __mlx4_en_setup_tc,
++#else
++#ifdef HAVE_NDO_SETUP_TC
++#if defined(HAVE_NDO_SETUP_TC_4_PARAMS) || \
++    defined(HAVE_NDO_SETUP_TC_TAKES_CHAIN_INDEX) || \
++    defined(HAVE_TC_FLOWER_OFFLOAD) && \
++    !defined(CONFIG_COMPAT_CLS_FLOWER_MOD)
+       .ndo_setup_tc           = __mlx4_en_setup_tc,
++#else
++      .ndo_setup_tc           = mlx4_en_setup_tc,
++#endif
++#endif
++#endif
++#ifdef HAVE_NDO_RX_FLOW_STEER
+ #ifdef CONFIG_RFS_ACCEL
+       .ndo_rx_flow_steer      = mlx4_en_filter_rfs,
+ #endif
++#endif
++#ifdef MLX4_EN_BUSY_POLL
++#ifndef HAVE_NETDEV_EXTENDED_NDO_BUSY_POLL
++      .ndo_busy_poll          = mlx4_en_low_latency_recv,
++#endif
++#endif
++#ifdef HAVE_NETDEV_NDO_GET_PHYS_PORT_ID
+       .ndo_get_phys_port_id   = mlx4_en_get_phys_port_id,
++#elif defined(HAVE_NDO_GET_PHYS_PORT_NAME_EXTENDED)
++      .extended.ndo_get_phys_port_id  = mlx4_en_get_phys_port_id,
++#endif
++#ifdef HAVE_KERNEL_WITH_VXLAN_SUPPORT_ON
++#ifdef HAVE_NDO_UDP_TUNNEL_ADD
+       .ndo_udp_tunnel_add     = mlx4_en_add_vxlan_port,
+       .ndo_udp_tunnel_del     = mlx4_en_del_vxlan_port,
++#elif defined(HAVE_NDO_UDP_TUNNEL_ADD_EXTENDED)
++      .extended.ndo_udp_tunnel_add      = mlx4_en_add_vxlan_port,
++      .extended.ndo_udp_tunnel_del      = mlx4_en_del_vxlan_port,
++#elif defined(HAVE_NDO_ADD_VXLAN_PORT)
++      .ndo_add_vxlan_port     = mlx4_en_add_vxlan_port,
++      .ndo_del_vxlan_port     = mlx4_en_del_vxlan_port,
++#endif
++#endif
++#ifdef HAVE_NETDEV_FEATURES_T
+       .ndo_features_check     = mlx4_en_features_check,
++#elif defined(HAVE_KERNEL_WITH_VXLAN_SUPPORT_ON) && defined(HAVE_VXLAN_GSO_CHECK)
++      .ndo_gso_check          = mlx4_en_gso_check,
++#endif
++#ifdef HAVE_NDO_SET_TX_MAXRATE
+       .ndo_set_tx_maxrate     = mlx4_en_set_tx_maxrate,
++#elif defined(HAVE_NDO_SET_TX_MAXRATE_EXTENDED)
++      .extended.ndo_set_tx_maxrate    = mlx4_en_set_tx_maxrate,
++#endif
++#ifdef HAVE_NDO_XDP_EXTENDED
++      .extended.ndo_xdp        = mlx4_xdp,
++#elif defined(HAVE_XDP_BUFF)
+       .ndo_bpf                = mlx4_xdp,
++#endif
+ };
+ static const struct net_device_ops mlx4_netdev_ops_master = {
++#ifdef HAVE_NET_DEVICE_OPS_EXTENDED
++      .ndo_size               = sizeof(struct net_device_ops),
++#endif
+       .ndo_open               = mlx4_en_open,
+       .ndo_stop               = mlx4_en_close,
+       .ndo_start_xmit         = mlx4_en_xmit,
+@@ -2971,12 +3226,20 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
+       .ndo_set_rx_mode        = mlx4_en_set_rx_mode,
+       .ndo_set_mac_address    = mlx4_en_set_mac,
+       .ndo_validate_addr      = eth_validate_addr,
++#ifdef HAVE_NDO_CHANGE_MTU_EXTENDED
++      .extended.ndo_change_mtu        = mlx4_en_change_mtu,
++#else
+       .ndo_change_mtu         = mlx4_en_change_mtu,
++#endif
+       .ndo_tx_timeout         = mlx4_en_tx_timeout,
+       .ndo_vlan_rx_add_vid    = mlx4_en_vlan_rx_add_vid,
+       .ndo_vlan_rx_kill_vid   = mlx4_en_vlan_rx_kill_vid,
+       .ndo_set_vf_mac         = mlx4_en_set_vf_mac,
++#ifdef HAVE_NDO_SET_VF_VLAN_EXTENDED
++      .extended.ndo_set_vf_vlan       = mlx4_en_set_vf_vlan,
++#else
+       .ndo_set_vf_vlan        = mlx4_en_set_vf_vlan,
++#endif
+       .ndo_set_vf_rate        = mlx4_en_set_vf_rate,
+       .ndo_set_vf_spoofchk    = mlx4_en_set_vf_spoofchk,
+       .ndo_set_vf_link_state  = mlx4_en_set_vf_link_state,
+@@ -2987,16 +3250,45 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
+ #endif
+       .ndo_set_features       = mlx4_en_set_features,
+       .ndo_fix_features       = mlx4_en_fix_features,
++#ifdef HAVE_NDO_SETUP_TC_RH_EXTENDED
++      .extended.ndo_setup_tc_rh = __mlx4_en_setup_tc,
++#else
++#if defined(HAVE_NDO_SETUP_TC_4_PARAMS) || \
++    defined(HAVE_NDO_SETUP_TC_TAKES_CHAIN_INDEX) || \
++    defined(HAVE_TC_FLOWER_OFFLOAD) && \
++    !defined(CONFIG_COMPAT_CLS_FLOWER_MOD)
+       .ndo_setup_tc           = __mlx4_en_setup_tc,
++#else
++      .ndo_setup_tc           = mlx4_en_setup_tc,
++#endif
++#endif
+ #ifdef CONFIG_RFS_ACCEL
+       .ndo_rx_flow_steer      = mlx4_en_filter_rfs,
+ #endif
+       .ndo_get_phys_port_id   = mlx4_en_get_phys_port_id,
++#ifdef HAVE_KERNEL_WITH_VXLAN_SUPPORT_ON
++#ifdef HAVE_NDO_UDP_TUNNEL_ADD
+       .ndo_udp_tunnel_add     = mlx4_en_add_vxlan_port,
+       .ndo_udp_tunnel_del     = mlx4_en_del_vxlan_port,
++#elif defined(HAVE_NDO_UDP_TUNNEL_ADD_EXTENDED)
++      .extended.ndo_udp_tunnel_add      = mlx4_en_add_vxlan_port,
++      .extended.ndo_udp_tunnel_del      = mlx4_en_del_vxlan_port,
++#elif defined(HAVE_NDO_ADD_VXLAN_PORT)
++      .ndo_add_vxlan_port     = mlx4_en_add_vxlan_port,
++      .ndo_del_vxlan_port     = mlx4_en_del_vxlan_port,
++#endif
++#endif
+       .ndo_features_check     = mlx4_en_features_check,
++#ifdef HAVE_NDO_SET_TX_MAXRATE
+       .ndo_set_tx_maxrate     = mlx4_en_set_tx_maxrate,
++#elif defined(HAVE_NDO_SET_TX_MAXRATE_EXTENDED)
++      .extended.ndo_set_tx_maxrate    = mlx4_en_set_tx_maxrate,
++#endif
++#ifdef HAVE_NDO_XDP_EXTENDED
++      .extended.ndo_xdp        = mlx4_xdp,
++#elif defined(HAVE_XDP_BUFF)
+       .ndo_bpf                = mlx4_xdp,
++#endif
+ };
+ struct mlx4_en_bond {
+@@ -3424,24 +3717,35 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+       netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
+       netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
++#ifdef HAVE_ETHTOOL_OPS_EXT
++      SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
++      set_ethtool_ops_ext(dev, &mlx4_en_ethtool_ops_ext);
++#else
+       dev->ethtool_ops = &mlx4_en_ethtool_ops;
++#endif
+       /*
+        * Set driver features
+        */
++#ifdef HAVE_NETDEV_HW_FEATURES
+       dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+       if (mdev->LSO_support)
+               dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
+       dev->vlan_features = dev->hw_features;
++#ifdef HAVE_NETIF_F_RXHASH
+       dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
++#else
++      dev->hw_features |= NETIF_F_RXCSUM;
++#endif
+       dev->features = dev->hw_features | NETIF_F_HIGHDMA |
+                       NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+                       NETIF_F_HW_VLAN_CTAG_FILTER;
+       dev->hw_features |= NETIF_F_LOOPBACK |
+                       NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
++#ifdef HAVE_NETIF_F_HW_VLAN_STAG_RX
+       if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
+               dev->features |= NETIF_F_HW_VLAN_STAG_RX |
+                       NETIF_F_HW_VLAN_STAG_FILTER;
+@@ -3475,45 +3779,93 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+                     MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
+                       dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
+       }
++#endif
++#ifdef HAVE_NETIF_F_RXFCS
+       if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
+               dev->hw_features |= NETIF_F_RXFCS;
++#endif
++#ifdef HAVE_NETIF_F_RXALL
+       if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS)
+               dev->hw_features |= NETIF_F_RXALL;
++#endif
+       if (mdev->dev->caps.steering_mode ==
+           MLX4_STEERING_MODE_DEVICE_MANAGED &&
+           mdev->dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC)
+               dev->hw_features |= NETIF_F_NTUPLE;
++#endif
++#ifdef HAVE_NETDEV_IFF_UNICAST_FLT
+       if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
+               dev->priv_flags |= IFF_UNICAST_FLT;
++#endif
+       /* Setting a default hash function value */
+       if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP) {
++#ifdef HAVE_ETH_SS_RSS_HASH_FUNCS
+               priv->rss_hash_fn = ETH_RSS_HASH_TOP;
++#else
++              priv->pflags &= ~MLX4_EN_PRIV_FLAGS_RSS_HASH_XOR;
++#ifdef HAVE_NETIF_F_RXHASH
++              dev->features |= NETIF_F_RXHASH;
++#endif
++#endif
+       } else if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR) {
++#ifdef HAVE_ETH_SS_RSS_HASH_FUNCS
+               priv->rss_hash_fn = ETH_RSS_HASH_XOR;
++#else
++              priv->pflags |= MLX4_EN_PRIV_FLAGS_RSS_HASH_XOR;
++#ifdef HAVE_NETIF_F_RXHASH
++              dev->features &= ~NETIF_F_RXHASH;
++#endif
++#endif
+       } else {
+               en_warn(priv,
+                       "No RSS hash capabilities exposed, using Toeplitz\n");
++#ifdef HAVE_ETH_SS_RSS_HASH_FUNCS
+               priv->rss_hash_fn = ETH_RSS_HASH_TOP;
++#else
++              priv->pflags &= ~MLX4_EN_PRIV_FLAGS_RSS_HASH_XOR;
++#ifdef HAVE_NETIF_F_RXHASH
++              dev->features |= NETIF_F_RXHASH;
++#endif
++#endif
+       }
+       if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
++#ifdef HAVE_NETDEV_HW_FEATURES
+               dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
++#ifdef NETIF_F_GSO_UDP_TUNNEL_CSUM
+                                   NETIF_F_GSO_UDP_TUNNEL_CSUM |
++#endif
++#ifdef NETIF_F_GSO_PARTIAL
+                                   NETIF_F_GSO_PARTIAL;
++#else
++                                  0;
++#endif
++#endif
+               dev->features    |= NETIF_F_GSO_UDP_TUNNEL |
++#ifdef NETIF_F_GSO_UDP_TUNNEL_CSUM
+                                   NETIF_F_GSO_UDP_TUNNEL_CSUM |
++#endif
++#ifdef NETIF_F_GSO_PARTIAL
+                                   NETIF_F_GSO_PARTIAL;
+               dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
++#else
++                                  0;
++#endif
+       }
++#ifdef HAVE_NET_DEVICE_MIN_MAX_MTU
+       /* MTU range: 46 - hw-specific max */
+       dev->min_mtu = MLX4_EN_MIN_MTU;
+       dev->max_mtu = priv->max_mtu;
++#elif defined(HAVE_NET_DEVICE_MIN_MAX_MTU_EXTENDED)
++      dev->extended->min_mtu = MLX4_EN_MIN_MTU;
++      dev->extended->max_mtu = priv->max_mtu;
++#endif
+       mdev->pndev[port] = dev;
+       mdev->upper[port] = NULL;
+@@ -3599,8 +3951,12 @@ int mlx4_en_reset_config(struct net_device *dev,
+       if (priv->hwtstamp_config.tx_type == ts_config.tx_type &&
+           priv->hwtstamp_config.rx_filter == ts_config.rx_filter &&
++#ifdef HAVE_NETIF_F_RXFCS
+           !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
+           !DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS))
++#else
++          !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX))
++#endif
+               return 0; /* Nothing to change */
+       if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
+@@ -3619,7 +3975,11 @@ int mlx4_en_reset_config(struct net_device *dev,
+       memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+       memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config));
++#ifdef HAVE_XDP_BUFF
+       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
++#else
++      err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
++#endif
+       if (err)
+               goto out;
+@@ -3639,18 +3999,26 @@ int mlx4_en_reset_config(struct net_device *dev,
+               /* RX time-stamping is OFF, update the RX vlan offload
+                * to the latest wanted state
+                */
++#if defined(HAVE_NETDEV_WANTED_FEATURES) || defined(HAVE_NETDEV_EXTENDED_WANTED_FEATURES)
++#ifdef HAVE_NETDEV_WANTED_FEATURES
+               if (dev->wanted_features & NETIF_F_HW_VLAN_CTAG_RX)
++#else
++              if (netdev_extended(dev)->wanted_features & NETIF_F_HW_VLAN_CTAG_RX)
++#endif
+                       dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+               else
+                       dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
++#endif
+       }
++#ifdef HAVE_NETIF_F_RXFCS
+       if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS)) {
+               if (features & NETIF_F_RXFCS)
+                       dev->features |= NETIF_F_RXFCS;
+               else
+                       dev->features &= ~NETIF_F_RXFCS;
+       }
++#endif
+       /* RX vlan offload and RX time-stamping can't co-exist !
+        * Regardless of the caller's choice,
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+@@ -32,7 +32,9 @@
+  */
+ #include <net/busy_poll.h>
++#ifdef HAVE_XDP_BUFF
+ #include <linux/bpf.h>
++#endif
+ #include <linux/bpf_trace.h>
+ #include <linux/mlx4/cq.h>
+ #include <linux/slab.h>
+@@ -286,8 +288,10 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
+       ring->log_stride = ffs(ring->stride) - 1;
+       ring->buf_size = ring->size * ring->stride + TXBB_SIZE;
++#ifdef HAVE_XDP_RXQ_INFO
+       if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index) < 0)
+               goto err_ring;
++#endif
+       tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
+                                       sizeof(struct mlx4_en_rx_alloc));
+@@ -318,8 +322,10 @@ err_info:
+       kvfree(ring->rx_info);
+       ring->rx_info = NULL;
+ err_xdp_info:
++#ifdef HAVE_XDP_RXQ_INFO
+       xdp_rxq_info_unreg(&ring->xdp_rxq);
+ err_ring:
++#endif
+       kfree(ring);
+       *pring = NULL;
+@@ -435,6 +441,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
+ {
+       struct mlx4_en_dev *mdev = priv->mdev;
+       struct mlx4_en_rx_ring *ring = *pring;
++#ifdef HAVE_XDP_BUFF
+       struct bpf_prog *old_prog;
+       old_prog = rcu_dereference_protected(
+@@ -442,7 +449,10 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
+                                       lockdep_is_held(&mdev->state_lock));
+       if (old_prog)
+               bpf_prog_put(old_prog);
++#ifdef HAVE_XDP_RXQ_INFO
+       xdp_rxq_info_unreg(&ring->xdp_rxq);
++#endif
++#endif
+       mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
+       kvfree(ring->rx_info);
+       ring->rx_info = NULL;
+@@ -657,11 +667,17 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       int factor = priv->cqe_factor;
+       struct mlx4_en_rx_ring *ring;
++#ifdef HAVE_XDP_BUFF
+       struct bpf_prog *xdp_prog;
++#endif
+       int cq_ring = cq->ring;
++#ifdef HAVE_XDP_BUFF
+       bool doorbell_pending;
++#endif
+       struct mlx4_cqe *cqe;
++#ifdef HAVE_XDP_BUFF
+       struct xdp_buff xdp;
++#endif
+       int polled = 0;
+       int index;
+@@ -670,11 +686,15 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
+       ring = priv->rx_ring[cq_ring];
++#ifdef HAVE_XDP_BUFF
+       /* Protect accesses to: ring->xdp_prog, priv->mac_hash list */
+       rcu_read_lock();
+       xdp_prog = rcu_dereference(ring->xdp_prog);
++#ifdef HAVE_XDP_RXQ_INFO
+       xdp.rxq = &ring->xdp_rxq;
++#endif
+       doorbell_pending = 0;
++#endif
+       /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
+        * descriptor offset can be deduced from the CQE index instead of
+@@ -757,9 +777,12 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
+               /* A bpf program gets first chance to drop the packet. It may
+                * read bytes but not past the end of the frag.
+                */
++#ifdef HAVE_XDP_BUFF
+               if (xdp_prog) {
+                       dma_addr_t dma;
++#ifdef HAVE_XDP_BUFF_DATA_HARD_START
+                       void *orig_data;
++#endif
+                       u32 act;
+                       dma = frags[0].dma + frags[0].page_offset;
+@@ -767,20 +790,29 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
+                                               priv->frag_info[0].frag_size,
+                                               DMA_FROM_DEVICE);
++#ifdef HAVE_XDP_BUFF_DATA_HARD_START
+                       xdp.data_hard_start = va - frags[0].page_offset;
+                       xdp.data = va;
++#ifdef HAVE_XDP_SET_DATA_META_INVALID
+                       xdp_set_data_meta_invalid(&xdp);
++#endif
+                       xdp.data_end = xdp.data + length;
+                       orig_data = xdp.data;
++#else
++                      xdp.data = va;
++                      xdp.data_end = xdp.data + length;
++#endif
+                       act = bpf_prog_run_xdp(xdp_prog, &xdp);
++#ifdef HAVE_XDP_BUFF_DATA_HARD_START
+                       if (xdp.data != orig_data) {
+                               length = xdp.data_end - xdp.data;
+                               frags[0].page_offset = xdp.data -
+                                       xdp.data_hard_start;
+                               va = xdp.data;
+                       }
++#endif
+                       switch (act) {
+                       case XDP_PASS:
+@@ -792,18 +824,23 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
+                                       frags[0].page = NULL;
+                                       goto next;
+                               }
++#ifdef HAVE_TRACE_XDP_EXCEPTION
+                               trace_xdp_exception(dev, xdp_prog, act);
++#endif
+                               goto xdp_drop_no_cnt; /* Drop on xmit failure */
+                       default:
+                               bpf_warn_invalid_xdp_action(act);
+                       case XDP_ABORTED:
++#ifdef HAVE_TRACE_XDP_EXCEPTION
+                               trace_xdp_exception(dev, xdp_prog, act);
++#endif
+                       case XDP_DROP:
+                               ring->xdp_drop++;
+ xdp_drop_no_cnt:
+                               goto next;
+                       }
+               }
++#endif
+               ring->bytes += length;
+               ring->packets++;
+@@ -885,13 +922,17 @@ next:
+                       break;
+       }
++#ifdef HAVE_XDP_BUFF
+       rcu_read_unlock();
++#endif
+       if (likely(polled)) {
++#ifdef HAVE_XDP_BUFF
+               if (doorbell_pending) {
+                       priv->tx_cq[TX_XDP][cq_ring]->xdp_busy = true;
+                       mlx4_en_xmit_doorbell(priv->tx_ring[TX_XDP][cq_ring]);
+               }
++#endif
+               mlx4_cq_set_ci(&cq->mcq);
+               wmb(); /* ensure HW sees CQ consumer before we post new buffers */
+@@ -940,7 +981,9 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
+       /* If we used up all the quota - we're probably not done yet... */
+       if (done == budget || !clean_complete) {
+               const struct cpumask *aff;
++#ifndef HAVE_IRQ_DATA_AFFINITY
+               struct irq_data *idata;
++#endif
+               int cpu_curr;
+               /* in case we got here because of !clean_complete */
+@@ -949,8 +992,12 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
+               INC_PERF_COUNTER(priv->pstats.napi_quota);
+               cpu_curr = smp_processor_id();
++#ifndef HAVE_IRQ_DATA_AFFINITY
+               idata = irq_desc_get_irq_data(cq->irq_desc);
+               aff = irq_data_get_affinity_mask(idata);
++#else
++              aff = irq_desc_get_irq_data(cq->irq_desc)->affinity;
++#endif
+               if (likely(cpumask_test_cpu(cpu_curr, aff)))
+                       return budget;
+@@ -976,6 +1023,7 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
+       int eff_mtu = MLX4_EN_EFF_MTU(dev->mtu);
+       int i = 0;
++#ifdef HAVE_XDP_BUFF
+       /* bpf requires buffers to be set up as 1 packet per page.
+        * This only works when num_frags == 1.
+        */
+@@ -988,7 +1036,9 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
+               priv->dma_dir = PCI_DMA_BIDIRECTIONAL;
+               priv->rx_headroom = XDP_PACKET_HEADROOM;
+               i = 1;
+-      } else {
++      } else
++#endif
++      {
+               int frag_size_max = 2048, buf_size = 0;
+               /* should not happen, right ? */
+diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx4/main.c
++++ b/drivers/net/ethernet/mellanox/mlx4/main.c
+@@ -3394,12 +3394,14 @@ slave_start:
+               }
+       }
++#ifdef HAVE_PCIE_PRINT_LINK_STATUS
+       /* check if the device is functioning at its maximum possible speed.
+        * No return code for this call, just warn the user in case of PCI
+        * express device capabilities are under-satisfied by the bus.
+        */
+       if (!mlx4_is_slave(dev))
+               pcie_print_link_status(dev->persist->pdev);
++#endif
+       /* In master functions, the communication channel must be initialized
+        * after obtaining its address from fw */
+diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
++++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+@@ -46,7 +46,9 @@
+ #endif
+ #include <linux/cpu_rmap.h>
+ #include <linux/ptp_clock_kernel.h>
++#ifdef HAVE_NET_XDP_H
+ #include <net/xdp.h>
++#endif
+ #include <linux/mlx4/device.h>
+ #include <linux/mlx4/qp.h>
+@@ -346,7 +348,9 @@ struct mlx4_en_rx_ring {
+       u8  fcs_del;
+       void *buf;
+       void *rx_info;
++#ifdef HAVE_XDP_BUFF
+       struct bpf_prog __rcu *xdp_prog;
++#endif
+       struct mlx4_en_page_cache page_cache;
+       unsigned long bytes;
+       unsigned long packets;
+@@ -360,7 +364,9 @@ struct mlx4_en_rx_ring {
+       unsigned long dropped;
+       int hwtstamp_rx_filter;
+       cpumask_var_t affinity_mask;
++#ifdef HAVE_XDP_RXQ_INFO
+       struct xdp_rxq_info xdp_rxq;
++#endif
+ };
+ struct mlx4_en_cq {
+@@ -683,8 +689,12 @@ void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
+ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
+                               struct mlx4_en_priv *tmp,
++#ifdef HAVE_XDP_BUFF
+                               struct mlx4_en_port_profile *prof,
+                               bool carry_xdp_prog);
++#else
++                              struct mlx4_en_port_profile *prof);
++#endif
+ void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
+                                   struct mlx4_en_priv *tmp);
+@@ -701,10 +711,12 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq);
+ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
+                        void *accel_priv, select_queue_fallback_t fallback);
+ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
++#ifdef HAVE_XDP_BUFF
+ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
+                              struct mlx4_en_rx_alloc *frame,
+                              struct mlx4_en_priv *priv, unsigned int length,
+                              int tx_ind, bool *doorbell_pending);
++#endif
+ void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring);
+ bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
+                       struct mlx4_en_rx_alloc *frame);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+@@ -546,7 +546,7 @@ struct mlx5e_rq {
+       struct mlx5_core_dev  *mdev;
+       struct mlx5_core_mkey  umr_mkey;
+-#ifdef HAVE_NET_XDP_H
++#ifdef HAVE_XDP_BUFF_RXQ
+       /* XDP read-mostly */
+       struct xdp_rxq_info    xdp_rxq;
+ #endif
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+@@ -1170,7 +1170,6 @@ static int mlx5e_set_tunable(struct net_device *dev,
+       mutex_unlock(&priv->state_lock);
+       return err;
+ }
+-#endif
+ static void mlx5e_get_pauseparam(struct net_device *netdev,
+                                struct ethtool_pauseparam *pauseparam)
+@@ -1847,8 +1846,6 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
+       .get_link_ksettings  = mlx5e_get_link_ksettings,
+       .set_link_ksettings  = mlx5e_set_link_ksettings,
+ #endif
+-      .get_settings  = mlx5e_get_settings,
+-      .set_settings  = mlx5e_set_settings,
+ #if defined(HAVE_GET_SET_RXFH) && !defined(HAVE_GET_SET_RXFH_INDIR_EXT)
+       .get_rxfh_key_size   = mlx5e_get_rxfh_key_size,
+ #endif
+@@ -1915,11 +1912,6 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
+       .set_msglevel      = mlx5e_set_msglevel,
+ #endif
+       .set_priv_flags    = mlx5e_set_priv_flags,
+-#ifdef HAVE_GET_SET_DUMP
+-      .get_dump_flag     = mlx5e_get_dump_flag,
+-      .get_dump_data     = mlx5e_get_dump_data,
+-      .set_dump          = mlx5e_set_dump,
+-#endif
+ };
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -46,6 +46,7 @@
+ #include "en.h"
+ #ifdef HAVE_TC_OFFLOAD
+ #include "en_tc.h"
++#endif
+ #include "en_rep.h"
+ #include "en_accel/ipsec.h"
+ #include "en_accel/ipsec_rxtx.h"
+@@ -451,7 +452,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
+               goto err_rq_wq_destroy;
+       }
+-#ifdef HAVE_NET_XDP_H
++#ifdef HAVE_XDP_BUFF_RXQ
+       err = xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix);
+       if (err < 0)
+               goto err_rq_wq_destroy;
+@@ -578,7 +579,7 @@ err_rq_wq_destroy:
+ #ifdef HAVE_NETDEV_BPF
+       if (rq->xdp_prog)
+               bpf_prog_put(rq->xdp_prog);
+-#ifdef HAVE_NET_XDP_H
++#ifdef HAVE_XDP_BUFF_RXQ
+       xdp_rxq_info_unreg(&rq->xdp_rxq);
+ #endif
+ #endif
+@@ -595,7 +596,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
+       if (rq->xdp_prog)
+               bpf_prog_put(rq->xdp_prog);
+-#ifdef HAVE_NET_XDP_H
++#ifdef HAVE_XDP_BUFF_RXQ
+       xdp_rxq_info_unreg(&rq->xdp_rxq);
+ #endif
+ #endif
+@@ -2831,7 +2832,7 @@ static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
+       if (err)
+               return err;
+-#ifdef HAVE_NET_XDP_H
++#ifdef HAVE_XDP_BUFF_RXQ
+       /* Mark as unused given "Drop-RQ" packets never reach XDP */
+       xdp_rxq_info_unused(&rq->xdp_rxq);
+ #endif
+@@ -3848,6 +3849,21 @@ static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
+ {
+       struct mlx5e_priv *priv = netdev_priv(dev);
+       struct mlx5_core_dev *mdev = priv->mdev;
++#ifdef HAVE_VF_TX_RATE
++      struct mlx5_eswitch *esw = mdev->priv.eswitch;
++      int min_tx_rate;
++      int vport = vf + 1;
++
++      if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
++          MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
++              return -EPERM;
++      if (vport < 0 || vport >= esw->total_vports)
++              return -EINVAL;
++
++      mutex_lock(&esw->state_lock);
++      min_tx_rate = esw->vports[vport].info.min_rate;
++      mutex_unlock(&esw->state_lock);
++#endif
+       return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
+                                          max_tx_rate, min_tx_rate);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+@@ -706,6 +706,7 @@ static int mlx5e_rep_close(struct net_device *dev)
+       return ret;
+ }
++#if defined(HAVE_NDO_GET_PHYS_PORT_NAME) || defined(HAVE_SWITCHDEV_H_COMPAT) || defined(HAVE_NDO_GET_PHYS_PORT_NAME_EXTENDED)
+ static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
+                                       char *buf, size_t len)
+ {
+@@ -720,11 +721,33 @@ static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
+       return 0;
+ }
++#endif
++#if defined(HAVE_TC_FLOWER_OFFLOAD) && !defined(CONFIG_COMPAT_CLS_FLOWER_MOD)
+ static int
++#if defined(HAVE_NDO_SETUP_TC_TAKES_TC_SETUP_TYPE) || defined(HAVE_NDO_SETUP_TC_RH_EXTENDED)
++#ifdef HAVE_TC_BLOCK_OFFLOAD
+ mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
++#else
++mlx5e_rep_setup_tc_cls_flower(struct net_device *dev,
++#endif
+                             struct tc_cls_flower_offload *cls_flower)
++#else
++mlx5e_rep_setup_tc_cls_flower(struct net_device *dev,
++                            u32 handle,
++#ifdef HAVE_NDO_SETUP_TC_TAKES_CHAIN_INDEX
++                            u32 chain_index,
++#endif
++                            __be16 proto,
++                            struct tc_to_netdev *tc)
++#endif
+ {
++#if defined(HAVE_NDO_SETUP_TC_TAKES_TC_SETUP_TYPE) || defined(HAVE_NDO_SETUP_TC_RH_EXTENDED)
++#ifndef HAVE_TC_BLOCK_OFFLOAD
++      struct mlx5e_priv *priv = netdev_priv(dev);
++#endif
++#endif
++
+       switch (cls_flower->command) {
+       case TC_CLSFLOWER_REPLACE:
+               return mlx5e_configure_flower(priv, cls_flower);
+@@ -736,7 +759,9 @@ mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
+               return -EOPNOTSUPP;
+       }
+ }
++#endif /* HAVE_TC_FLOWER_OFFLOAD */
++#ifdef HAVE_TC_BLOCK_OFFLOAD
+ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
+                                void *cb_priv)
+ {
+@@ -772,17 +797,47 @@ static int mlx5e_rep_setup_tc_block(struct net_device *dev,
+               return -EOPNOTSUPP;
+       }
+ }
++#endif
++#if defined(HAVE_TC_FLOWER_OFFLOAD) && !defined(CONFIG_COMPAT_CLS_FLOWER_MOD)
++#if defined(HAVE_NDO_SETUP_TC_TAKES_TC_SETUP_TYPE) || defined(HAVE_NDO_SETUP_TC_RH_EXTENDED)
+ static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
+                             void *type_data)
++#else
++static int mlx5e_rep_setup_tc(struct net_device *dev, u32 handle,
++#ifdef HAVE_NDO_SETUP_TC_TAKES_CHAIN_INDEX
++                            u32 chain_index, __be16 proto,
++#else
++                            __be16 proto,
++#endif
++                            struct tc_to_netdev *tc)
++#endif
+ {
++#if !defined(HAVE_NDO_SETUP_TC_TAKES_TC_SETUP_TYPE) && !defined(HAVE_NDO_SETUP_TC_RH_EXTENDED)
++      unsigned int type = tc->type;
++#endif
++
+       switch (type) {
++#ifdef HAVE_TC_BLOCK_OFFLOAD
+       case TC_SETUP_BLOCK:
+               return mlx5e_rep_setup_tc_block(dev, type_data);
++#else
++      case TC_SETUP_CLSFLOWER:
++#if defined(HAVE_NDO_SETUP_TC_TAKES_TC_SETUP_TYPE) || defined(HAVE_NDO_SETUP_TC_RH_EXTENDED)
++              return mlx5e_rep_setup_tc_cls_flower(dev, type_data);
++#else
++              return mlx5e_rep_setup_tc_cls_flower(dev, handle,
++#ifdef HAVE_NDO_SETUP_TC_TAKES_CHAIN_INDEX
++                                                   chain_index,
++#endif
++                                                   proto, tc);
++#endif
++#endif
+       default:
+               return -EOPNOTSUPP;
+       }
+ }
++#endif
+ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
+ {
+@@ -801,6 +856,7 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
+       return false;
+ }
++#if defined(NDO_HAS_OFFLOAD_STATS_GETS_NET_DEVICE) || defined(HAVE_NDO_HAS_OFFLOAD_STATS_EXTENDED)
+ static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
+ {
+       struct mlx5e_rep_priv *rpriv = priv->ppriv;
+@@ -824,6 +880,7 @@ bool mlx5e_has_offload_stats(const struct net_device *dev, int attr_id)
+       return false;
+ }
++#endif
+ static int
+ mlx5e_get_sw_stats64(const struct net_device *dev,
+@@ -869,11 +926,29 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = {
+       .ndo_open                = mlx5e_rep_open,
+       .ndo_stop                = mlx5e_rep_close,
+       .ndo_start_xmit          = mlx5e_xmit,
++#ifdef HAVE_NDO_GET_PHYS_PORT_NAME
+       .ndo_get_phys_port_name  = mlx5e_rep_get_phys_port_name,
++#elif defined(HAVE_NDO_GET_PHYS_PORT_NAME_EXTENDED)
++      .extended.ndo_get_phys_port_name  = mlx5e_rep_get_phys_port_name,
++#endif
++#if defined(HAVE_TC_FLOWER_OFFLOAD) && !defined(CONFIG_COMPAT_CLS_FLOWER_MOD)
++#ifdef HAVE_NDO_SETUP_TC_RH_EXTENDED
++      .extended.ndo_setup_tc_rh          = mlx5e_rep_setup_tc,
++#else
+       .ndo_setup_tc            = mlx5e_rep_setup_tc,
++#endif
++#endif
+       .ndo_get_stats64         = mlx5e_rep_get_stats,
++#ifdef NDO_HAS_OFFLOAD_STATS_GETS_NET_DEVICE
+       .ndo_has_offload_stats   = mlx5e_has_offload_stats,
++#elif defined(HAVE_NDO_HAS_OFFLOAD_STATS_EXTENDED)
++      .extended.ndo_has_offload_stats   = mlx5e_has_offload_stats,
++#endif
++#ifdef HAVE_NDO_GET_OFFLOAD_STATS
+       .ndo_get_offload_stats   = mlx5e_get_offload_stats,
++#elif defined(HAVE_NDO_GET_OFFLOAD_STATS_EXTENDED)
++      .extended.ndo_get_offload_stats   = mlx5e_get_offload_stats,
++#endif
+ };
+ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
+@@ -906,7 +981,9 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
+       netdev->ethtool_ops       = &mlx5e_rep_ethtool_ops;
++#ifdef CONFIG_NET_SWITCHDEV
+       netdev->switchdev_ops = &mlx5e_rep_switchdev_ops;
++#endif
+       netdev->features         |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL;
+       netdev->hw_features      |= NETIF_F_HW_TC;
+@@ -1071,7 +1148,9 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
+       struct mlx5e_rep_priv *uplink_rpriv;
+       struct mlx5e_rep_priv *rpriv;
+       struct net_device *netdev;
++#ifdef HAVE_TC_BLOCK_OFFLOAD
+       struct mlx5e_priv *upriv;
++#endif
+       int err;
+       rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
+@@ -1106,11 +1185,18 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
+       }
+       uplink_rpriv = mlx5_eswitch_get_uplink_priv(dev->priv.eswitch, REP_ETH);
++#ifdef HAVE_TC_SETUP_CB_EGDEV_REGISTER
+       upriv = netdev_priv(uplink_rpriv->netdev);
++#ifdef HAVE_TC_BLOCK_OFFLOAD
+       err = tc_setup_cb_egdev_register(netdev, mlx5e_setup_tc_block_cb,
+                                        upriv);
++#else
++      err = tc_setup_cb_egdev_register(netdev, mlx5e_rep_setup_tc_cb,
++                                       upriv);
++#endif
+       if (err)
+               goto err_neigh_cleanup;
++#endif
+       err = register_netdev(netdev);
+       if (err) {
+@@ -1122,10 +1208,17 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
+       return 0;
+ err_egdev_cleanup:
++#ifdef HAVE_TC_SETUP_CB_EGDEV_REGISTER
++#ifdef HAVE_TC_BLOCK_OFFLOAD
+       tc_setup_cb_egdev_unregister(netdev, mlx5e_setup_tc_block_cb,
+                                    upriv);
++#else
++      tc_setup_cb_egdev_unregister(netdev, mlx5e_rep_setup_tc_cb,
++                                   upriv);
++#endif
+ err_neigh_cleanup:
++#endif
+       mlx5e_rep_neigh_cleanup(rpriv);
+ err_detach_netdev:
+@@ -1145,14 +1238,23 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+       struct mlx5e_rep_priv *uplink_rpriv;
+       void *ppriv = priv->ppriv;
++#ifdef HAVE_TC_BLOCK_OFFLOAD
+       struct mlx5e_priv *upriv;
++#endif
+       unregister_netdev(netdev);
+       uplink_rpriv = mlx5_eswitch_get_uplink_priv(priv->mdev->priv.eswitch,
+                                                   REP_ETH);
++#ifdef HAVE_TC_SETUP_CB_EGDEV_REGISTER
+       upriv = netdev_priv(uplink_rpriv->netdev);
++#ifdef HAVE_TC_BLOCK_OFFLOAD
+       tc_setup_cb_egdev_unregister(netdev, mlx5e_setup_tc_block_cb,
+                                    upriv);
++#else
++      tc_setup_cb_egdev_unregister(netdev, mlx5e_rep_setup_tc_cb,
++                                   upriv);
++#endif
++#endif
+       mlx5e_rep_neigh_cleanup(rpriv);
+       mlx5e_detach_netdev(priv);
+       mlx5e_destroy_netdev(priv);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -758,6 +758,7 @@ static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
+       mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
+ }
++#ifdef HAVE_NETDEV_BPF
+ static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
+ {
+       struct mlx5_wq_cyc *wq = &sq->wq;
+@@ -792,6 +793,11 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
+               rq->stats.xdp_drop++;
+               return false;
+       }
++#else
++      unsigned int dma_len = len - MLX5E_XDP_MIN_INLINE;
++      void *data           = page_address(di->page) + data_offset;
++
++#endif
+       if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1))) {
+               if (sq->db.doorbell) {
+@@ -850,10 +856,14 @@ static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq,
+               return false;
+       xdp.data = va + *rx_headroom;
++#ifdef HAVE_XDP_SET_DATA_META_INVALID
+       xdp_set_data_meta_invalid(&xdp);
++#endif
+       xdp.data_end = xdp.data + *len;
+       xdp.data_hard_start = va;
++#ifdef HAVE_XDP_RXQ_INFO
+       xdp.rxq = &rq->xdp_rxq;
++#endif
+       act = bpf_prog_run_xdp(prog, &xdp);
+       switch (act) {
+@@ -875,6 +885,7 @@ static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq,
+       }
+ }
++#ifndef HAVE_BUILD_SKB
+ static inline
+ struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
+                                      u32 frag_size, u16 headroom,
+@@ -892,6 +903,7 @@ struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
+       return skb;
+ }
++#endif
+ static inline
+ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
+@@ -901,7 +913,9 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
+       u16 rx_headroom = rq->buff.headroom;
+       struct sk_buff *skb;
+       void *va, *data;
++#ifdef HAVE_NETDEV_BPF
+       bool consumed;
++#endif
+       u32 frag_size;
+       va             = page_address(di->page) + wi->offset;
+@@ -918,19 +932,29 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
+               return NULL;
+       }
++#ifdef HAVE_NETDEV_BPF
+       rcu_read_lock();
+       consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt);
+       rcu_read_unlock();
+       if (consumed)
+               return NULL; /* page/packet was consumed by XDP */
++#endif
++#ifdef HAVE_BUILD_SKB
++      skb = build_skb(va, frag_size);
++#else
+       skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt);
++#endif
+       if (unlikely(!skb))
+               return NULL;
+       /* queue up for recycling/reuse */
+       page_ref_inc(di->page);
++#ifdef HAVE_BUILD_SKB
++      skb_reserve(skb, rx_headroom);
++      skb_put(skb, cqe_bcnt);
++#endif
+       return skb;
+ }
+@@ -1091,13 +1115,22 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
+               return NULL; /* page/packet was consumed by XDP */
+       }
++#ifdef HAVE_BUILD_SKB
++      skb = build_skb(va, frag_size);
++#else
+       skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt32);
++#endif
+       if (unlikely(!skb))
+               return NULL;
+       /* queue up for recycling/reuse */
+       page_ref_inc(di->page);
++#ifdef HAVE_BUILD_SKB
++      skb_reserve(skb, rx_headroom);
++      skb_put(skb, cqe_bcnt);
++#endif
++
+       return skb;
+ }
+@@ -1148,7 +1181,9 @@ mpwrq_cqe_out:
+ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
+ {
+       struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
++#ifdef HAVE_NETDEV_BPF
+       struct mlx5e_xdpsq *xdpsq;
++#endif
+       struct mlx5_cqe64 *cqe;
+       int work_done = 0;
+@@ -1162,7 +1197,9 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
+       if (!cqe)
+               return 0;
++#ifdef HAVE_NETDEV_BPF
+       xdpsq = &rq->xdpsq;
++#endif
+       do {
+               if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
+@@ -1177,10 +1214,12 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
+               rq->handle_rx_cqe(rq, cqe);
+       } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
++#ifdef HAVE_NETDEV_BPF
+       if (xdpsq->db.doorbell) {
+               mlx5e_xmit_xdp_doorbell(xdpsq);
+               xdpsq->db.doorbell = false;
+       }
++#endif
+       mlx5_cqwq_update_db_record(&cq->wq);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -1955,7 +1955,13 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
+               }
+               if (is_tcf_mirred_egress_redirect(a)) {
++#ifndef HAVE_TCF_MIRRED_DEV
++                      int ifindex = tcf_mirred_ifindex(a);
++
++                      struct net_device *peer_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
++#else
+                       struct net_device *peer_dev = tcf_mirred_dev(a);
++#endif
+                       if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
+                           same_hw_devs(priv, netdev_priv(peer_dev))) {
+@@ -2496,8 +2502,14 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
+               if (is_tcf_mirred_egress_redirect(a)) {
+                       struct net_device *out_dev;
+                       struct mlx5e_priv *out_priv;
++#ifndef HAVE_TCF_MIRRED_DEV
++                      int ifindex = tcf_mirred_ifindex(a);
++
++                      out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
++#else
+                       out_dev = tcf_mirred_dev(a);
++#endif
+                       if (switchdev_port_same_parent_id(priv->netdev,
+                                                         out_dev)) {
+@@ -2507,7 +2519,11 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
+                               rpriv = out_priv->ppriv;
+                               attr->out_rep = rpriv->rep;
+                       } else if (encap) {
++#ifndef HAVE_TCF_MIRRED_DEV
++                              parse_attr->mirred_ifindex = ifindex;
++#else
+                               parse_attr->mirred_ifindex = out_dev->ifindex;
++#endif
+                               parse_attr->tun_info = *info;
+                               attr->parse_attr = parse_attr;
+                               attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+@@ -37,10 +37,14 @@ static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c)
+ {
+       int current_cpu = smp_processor_id();
+       const struct cpumask *aff;
++#ifndef HAVE_IRQ_DATA_AFFINITY
+       struct irq_data *idata;
+       idata = irq_desc_get_irq_data(c->irq_desc);
+       aff = irq_data_get_affinity_mask(idata);
++#else
++      aff = irq_desc_get_irq_data(c->irq_desc)->affinity;
++#endif
+       return cpumask_test_cpu(current_cpu, aff);
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+@@ -2211,8 +2211,10 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
+       err = mlx5_eswitch_query_vport_drop_stats(esw->dev, vport, &stats);
+       if (err)
+               goto free_out;
++#ifdef HAVE_IFLA_VF_STATS_RX_DROPPED
+       vf_stats->rx_dropped = stats.rx_dropped;
+       vf_stats->tx_dropped = stats.tx_dropped;
++#endif
+ free_out:
+       kvfree(out);