--- /dev/null
+From: Vladimir Sokolovsky <vlad@mellanox.com>
+Subject: [PATCH] BACKPORT: mlx4
+
+Signed-off-by: Vladimir Sokolovsky <vlad@mellanox.com>
+---
+ drivers/net/ethernet/mellanox/mlx4/catas.c | 17 +
+ .../net/ethernet/mellanox/mlx4/en_dcb_nl.c | 29 ++
+ .../net/ethernet/mellanox/mlx4/en_ethtool.c | 8 +
+ .../net/ethernet/mellanox/mlx4/en_netdev.c | 424 +++++++++++++++++-
+ drivers/net/ethernet/mellanox/mlx4/en_rx.c | 70 ++-
+ .../net/ethernet/mellanox/mlx4/en_selftest.c | 4 +-
+ drivers/net/ethernet/mellanox/mlx4/main.c | 2 +
+ drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 12 +
+ 8 files changed, 558 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
+index xxxxxxx..xxxxxxx 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
++++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
+@@ -229,10 +229,19 @@ static void dump_err_buf(struct mlx4_dev *dev)
+ i, swab32(readl(priv->catas_err.map + i)));
+ }
+
++#ifdef HAVE_TIMER_SETUP
+ static void poll_catas(struct timer_list *t)
++#else
++static void poll_catas(unsigned long dev_ptr)
++#endif
+ {
++#ifdef HAVE_TIMER_SETUP
+ struct mlx4_priv *priv = from_timer(priv, t, catas_err.timer);
+ struct mlx4_dev *dev = &priv->dev;
++#else
++ struct mlx4_dev *dev = (struct mlx4_dev *) dev_ptr;
++ struct mlx4_priv *priv = mlx4_priv(dev);
++#endif
+ u32 slave_read;
+
+ if (mlx4_is_slave(dev)) {
+@@ -275,7 +284,11 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev)
+ phys_addr_t addr;
+
+ INIT_LIST_HEAD(&priv->catas_err.list);
++#ifdef HAVE_TIMER_SETUP
+ timer_setup(&priv->catas_err.timer, poll_catas, 0);
++#else
++ init_timer(&priv->catas_err.timer);
++#endif
+ priv->catas_err.map = NULL;
+
+ if (!mlx4_is_slave(dev)) {
+@@ -291,6 +304,10 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev)
+ }
+ }
+
++#ifndef HAVE_TIMER_SETUP
++ priv->catas_err.timer.data = (unsigned long) dev;
++ priv->catas_err.timer.function = poll_catas;
++#endif
+ priv->catas_err.timer.expires =
+ round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL);
+ add_timer(&priv->catas_err.timer);
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+index xxxxxxx..xxxxxxx 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+@@ -138,7 +138,11 @@ static void mlx4_en_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
+ priv->cee_config.pfc_state = true;
+ }
+
++#ifdef NDO_GETNUMTCS_RETURNS_INT
+ static int mlx4_en_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
++#else
++static u8 mlx4_en_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
++#endif
+ {
+ struct mlx4_en_priv *priv = netdev_priv(netdev);
+
+@@ -254,7 +258,11 @@ static u8 mlx4_en_dcbnl_set_state(struct net_device *dev, u8 state)
+ * otherwise returns 0 as the invalid user priority bitmap to
+ * indicate an error.
+ */
++#ifdef NDO_GETAPP_RETURNS_INT
+ static int mlx4_en_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
++#else
++static u8 mlx4_en_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
++#endif
+ {
+ struct mlx4_en_priv *priv = netdev_priv(netdev);
+ struct dcb_app app = {
+@@ -267,8 +275,13 @@ static int mlx4_en_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
+ return dcb_getapp(netdev, &app);
+ }
+
++#ifdef NDO_SETAPP_RETURNS_INT
+ static int mlx4_en_dcbnl_setapp(struct net_device *netdev, u8 idtype,
+ u16 id, u8 up)
++#else
++static u8 mlx4_en_dcbnl_setapp(struct net_device *netdev, u8 idtype,
++ u16 id, u8 up)
++#endif
+ {
+ struct mlx4_en_priv *priv = netdev_priv(netdev);
+ struct dcb_app app;
+@@ -538,7 +551,12 @@ static int mlx4_en_dcbnl_ieee_setmaxrate(struct net_device *dev,
+ #define RPG_ENABLE_BIT 31
+ #define CN_TAG_BIT 30
+
++#ifdef HAVE_IEEE_GETQCN
++#ifndef CONFIG_SYSFS_QCN
+ static int mlx4_en_dcbnl_ieee_getqcn(struct net_device *dev,
++#else
++int mlx4_en_dcbnl_ieee_getqcn(struct net_device *dev,
++#endif
+ struct ieee_qcn *qcn)
+ {
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+@@ -601,7 +619,11 @@ static int mlx4_en_dcbnl_ieee_getqcn(struct net_device *dev,
+ return 0;
+ }
+
++#ifndef CONFIG_SYSFS_QCN
+ static int mlx4_en_dcbnl_ieee_setqcn(struct net_device *dev,
++#else
++int mlx4_en_dcbnl_ieee_setqcn(struct net_device *dev,
++#endif
+ struct ieee_qcn *qcn)
+ {
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+@@ -664,7 +686,11 @@ static int mlx4_en_dcbnl_ieee_setqcn(struct net_device *dev,
+ return 0;
+ }
+
++#ifndef CONFIG_SYSFS_QCN
+ static int mlx4_en_dcbnl_ieee_getqcnstats(struct net_device *dev,
++#else
++int mlx4_en_dcbnl_ieee_getqcnstats(struct net_device *dev,
++#endif
+ struct ieee_qcn_stats *qcn_stats)
+ {
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+@@ -706,15 +732,18 @@ static int mlx4_en_dcbnl_ieee_getqcnstats(struct net_device *dev,
+ mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
+ return 0;
+ }
++#endif /* HAVE_IEEE_GETQCN */
+
+ const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = {
+ .ieee_getets = mlx4_en_dcbnl_ieee_getets,
+ .ieee_setets = mlx4_en_dcbnl_ieee_setets,
+ .ieee_getmaxrate = mlx4_en_dcbnl_ieee_getmaxrate,
+ .ieee_setmaxrate = mlx4_en_dcbnl_ieee_setmaxrate,
++#ifdef HAVE_IEEE_GETQCN
+ .ieee_getqcn = mlx4_en_dcbnl_ieee_getqcn,
+ .ieee_setqcn = mlx4_en_dcbnl_ieee_setqcn,
+ .ieee_getqcnstats = mlx4_en_dcbnl_ieee_getqcnstats,
++#endif
+ .ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc,
+ .ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc,
+
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+index xxxxxxx..xxxxxxx 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+@@ -1158,7 +1158,11 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
+ memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+ new_prof.tx_ring_size = tx_size;
+ new_prof.rx_ring_size = rx_size;
++#ifdef HAVE_XDP_BUFF
+ err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
++#else
++ err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
++#endif
+ if (err)
+ goto out;
+
+@@ -1841,7 +1845,11 @@ static int mlx4_en_set_channels(struct net_device *dev,
+ new_prof.tx_ring_num[TX_XDP] = xdp_count;
+ new_prof.rx_ring_num = channel->rx_count;
+
++#ifdef HAVE_XDP_BUFF
+ err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
++#else
++ err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
++#endif
+ if (err)
+ goto out;
+
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+index xxxxxxx..xxxxxxx 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+@@ -31,7 +31,9 @@
+ *
+ */
+
++#ifdef HAVE_XDP_BUFF
+ #include <linux/bpf.h>
++#endif
+ #include <linux/etherdevice.h>
+ #include <linux/tcp.h>
+ #include <linux/if_vlan.h>
+@@ -39,8 +41,12 @@
+ #include <linux/slab.h>
+ #include <linux/hash.h>
+ #include <net/ip.h>
++#ifdef HAVE_KERNEL_WITH_VXLAN_SUPPORT_ON
+ #include <net/vxlan.h>
++#endif
++#ifdef HAVE_DEVLINK_H
+ #include <net/devlink.h>
++#endif
+
+ #include <linux/mlx4/driver.h>
+ #include <linux/mlx4/device.h>
+@@ -104,7 +110,11 @@ int mlx4_en_alloc_tx_queue_per_tc(struct net_device *dev, u8 tc)
+ MLX4_EN_NUM_UP_HIGH;
+ new_prof.tx_ring_num[TX] = new_prof.num_tx_rings_p_up *
+ new_prof.num_up;
++#ifdef HAVE_XDP_BUFF
+ err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
++#else
++ err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
++#endif
+ if (err)
+ goto out;
+
+@@ -129,6 +139,8 @@ out:
+ return err;
+ }
+
++#if defined(HAVE_NDO_SETUP_TC) || defined(HAVE_NDO_SETUP_TC_RH_EXTENDED)
++#if defined(HAVE_NDO_SETUP_TC_TAKES_TC_SETUP_TYPE) || defined(HAVE_NDO_SETUP_TC_RH_EXTENDED)
+ static int __mlx4_en_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+ {
+@@ -144,6 +156,38 @@ static int __mlx4_en_setup_tc(struct net_device *dev, enum tc_setup_type type,
+
+ return mlx4_en_alloc_tx_queue_per_tc(dev, mqprio->num_tc);
+ }
++#else /* before 4.14-15 TC changes */
++#if defined(HAVE_NDO_SETUP_TC_4_PARAMS) || defined(HAVE_NDO_SETUP_TC_TAKES_CHAIN_INDEX)
++static int __mlx4_en_setup_tc(struct net_device *dev, u32 handle,
++#ifdef HAVE_NDO_SETUP_TC_TAKES_CHAIN_INDEX
++ u32 chain_index, __be16 proto,
++#else
++ __be16 proto,
++#endif
++ struct tc_to_netdev *tc)
++{
++ if (tc->type != TC_SETUP_MQPRIO)
++ return -EINVAL;
++
++#ifdef HAVE_TC_TO_NETDEV_TC
++ return mlx4_en_setup_tc(dev, tc->tc);
++#else
++ if (tc->mqprio->num_tc && tc->mqprio->num_tc != MLX4_EN_NUM_UP_HIGH)
++ return -EINVAL;
++
++#ifdef HAVE_NDO_SETUP_TC_TAKES_CHAIN_INDEX
++ if (chain_index)
++ return -EOPNOTSUPP;
++#endif
++
++ tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
++
++ return mlx4_en_alloc_tx_queue_per_tc(dev, tc->mqprio->num_tc);
++#endif
++}
++#endif
++#endif
++#endif
+
+ #ifdef CONFIG_RFS_ACCEL
+
+@@ -459,8 +503,14 @@ static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
+ }
+ #endif
+
++#if defined(HAVE_NDO_RX_ADD_VID_HAS_3_PARAMS)
+ static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
+ __be16 proto, u16 vid)
++#elif defined(HAVE_NDO_RX_ADD_VID_HAS_2_PARAMS_RET_INT)
++static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
++#else
++static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
++#endif
+ {
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+@@ -486,11 +536,20 @@ static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
+
+ out:
+ mutex_unlock(&mdev->state_lock);
++#if (defined(HAVE_NDO_RX_ADD_VID_HAS_3_PARAMS) || \
++ defined(HAVE_NDO_RX_ADD_VID_HAS_2_PARAMS_RET_INT))
+ return err;
++#endif
+ }
+
++#if defined(HAVE_NDO_RX_ADD_VID_HAS_3_PARAMS)
+ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
+ __be16 proto, u16 vid)
++#elif defined(HAVE_NDO_RX_ADD_VID_HAS_2_PARAMS_RET_INT)
++static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
++#else
++static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
++#endif
+ {
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+@@ -1266,7 +1325,11 @@ static void mlx4_en_do_set_rx_mode(struct work_struct *work)
+ }
+ }
+
++#ifdef HAVE_NETDEV_IFF_UNICAST_FLT
+ if (dev->priv_flags & IFF_UNICAST_FLT)
++#else
++ if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
++#endif
+ mlx4_en_do_uc_filter(priv, dev, mdev);
+
+ /* Promsicuous mode: disable all filters */
+@@ -1378,16 +1441,28 @@ static void mlx4_en_tx_timeout(struct net_device *dev)
+ queue_work(mdev->workqueue, &priv->watchdog_task);
+ }
+
+-
+-static void
+-mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
++#ifdef HAVE_NDO_GET_STATS64_RET_VOID
++static void mlx4_en_get_stats64(struct net_device *dev,
++ struct rtnl_link_stats64 *stats)
++#elif defined(HAVE_NDO_GET_STATS64)
++struct rtnl_link_stats64 *mlx4_en_get_stats64(struct net_device *dev,
++ struct rtnl_link_stats64 *stats)
++#else
++static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
++#endif
+ {
+ struct mlx4_en_priv *priv = netdev_priv(dev);
++#if !defined(HAVE_NDO_GET_STATS64) && !defined(HAVE_NDO_GET_STATS64_RET_VOID)
++ struct net_device_stats *stats = &priv->ret_stats;
++#endif
+
+ spin_lock_bh(&priv->stats_lock);
+ mlx4_en_fold_software_stats(dev);
+ netdev_stats_to_stats64(stats, &dev->stats);
+ spin_unlock_bh(&priv->stats_lock);
++#ifndef HAVE_NDO_GET_STATS64_RET_VOID
++ return stats;
++#endif
+ }
+
+ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
+@@ -1592,6 +1667,7 @@ static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
+ free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask);
+ }
+
++#ifdef HAVE_XDP_BUFF
+ static void mlx4_en_init_recycle_ring(struct mlx4_en_priv *priv,
+ int tx_ring_idx)
+ {
+@@ -1603,6 +1679,7 @@ static void mlx4_en_init_recycle_ring(struct mlx4_en_priv *priv,
+ en_dbg(DRV, priv, "Set tx_ring[%d][%d]->recycle_ring = rx_ring[%d]\n",
+ TX_XDP, tx_ring_idx, rr_index);
+ }
++#endif
+
+ int mlx4_en_start_port(struct net_device *dev)
+ {
+@@ -1738,7 +1815,9 @@ int mlx4_en_start_port(struct net_device *dev)
+
+ } else {
+ mlx4_en_init_tx_xdp_ring_descs(priv, tx_ring);
++#ifdef HAVE_XDP_BUFF
+ mlx4_en_init_recycle_ring(priv, i);
++#endif
+ /* XDP TX CQ should never be armed */
+ }
+
+@@ -1811,8 +1890,14 @@ int mlx4_en_start_port(struct net_device *dev)
+ /* Schedule multicast task to populate multicast list */
+ queue_work(mdev->workqueue, &priv->rx_mode_task);
+
++#ifdef HAVE_KERNEL_WITH_VXLAN_SUPPORT_ON
+ if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
++#if defined(HAVE_NDO_UDP_TUNNEL_ADD) || defined(HAVE_NDO_UDP_TUNNEL_ADD_EXTENDED)
+ udp_tunnel_get_rx_info(dev);
++#elif defined(HAVE_NDO_ADD_VXLAN_PORT)
++ vxlan_get_rx_port(dev);
++#endif
++#endif
+
+ priv->port_up = true;
+
+@@ -2101,8 +2186,10 @@ static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
+ {
+ int i, t;
+
++#ifdef HAVE_NETDEV_RX_CPU_RMAP
+ #ifdef CONFIG_RFS_ACCEL
+ priv->dev->rx_cpu_rmap = NULL;
++#endif
+ #endif
+
+ for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+@@ -2261,11 +2348,19 @@ static void mlx4_en_update_priv(struct mlx4_en_priv *dst,
+
+ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
+ struct mlx4_en_priv *tmp,
++#ifdef HAVE_XDP_BUFF
+ struct mlx4_en_port_profile *prof,
+ bool carry_xdp_prog)
++#else
++ struct mlx4_en_port_profile *prof)
++#endif
+ {
++#ifdef HAVE_XDP_BUFF
+ struct bpf_prog *xdp_prog;
+ int i, t;
++#else
++ int t;
++#endif
+
+ mlx4_en_copy_priv(tmp, priv, prof);
+
+@@ -2280,6 +2375,7 @@ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
+ return -ENOMEM;
+ }
+
++#ifdef HAVE_XDP_BUFF
+ /* All rx_rings has the same xdp_prog. Pick the first one. */
+ xdp_prog = rcu_dereference_protected(
+ priv->rx_ring[0]->xdp_prog,
+@@ -2295,6 +2391,7 @@ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
+ rcu_assign_pointer(tmp->rx_ring[i]->xdp_prog,
+ xdp_prog);
+ }
++#endif
+
+ return 0;
+ }
+@@ -2315,8 +2412,10 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
+
+ /* Unregister device - this will close the port if it was up */
+ if (priv->registered) {
++#ifdef HAVE_DEVLINK_H
+ devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev,
+ priv->port));
++#endif
+ unregister_netdev(dev);
+ }
+
+@@ -2346,6 +2445,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
+ free_netdev(dev);
+ }
+
++#ifdef HAVE_XDP_BUFF
+ static bool mlx4_en_check_xdp_mtu(struct net_device *dev, int mtu)
+ {
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+@@ -2358,6 +2458,7 @@ static bool mlx4_en_check_xdp_mtu(struct net_device *dev, int mtu)
+
+ return true;
+ }
++#endif
+
+ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
+ {
+@@ -2368,9 +2469,11 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
+ en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
+ dev->mtu, new_mtu);
+
++#ifdef HAVE_XDP_BUFF
+ if (priv->tx_ring_num[TX_XDP] &&
+ !mlx4_en_check_xdp_mtu(dev, new_mtu))
+ return -EOPNOTSUPP;
++#endif
+
+ dev->mtu = new_mtu;
+
+@@ -2454,6 +2557,7 @@ static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+ sizeof(config)) ? -EFAULT : 0;
+ }
+
++#ifdef SIOCGHWTSTAMP
+ static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+ {
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+@@ -2461,19 +2565,23 @@ static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+ return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config,
+ sizeof(priv->hwtstamp_config)) ? -EFAULT : 0;
+ }
++#endif
+
+ static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+ {
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return mlx4_en_hwtstamp_set(dev, ifr);
++#ifdef SIOCGHWTSTAMP
+ case SIOCGHWTSTAMP:
+ return mlx4_en_hwtstamp_get(dev, ifr);
++#endif
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
++#ifdef HAVE_NETIF_F_HW_VLAN_STAG_RX
+ static netdev_features_t mlx4_en_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+ {
+@@ -2492,20 +2600,31 @@ static netdev_features_t mlx4_en_fix_features(struct net_device *netdev,
+
+ return features;
+ }
++#endif
+
+-static int mlx4_en_set_features(struct net_device *netdev,
+- netdev_features_t features)
++#ifndef CONFIG_SYSFS_LOOPBACK
++static
++#endif
++int mlx4_en_set_features(struct net_device *netdev,
++#ifdef HAVE_NET_DEVICE_OPS_EXT
++ u32 features)
++#else
++ netdev_features_t features)
++#endif
+ {
+ struct mlx4_en_priv *priv = netdev_priv(netdev);
+ bool reset = false;
+ int ret = 0;
+
++#ifdef HAVE_NETIF_F_RXFCS
+ if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXFCS)) {
+ en_info(priv, "Turn %s RX-FCS\n",
+ (features & NETIF_F_RXFCS) ? "ON" : "OFF");
+ reset = true;
+ }
++#endif
+
++#ifdef HAVE_NETIF_F_RXALL
+ if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXALL)) {
+ u8 ignore_fcs_value = (features & NETIF_F_RXALL) ? 1 : 0;
+
+@@ -2516,6 +2635,7 @@ static int mlx4_en_set_features(struct net_device *netdev,
+ if (ret)
+ return ret;
+ }
++#endif
+
+ if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
+ en_info(priv, "Turn %s RX vlan strip offload\n",
+@@ -2527,9 +2647,11 @@ static int mlx4_en_set_features(struct net_device *netdev,
+ en_info(priv, "Turn %s TX vlan strip offload\n",
+ (features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF");
+
++#ifdef HAVE_NETIF_F_HW_VLAN_STAG_RX
+ if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_STAG_TX))
+ en_info(priv, "Turn %s TX S-VLAN strip offload\n",
+ (features & NETIF_F_HW_VLAN_STAG_TX) ? "ON" : "OFF");
++#endif
+
+ if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) {
+ en_info(priv, "Turn %s loopback\n",
+@@ -2547,6 +2669,7 @@ static int mlx4_en_set_features(struct net_device *netdev,
+ return 0;
+ }
+
++#ifdef HAVE_NDO_SET_VF_MAC
+ static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
+ {
+ struct mlx4_en_priv *en_priv = netdev_priv(dev);
+@@ -2554,17 +2677,28 @@ static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
+
+ return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac);
+ }
++#endif
+
++#if defined(HAVE_NDO_SET_VF_VLAN) || defined(HAVE_NDO_SET_VF_VLAN_EXTENDED)
++#ifdef HAVE_VF_VLAN_PROTO
+ static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
+ __be16 vlan_proto)
++#else
++static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
++#endif
+ {
+ struct mlx4_en_priv *en_priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = en_priv->mdev;
++#ifndef HAVE_VF_VLAN_PROTO
++ __be16 vlan_proto = htons(ETH_P_8021Q);
++#endif
+
+ return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos,
+ vlan_proto);
+ }
++#endif /* HAVE_NDO_SET_VF_VLAN */
+
++#ifdef HAVE_TX_RATE_LIMIT
+ static int mlx4_en_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
+ int max_tx_rate)
+ {
+@@ -2574,7 +2708,9 @@ static int mlx4_en_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
+ return mlx4_set_vf_rate(mdev->dev, en_priv->port, vf, min_tx_rate,
+ max_tx_rate);
+ }
++#endif
+
++#if defined(HAVE_NETDEV_OPS_NDO_SET_VF_SPOOFCHK) || defined(HAVE_NETDEV_OPS_EXT_NDO_SET_VF_SPOOFCHK)
+ static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
+ {
+ struct mlx4_en_priv *en_priv = netdev_priv(dev);
+@@ -2582,6 +2718,7 @@ static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
+
+ return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting);
+ }
++#endif
+
+ static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivf)
+ {
+@@ -2591,6 +2728,7 @@ static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_
+ return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf);
+ }
+
++#if defined(HAVE_NETDEV_OPS_NDO_SET_VF_LINK_STATE) || defined(HAVE_NETDEV_OPS_EXT_NDO_SET_VF_LINK_STATE)
+ static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_state)
+ {
+ struct mlx4_en_priv *en_priv = netdev_priv(dev);
+@@ -2598,7 +2736,9 @@ static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_st
+
+ return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
+ }
++#endif
+
++#ifdef HAVE_NDO_GET_VF_STATS
+ static int mlx4_en_get_vf_stats(struct net_device *dev, int vf,
+ struct ifla_vf_stats *vf_stats)
+ {
+@@ -2607,10 +2747,16 @@ static int mlx4_en_get_vf_stats(struct net_device *dev, int vf,
+
+ return mlx4_get_vf_stats(mdev->dev, en_priv->port, vf, vf_stats);
+ }
++#endif
+
++#if defined(HAVE_NETDEV_NDO_GET_PHYS_PORT_ID) || defined(HAVE_NETDEV_EXT_NDO_GET_PHYS_PORT_ID)
+ #define PORT_ID_BYTE_LEN 8
+ static int mlx4_en_get_phys_port_id(struct net_device *dev,
++#ifdef HAVE_NETDEV_PHYS_ITEM_ID
+ struct netdev_phys_item_id *ppid)
++#else
++ struct netdev_phys_port_id *ppid)
++#endif
+ {
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_dev *mdev = priv->mdev->dev;
+@@ -2627,6 +2773,7 @@ static int mlx4_en_get_phys_port_id(struct net_device *dev,
+ }
+ return 0;
+ }
++#endif
+
+ static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
+ {
+@@ -2646,13 +2793,21 @@ out:
+ return;
+ }
+
++#ifdef HAVE_NETDEV_HW_ENC_FEATURES
+ /* set offloads */
+ priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM |
+ NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_TUNNEL |
++#ifdef NETIF_F_GSO_UDP_TUNNEL_CSUM
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
++#endif
++#ifdef NETIF_F_GSO_PARTIAL
+ NETIF_F_GSO_PARTIAL;
++#else
++ 0;
++#endif
++#endif
+ }
+
+ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
+@@ -2660,13 +2815,21 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
+ int ret;
+ struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
+ vxlan_del_task);
++#ifdef HAVE_NETDEV_HW_ENC_FEATURES
+ /* unset offloads */
+ priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM |
+ NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_TUNNEL |
++#ifdef NETIF_F_GSO_UDP_TUNNEL_CSUM
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
++#endif
++#ifdef NETIF_F_GSO_PARTIAL
+ NETIF_F_GSO_PARTIAL);
++#else
++ 0);
++#endif
++#endif
+
+ ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
+ VXLAN_STEER_BY_OUTER_MAC, 0);
+@@ -2676,6 +2839,8 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
+ priv->vxlan_port = 0;
+ }
+
++#ifdef HAVE_KERNEL_WITH_VXLAN_SUPPORT_ON
++#if defined(HAVE_NDO_UDP_TUNNEL_ADD) || defined(HAVE_NDO_UDP_TUNNEL_ADD_EXTENDED)
+ static void mlx4_en_add_vxlan_port(struct net_device *dev,
+ struct udp_tunnel_info *ti)
+ {
+@@ -2727,13 +2892,66 @@ static void mlx4_en_del_vxlan_port(struct net_device *dev,
+
+ queue_work(priv->mdev->workqueue, &priv->vxlan_del_task);
+ }
++#elif defined(HAVE_NDO_ADD_VXLAN_PORT)
++static void mlx4_en_add_vxlan_port(struct net_device *dev,
++ sa_family_t sa_family, __be16 port)
++{
++ struct mlx4_en_priv *priv = netdev_priv(dev);
++ __be16 current_port;
++
++ if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
++ return;
++
++ if (sa_family == AF_INET6)
++ return;
++
++ current_port = priv->vxlan_port;
++ if (current_port && current_port != port) {
++ en_warn(priv, "vxlan port %d configured, can't add port %d\n",
++ ntohs(current_port), ntohs(port));
++ return;
++ }
++
++ priv->vxlan_port = port;
++ queue_work(priv->mdev->workqueue, &priv->vxlan_add_task);
++}
++
++static void mlx4_en_del_vxlan_port(struct net_device *dev,
++ sa_family_t sa_family, __be16 port)
++{
++ struct mlx4_en_priv *priv = netdev_priv(dev);
++ __be16 current_port;
++
++ if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
++ return;
++
++ if (sa_family == AF_INET6)
++ return;
++
++ current_port = priv->vxlan_port;
++ if (current_port != port) {
++ en_dbg(DRV, priv, "vxlan port %d isn't configured, ignoring\n", ntohs(port));
++ return;
++ }
++
++ queue_work(priv->mdev->workqueue, &priv->vxlan_del_task);
++}
++#endif
++#endif /* HAVE_KERNEL_WITH_VXLAN_SUPPORT_ON */
+
++#ifdef HAVE_NETDEV_FEATURES_T
+ static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+ {
++#ifdef HAVE_VLAN_FEATURES_CHECK
+ features = vlan_features_check(skb, features);
++#endif
++#ifdef HAVE_KERNEL_WITH_VXLAN_SUPPORT_ON
++#ifdef HAVE_VXLAN_FEATURES_CHECK
+ features = vxlan_features_check(skb, features);
++#endif
++#endif
+
+ /* The ConnectX-3 doesn't support outer IPv6 checksums but it does
+ * support inner IPv6 checksums and segmentation so we need to
+@@ -2752,6 +2970,7 @@ static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
+ return features;
+ }
+
++#if defined(HAVE_NDO_SET_TX_MAXRATE) || defined(HAVE_NDO_SET_TX_MAXRATE_EXTENDED)
+ static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate)
+ {
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+@@ -2778,7 +2997,9 @@ static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 m
+ &params);
+ return err;
+ }
++#endif
+
++#ifdef HAVE_XDP_BUFF
+ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
+ {
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+@@ -2842,7 +3063,11 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
+ en_warn(priv, "Reducing the number of TX rings, to not exceed the max total rings number.\n");
+ }
+
++#ifdef HAVE_XDP_BUFF
+ err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, false);
++#else
++ err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
++#endif
+ if (err) {
+ if (prog)
+ bpf_prog_sub(prog, priv->rx_ring_num - 1);
+@@ -2883,6 +3108,7 @@ out:
+ return err;
+ }
+
++#ifdef HAVE_BPF_PROG_AUX_FEILD_ID
+ static u32 mlx4_xdp_query(struct net_device *dev)
+ {
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+@@ -2903,6 +3129,14 @@ static u32 mlx4_xdp_query(struct net_device *dev)
+
+ return prog_id;
+ }
++#else
++static bool mlx4_xdp_attached(struct net_device *dev)
++{
++ struct mlx4_en_priv *priv = netdev_priv(dev);
++
++ return !!priv->tx_ring_num[TX_XDP];
++}
++#endif
+
+ static int mlx4_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+ {
+@@ -2910,42 +3144,108 @@ static int mlx4_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+ case XDP_SETUP_PROG:
+ return mlx4_xdp_set(dev, xdp->prog);
+ case XDP_QUERY_PROG:
++#ifdef HAVE_BPF_PROG_AUX_FEILD_ID
+ xdp->prog_id = mlx4_xdp_query(dev);
++#endif
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ }
++#endif
+
+ static const struct net_device_ops mlx4_netdev_ops = {
++#ifdef HAVE_NET_DEVICE_OPS_EXTENDED
++ .ndo_size = sizeof(struct net_device_ops),
++#endif
+ .ndo_open = mlx4_en_open,
+ .ndo_stop = mlx4_en_close,
+ .ndo_start_xmit = mlx4_en_xmit,
+ .ndo_select_queue = mlx4_en_select_queue,
++#if defined(HAVE_NDO_GET_STATS64_RET_VOID) || defined(HAVE_NDO_GET_STATS64)
+ .ndo_get_stats64 = mlx4_en_get_stats64,
++#else
++ .ndo_get_stats = mlx4_en_get_stats,
++#endif
+ .ndo_set_rx_mode = mlx4_en_set_rx_mode,
+ .ndo_set_mac_address = mlx4_en_set_mac,
+ .ndo_validate_addr = eth_validate_addr,
++#ifdef HAVE_NDO_CHANGE_MTU_EXTENDED
++ .extended.ndo_change_mtu = mlx4_en_change_mtu,
++#else
+ .ndo_change_mtu = mlx4_en_change_mtu,
++#endif
+ .ndo_do_ioctl = mlx4_en_ioctl,
+ .ndo_tx_timeout = mlx4_en_tx_timeout,
+ .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
++#if (defined(HAVE_NDO_SET_FEATURES) && !defined(HAVE_NET_DEVICE_OPS_EXT))
+ .ndo_set_features = mlx4_en_set_features,
++#endif
++#ifdef HAVE_NETIF_F_HW_VLAN_STAG_RX
+ .ndo_fix_features = mlx4_en_fix_features,
++#endif
++#ifdef HAVE_NDO_SETUP_TC_RH_EXTENDED
++ .extended.ndo_setup_tc_rh = __mlx4_en_setup_tc,
++#else
++#ifdef HAVE_NDO_SETUP_TC
++#if defined(HAVE_NDO_SETUP_TC_4_PARAMS) || \
++ defined(HAVE_NDO_SETUP_TC_TAKES_CHAIN_INDEX) || \
++ defined(HAVE_TC_FLOWER_OFFLOAD)
+ .ndo_setup_tc = __mlx4_en_setup_tc,
++#else
++ .ndo_setup_tc = mlx4_en_setup_tc,
++#endif
++#endif
++#endif
++#ifdef HAVE_NDO_RX_FLOW_STEER
+ #ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = mlx4_en_filter_rfs,
+ #endif
++#endif
++#ifdef MLX4_EN_BUSY_POLL
++#ifndef HAVE_NETDEV_EXTENDED_NDO_BUSY_POLL
++ .ndo_busy_poll = mlx4_en_low_latency_recv,
++#endif
++#endif
++#ifdef HAVE_NETDEV_NDO_GET_PHYS_PORT_ID
+ .ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
++#elif defined(HAVE_NDO_GET_PHYS_PORT_NAME_EXTENDED)
++ .extended.ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
++#endif
++#ifdef HAVE_KERNEL_WITH_VXLAN_SUPPORT_ON
++#ifdef HAVE_NDO_UDP_TUNNEL_ADD
+ .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port,
+ .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port,
++#elif defined(HAVE_NDO_UDP_TUNNEL_ADD_EXTENDED)
++ .extended.ndo_udp_tunnel_add = mlx4_en_add_vxlan_port,
++ .extended.ndo_udp_tunnel_del = mlx4_en_del_vxlan_port,
++#elif defined(HAVE_NDO_ADD_VXLAN_PORT)
++ .ndo_add_vxlan_port = mlx4_en_add_vxlan_port,
++ .ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
++#endif
++#endif
++#ifdef HAVE_NETDEV_FEATURES_T
+ .ndo_features_check = mlx4_en_features_check,
++#elif defined(HAVE_KERNEL_WITH_VXLAN_SUPPORT_ON) && defined(HAVE_VXLAN_GSO_CHECK)
++ .ndo_gso_check = mlx4_en_gso_check,
++#endif
++#ifdef HAVE_NDO_SET_TX_MAXRATE
+ .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
++#elif defined(HAVE_NDO_SET_TX_MAXRATE_EXTENDED)
++ .extended.ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
++#endif
++#ifdef HAVE_NDO_XDP_EXTENDED
++ .extended.ndo_xdp = mlx4_xdp,
++#elif defined(HAVE_XDP_BUFF)
+ .ndo_bpf = mlx4_xdp,
++#endif
+ };
+
+ static const struct net_device_ops mlx4_netdev_ops_master = {
++#ifdef HAVE_NET_DEVICE_OPS_EXTENDED
++ .ndo_size = sizeof(struct net_device_ops),
++#endif
+ .ndo_open = mlx4_en_open,
+ .ndo_stop = mlx4_en_close,
+ .ndo_start_xmit = mlx4_en_xmit,
+@@ -2954,12 +3254,20 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
+ .ndo_set_rx_mode = mlx4_en_set_rx_mode,
+ .ndo_set_mac_address = mlx4_en_set_mac,
+ .ndo_validate_addr = eth_validate_addr,
++#ifdef HAVE_NDO_CHANGE_MTU_EXTENDED
++ .extended.ndo_change_mtu = mlx4_en_change_mtu,
++#else
+ .ndo_change_mtu = mlx4_en_change_mtu,
++#endif
+ .ndo_tx_timeout = mlx4_en_tx_timeout,
+ .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
+ .ndo_set_vf_mac = mlx4_en_set_vf_mac,
++#ifdef HAVE_NDO_SET_VF_VLAN_EXTENDED
++ .extended.ndo_set_vf_vlan = mlx4_en_set_vf_vlan,
++#else
+ .ndo_set_vf_vlan = mlx4_en_set_vf_vlan,
++#endif
+ .ndo_set_vf_rate = mlx4_en_set_vf_rate,
+ .ndo_set_vf_spoofchk = mlx4_en_set_vf_spoofchk,
+ .ndo_set_vf_link_state = mlx4_en_set_vf_link_state,
+@@ -2967,16 +3275,48 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
+ .ndo_get_vf_config = mlx4_en_get_vf_config,
+ .ndo_set_features = mlx4_en_set_features,
+ .ndo_fix_features = mlx4_en_fix_features,
++#ifdef HAVE_NDO_SETUP_TC_RH_EXTENDED
++ .extended.ndo_setup_tc_rh = __mlx4_en_setup_tc,
++#else
++#if defined(HAVE_NDO_SETUP_TC_4_PARAMS) || \
++ defined(HAVE_NDO_SETUP_TC_TAKES_CHAIN_INDEX) || \
++ defined(HAVE_TC_FLOWER_OFFLOAD)
+ .ndo_setup_tc = __mlx4_en_setup_tc,
++#else
++ .ndo_setup_tc = mlx4_en_setup_tc,
++#endif
++#endif
+ #ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = mlx4_en_filter_rfs,
+ #endif
++#ifdef HAVE_NETDEV_EXT_NDO_GET_PHYS_PORT_ID
+ .ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
++#endif
++#ifdef HAVE_KERNEL_WITH_VXLAN_SUPPORT_ON
++#ifdef HAVE_NDO_UDP_TUNNEL_ADD
+ .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port,
+ .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port,
++#elif defined(HAVE_NDO_UDP_TUNNEL_ADD_EXTENDED)
++ .extended.ndo_udp_tunnel_add = mlx4_en_add_vxlan_port,
++ .extended.ndo_udp_tunnel_del = mlx4_en_del_vxlan_port,
++#elif defined(HAVE_NDO_ADD_VXLAN_PORT)
++ .ndo_add_vxlan_port = mlx4_en_add_vxlan_port,
++ .ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
++#endif
++#endif
++#ifdef HAVE_NETDEV_FEATURES_T
+ .ndo_features_check = mlx4_en_features_check,
++#ifdef HAVE_NDO_SET_TX_MAXRATE
+ .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
++#elif defined(HAVE_NDO_SET_TX_MAXRATE_EXTENDED)
++ .extended.ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
++#endif
++#endif
++#ifdef HAVE_NDO_XDP_EXTENDED
++ .extended.ndo_xdp = mlx4_xdp,
++#elif defined(HAVE_XDP_BUFF)
+ .ndo_bpf = mlx4_xdp,
++#endif
+ };
+
+ struct mlx4_en_bond {
+@@ -3406,29 +3745,41 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
+ netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
+
++#ifdef HAVE_ETHTOOL_OPS_EXT
++ SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
++ set_ethtool_ops_ext(dev, &mlx4_en_ethtool_ops_ext);
++#else
+ dev->ethtool_ops = &mlx4_en_ethtool_ops;
++#endif
+
+ /*
+ * Set driver features
+ */
++#ifdef HAVE_NETDEV_HW_FEATURES
+ dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ if (mdev->LSO_support)
+ dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
+
+ dev->vlan_features = dev->hw_features;
+
++#ifdef HAVE_NETIF_F_RXHASH
+ dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
++#else
++ dev->hw_features |= NETIF_F_RXCSUM;
++#endif
+ dev->features = dev->hw_features | NETIF_F_HIGHDMA |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER;
+ dev->hw_features |= NETIF_F_LOOPBACK |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
+
++#ifdef HAVE_NETIF_F_HW_VLAN_STAG_RX
+ if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
+ dev->features |= NETIF_F_HW_VLAN_STAG_RX |
+ NETIF_F_HW_VLAN_STAG_FILTER;
+ dev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;
+ }
++#endif
+
+ if (mlx4_is_slave(mdev->dev)) {
+ bool vlan_offload_disabled;
+@@ -3458,44 +3809,91 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
+ }
+
++#ifdef HAVE_NETIF_F_RXFCS
+ if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
+ dev->hw_features |= NETIF_F_RXFCS;
++#endif
+
++#ifdef HAVE_NETIF_F_RXALL
+ if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS)
+ dev->hw_features |= NETIF_F_RXALL;
++#endif
+
+ if (mdev->dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED &&
+ mdev->dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC)
+ dev->hw_features |= NETIF_F_NTUPLE;
++#endif
+
++#ifdef HAVE_NETDEV_IFF_UNICAST_FLT
+ if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
+ dev->priv_flags |= IFF_UNICAST_FLT;
++#endif
+
+ /* Setting a default hash function value */
+ if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP) {
++#ifdef HAVE_ETH_SS_RSS_HASH_FUNCS
+ priv->rss_hash_fn = ETH_RSS_HASH_TOP;
++#else
++ priv->pflags &= ~MLX4_EN_PRIV_FLAGS_RSS_HASH_XOR;
++#ifdef HAVE_NETIF_F_RXHASH
++ dev->features |= NETIF_F_RXHASH;
++#endif
++#endif
++
+ } else if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR) {
++#ifdef HAVE_ETH_SS_RSS_HASH_FUNCS
+ priv->rss_hash_fn = ETH_RSS_HASH_XOR;
++#else
++ priv->pflags |= MLX4_EN_PRIV_FLAGS_RSS_HASH_XOR;
++#ifdef HAVE_NETIF_F_RXHASH
++ dev->features &= ~NETIF_F_RXHASH;
++#endif
++#endif
+ } else {
+ en_warn(priv,
+ "No RSS hash capabilities exposed, using Toeplitz\n");
++#ifdef HAVE_ETH_SS_RSS_HASH_FUNCS
+ priv->rss_hash_fn = ETH_RSS_HASH_TOP;
++#else
++ priv->pflags &= ~MLX4_EN_PRIV_FLAGS_RSS_HASH_XOR;
++#ifdef HAVE_NETIF_F_RXHASH
++ dev->features |= NETIF_F_RXHASH;
++#endif
++#endif
+ }
+
+ if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
++#ifdef HAVE_NETDEV_HW_FEATURES
+ dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
++#ifdef NETIF_F_GSO_UDP_TUNNEL_CSUM
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
++#endif
++#ifdef NETIF_F_GSO_PARTIAL
+ NETIF_F_GSO_PARTIAL;
++#else
++ 0;
++#endif
+ dev->features |= NETIF_F_GSO_UDP_TUNNEL |
++#ifdef NETIF_F_GSO_UDP_TUNNEL_CSUM
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
++#endif
++#ifdef NETIF_F_GSO_PARTIAL
+ NETIF_F_GSO_PARTIAL;
+ dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
++#else
++ 0;
++#endif
+ }
+
++#ifdef HAVE_NET_DEVICE_MIN_MAX_MTU
+ /* MTU range: 68 - hw-specific max */
+ dev->min_mtu = ETH_MIN_MTU;
+ dev->max_mtu = priv->max_mtu;
++#elif defined(HAVE_NET_DEVICE_MIN_MAX_MTU_EXTENDED)
++ dev->extended->min_mtu = MLX4_EN_MIN_MTU;
++ dev->extended->max_mtu = priv->max_mtu;
++#endif
+
+ mdev->pndev[port] = dev;
+ mdev->upper[port] = NULL;
+@@ -3581,8 +3979,12 @@ int mlx4_en_reset_config(struct net_device *dev,
+
+ if (priv->hwtstamp_config.tx_type == ts_config.tx_type &&
+ priv->hwtstamp_config.rx_filter == ts_config.rx_filter &&
++#ifdef HAVE_NETIF_F_RXFCS
+ !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
+ !DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS))
++#else
++ !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX))
++#endif
+ return 0; /* Nothing to change */
+
+ if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
+@@ -3601,7 +4003,11 @@ int mlx4_en_reset_config(struct net_device *dev,
+ memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+ memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config));
+
++#ifdef HAVE_XDP_BUFF
+ err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
++#else
++ err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
++#endif
+ if (err)
+ goto out;
+
+@@ -3621,18 +4027,26 @@ int mlx4_en_reset_config(struct net_device *dev,
+ /* RX time-stamping is OFF, update the RX vlan offload
+ * to the latest wanted state
+ */
++#if defined(HAVE_NETDEV_WANTED_FEATURES) || defined(HAVE_NETDEV_EXTENDED_WANTED_FEATURES)
++#ifdef HAVE_NETDEV_WANTED_FEATURES
+ if (dev->wanted_features & NETIF_F_HW_VLAN_CTAG_RX)
++#else
++ if (netdev_extended(dev)->wanted_features & NETIF_F_HW_VLAN_CTAG_RX)
++#endif
+ dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+ else
+ dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
++#endif
+ }
+
++#ifdef HAVE_NETIF_F_RXFCS
+ if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS)) {
+ if (features & NETIF_F_RXFCS)
+ dev->features |= NETIF_F_RXFCS;
+ else
+ dev->features &= ~NETIF_F_RXFCS;
+ }
++#endif
+
+ /* RX vlan offload and RX time-stamping can't co-exist !
+ * Regardless of the caller's choice,
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+index xxxxxxx..xxxxxxx 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+@@ -31,7 +31,9 @@
+ *
+ */
+
++#ifdef HAVE_XDP_BUFF
+ #include <linux/bpf.h>
++#endif
+ #include <linux/bpf_trace.h>
+ #include <linux/mlx4/cq.h>
+ #include <linux/slab.h>
+@@ -145,9 +147,13 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
+ frags->page = ring->page_cache.buf[ring->page_cache.index].page;
+ frags->dma = ring->page_cache.buf[ring->page_cache.index].dma;
+ }
++#ifdef HAVE_XDP_BUFF
+ frags->page_offset = XDP_PACKET_HEADROOM;
+ rx_desc->data[0].addr = cpu_to_be64(frags->dma +
+ XDP_PACKET_HEADROOM);
++#else
++ rx_desc->data[0].addr = cpu_to_be64(frags->dma);
++#endif
+ return 0;
+ }
+
+@@ -283,8 +289,10 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
+ ring->log_stride = ffs(ring->stride) - 1;
+ ring->buf_size = ring->size * ring->stride + TXBB_SIZE;
+
++#ifdef HAVE_XDP_RXQ_INFO
+ if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index) < 0)
+ goto err_ring;
++#endif
+
+ tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
+ sizeof(struct mlx4_en_rx_alloc));
+@@ -315,8 +323,10 @@ err_info:
+ kvfree(ring->rx_info);
+ ring->rx_info = NULL;
+ err_xdp_info:
++#ifdef HAVE_XDP_RXQ_INFO
+ xdp_rxq_info_unreg(&ring->xdp_rxq);
+ err_ring:
++#endif
+ kfree(ring);
+ *pring = NULL;
+
+@@ -432,6 +442,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
+ {
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_rx_ring *ring = *pring;
++#ifdef HAVE_XDP_BUFF
+ struct bpf_prog *old_prog;
+
+ old_prog = rcu_dereference_protected(
+@@ -439,7 +450,10 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
+ lockdep_is_held(&mdev->state_lock));
+ if (old_prog)
+ bpf_prog_put(old_prog);
++#ifdef HAVE_XDP_RXQ_INFO
+ xdp_rxq_info_unreg(&ring->xdp_rxq);
++#endif
++#endif
+ mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
+ kvfree(ring->rx_info);
+ ring->rx_info = NULL;
+@@ -511,7 +525,11 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
+ dma_unmap_page(priv->ddev, dma, PAGE_SIZE, priv->dma_dir);
+ frags->page = NULL;
+ } else {
++#ifdef HAVE_PAGE_REF_COUNT_ADD_SUB_INC
+ page_ref_inc(page);
++#else
++ atomic_inc(&frags->page->_count);
++#endif
+ }
+
+ nr++;
+@@ -666,11 +684,17 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ int factor = priv->cqe_factor;
+ struct mlx4_en_rx_ring *ring;
++#ifdef HAVE_XDP_BUFF
+ struct bpf_prog *xdp_prog;
++#endif
+ int cq_ring = cq->ring;
++#ifdef HAVE_XDP_BUFF
+ bool doorbell_pending;
++#endif
+ struct mlx4_cqe *cqe;
++#ifdef HAVE_XDP_BUFF
+ struct xdp_buff xdp;
++#endif
+ int polled = 0;
+ int index;
+
+@@ -679,11 +703,15 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
+
+ ring = priv->rx_ring[cq_ring];
+
++#ifdef HAVE_XDP_BUFF
+ /* Protect accesses to: ring->xdp_prog, priv->mac_hash list */
+ rcu_read_lock();
+ xdp_prog = rcu_dereference(ring->xdp_prog);
++#ifdef HAVE_XDP_RXQ_INFO
+ xdp.rxq = &ring->xdp_rxq;
++#endif
+ doorbell_pending = 0;
++#endif
+
+ /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
+ * descriptor offset can be deduced from the CQE index instead of
+@@ -766,9 +794,12 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
+ /* A bpf program gets first chance to drop the packet. It may
+ * read bytes but not past the end of the frag.
+ */
++#ifdef HAVE_XDP_BUFF
+ if (xdp_prog) {
+ dma_addr_t dma;
++#ifdef HAVE_XDP_BUFF_DATA_HARD_START
+ void *orig_data;
++#endif
+ u32 act;
+
+ dma = frags[0].dma + frags[0].page_offset;
+@@ -776,20 +807,29 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
+ priv->frag_info[0].frag_size,
+ DMA_FROM_DEVICE);
+
++#ifdef HAVE_XDP_BUFF_DATA_HARD_START
+ xdp.data_hard_start = va - frags[0].page_offset;
+ xdp.data = va;
++#ifdef HAVE_XDP_SET_DATA_META_INVALID
+ xdp_set_data_meta_invalid(&xdp);
++#endif
+ xdp.data_end = xdp.data + length;
+ orig_data = xdp.data;
++#else
++ xdp.data = va;
++ xdp.data_end = xdp.data + length;
++#endif
+
+ act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
++#ifdef HAVE_XDP_BUFF_DATA_HARD_START
+ length = xdp.data_end - xdp.data;
+ if (xdp.data != orig_data) {
+ frags[0].page_offset = xdp.data -
+ xdp.data_hard_start;
+ va = xdp.data;
+ }
++#endif
+
+ switch (act) {
+ case XDP_PASS:
+@@ -801,13 +841,17 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
+ frags[0].page = NULL;
+ goto next;
+ }
++#ifdef HAVE_TRACE_XDP_EXCEPTION
+ trace_xdp_exception(dev, xdp_prog, act);
++#endif
+ goto xdp_drop_no_cnt; /* Drop on xmit failure */
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ /* fall through */
+ case XDP_ABORTED:
++#ifdef HAVE_TRACE_XDP_EXCEPTION
+ trace_xdp_exception(dev, xdp_prog, act);
++#endif
+ /* fall through */
+ case XDP_DROP:
+ ring->xdp_drop++;
+@@ -815,6 +859,7 @@ xdp_drop_no_cnt:
+ goto next;
+ }
+ }
++#endif
+
+ ring->bytes += length;
+ ring->packets++;
+@@ -901,13 +946,17 @@ next:
+ break;
+ }
+
++#ifdef HAVE_XDP_BUFF
+ rcu_read_unlock();
++#endif
+
+ if (likely(polled)) {
++#ifdef HAVE_XDP_BUFF
+ if (doorbell_pending) {
+ priv->tx_cq[TX_XDP][cq_ring]->xdp_busy = true;
+ mlx4_en_xmit_doorbell(priv->tx_ring[TX_XDP][cq_ring]);
+ }
++#endif
+
+ mlx4_cq_set_ci(&cq->mcq);
+ wmb(); /* ensure HW sees CQ consumer before we post new buffers */
+@@ -956,7 +1005,9 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
+ /* If we used up all the quota - we're probably not done yet... */
+ if (done == budget || !clean_complete) {
+ const struct cpumask *aff;
++#ifndef HAVE_IRQ_DATA_AFFINITY
+ struct irq_data *idata;
++#endif
+ int cpu_curr;
+
+ /* in case we got here because of !clean_complete */
+@@ -965,8 +1016,12 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
+ INC_PERF_COUNTER(priv->pstats.napi_quota);
+
+ cpu_curr = smp_processor_id();
++#ifndef HAVE_IRQ_DATA_AFFINITY
+ idata = irq_desc_get_irq_data(cq->irq_desc);
+ aff = irq_data_get_affinity_mask(idata);
++#else
++ aff = irq_desc_get_irq_data(cq->irq_desc)->affinity;
++#endif
+
+ if (likely(cpumask_test_cpu(cpu_curr, aff)))
+ return budget;
+@@ -981,8 +1036,18 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
+ done--;
+ }
+ /* Done for now */
++#ifdef HAVE_NAPI_COMPLETE_DONE
++#ifdef NAPI_COMPLETE_DONE_RET_VALUE
+ if (likely(napi_complete_done(napi, done)))
+ mlx4_en_arm_cq(priv, cq);
++#else
++ napi_complete_done(napi, done);
++ mlx4_en_arm_cq(priv, cq);
++#endif
++#else
++ napi_complete(napi);
++ mlx4_en_arm_cq(priv, cq);
++#endif
+ return done;
+ }
+
+@@ -992,6 +1057,7 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
+ int eff_mtu = MLX4_EN_EFF_MTU(dev->mtu);
+ int i = 0;
+
++#ifdef HAVE_XDP_BUFF
+ /* bpf requires buffers to be set up as 1 packet per page.
+ * This only works when num_frags == 1.
+ */
+@@ -1004,7 +1070,9 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
+ priv->dma_dir = PCI_DMA_BIDIRECTIONAL;
+ priv->rx_headroom = XDP_PACKET_HEADROOM;
+ i = 1;
+- } else {
++ } else
++#endif
++ {
+ int frag_size_max = 2048, buf_size = 0;
+
+ /* should not happen, right ? */
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+index xxxxxxx..xxxxxxx 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+@@ -63,8 +63,8 @@ static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv)
+
+ skb_reserve(skb, NET_IP_ALIGN);
+
+- ethh = skb_put(skb, sizeof(struct ethhdr));
+- packet = skb_put(skb, packet_size);
++ ethh = (struct ethhdr *)skb_put(skb, sizeof(struct ethhdr));
++ packet = (unsigned char *)skb_put(skb, packet_size);
+ memcpy(ethh->h_dest, priv->dev->dev_addr, ETH_ALEN);
+ eth_zero_addr(ethh->h_source);
+ ethh->h_proto = htons(ETH_P_ARP);
+diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
+index xxxxxxx..xxxxxxx 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/main.c
++++ b/drivers/net/ethernet/mellanox/mlx4/main.c
+@@ -3511,12 +3511,14 @@ slave_start:
+ }
+ }
+
++#ifdef HAVE_PCIE_PRINT_LINK_STATUS
+ /* check if the device is functioning at its maximum possible speed.
+ * No return code for this call, just warn the user in case of PCI
+ * express device capabilities are under-satisfied by the bus.
+ */
+ if (!mlx4_is_slave(dev))
+ pcie_print_link_status(dev->persist->pdev);
++#endif
+
+ /* In master functions, the communication channel must be initialized
+ * after obtaining its address from fw */
+diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+index xxxxxxx..xxxxxxx 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
++++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+@@ -46,7 +46,9 @@
+ #endif
+ #include <linux/cpu_rmap.h>
+ #include <linux/ptp_clock_kernel.h>
++#ifdef HAVE_NET_XDP_H
+ #include <net/xdp.h>
++#endif
+
+ #include <linux/mlx4/device.h>
+ #include <linux/mlx4/qp.h>
+@@ -345,7 +347,9 @@ struct mlx4_en_rx_ring {
+ u8 fcs_del;
+ void *buf;
+ void *rx_info;
++#ifdef HAVE_XDP_BUFF
+ struct bpf_prog __rcu *xdp_prog;
++#endif
+ struct mlx4_en_page_cache page_cache;
+ unsigned long bytes;
+ unsigned long packets;
+@@ -359,7 +363,9 @@ struct mlx4_en_rx_ring {
+ unsigned long dropped;
+ int hwtstamp_rx_filter;
+ cpumask_var_t affinity_mask;
++#ifdef HAVE_XDP_RXQ_INFO
+ struct xdp_rxq_info xdp_rxq;
++#endif
+ };
+
+ struct mlx4_en_cq {
+@@ -682,8 +688,12 @@ void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
+
+ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
+ struct mlx4_en_priv *tmp,
++#ifdef HAVE_XDP_BUFF
+ struct mlx4_en_port_profile *prof,
+ bool carry_xdp_prog);
++#else
++ struct mlx4_en_port_profile *prof);
++#endif
+ void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
+ struct mlx4_en_priv *tmp);
+
+@@ -700,10 +710,12 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq);
+ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev);
+ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
++#ifdef HAVE_XDP_BUFF
+ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
+ struct mlx4_en_rx_alloc *frame,
+ struct mlx4_en_priv *priv, unsigned int length,
+ int tx_ind, bool *doorbell_pending);
++#endif
+ void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring);
+ bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
+ struct mlx4_en_rx_alloc *frame);