git.openfabrics.org - ~emulex/tmp/compat-rdma/.git/commitdiff
Added backports for RHEL6.[56]
author     Vladimir Sokolovsky <vlad@mellanox.com>
           Sun, 14 Dec 2014 12:09:09 +0000 (14:09 +0200)
committer  Vladimir Sokolovsky <vlad@mellanox.com>
           Tue, 16 Dec 2014 14:15:18 +0000 (16:15 +0200)
Signed-off-by: Vladimir Sokolovsky <vlad@mellanox.com>
patches/0003-BACKPORT-mlx4-core-and-en.patch [deleted file]
patches/0003-BACKPORT-mlx4.patch [new file with mode: 0644]
patches/0011-BACKPORT-ib-core-for-kernels-under-3.10.patch

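The rewritten patch below follows the usual compat-rdma backport pattern: each API that is missing on the RHEL6.5/6.6 base kernels is guarded by a feature macro (HAVE_*) or a LINUX_VERSION_CODE check, with an equivalent fallback in the #else branch. A minimal sketch of that pattern, assuming HAVE_NAPI_HASH_ADD is one of the macros produced by the compat configure checks (the helper name below is illustrative only, not part of the patch):

#include <linux/netdevice.h>

/* Register an RX NAPI context for busy polling where the kernel supports it. */
static void example_enable_busy_poll(struct napi_struct *napi)
{
#ifdef HAVE_NAPI_HASH_ADD
	/* Newer kernels expose napi_hash_add() for busy-poll socket lookup. */
	napi_hash_add(napi);
#else
	/* RHEL6.x base kernels lack the API; registration is skipped and the
	 * driver falls back to plain NAPI polling. */
	(void)napi;
#endif
}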
diff --git a/patches/0003-BACKPORT-mlx4-core-and-en.patch b/patches/0003-BACKPORT-mlx4-core-and-en.patch
deleted file mode 100644 (file)
index 612ea70..0000000
+++ /dev/null
@@ -1,650 +0,0 @@
-From: Vladimir Sokolovsky <vlad@mellanox.com>
-Subject: [PATCH] BACKPORT: mlx4 core and en
-
-Signed-off-by: Vladimir Sokolovsky <vlad@mellanox.com>
----
- drivers/net/ethernet/mellanox/mlx4/cmd.c        |    6 +++
- drivers/net/ethernet/mellanox/mlx4/en_clock.c   |    2 +
- drivers/net/ethernet/mellanox/mlx4/en_cq.c      |    5 ++
- drivers/net/ethernet/mellanox/mlx4/en_ethtool.c |    9 ++++-
- drivers/net/ethernet/mellanox/mlx4/en_netdev.c  |   26 +++++++++++++
- drivers/net/ethernet/mellanox/mlx4/en_rx.c      |   46 ++++++++++++++++++++++-
- drivers/net/ethernet/mellanox/mlx4/en_tx.c      |   35 +++++++++++++++++-
- drivers/net/ethernet/mellanox/mlx4/eq.c         |   10 +++++
- drivers/net/ethernet/mellanox/mlx4/main.c       |   24 ++++++++++++
- drivers/net/ethernet/mellanox/mlx4/mlx4_en.h    |    4 ++
- 10 files changed, 164 insertions(+), 3 deletions(-)
-
-diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
-index xxxxxxx..xxxxxxx xxxxxx
---- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
-+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
-@@ -2545,15 +2545,20 @@ int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_in
-       ivf->vlan               = s_info->default_vlan;
-       ivf->qos                = s_info->default_qos;
-+#ifdef HAVE_TX_RATE_LIMIT
-       ivf->max_tx_rate        = s_info->tx_rate;
-       ivf->min_tx_rate        = 0;
-+#endif
-       ivf->spoofchk           = s_info->spoofchk;
-+#ifdef HAVE_LINKSTATE
-       ivf->linkstate          = s_info->link_state;
-+#endif
-       return 0;
- }
- EXPORT_SYMBOL_GPL(mlx4_get_vf_config);
-+#ifdef HAVE_LINKSTATE
- int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state)
- {
-       struct mlx4_priv *priv = mlx4_priv(dev);
-@@ -2601,6 +2606,7 @@ int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_stat
-       return 0;
- }
- EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state);
-+#endif
- int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port)
- {
-diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
-index xxxxxxx..xxxxxxx xxxxxx
---- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
-+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
-@@ -276,7 +276,9 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
-       .n_alarm        = 0,
-       .n_ext_ts       = 0,
-       .n_per_out      = 0,
-+#ifdef HAVE_PTP_CLOCK_INFO_N_PINS
-       .n_pins         = 0,
-+#endif
-       .pps            = 0,
-       .adjfreq        = mlx4_en_phc_adjfreq,
-       .adjtime        = mlx4_en_phc_adjtime,
-diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
-index xxxxxxx..xxxxxxx xxxxxx
---- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
-+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
-@@ -34,6 +34,7 @@
- #include <linux/mlx4/cq.h>
- #include <linux/mlx4/qp.h>
- #include <linux/mlx4/cmd.h>
-+#include <linux/interrupt.h>
- #include "mlx4_en.h"
-@@ -176,7 +177,9 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
-                       mlx4_warn(mdev, "Failed setting affinity hint\n");
-               netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
-+#ifdef HAVE_NAPI_HASH_ADD
-               napi_hash_add(&cq->napi);
-+#endif
-       }
-       napi_enable(&cq->napi);
-@@ -205,8 +208,10 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
- {
-       napi_disable(&cq->napi);
-       if (!cq->is_tx) {
-+#ifdef HAVE_NAPI_HASH_ADD
-               napi_hash_del(&cq->napi);
-               synchronize_rcu();
-+#endif
-               irq_set_affinity_hint(cq->mcq.irq, NULL);
-       }
-       netif_napi_del(&cq->napi);
-diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
-index xxxxxxx..xxxxxxx xxxxxx
---- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
-+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
-@@ -580,6 +580,7 @@ static void mlx4_en_get_ringparam(struct net_device *dev,
-       param->tx_pending = priv->tx_ring[0]->size;
- }
-+#ifdef HAVE_GET_SET_RXFH
- static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
- {
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-@@ -650,6 +651,7 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
-       mutex_unlock(&mdev->state_lock);
-       return err;
- }
-+#endif
- #define all_zeros_or_all_ones(field)          \
-       ((field) == 0 || (field) == (__force typeof(field))-1)
-@@ -1267,6 +1269,7 @@ static u32 mlx4_en_get_priv_flags(struct net_device *dev)
-       return priv->pflags;
- }
-+#ifdef HAVE_GET_SET_TUNABLE
- static int mlx4_en_get_tunable(struct net_device *dev,
-                              const struct ethtool_tunable *tuna,
-                              void *data)
-@@ -1308,7 +1311,7 @@ static int mlx4_en_set_tunable(struct net_device *dev,
-       return ret;
- }
--
-+#endif
- const struct ethtool_ops mlx4_en_ethtool_ops = {
-       .get_drvinfo = mlx4_en_get_drvinfo,
-@@ -1331,16 +1334,20 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
-       .set_ringparam = mlx4_en_set_ringparam,
-       .get_rxnfc = mlx4_en_get_rxnfc,
-       .set_rxnfc = mlx4_en_set_rxnfc,
-+#ifdef HAVE_GET_SET_RXFH
-       .get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
-       .get_rxfh = mlx4_en_get_rxfh,
-       .set_rxfh = mlx4_en_set_rxfh,
-+#endif
-       .get_channels = mlx4_en_get_channels,
-       .set_channels = mlx4_en_set_channels,
-       .get_ts_info = mlx4_en_get_ts_info,
-       .set_priv_flags = mlx4_en_set_priv_flags,
-       .get_priv_flags = mlx4_en_get_priv_flags,
-+#ifdef HAVE_GET_SET_TUNABLE
-       .get_tunable            = mlx4_en_get_tunable,
-       .set_tunable            = mlx4_en_set_tunable,
-+#endif
- };
-diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
-index xxxxxxx..xxxxxxx xxxxxx
---- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
-+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
-@@ -38,8 +38,12 @@
- #include <linux/slab.h>
- #include <linux/hash.h>
- #include <net/ip.h>
-+#ifdef HAVE_SKB_MARK_NAPI_ID
- #include <net/busy_poll.h>
-+#endif
-+#ifdef CONFIG_MLX4_EN_VXLAN
- #include <net/vxlan.h>
-+#endif
- #include <linux/mlx4/driver.h>
- #include <linux/mlx4/device.h>
-@@ -2107,7 +2111,11 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
-       return 0;
- }
-+#ifdef HAVE_SIOCGHWTSTAMP
- static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
-+#else
-+static int mlx4_en_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
-+#endif
- {
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-       struct mlx4_en_dev *mdev = priv->mdev;
-@@ -2166,6 +2174,7 @@ static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
-                           sizeof(config)) ? -EFAULT : 0;
- }
-+#ifdef HAVE_SIOCGHWTSTAMP
- static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
- {
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-@@ -2173,14 +2182,19 @@ static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
-       return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config,
-                           sizeof(priv->hwtstamp_config)) ? -EFAULT : 0;
- }
-+#endif
- static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
- {
-       switch (cmd) {
-       case SIOCSHWTSTAMP:
-+#ifdef HAVE_SIOCGHWTSTAMP
-               return mlx4_en_hwtstamp_set(dev, ifr);
-       case SIOCGHWTSTAMP:
-               return mlx4_en_hwtstamp_get(dev, ifr);
-+#else
-+              return mlx4_en_hwtstamp_ioctl(dev, ifr);
-+#endif
-       default:
-               return -EOPNOTSUPP;
-       }
-@@ -2239,6 +2253,7 @@ static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_
-       return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf);
- }
-+#ifdef HAVE_LINKSTATE
- static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_state)
- {
-       struct mlx4_en_priv *en_priv = netdev_priv(dev);
-@@ -2246,7 +2261,9 @@ static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_st
-       return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
- }
-+#endif
-+#ifdef HAVE_NETDEV_PHYS_PORT_ID
- #define PORT_ID_BYTE_LEN 8
- static int mlx4_en_get_phys_port_id(struct net_device *dev,
-                                   struct netdev_phys_port_id *ppid)
-@@ -2266,6 +2283,7 @@ static int mlx4_en_get_phys_port_id(struct net_device *dev,
-       }
-       return 0;
- }
-+#endif
- #ifdef CONFIG_MLX4_EN_VXLAN
- static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
-@@ -2387,7 +2405,9 @@ static const struct net_device_ops mlx4_netdev_ops = {
- #ifdef CONFIG_NET_RX_BUSY_POLL
-       .ndo_busy_poll          = mlx4_en_low_latency_recv,
- #endif
-+#ifdef HAVE_NETDEV_PHYS_PORT_ID
-       .ndo_get_phys_port_id   = mlx4_en_get_phys_port_id,
-+#endif
- #ifdef CONFIG_MLX4_EN_VXLAN
-       .ndo_add_vxlan_port     = mlx4_en_add_vxlan_port,
-       .ndo_del_vxlan_port     = mlx4_en_del_vxlan_port,
-@@ -2411,7 +2431,9 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
-       .ndo_set_vf_mac         = mlx4_en_set_vf_mac,
-       .ndo_set_vf_vlan        = mlx4_en_set_vf_vlan,
-       .ndo_set_vf_spoofchk    = mlx4_en_set_vf_spoofchk,
-+#ifdef HAVE_LINKSTATE
-       .ndo_set_vf_link_state  = mlx4_en_set_vf_link_state,
-+#endif
-       .ndo_get_vf_config      = mlx4_en_get_vf_config,
- #ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = mlx4_en_netpoll,
-@@ -2421,7 +2443,9 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
- #ifdef CONFIG_RFS_ACCEL
-       .ndo_rx_flow_steer      = mlx4_en_filter_rfs,
- #endif
-+#ifdef HAVE_NETDEV_PHYS_PORT_ID
-       .ndo_get_phys_port_id   = mlx4_en_get_phys_port_id,
-+#endif
- #ifdef CONFIG_MLX4_EN_VXLAN
-       .ndo_add_vxlan_port     = mlx4_en_add_vxlan_port,
-       .ndo_del_vxlan_port     = mlx4_en_del_vxlan_port,
-@@ -2447,7 +2471,9 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
-       netif_set_real_num_rx_queues(dev, prof->rx_ring_num);
-       SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
-+#ifdef HAVE_NET_DEVICE_DEV_PORT
-       dev->dev_port = port - 1;
-+#endif
-       /*
-        * Initialize driver private data
-diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
-index xxxxxxx..xxxxxxx xxxxxx
---- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
-+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
-@@ -30,8 +30,9 @@
-  * SOFTWARE.
-  *
-  */
--
-+#ifdef HAVE_SKB_MARK_NAPI_ID
- #include <net/busy_poll.h>
-+#endif
- #include <linux/mlx4/cq.h>
- #include <linux/slab.h>
- #include <linux/mlx4/qp.h>
-@@ -588,7 +589,9 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
-               skb_copy_to_linear_data(skb, va, length);
-               skb->tail += length;
-       } else {
-+#ifdef HAVE_ETH_GET_HEADLEN
-               unsigned int pull_len;
-+#endif
-               /* Move relevant fragments to skb */
-               used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, frags,
-@@ -599,6 +602,7 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
-               }
-               skb_shinfo(skb)->nr_frags = used_frags;
-+#ifdef HAVE_ETH_GET_HEADLEN
-               pull_len = eth_get_headlen(va, SMALL_PACKET_SIZE);
-               /* Copy headers into the skb linear buffer */
-               memcpy(skb->data, va, pull_len);
-@@ -610,6 +614,17 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
-               /* Adjust size of first fragment */
-               skb_frag_size_sub(&skb_shinfo(skb)->frags[0], pull_len);
-               skb->data_len = length - pull_len;
-+#else
-+              memcpy(skb->data, va, HEADER_COPY_SIZE);
-+              skb->tail += HEADER_COPY_SIZE;
-+
-+              /* Skip headers in first fragment */
-+              skb_shinfo(skb)->frags[0].page_offset += HEADER_COPY_SIZE;
-+
-+              /* Adjust size of first fragment */
-+              skb_frag_size_sub(&skb_shinfo(skb)->frags[0], HEADER_COPY_SIZE);
-+              skb->data_len = length - HEADER_COPY_SIZE;
-+#endif
-       }
-       return skb;
- }
-@@ -754,8 +769,12 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
-                                * - not an IP fragment
-                                * - no LLS polling in progress
-                                */
-+#ifdef HAVE_SKB_MARK_NAPI_ID
-                               if (!mlx4_en_cq_busy_polling(cq) &&
-                                   (dev->features & NETIF_F_GRO)) {
-+#else
-+                                      if (dev->features & NETIF_F_GRO) {
-+#endif
-                                       struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
-                                       if (!gro_skb)
-                                               goto next;
-@@ -772,7 +791,11 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
-                                       gro_skb->ip_summed = CHECKSUM_UNNECESSARY;
-                                       if (l2_tunnel)
-+#ifdef HAVE_SK_BUFF_CSUM_LEVEL
-                                               gro_skb->csum_level = 1;
-+#else
-+                                              gro_skb->encapsulation = 1;
-+#endif
-                                       if ((cqe->vlan_my_qpn &
-                                           cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) &&
-                                           (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
-@@ -782,12 +805,18 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
-                                       }
-                                       if (dev->features & NETIF_F_RXHASH)
-+#ifdef HAVE_SKB_SET_HASH
-                                               skb_set_hash(gro_skb,
-                                                            be32_to_cpu(cqe->immed_rss_invalid),
-                                                            PKT_HASH_TYPE_L3);
-+#else
-+                                      gro_skb->rxhash = be32_to_cpu(cqe->immed_rss_invalid);
-+#endif
-                                       skb_record_rx_queue(gro_skb, cq->ring);
-+#ifdef HAVE_SKB_MARK_NAPI_ID
-                                       skb_mark_napi_id(gro_skb, &cq->napi);
-+#endif
-                                       if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
-                                               timestamp = mlx4_en_get_cqe_ts(cqe);
-@@ -826,13 +855,22 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
-               skb->protocol = eth_type_trans(skb, dev);
-               skb_record_rx_queue(skb, cq->ring);
-+#ifdef HAVE_SK_BUFF_CSUM_LEVEL
-               if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY)
-                       skb->csum_level = 1;
-+#else
-+              if (l2_tunnel)
-+                      skb->encapsulation = 1;
-+#endif
-+#ifdef HAVE_SKB_SET_HASH
-               if (dev->features & NETIF_F_RXHASH)
-                       skb_set_hash(skb,
-                                    be32_to_cpu(cqe->immed_rss_invalid),
-                                    PKT_HASH_TYPE_L3);
-+#else
-+                      skb->rxhash = be32_to_cpu(cqe->immed_rss_invalid);
-+#endif
-               if ((be32_to_cpu(cqe->vlan_my_qpn) &
-                   MLX4_CQE_VLAN_PRESENT_MASK) &&
-@@ -845,7 +883,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
-                                              timestamp);
-               }
-+#ifdef HAVE_SKB_MARK_NAPI_ID
-               skb_mark_napi_id(skb, &cq->napi);
-+#endif
-               if (!mlx4_en_cq_busy_polling(cq))
-                       napi_gro_receive(&cq->napi, skb);
-@@ -893,12 +933,16 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-       int done;
-+#ifdef HAVE_SKB_MARK_NAPI_ID
-       if (!mlx4_en_cq_lock_napi(cq))
-               return budget;
-+#endif
-       done = mlx4_en_process_rx_cq(dev, cq, budget);
-+#ifdef HAVE_SKB_MARK_NAPI_ID
-       mlx4_en_cq_unlock_napi(cq);
-+#endif
-       /* If we used up all the quota - we're probably not done yet... */
-       if (done == budget) {
-diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
-index xxxxxxx..xxxxxxx xxxxxx
---- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
-+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
-@@ -151,7 +151,11 @@ err_bounce:
-       kfree(ring->bounce_buf);
-       ring->bounce_buf = NULL;
- err_info:
-+#ifdef HAVE_KVFREE
-       kvfree(ring->tx_info);
-+#else
-+      vfree(ring->tx_info);
-+#endif
-       ring->tx_info = NULL;
- err_ring:
-       kfree(ring);
-@@ -174,7 +178,11 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
-       mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
-       kfree(ring->bounce_buf);
-       ring->bounce_buf = NULL;
-+#ifdef HAVE_KVFREE
-       kvfree(ring->tx_info);
-+#else
-+      vfree(ring->tx_info);
-+#endif
-       ring->tx_info = NULL;
-       kfree(ring);
-       *pring = NULL;
-@@ -328,7 +336,11 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
-                       }
-               }
-       }
-+#ifdef HAVE_DEV_CONSUME_SKB_ANY
-       dev_consume_skb_any(skb);
-+#else
-+      dev_kfree_skb_any(skb);
-+#endif
-       return tx_info->nr_txbb;
- }
-@@ -392,7 +404,11 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
-       if (!priv->port_up)
-               return true;
-+#ifdef HAVE_NETDEV_TXQ_BQL_PREFETCHW
-       netdev_txq_bql_complete_prefetchw(ring->tx_queue);
-+#else
-+      prefetchw(&ring->tx_queue->dql.limit);
-+#endif
-       index = cons_index & size_mask;
-       cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
-@@ -665,7 +681,11 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc,
- }
- u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
-+#ifdef HAVE_SELECT_QUEUE_FALLBACK_T
-                        void *accel_priv, select_queue_fallback_t fallback)
-+#else
-+                       void *accel_priv)
-+#endif
- {
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-       u16 rings_p_up = priv->num_tx_rings_p_up;
-@@ -677,7 +697,11 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
-       if (vlan_tx_tag_present(skb))
-               up = vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT;
-+#ifdef HAVE_SELECT_QUEUE_FALLBACK_T
-       return fallback(dev, skb) % rings_p_up + up * rings_p_up;
-+#else
-+      return __netdev_pick_tx(dev, skb) % rings_p_up + up * rings_p_up;
-+#endif
- }
- static void mlx4_bf_copy(void __iomem *dst, const void *src,
-@@ -737,8 +761,11 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
-       if (vlan_tx_tag_present(skb))
-               vlan_tag = vlan_tx_tag_get(skb);
--
-+#ifdef HAVE_NETDEV_TXQ_BQL_PREFETCHW
-       netdev_txq_bql_enqueue_prefetchw(ring->tx_queue);
-+#else
-+      prefetchw(&ring->tx_queue->dql);
-+#endif
-       /* Track current inflight packets for performance analysis */
-       AVG_PERF_COUNTER(priv->pstats.inflight_avg,
-@@ -917,7 +944,11 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
-               netif_tx_stop_queue(ring->tx_queue);
-               ring->queue_stopped++;
-       }
-+#ifdef HAVE_SK_BUFF_XMIT_MORE
-       send_doorbell = !skb->xmit_more || netif_xmit_stopped(ring->tx_queue);
-+#else
-+      send_doorbell = 1;
-+#endif
-       real_size = (real_size / 16) & 0x3f;
-@@ -956,8 +987,10 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
-                       wmb();
-                       iowrite32(ring->doorbell_qpn,
-                                 ring->bf.uar->map + MLX4_SEND_DOORBELL);
-+#ifdef HAVE_SK_BUFF_XMIT_MORE
-               } else {
-                       ring->xmit_more++;
-+#endif
-               }
-       }
-diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
-index xxxxxxx..xxxxxxx xxxxxx
---- a/drivers/net/ethernet/mellanox/mlx4/eq.c
-+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
-@@ -461,7 +461,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
-       int i;
-       enum slave_port_gen_event gen_event;
-       unsigned long flags;
-+#ifdef HAVE_LINKSTATE
-       struct mlx4_vport_state *s_info;
-+#endif
-       int eqe_size = dev->caps.eqe_size;
-       while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor, eqe_size))) {
-@@ -568,6 +570,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
-                                                       continue;
-                                               mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n",
-                                                        __func__, i, port);
-+#ifdef HAVE_LINKSTATE
-                                               s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
-                                               if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
-                                                       eqe->event.port_change.port =
-@@ -576,6 +579,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
-                                                               | (mlx4_phys_to_slave_port(dev, i, port) << 28));
-                                                       mlx4_slave_event(dev, i, eqe);
-                                               }
-+#else
-+                                              mlx4_slave_event(dev, i, eqe);
-+#endif
-                                       } else {  /* IB port */
-                                               set_and_calc_slave_port_state(dev, i, port,
-                                                                             MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
-@@ -601,6 +607,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
-                                                       continue;
-                                               if (i == mlx4_master_func_num(dev))
-                                                       continue;
-+#ifdef HAVE_LINKSTATE
-                                               s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
-                                               if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
-                                                       eqe->event.port_change.port =
-@@ -609,6 +616,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
-                                                               | (mlx4_phys_to_slave_port(dev, i, port) << 28));
-                                                       mlx4_slave_event(dev, i, eqe);
-                                               }
-+#else
-+                                              mlx4_slave_event(dev, i, eqe);
-+#endif
-                                       }
-                               else /* IB port */
-                                       /* port-up event will be sent to a slave when the
-diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
-index xxxxxxx..xxxxxxx xxxxxx
---- a/drivers/net/ethernet/mellanox/mlx4/main.c
-+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
-@@ -2057,6 +2057,9 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
-       int nreq = min_t(int, dev->caps.num_ports *
-                        min_t(int, num_online_cpus() + 1,
-                              MAX_MSIX_P_PORT) + MSIX_LEGACY_SZ, MAX_MSIX);
-+#ifndef HAVE_PCI_ENABLE_MSIX_RANGE
-+      int err;
-+#endif
-       int i;
-       if (msi_x) {
-@@ -2070,6 +2073,8 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
-               for (i = 0; i < nreq; ++i)
-                       entries[i].entry = i;
-+
-+#ifdef HAVE_PCI_ENABLE_MSIX_RANGE
-               nreq = pci_enable_msix_range(dev->pdev, entries, 2, nreq);
-               if (nreq < 0) {
-@@ -2077,6 +2082,25 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
-                       goto no_msi;
-               } else if (nreq < MSIX_LEGACY_SZ +
-                          dev->caps.num_ports * MIN_MSIX_P_PORT) {
-+#else
-+      retry:
-+              err = pci_enable_msix(dev->pdev, entries, nreq);
-+              if (err) {
-+                      /* Try again if at least 2 vectors are available */
-+                      if (err > 1) {
-+                              mlx4_info(dev, "Requested %d vectors, "
-+                                              "but only %d MSI-X vectors available, "
-+                                              "trying again\n", nreq, err);
-+                              nreq = err;
-+                              goto retry;
-+                      }
-+                      kfree(entries);
-+                      goto no_msi;
-+              }
-+
-+              if (nreq <
-+                              MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) {
-+#endif
-                       /*Working in legacy mode , all EQ's shared*/
-                       dev->caps.comp_pool           = 0;
-                       dev->caps.num_comp_vectors = nreq - 1;
-diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
-index xxxxxxx..xxxxxxx xxxxxx
---- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
-+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
-@@ -764,7 +764,11 @@ int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
- void mlx4_en_tx_irq(struct mlx4_cq *mcq);
- u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
-+#ifdef HAVE_SELECT_QUEUE_FALLBACK_T
-                        void *accel_priv, select_queue_fallback_t fallback);
-+#else
-+                       void *accel_priv);
-+#endif
- netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
- int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
diff --git a/patches/0003-BACKPORT-mlx4.patch b/patches/0003-BACKPORT-mlx4.patch
new file mode 100644 (file)
index 0000000..449c4c1
--- /dev/null
@@ -0,0 +1,1514 @@
+From: Vladimir Sokolovsky <vlad@mellanox.com>
+Subject: [PATCH] BACKPORT: mlx4
+
+Signed-off-by: Vladimir Sokolovsky <vlad@mellanox.com>
+---
+ drivers/infiniband/hw/mlx4/cm.c                 |   27 +++
+ drivers/infiniband/hw/mlx4/main.c               |    8 +
+ drivers/net/ethernet/mellanox/mlx4/cmd.c        |    6 +
+ drivers/net/ethernet/mellanox/mlx4/en_clock.c   |    2 +
+ drivers/net/ethernet/mellanox/mlx4/en_cq.c      |   11 ++
+ drivers/net/ethernet/mellanox/mlx4/en_ethtool.c |   58 +++++++-
+ drivers/net/ethernet/mellanox/mlx4/en_netdev.c  |  199 +++++++++++++++++++++++
+ drivers/net/ethernet/mellanox/mlx4/en_rx.c      |   84 ++++++++++-
+ drivers/net/ethernet/mellanox/mlx4/en_tx.c      |   56 +++++++-
+ drivers/net/ethernet/mellanox/mlx4/eq.c         |   10 ++
+ drivers/net/ethernet/mellanox/mlx4/main.c       |   24 +++
+ drivers/net/ethernet/mellanox/mlx4/mlx4_en.h    |   21 +++-
+ include/linux/mlx4/cq.h                         |    5 +
+ 13 files changed, 506 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/infiniband/hw/mlx4/cm.c
++++ b/drivers/infiniband/hw/mlx4/cm.c
+@@ -242,7 +242,12 @@ static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new)
+ static struct id_map_entry *
+ id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
+ {
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0))
++      int ret, id;
++      static int next_id;
++#else
+       int ret;
++#endif
+       struct id_map_entry *ent;
+       struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
+@@ -258,6 +263,27 @@ id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
+       ent->dev = to_mdev(ibdev);
+       INIT_DELAYED_WORK(&ent->timeout, id_map_ent_timeout);
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0))
++      do {
++              spin_lock(&to_mdev(ibdev)->sriov.id_map_lock);
++              ret = idr_get_new_above(&sriov->pv_id_table, ent,
++                                      next_id, &id);
++              if (!ret) {
++                      next_id = ((unsigned) id + 1) & MAX_IDR_MASK;
++                      ent->pv_cm_id = (u32)id;
++                      sl_id_map_add(ibdev, ent);
++              }
++
++              spin_unlock(&sriov->id_map_lock);
++      } while (ret == -EAGAIN && idr_pre_get(&sriov->pv_id_table, GFP_KERNEL));
++      /*the function idr_get_new_above can return -ENOSPC, so don't insert in that case.*/
++      if (!ret) {
++              spin_lock(&sriov->id_map_lock);
++              list_add_tail(&ent->list, &sriov->cm_list);
++              spin_unlock(&sriov->id_map_lock);
++              return ent;
++      }
++#else
+       idr_preload(GFP_KERNEL);
+       spin_lock(&to_mdev(ibdev)->sriov.id_map_lock);
+@@ -273,6 +299,7 @@ id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
+       if (ret >= 0)
+               return ent;
++#endif
+       /*error flow*/
+       kfree(ent);
+diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/infiniband/hw/mlx4/main.c
++++ b/drivers/infiniband/hw/mlx4/main.c
+@@ -1772,7 +1772,11 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
+       in6_dev = in6_dev_get(dev);
+       if (in6_dev) {
+               read_lock_bh(&in6_dev->lock);
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
++              for (ifp = in6_dev->addr_list; ifp; ifp = ifp->if_next) {
++#else
+               list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
++#endif
+                       pgid = (union ib_gid *)&ifp->addr;
+                       if (!memcmp(pgid, &default_gid, sizeof(*pgid)))
+                               continue;
+@@ -1853,8 +1857,12 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
+               if (iboe->netdevs[port - 1] &&
+                   netif_is_bond_slave(iboe->netdevs[port - 1])) {
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0))
++                      iboe->masters[port - 1] = iboe->netdevs[port - 1]->master;
++#else
+                       iboe->masters[port - 1] = netdev_master_upper_dev_get(
+                               iboe->netdevs[port - 1]);
++#endif
+               } else {
+                       iboe->masters[port - 1] = NULL;
+               }
+diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
+@@ -2545,10 +2545,16 @@ int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_in
+       ivf->vlan               = s_info->default_vlan;
+       ivf->qos                = s_info->default_qos;
++#ifdef HAVE_TX_RATE_LIMIT
+       ivf->max_tx_rate        = s_info->tx_rate;
+       ivf->min_tx_rate        = 0;
++#endif
++#ifdef HAVE_VF_INFO_SPOOFCHK
+       ivf->spoofchk           = s_info->spoofchk;
++#endif
++#ifdef HAVE_LINKSTATE
+       ivf->linkstate          = s_info->link_state;
++#endif
+       return 0;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+@@ -276,7 +276,9 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
+       .n_alarm        = 0,
+       .n_ext_ts       = 0,
+       .n_per_out      = 0,
++#ifdef HAVE_PTP_CLOCK_INFO_N_PINS
+       .n_pins         = 0,
++#endif
+       .pps            = 0,
+       .adjfreq        = mlx4_en_phc_adjfreq,
+       .adjtime        = mlx4_en_phc_adjtime,
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+@@ -34,6 +34,7 @@
+ #include <linux/mlx4/cq.h>
+ #include <linux/mlx4/qp.h>
+ #include <linux/mlx4/cmd.h>
++#include <linux/interrupt.h>
+ #include "mlx4_en.h"
+@@ -103,10 +104,14 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
+       int timestamp_en = 0;
+       struct cpu_rmap *rmap =
+ #ifdef CONFIG_RFS_ACCEL
++#ifdef HAVE_NETDEV_RX_CPU_RMAP
+               priv->dev->rx_cpu_rmap;
+ #else
+               NULL;
+ #endif
++#else
++              NULL;
++#endif
+       cq->dev = mdev->pndev[priv->port];
+       cq->mcq.set_ci_db  = cq->wqres.db.db;
+@@ -135,9 +140,11 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
+                               mdev->dev->caps.num_comp_vectors;
+               }
++#ifdef HAVE_IRQ_DESC_GET_IRQ_DATA
+               cq->irq_desc =
+                       irq_to_desc(mlx4_eq_get_irq(mdev->dev,
+                                                   cq->vector));
++#endif
+       } else {
+               /* For TX we use the same irq per
+               ring we assigned for the RX    */
+@@ -176,7 +183,9 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
+                       mlx4_warn(mdev, "Failed setting affinity hint\n");
+               netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
++#ifdef HAVE_NAPI_HASH_ADD
+               napi_hash_add(&cq->napi);
++#endif
+       }
+       napi_enable(&cq->napi);
+@@ -205,8 +214,10 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+ {
+       napi_disable(&cq->napi);
+       if (!cq->is_tx) {
++#ifdef HAVE_NAPI_HASH_ADD
+               napi_hash_del(&cq->napi);
+               synchronize_rcu();
++#endif
+               irq_set_affinity_hint(cq->mcq.irq, NULL);
+       }
+       netif_napi_del(&cq->napi);
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+@@ -580,6 +580,7 @@ static void mlx4_en_get_ringparam(struct net_device *dev,
+       param->tx_pending = priv->tx_ring[0]->size;
+ }
++#if defined(HAVE_GET_SET_RXFH) || defined(HAVE_GET_SET_RXFH_INDIR)
+ static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
+ {
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+@@ -587,7 +588,11 @@ static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
+       return priv->rx_ring_num;
+ }
++#ifdef HAVE_GET_SET_RXFH_INDIR
++static int mlx4_en_get_rxfh_indir(struct net_device *dev, u32 *ring_index)
++#else
+ static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key)
++#endif
+ {
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_en_rss_map *rss_map = &priv->rss_map;
+@@ -605,8 +610,13 @@ static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key)
+       return err;
+ }
++#ifdef HAVE_GET_SET_RXFH_INDIR
++static int mlx4_en_set_rxfh_indir(struct net_device *dev,
++              const u32 *ring_index)
++#else
+ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
+                           const u8 *key)
++#endif
+ {
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_en_dev *mdev = priv->mdev;
+@@ -650,6 +660,7 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
+       mutex_unlock(&mdev->state_lock);
+       return err;
+ }
++#endif
+ #define all_zeros_or_all_ones(field)          \
+       ((field) == 0 || (field) == (__force typeof(field))-1)
+@@ -1058,8 +1069,13 @@ static int mlx4_en_get_num_flows(struct mlx4_en_priv *priv)
+ }
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+ static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+                            u32 *rule_locs)
++#else
++static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
++                           void *rule_locs)
++#endif
+ {
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_en_dev *mdev = priv->mdev;
+@@ -1087,7 +1103,11 @@ static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+               while ((!err || err == -ENOENT) && priority < cmd->rule_cnt) {
+                       err = mlx4_en_get_flow(dev, cmd, i);
+                       if (!err)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+                               rule_locs[priority++] = i;
++#else
++                              ((u32 *)(rule_locs))[priority++] = i;
++#endif
+                       i++;
+               }
+               err = 0;
+@@ -1125,6 +1145,7 @@ static int mlx4_en_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+       return err;
+ }
++#if defined(HAVE_GET_SET_CHANNELS) || defined(HAVE_GET_SET_CHANNELS_EXT)
+ static void mlx4_en_get_channels(struct net_device *dev,
+                                struct ethtool_channels *channel)
+ {
+@@ -1174,8 +1195,12 @@ static int mlx4_en_set_channels(struct net_device *dev,
+       netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
+       netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
+       if (dev->num_tc)
+-              mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP);
++#else
++      if (netdev_get_num_tc(dev))
++#endif
++      mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP);
+       en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num);
+       en_warn(priv, "Using %d RX rings\n", priv->rx_ring_num);
+@@ -1192,7 +1217,9 @@ out:
+       mutex_unlock(&mdev->state_lock);
+       return err;
+ }
++#endif
++#if defined(HAVE_GET_TS_INFO) || defined(HAVE_GET_TS_INFO_EXT)
+ static int mlx4_en_get_ts_info(struct net_device *dev,
+                              struct ethtool_ts_info *info)
+ {
+@@ -1224,6 +1251,7 @@ static int mlx4_en_get_ts_info(struct net_device *dev,
+       return ret;
+ }
++#endif
+ static int mlx4_en_set_priv_flags(struct net_device *dev, u32 flags)
+ {
+@@ -1267,6 +1295,7 @@ static u32 mlx4_en_get_priv_flags(struct net_device *dev)
+       return priv->pflags;
+ }
++#ifdef HAVE_GET_SET_TUNABLE
+ static int mlx4_en_get_tunable(struct net_device *dev,
+                              const struct ethtool_tunable *tuna,
+                              void *data)
+@@ -1308,7 +1337,7 @@ static int mlx4_en_set_tunable(struct net_device *dev,
+       return ret;
+ }
+-
++#endif
+ const struct ethtool_ops mlx4_en_ethtool_ops = {
+       .get_drvinfo = mlx4_en_get_drvinfo,
+@@ -1331,18 +1360,43 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
+       .set_ringparam = mlx4_en_set_ringparam,
+       .get_rxnfc = mlx4_en_get_rxnfc,
+       .set_rxnfc = mlx4_en_set_rxnfc,
++#if defined(HAVE_GET_SET_RXFH) && !defined(HAVE_GET_SET_RXFH_INDIR)
+       .get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
+       .get_rxfh = mlx4_en_get_rxfh,
+       .set_rxfh = mlx4_en_set_rxfh,
++#endif
++#ifdef HAVE_GET_SET_CHANNELS
+       .get_channels = mlx4_en_get_channels,
+       .set_channels = mlx4_en_set_channels,
++#endif
++#if defined(HAVE_GET_TS_INFO) && !defined(HAVE_GET_TS_INFO_EXT)
+       .get_ts_info = mlx4_en_get_ts_info,
++#endif
+       .set_priv_flags = mlx4_en_set_priv_flags,
+       .get_priv_flags = mlx4_en_get_priv_flags,
++#ifdef HAVE_GET_SET_TUNABLE
+       .get_tunable            = mlx4_en_get_tunable,
+       .set_tunable            = mlx4_en_set_tunable,
++#endif
+ };
++#ifdef HAVE_ETHTOOL_OPS_EXT
++const struct ethtool_ops_ext mlx4_en_ethtool_ops_ext = {
++      .size = sizeof(struct ethtool_ops_ext),
++      .get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
++#ifdef HAVE_GET_SET_RXFH_INDIR
++      .get_rxfh_indir = mlx4_en_get_rxfh_indir,
++      .set_rxfh_indir = mlx4_en_set_rxfh_indir,
++#endif
++#ifdef HAVE_GET_SET_CHANNELS_EXT
++      .get_channels = mlx4_en_get_channels,
++      .set_channels = mlx4_en_set_channels,
++#endif
++#ifdef HAVE_GET_TS_INFO_EXT
++      .get_ts_info = mlx4_en_get_ts_info,
++#endif
++};
++#endif
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+@@ -38,8 +38,12 @@
+ #include <linux/slab.h>
+ #include <linux/hash.h>
+ #include <net/ip.h>
++#ifdef HAVE_SKB_MARK_NAPI_ID
+ #include <net/busy_poll.h>
++#endif
++#ifdef CONFIG_MLX4_EN_VXLAN
+ #include <net/vxlan.h>
++#endif
+ #include <linux/mlx4/driver.h>
+ #include <linux/mlx4/device.h>
+@@ -99,6 +103,7 @@ static int mlx4_en_low_latency_recv(struct napi_struct *napi)
+ #ifdef CONFIG_RFS_ACCEL
++#ifdef HAVE_NDO_RX_FLOW_STEER
+ struct mlx4_en_filter {
+       struct list_head next;
+       struct work_struct work;
+@@ -275,10 +280,17 @@ static inline struct mlx4_en_filter *
+ mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
+                   u8 ip_proto, __be16 src_port, __be16 dst_port)
+ {
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0))
++      struct hlist_node *elem;
++#endif
+       struct mlx4_en_filter *filter;
+       struct mlx4_en_filter *ret = NULL;
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0))
++      hlist_for_each_entry(filter, elem,
++#else
+       hlist_for_each_entry(filter,
++#endif
+                            filter_hash_bucket(priv, src_ip, dst_ip,
+                                               src_port, dst_port),
+                            filter_chain) {
+@@ -407,9 +419,16 @@ static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
+               mlx4_en_filter_free(filter);
+ }
+ #endif
++#endif
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+ static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
+                                  __be16 proto, u16 vid)
++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
++static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
++#else
++static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
++#endif
+ {
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_en_dev *mdev = priv->mdev;
+@@ -431,11 +450,19 @@ static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
+               en_dbg(HW, priv, "failed adding vlan %d\n", vid);
+       mutex_unlock(&mdev->state_lock);
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+       return 0;
++#endif
+ }
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
+                                   __be16 proto, u16 vid)
++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
++static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
++#else
++static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
++#endif
+ {
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_en_dev *mdev = priv->mdev;
+@@ -456,7 +483,9 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
+       }
+       mutex_unlock(&mdev->state_lock);
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+       return 0;
++#endif
+ }
+ static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
+@@ -653,13 +682,21 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
+               mlx4_unregister_mac(dev, priv->port, mac);
+       } else {
+               struct mlx4_mac_entry *entry;
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0))
++              struct hlist_node *n, *tmp;
++#else
+               struct hlist_node *tmp;
++#endif
+               struct hlist_head *bucket;
+               unsigned int i;
+               for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
+                       bucket = &priv->mac_hash[i];
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0))
++                      hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
++#else
+                       hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
++#endif
+                               mac = mlx4_mac_to_u64(entry->mac);
+                               en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
+                                      entry->mac);
+@@ -696,11 +733,19 @@ static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
+               struct hlist_head *bucket;
+               unsigned int mac_hash;
+               struct mlx4_mac_entry *entry;
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0))
++              struct hlist_node *n, *tmp;
++#else
+               struct hlist_node *tmp;
++#endif
+               u64 prev_mac_u64 = mlx4_mac_to_u64(prev_mac);
+               bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0))
++              hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
++#else
+               hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
++#endif
+                       if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
+                               mlx4_en_uc_steer_release(priv, entry->mac,
+                                                        qpn, entry->reg_id);
+@@ -789,17 +834,29 @@ static void mlx4_en_clear_list(struct net_device *dev)
+ static void mlx4_en_cache_mclist(struct net_device *dev)
+ {
+       struct mlx4_en_priv *priv = netdev_priv(dev);
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+       struct netdev_hw_addr *ha;
++#else
++      struct dev_mc_list *mclist;
++#endif
+       struct mlx4_en_mc_list *tmp;
+       mlx4_en_clear_list(dev);
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+       netdev_for_each_mc_addr(ha, dev) {
++#else
++      for (mclist = dev->mc_list; mclist; mclist = mclist->next) {
++#endif
+               tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
+               if (!tmp) {
+                       mlx4_en_clear_list(dev);
+                       return;
+               }
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+               memcpy(tmp->addr, ha->addr, ETH_ALEN);
++#else
++              memcpy(tmp->addr, mclist->dmi_addr, ETH_ALEN);
++#endif
+               list_add_tail(&tmp->list, &priv->mc_list);
+       }
+ }
+@@ -1107,7 +1164,11 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
+ {
+       struct netdev_hw_addr *ha;
+       struct mlx4_mac_entry *entry;
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0))
++      struct hlist_node *n, *tmp;
++#else
+       struct hlist_node *tmp;
++#endif
+       bool found;
+       u64 mac;
+       int err = 0;
+@@ -1123,7 +1184,11 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
+       /* find what to remove */
+       for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
+               bucket = &priv->mac_hash[i];
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0))
++              hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
++#else
+               hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
++#endif
+                       found = false;
+                       netdev_for_each_uc_addr(ha, dev) {
+                               if (ether_addr_equal_64bits(entry->mac,
+@@ -1167,7 +1232,11 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
+       netdev_for_each_uc_addr(ha, dev) {
+               found = false;
+               bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0))
++              hlist_for_each_entry(entry, n, bucket, hlist) {
++#else
+               hlist_for_each_entry(entry, bucket, hlist) {
++#endif
+                       if (ether_addr_equal_64bits(entry->mac, ha->addr)) {
+                               found = true;
+                               break;
+@@ -1249,7 +1318,11 @@ static void mlx4_en_do_set_rx_mode(struct work_struct *work)
+               }
+       }
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+       if (dev->priv_flags & IFF_UNICAST_FLT)
++#else
++      if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
++#endif
+               mlx4_en_do_uc_filter(priv, dev, mdev);
+       /* Promsicuous mode: disable all filters */
+@@ -1944,10 +2017,12 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
+ {
+       int i;
++#ifdef HAVE_NETDEV_RX_CPU_RMAP
+ #ifdef CONFIG_RFS_ACCEL
+       free_irq_cpu_rmap(priv->dev->rx_cpu_rmap);
+       priv->dev->rx_cpu_rmap = NULL;
+ #endif
++#endif
+       for (i = 0; i < priv->tx_ring_num; i++) {
+               if (priv->tx_ring && priv->tx_ring[i])
+@@ -2010,6 +2085,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
+                       goto err;
+       }
++#ifdef HAVE_NETDEV_RX_CPU_RMAP
+ #ifdef CONFIG_RFS_ACCEL
+       if (priv->mdev->dev->caps.comp_pool) {
+               priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool);
+@@ -2017,6 +2093,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
+                       goto err;
+       }
+ #endif
++#endif
+       return 0;
+@@ -2107,7 +2184,11 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
+       return 0;
+ }
++#ifdef HAVE_SIOCGHWTSTAMP
+ static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
++#else
++static int mlx4_en_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
++#endif
+ {
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_en_dev *mdev = priv->mdev;
+@@ -2166,6 +2247,7 @@ static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+                           sizeof(config)) ? -EFAULT : 0;
+ }
++#ifdef HAVE_SIOCGHWTSTAMP
+ static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+ {
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+@@ -2173,19 +2255,25 @@ static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+       return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config,
+                           sizeof(priv->hwtstamp_config)) ? -EFAULT : 0;
+ }
++#endif
+ static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+ {
+       switch (cmd) {
+       case SIOCSHWTSTAMP:
++#ifdef HAVE_SIOCGHWTSTAMP
+               return mlx4_en_hwtstamp_set(dev, ifr);
+       case SIOCGHWTSTAMP:
+               return mlx4_en_hwtstamp_get(dev, ifr);
++#else
++              return mlx4_en_hwtstamp_ioctl(dev, ifr);
++#endif
+       default:
+               return -EOPNOTSUPP;
+       }
+ }
++#ifdef HAVE_NDO_SET_FEATURES
+ static int mlx4_en_set_features(struct net_device *netdev,
+               netdev_features_t features)
+ {
+@@ -2202,6 +2290,7 @@ static int mlx4_en_set_features(struct net_device *netdev,
+       return 0;
+ }
++#endif
+ static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
+ {
+@@ -2223,6 +2312,7 @@ static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
+       return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos);
+ }
++#if defined(HAVE_VF_INFO_SPOOFCHK) || defined(HAVE_NETDEV_OPS_EXT_NDO_SET_VF_SPOOFCHK)
+ static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
+ {
+       struct mlx4_en_priv *en_priv = netdev_priv(dev);
+@@ -2230,6 +2320,7 @@ static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
+       return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting);
+ }
++#endif
+ static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivf)
+ {
+@@ -2239,6 +2330,7 @@ static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_
+       return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf);
+ }
++#if defined(HAVE_NETDEV_OPS_NDO_SET_VF_LINK_STATE) || defined(HAVE_NETDEV_OPS_EXT_NDO_SET_VF_LINK_STATE)
+ static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_state)
+ {
+       struct mlx4_en_priv *en_priv = netdev_priv(dev);
+@@ -2246,7 +2338,9 @@ static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_st
+       return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
+ }
++#endif
++#if defined(HAVE_NETDEV_NDO_GET_PHYS_PORT_ID) || defined(HAVE_NETDEV_EXT_NDO_GET_PHYS_PORT_ID)
+ #define PORT_ID_BYTE_LEN 8
+ static int mlx4_en_get_phys_port_id(struct net_device *dev,
+                                   struct netdev_phys_port_id *ppid)
+@@ -2266,6 +2360,7 @@ static int mlx4_en_get_phys_port_id(struct net_device *dev,
+       }
+       return 0;
+ }
++#endif
+ #ifdef CONFIG_MLX4_EN_VXLAN
+ static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
+@@ -2287,9 +2382,13 @@ out:
+       }
+       /* set offloads */
++#ifdef HAVE_NETDEV_HW_ENC_FEATURES
+       priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+                                     NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
++#endif
++#ifdef HAVE_NETDEV_HW_FEATURES
+       priv->dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
++#endif
+       priv->dev->features    |= NETIF_F_GSO_UDP_TUNNEL;
+ }
+@@ -2299,9 +2398,13 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
+       struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
+                                                vxlan_del_task);
+       /* unset offloads */
++#ifdef HAVE_NETDEV_HW_ENC_FEATURES
+       priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+                                     NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL);
++#endif
++#ifdef HAVE_NETDEV_HW_FEATURES
+       priv->dev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
++#endif
+       priv->dev->features    &= ~NETIF_F_GSO_UDP_TUNNEL;
+       ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
+@@ -2379,15 +2482,25 @@ static const struct net_device_ops mlx4_netdev_ops = {
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = mlx4_en_netpoll,
+ #endif
++#ifdef HAVE_NDO_SET_FEATURES
+       .ndo_set_features       = mlx4_en_set_features,
++#endif
++#ifdef HAVE_NDO_SETUP_TC
+       .ndo_setup_tc           = mlx4_en_setup_tc,
++#endif
++#ifdef HAVE_NDO_RX_FLOW_STEER
+ #ifdef CONFIG_RFS_ACCEL
+       .ndo_rx_flow_steer      = mlx4_en_filter_rfs,
+ #endif
++#endif
+ #ifdef CONFIG_NET_RX_BUSY_POLL
++#ifndef HAVE_NETDEV_EXTENDED_NDO_BUSY_POLL
+       .ndo_busy_poll          = mlx4_en_low_latency_recv,
+ #endif
++#endif
++#ifdef HAVE_NETDEV_NDO_GET_PHYS_PORT_ID
+       .ndo_get_phys_port_id   = mlx4_en_get_phys_port_id,
++#endif
+ #ifdef CONFIG_MLX4_EN_VXLAN
+       .ndo_add_vxlan_port     = mlx4_en_add_vxlan_port,
+       .ndo_del_vxlan_port     = mlx4_en_del_vxlan_port,
+@@ -2395,6 +2508,13 @@ static const struct net_device_ops mlx4_netdev_ops = {
+ #endif
+ };
++#ifdef HAVE_NETDEV_EXT_NDO_GET_PHYS_PORT_ID
++static const struct net_device_ops_ext mlx4_netdev_ops_ext = {
++      .size                   = sizeof(struct net_device_ops_ext),
++      .ndo_get_phys_port_id   = mlx4_en_get_phys_port_id,
++};
++#endif
++
+ static const struct net_device_ops mlx4_netdev_ops_master = {
+       .ndo_open               = mlx4_en_open,
+       .ndo_stop               = mlx4_en_close,
+@@ -2410,18 +2530,30 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
+       .ndo_vlan_rx_kill_vid   = mlx4_en_vlan_rx_kill_vid,
+       .ndo_set_vf_mac         = mlx4_en_set_vf_mac,
+       .ndo_set_vf_vlan        = mlx4_en_set_vf_vlan,
++#ifdef HAVE_NETDEV_OPS_NDO_SET_VF_SPOOFCHK
+       .ndo_set_vf_spoofchk    = mlx4_en_set_vf_spoofchk,
++#endif
++#ifdef HAVE_NETDEV_OPS_NDO_SET_VF_LINK_STATE
+       .ndo_set_vf_link_state  = mlx4_en_set_vf_link_state,
++#endif
+       .ndo_get_vf_config      = mlx4_en_get_vf_config,
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = mlx4_en_netpoll,
+ #endif
++#ifdef HAVE_NDO_SET_FEATURES
+       .ndo_set_features       = mlx4_en_set_features,
++#endif
++#ifdef HAVE_NDO_SETUP_TC
+       .ndo_setup_tc           = mlx4_en_setup_tc,
++#endif
++#ifdef HAVE_NDO_RX_FLOW_STEER
+ #ifdef CONFIG_RFS_ACCEL
+       .ndo_rx_flow_steer      = mlx4_en_filter_rfs,
+ #endif
++#endif
++#ifdef HAVE_NETDEV_NDO_GET_PHYS_PORT_ID
+       .ndo_get_phys_port_id   = mlx4_en_get_phys_port_id,
++#endif
+ #ifdef CONFIG_MLX4_EN_VXLAN
+       .ndo_add_vxlan_port     = mlx4_en_add_vxlan_port,
+       .ndo_del_vxlan_port     = mlx4_en_del_vxlan_port,
+@@ -2429,6 +2561,21 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
+ #endif
+ };
++#ifdef HAVE_NET_DEVICE_OPS_EXT
++static const struct net_device_ops_ext mlx4_netdev_ops_master_ext = {
++      .size                   = sizeof(struct net_device_ops_ext),
++#ifdef HAVE_NETDEV_OPS_EXT_NDO_SET_VF_SPOOFCHK
++      .ndo_set_vf_spoofchk    = mlx4_en_set_vf_spoofchk,
++#endif
++#if defined(HAVE_NETDEV_OPS_EXT_NDO_SET_VF_LINK_STATE)
++      .ndo_set_vf_link_state  = mlx4_en_set_vf_link_state,
++#endif
++#ifdef HAVE_NETDEV_EXT_NDO_GET_PHYS_PORT_ID
++      .ndo_get_phys_port_id   = mlx4_en_get_phys_port_id,
++#endif
++};
++#endif
++
+ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+                       struct mlx4_en_port_profile *prof)
+ {
+@@ -2447,7 +2594,9 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+       netif_set_real_num_rx_queues(dev, prof->rx_ring_num);
+       SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
++#ifdef HAVE_NET_DEVICE_DEV_PORT
+       dev->dev_port = port - 1;
++#endif
+       /*
+        * Initialize driver private data
+@@ -2560,19 +2709,41 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+       /*
+        * Initialize netdev entry points
+        */
++#ifdef HAVE_NET_DEVICE_OPS_EXT
++      if (mlx4_is_master(priv->mdev->dev)) {
++              set_netdev_ops_ext(dev, &mlx4_netdev_ops_master_ext);
++              dev->netdev_ops = &mlx4_netdev_ops_master;
++      } else {
++              set_netdev_ops_ext(dev, &mlx4_netdev_ops_ext);
++              dev->netdev_ops = &mlx4_netdev_ops;
++      }
++#else
+       if (mlx4_is_master(priv->mdev->dev))
+               dev->netdev_ops = &mlx4_netdev_ops_master;
+       else
+               dev->netdev_ops = &mlx4_netdev_ops;
++#endif
+       dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
+       netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
+       netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
++#ifdef HAVE_ETHTOOL_OPS_EXT
++      SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
++      set_ethtool_ops_ext(dev, &mlx4_en_ethtool_ops_ext);
++#else
+       dev->ethtool_ops = &mlx4_en_ethtool_ops;
++#endif
++
++#ifdef CONFIG_NET_RX_BUSY_POLL
++#ifdef HAVE_NETDEV_EXTENDED_NDO_BUSY_POLL
++      netdev_extended(dev)->ndo_busy_poll = mlx4_en_low_latency_recv;
++#endif
++#endif
+       /*
+        * Set driver features
+        */
++#ifdef HAVE_NETDEV_HW_FEATURES
+       dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+       if (mdev->LSO_support)
+               dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
+@@ -2588,9 +2759,37 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+       if (mdev->dev->caps.steering_mode ==
+           MLX4_STEERING_MODE_DEVICE_MANAGED)
+               dev->hw_features |= NETIF_F_NTUPLE;
++#else
++      dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
++
++      if (mdev->LSO_support)
++              dev->features |= NETIF_F_TSO | NETIF_F_TSO6;
++
++      dev->vlan_features = dev->features;
++
++      dev->features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
++#ifdef HAVE_SET_NETDEV_HW_FEATURES
++      set_netdev_hw_features(dev, dev->features);
++#endif
++      dev->features = dev->features | NETIF_F_HIGHDMA |
++                      NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
++                      NETIF_F_HW_VLAN_CTAG_FILTER;
++      dev->features |= NETIF_F_LOOPBACK;
++
++      if (mdev->dev->caps.steering_mode ==
++          MLX4_STEERING_MODE_DEVICE_MANAGED)
++#ifdef HAVE_NETDEV_EXTENDED_HW_FEATURES
++              netdev_extended(dev)->hw_features |= NETIF_F_NTUPLE;
++#else
++              dev->features |= NETIF_F_NTUPLE;
++#endif
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+       if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
+               dev->priv_flags |= IFF_UNICAST_FLT;
++#endif
+       mdev->pndev[port] = dev;
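
For context on the en_netdev.c hunks above: RHEL 6.x kernels keep newer ndo_* callbacks (ndo_get_phys_port_id, ndo_set_vf_spoofchk, ndo_set_vf_link_state) in a separate struct net_device_ops_ext attached with set_netdev_ops_ext(), instead of as members of struct net_device_ops. A minimal sketch of that registration pattern, with hypothetical foo_* callbacks standing in for the mlx4_en ones and the HAVE_* guards assumed to come from the compat layer's generated configuration:

#include <linux/netdevice.h>

static int foo_open(struct net_device *dev);
static int foo_stop(struct net_device *dev);
static int foo_get_phys_port_id(struct net_device *dev,
				struct netdev_phys_port_id *ppid);

static const struct net_device_ops foo_netdev_ops = {
	.ndo_open = foo_open,
	.ndo_stop = foo_stop,
#ifdef HAVE_NETDEV_NDO_GET_PHYS_PORT_ID
	/* Mainline kernels carry the callback directly in net_device_ops. */
	.ndo_get_phys_port_id = foo_get_phys_port_id,
#endif
};

#ifdef HAVE_NET_DEVICE_OPS_EXT
/* RHEL keeps late additions in a separate "ext" struct; .size tells the
 * kernel how much of it this driver actually populated. */
static const struct net_device_ops_ext foo_netdev_ops_ext = {
	.size                 = sizeof(struct net_device_ops_ext),
	.ndo_get_phys_port_id = foo_get_phys_port_id,
};
#endif

static void foo_setup_ops(struct net_device *dev)
{
	dev->netdev_ops = &foo_netdev_ops;
#ifdef HAVE_NET_DEVICE_OPS_EXT
	set_netdev_ops_ext(dev, &foo_netdev_ops_ext);
#endif
}
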
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+@@ -30,8 +30,9 @@
+  * SOFTWARE.
+  *
+  */
+-
++#ifdef HAVE_SKB_MARK_NAPI_ID
+ #include <net/busy_poll.h>
++#endif
+ #include <linux/mlx4/cq.h>
+ #include <linux/slab.h>
+ #include <linux/mlx4/qp.h>
+@@ -498,8 +499,10 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
+       kfree(ring);
+       *pring = NULL;
+ #ifdef CONFIG_RFS_ACCEL
++#ifdef HAVE_NDO_RX_FLOW_STEER
+       mlx4_en_cleanup_filters(priv);
+ #endif
++#endif
+ }
+ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
+@@ -588,7 +591,9 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
+               skb_copy_to_linear_data(skb, va, length);
+               skb->tail += length;
+       } else {
++#ifdef HAVE_ETH_GET_HEADLEN
+               unsigned int pull_len;
++#endif
+               /* Move relevant fragments to skb */
+               used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, frags,
+@@ -599,6 +604,7 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
+               }
+               skb_shinfo(skb)->nr_frags = used_frags;
++#ifdef HAVE_ETH_GET_HEADLEN
+               pull_len = eth_get_headlen(va, SMALL_PACKET_SIZE);
+               /* Copy headers into the skb linear buffer */
+               memcpy(skb->data, va, pull_len);
+@@ -610,6 +616,17 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
+               /* Adjust size of first fragment */
+               skb_frag_size_sub(&skb_shinfo(skb)->frags[0], pull_len);
+               skb->data_len = length - pull_len;
++#else
++              memcpy(skb->data, va, HEADER_COPY_SIZE);
++              skb->tail += HEADER_COPY_SIZE;
++
++              /* Skip headers in first fragment */
++              skb_shinfo(skb)->frags[0].page_offset += HEADER_COPY_SIZE;
++
++              /* Adjust size of first fragment */
++              skb_frag_size_sub(&skb_shinfo(skb)->frags[0], HEADER_COPY_SIZE);
++              skb->data_len = length - HEADER_COPY_SIZE;
++#endif
+       }
+       return skb;
+ }
+@@ -659,7 +676,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
+       int ip_summed;
+       int factor = priv->cqe_factor;
+       u64 timestamp;
++#ifdef HAVE_NETDEV_HW_ENC_FEATURES
+       bool l2_tunnel;
++#endif
+       if (!priv->port_up)
+               return 0;
+@@ -715,6 +734,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
+                       if (is_multicast_ether_addr(ethh->h_dest)) {
+                               struct mlx4_mac_entry *entry;
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0))
++                              struct hlist_node *n;
++#endif
+                               struct hlist_head *bucket;
+                               unsigned int mac_hash;
+@@ -722,7 +744,11 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
+                               mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX];
+                               bucket = &priv->mac_hash[mac_hash];
+                               rcu_read_lock();
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0))
++                              hlist_for_each_entry_rcu(entry, n, bucket, hlist) {
++#else
+                               hlist_for_each_entry_rcu(entry, bucket, hlist) {
++#endif
+                                       if (ether_addr_equal_64bits(entry->mac,
+                                                                   ethh->h_source)) {
+                                               rcu_read_unlock();
+@@ -740,8 +766,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
+               length -= ring->fcs_del;
+               ring->bytes += length;
+               ring->packets++;
++#ifdef HAVE_NETDEV_HW_ENC_FEATURES
+               l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
+                       (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
++#endif
+               if (likely(dev->features & NETIF_F_RXCSUM)) {
+                       if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
+@@ -754,8 +782,12 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
+                                * - not an IP fragment
+                                * - no LLS polling in progress
+                                */
++#ifdef HAVE_SKB_MARK_NAPI_ID
+                               if (!mlx4_en_cq_busy_polling(cq) &&
+                                   (dev->features & NETIF_F_GRO)) {
++#else
++                                      if (dev->features & NETIF_F_GRO) {
++#endif
+                                       struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
+                                       if (!gro_skb)
+                                               goto next;
+@@ -771,23 +803,39 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
+                                       gro_skb->data_len = length;
+                                       gro_skb->ip_summed = CHECKSUM_UNNECESSARY;
++#ifdef HAVE_NETDEV_HW_ENC_FEATURES
+                                       if (l2_tunnel)
++#ifdef HAVE_SK_BUFF_CSUM_LEVEL
+                                               gro_skb->csum_level = 1;
++#else
++                                              gro_skb->encapsulation = 1;
++#endif
++#endif
+                                       if ((cqe->vlan_my_qpn &
+                                           cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) &&
+                                           (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
+                                               u16 vid = be16_to_cpu(cqe->sl_vid);
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0))
++                                              __vlan_hwaccel_put_tag(gro_skb, vid);
++#else
+                                               __vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid);
++#endif
+                                       }
+                                       if (dev->features & NETIF_F_RXHASH)
++#ifdef HAVE_SKB_SET_HASH
+                                               skb_set_hash(gro_skb,
+                                                            be32_to_cpu(cqe->immed_rss_invalid),
+                                                            PKT_HASH_TYPE_L3);
++#else
++                                      gro_skb->rxhash = be32_to_cpu(cqe->immed_rss_invalid);
++#endif
+                                       skb_record_rx_queue(gro_skb, cq->ring);
++#ifdef HAVE_SKB_MARK_NAPI_ID
+                                       skb_mark_napi_id(gro_skb, &cq->napi);
++#endif
+                                       if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
+                                               timestamp = mlx4_en_get_cqe_ts(cqe);
+@@ -826,18 +874,33 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
+               skb->protocol = eth_type_trans(skb, dev);
+               skb_record_rx_queue(skb, cq->ring);
++#ifdef HAVE_NETDEV_HW_ENC_FEATURES
++#ifdef HAVE_SK_BUFF_CSUM_LEVEL
+               if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY)
+                       skb->csum_level = 1;
++#else
++              if (l2_tunnel)
++                      skb->encapsulation = 1;
++#endif
++#endif
++#ifdef HAVE_SKB_SET_HASH
+               if (dev->features & NETIF_F_RXHASH)
+                       skb_set_hash(skb,
+                                    be32_to_cpu(cqe->immed_rss_invalid),
+                                    PKT_HASH_TYPE_L3);
++#else
++                      skb->rxhash = be32_to_cpu(cqe->immed_rss_invalid);
++#endif
+               if ((be32_to_cpu(cqe->vlan_my_qpn) &
+                   MLX4_CQE_VLAN_PRESENT_MASK) &&
+                   (dev->features & NETIF_F_HW_VLAN_CTAG_RX))
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0))
++                      __vlan_hwaccel_put_tag(skb, be16_to_cpu(cqe->sl_vid));
++#else
+                       __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->sl_vid));
++#endif
+               if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
+                       timestamp = mlx4_en_get_cqe_ts(cqe);
+@@ -845,7 +908,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
+                                              timestamp);
+               }
++#ifdef HAVE_SKB_MARK_NAPI_ID
+               skb_mark_napi_id(skb, &cq->napi);
++#endif
+               if (!mlx4_en_cq_busy_polling(cq))
+                       napi_gro_receive(&cq->napi, skb);
+@@ -893,20 +958,30 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       int done;
++#ifdef HAVE_SKB_MARK_NAPI_ID
+       if (!mlx4_en_cq_lock_napi(cq))
+               return budget;
++#endif
+       done = mlx4_en_process_rx_cq(dev, cq, budget);
++#ifdef HAVE_SKB_MARK_NAPI_ID
+       mlx4_en_cq_unlock_napi(cq);
++#endif
+       /* If we used up all the quota - we're probably not done yet... */
++#ifndef HAVE_IRQ_DESC_GET_IRQ_DATA
++      cq->tot_rx += done;
++#endif
+       if (done == budget) {
++#ifdef HAVE_IRQ_DESC_GET_IRQ_DATA
+               int cpu_curr;
+               const struct cpumask *aff;
++#endif
+               INC_PERF_COUNTER(priv->pstats.napi_quota);
++#ifdef HAVE_IRQ_DESC_GET_IRQ_DATA
+               cpu_curr = smp_processor_id();
+               aff = irq_desc_get_irq_data(cq->irq_desc)->affinity;
+@@ -919,6 +994,13 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
+                       mlx4_en_arm_cq(priv, cq);
+                       return 0;
+               }
++#else
++              if (cq->tot_rx < MLX4_EN_MIN_RX_ARM)
++                      return budget;
++
++              cq->tot_rx = 0;
++              return 0;
++#endif
+       } else {
+               /* Done for now */
+               napi_complete(napi);
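
The en_rx.c poll changes fall back, on kernels without irq_desc_get_irq_data(), to a plain completion counter: the ring keeps being polled until roughly MLX4_EN_MIN_RX_ARM completions have accumulated, and only a quiet ring re-arms the CQ interrupt. A stripped-down sketch of that fallback poll loop, with foo_* names and struct foo_cq as placeholders for the mlx4_en equivalents:

#include <linux/netdevice.h>

#define FOO_MIN_RX_ARM 2097152	/* completions to absorb before backing off */

struct foo_cq {
	struct napi_struct napi;
	u32 tot_rx;
};

static int foo_process_rx_cq(struct foo_cq *cq, int budget); /* drains up to budget */
static void foo_arm_cq(struct foo_cq *cq);                   /* re-enables the CQ IRQ */

static int foo_poll_rx(struct napi_struct *napi, int budget)
{
	struct foo_cq *cq = container_of(napi, struct foo_cq, napi);
	int done = foo_process_rx_cq(cq, budget);

	cq->tot_rx += done;
	if (done == budget) {
		/* Quota used up: stay in polling mode, but cap how long by
		 * counting completions instead of checking IRQ affinity. */
		if (cq->tot_rx < FOO_MIN_RX_ARM)
			return budget;
		cq->tot_rx = 0;
		return 0;
	}

	/* Ring went quiet: leave polling mode and re-arm the interrupt. */
	napi_complete(napi);
	foo_arm_cq(cq);
	return done;
}
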
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+@@ -151,7 +151,11 @@ err_bounce:
+       kfree(ring->bounce_buf);
+       ring->bounce_buf = NULL;
+ err_info:
++#ifdef HAVE_KVFREE
+       kvfree(ring->tx_info);
++#else
++      vfree(ring->tx_info);
++#endif
+       ring->tx_info = NULL;
+ err_ring:
+       kfree(ring);
+@@ -174,7 +178,11 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
+       mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
+       kfree(ring->bounce_buf);
+       ring->bounce_buf = NULL;
++#ifdef HAVE_KVFREE
+       kvfree(ring->tx_info);
++#else
++      vfree(ring->tx_info);
++#endif
+       ring->tx_info = NULL;
+       kfree(ring);
+       *pring = NULL;
+@@ -328,7 +336,11 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
+                       }
+               }
+       }
++#ifdef HAVE_DEV_CONSUME_SKB_ANY
+       dev_consume_skb_any(skb);
++#else
++      dev_kfree_skb_any(skb);
++#endif
+       return tx_info->nr_txbb;
+ }
+@@ -392,7 +404,13 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
+       if (!priv->port_up)
+               return true;
++#ifdef HAVE_NETDEV_TXQ_BQL_PREFETCHW
+       netdev_txq_bql_complete_prefetchw(ring->tx_queue);
++#else
++#ifdef CONFIG_BQL
++      prefetchw(&ring->tx_queue->dql.limit);
++#endif
++#endif
+       index = cons_index & size_mask;
+       cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
+@@ -582,9 +600,11 @@ static int get_real_size(const struct sk_buff *skb,
+       if (shinfo->gso_size) {
+               *inline_ok = false;
++#ifdef HAVE_SKB_INNER_TRANSPORT_HEADER
+               if (skb->encapsulation)
+                       *lso_header_size = (skb_inner_transport_header(skb) - skb->data) + inner_tcp_hdrlen(skb);
+               else
++#endif
+                       *lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
+               real_size = CTRL_SIZE + shinfo->nr_frags * DS_SIZE +
+                       ALIGN(*lso_header_size + 4, DS_SIZE);
+@@ -664,20 +684,36 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc,
+       }
+ }
++#if defined(NDO_SELECT_QUEUE_HAS_ACCEL_PRIV) || defined(HAVE_SELECT_QUEUE_FALLBACK_T)
+ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
++#ifdef HAVE_SELECT_QUEUE_FALLBACK_T
+                        void *accel_priv, select_queue_fallback_t fallback)
++#else
++                       void *accel_priv)
++#endif
++#else /* NDO_SELECT_QUEUE_HAS_ACCEL_PRIV || HAVE_SELECT_QUEUE_FALLBACK_T */
++u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
++#endif
+ {
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       u16 rings_p_up = priv->num_tx_rings_p_up;
+       u8 up = 0;
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
+       if (dev->num_tc)
++#else
++      if (netdev_get_num_tc(dev))
++#endif
+               return skb_tx_hash(dev, skb);
+       if (vlan_tx_tag_present(skb))
+               up = vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT;
++#ifdef HAVE_SELECT_QUEUE_FALLBACK_T
+       return fallback(dev, skb) % rings_p_up + up * rings_p_up;
++#else
++      return __netdev_pick_tx(dev, skb) % rings_p_up + up * rings_p_up;
++#endif
+ }
+ static void mlx4_bf_copy(void __iomem *dst, const void *src,
+@@ -737,8 +773,13 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+       if (vlan_tx_tag_present(skb))
+               vlan_tag = vlan_tx_tag_get(skb);
+-
++#ifdef HAVE_NETDEV_TXQ_BQL_PREFETCHW
+       netdev_txq_bql_enqueue_prefetchw(ring->tx_queue);
++#else
++#ifdef CONFIG_BQL
++      prefetchw(&ring->tx_queue->dql);
++#endif
++#endif
+       /* Track current inflight packets for performance analysis */
+       AVG_PERF_COUNTER(priv->pstats.inflight_avg,
+@@ -827,8 +868,13 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+        */
+       tx_info->ts_requested = 0;
+       if (unlikely(ring->hwtstamp_tx_type == HWTSTAMP_TX_ON &&
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)
+                    shinfo->tx_flags & SKBTX_HW_TSTAMP)) {
+               shinfo->tx_flags |= SKBTX_IN_PROGRESS;
++#else
++          shinfo->tx_flags.flags & SKBTX_HW_TSTAMP)) {
++              shinfo->tx_flags.flags |= SKBTX_IN_PROGRESS;
++#endif
+               tx_info->ts_requested = 1;
+       }
+@@ -894,6 +940,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+               build_inline_wqe(tx_desc, skb, shinfo, real_size, &vlan_tag,
+                                tx_ind, fragptr);
++#ifdef HAVE_SKB_INNER_NETWORK_HEADER
+       if (skb->encapsulation) {
+               struct iphdr *ipv4 = (struct iphdr *)skb_inner_network_header(skb);
+               if (ipv4->protocol == IPPROTO_TCP || ipv4->protocol == IPPROTO_UDP)
+@@ -901,6 +948,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+               else
+                       op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP);
+       }
++#endif
+       ring->prod += nr_txbb;
+@@ -917,7 +965,11 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+               netif_tx_stop_queue(ring->tx_queue);
+               ring->queue_stopped++;
+       }
++#ifdef HAVE_SK_BUFF_XMIT_MORE
+       send_doorbell = !skb->xmit_more || netif_xmit_stopped(ring->tx_queue);
++#else
++      send_doorbell = true;
++#endif
+       real_size = (real_size / 16) & 0x3f;
+@@ -956,8 +1008,10 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+                       wmb();
+                       iowrite32(ring->doorbell_qpn,
+                                 ring->bf.uar->map + MLX4_SEND_DOORBELL);
++#ifdef HAVE_SK_BUFF_XMIT_MORE
+               } else {
+                       ring->xmit_more++;
++#endif
+               }
+       }
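
The en_tx.c hunks also have to cover three historical prototypes of ndo_select_queue: (dev, skb), (dev, skb, accel_priv) and (dev, skb, accel_priv, fallback). The same guard structure in isolation for a hypothetical foo driver, assuming the compat headers provide __netdev_pick_tx() where the kernel itself does not:

#include <linux/netdevice.h>

#if defined(NDO_SELECT_QUEUE_HAS_ACCEL_PRIV) || defined(HAVE_SELECT_QUEUE_FALLBACK_T)
static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
#ifdef HAVE_SELECT_QUEUE_FALLBACK_T
			    void *accel_priv, select_queue_fallback_t fallback)
#else
			    void *accel_priv)
#endif
#else
static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb)
#endif
{
#ifdef HAVE_SELECT_QUEUE_FALLBACK_T
	/* Newest variant: the core hands us its default queue picker. */
	return fallback(dev, skb);
#else
	/* Older kernels: use the generic tx queue picker directly. */
	return __netdev_pick_tx(dev, skb);
#endif
}
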
+diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
++++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
+@@ -461,7 +461,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
+       int i;
+       enum slave_port_gen_event gen_event;
+       unsigned long flags;
++#ifdef HAVE_LINKSTATE
+       struct mlx4_vport_state *s_info;
++#endif
+       int eqe_size = dev->caps.eqe_size;
+       while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor, eqe_size))) {
+@@ -568,6 +570,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
+                                                       continue;
+                                               mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n",
+                                                        __func__, i, port);
++#ifdef HAVE_LINKSTATE
+                                               s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
+                                               if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
+                                                       eqe->event.port_change.port =
+@@ -576,6 +579,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
+                                                               | (mlx4_phys_to_slave_port(dev, i, port) << 28));
+                                                       mlx4_slave_event(dev, i, eqe);
+                                               }
++#else
++                                              mlx4_slave_event(dev, i, eqe);
++#endif
+                                       } else {  /* IB port */
+                                               set_and_calc_slave_port_state(dev, i, port,
+                                                                             MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
+@@ -601,6 +607,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
+                                                       continue;
+                                               if (i == mlx4_master_func_num(dev))
+                                                       continue;
++#ifdef HAVE_LINKSTATE
+                                               s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
+                                               if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
+                                                       eqe->event.port_change.port =
+@@ -609,6 +616,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
+                                                               | (mlx4_phys_to_slave_port(dev, i, port) << 28));
+                                                       mlx4_slave_event(dev, i, eqe);
+                                               }
++#else
++                                              mlx4_slave_event(dev, i, eqe);
++#endif
+                                       }
+                               else /* IB port */
+                                       /* port-up event will be sent to a slave when the
+diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx4/main.c
++++ b/drivers/net/ethernet/mellanox/mlx4/main.c
+@@ -2057,6 +2057,9 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
+       int nreq = min_t(int, dev->caps.num_ports *
+                        min_t(int, num_online_cpus() + 1,
+                              MAX_MSIX_P_PORT) + MSIX_LEGACY_SZ, MAX_MSIX);
++#ifndef HAVE_PCI_ENABLE_MSIX_RANGE
++      int err;
++#endif
+       int i;
+       if (msi_x) {
+@@ -2070,6 +2073,8 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
+               for (i = 0; i < nreq; ++i)
+                       entries[i].entry = i;
++
++#ifdef HAVE_PCI_ENABLE_MSIX_RANGE
+               nreq = pci_enable_msix_range(dev->pdev, entries, 2, nreq);
+               if (nreq < 0) {
+@@ -2077,6 +2082,25 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
+                       goto no_msi;
+               } else if (nreq < MSIX_LEGACY_SZ +
+                          dev->caps.num_ports * MIN_MSIX_P_PORT) {
++#else
++      retry:
++              err = pci_enable_msix(dev->pdev, entries, nreq);
++              if (err) {
++                      /* Try again if at least 2 vectors are available */
++                      if (err > 1) {
++                              mlx4_info(dev, "Requested %d vectors, "
++                                              "but only %d MSI-X vectors available, "
++                                              "trying again\n", nreq, err);
++                              nreq = err;
++                              goto retry;
++                      }
++                      kfree(entries);
++                      goto no_msi;
++              }
++
++              if (nreq <
++                              MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) {
++#endif
+                       /*Working in legacy mode , all EQ's shared*/
+                       dev->caps.comp_pool           = 0;
+                       dev->caps.num_comp_vectors = nreq - 1;
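
The main.c hunk swaps pci_enable_msix_range() for the older pci_enable_msix() when the range helper is missing; the old call returns 0 on success, a negative errno on failure, and a positive count meaning "only this many vectors are available, retry with that". A minimal sketch of that retry idiom on its own (foo_enable_msix() is a placeholder wrapper):

#include <linux/errno.h>
#include <linux/pci.h>

/* Returns the number of MSI-X vectors enabled, or a negative errno. */
static int foo_enable_msix(struct pci_dev *pdev,
			   struct msix_entry *entries, int nreq)
{
#ifdef HAVE_PCI_ENABLE_MSIX_RANGE
	/* Newer kernels retry internally; accept anything from 2 to nreq. */
	return pci_enable_msix_range(pdev, entries, 2, nreq);
#else
	int err;

retry:
	err = pci_enable_msix(pdev, entries, nreq);
	if (err == 0)
		return nreq;		/* got everything we asked for */
	if (err > 1) {
		nreq = err;		/* only 'err' vectors available */
		goto retry;
	}
	return err < 0 ? err : -ENOSPC;	/* hard error, or a single vector */
#endif
}

mlx4_enable_msi_x() in the hunk above applies the same shape inline, then drops to legacy interrupt mode when fewer than MSIX_LEGACY_SZ + ports * MIN_MSIX_P_PORT vectors come back.
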
+diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
++++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+@@ -111,6 +111,10 @@ enum {
+       FRAG_SZ3 = MLX4_EN_ALLOC_SIZE
+ };
+ #define MLX4_EN_MAX_RX_FRAGS  4
++#ifndef HAVE_IRQ_DESC_GET_IRQ_DATA
++/* Minimum packet number till arming the CQ */
++#define MLX4_EN_MIN_RX_ARM    2097152
++#endif
+ /* Maximum ring sizes */
+ #define MLX4_EN_MAX_TX_SIZE   8192
+@@ -344,6 +348,9 @@ struct mlx4_en_cq {
+       u16 moder_cnt;
+       struct mlx4_cqe *buf;
+ #define MLX4_EN_OPCODE_ERROR  0x1e
++#ifndef HAVE_IRQ_DESC_GET_IRQ_DATA
++      u32 tot_rx;
++#endif
+ #ifdef CONFIG_NET_RX_BUSY_POLL
+       unsigned int state;
+@@ -763,8 +770,16 @@ int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
+ int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
+ void mlx4_en_tx_irq(struct mlx4_cq *mcq);
++#if defined(NDO_SELECT_QUEUE_HAS_ACCEL_PRIV) || defined(HAVE_SELECT_QUEUE_FALLBACK_T)
+ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
++#ifdef HAVE_SELECT_QUEUE_FALLBACK_T
+                        void *accel_priv, select_queue_fallback_t fallback);
++#else
++                       void *accel_priv);
++#endif
++#else /* NDO_SELECT_QUEUE_HAS_ACCEL_PRIV */
++u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
++#endif
+ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
+ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
+@@ -822,8 +837,10 @@ extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops;
+ int mlx4_en_setup_tc(struct net_device *dev, u8 up);
+ #ifdef CONFIG_RFS_ACCEL
++#ifdef HAVE_NDO_RX_FLOW_STEER
+ void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv);
+ #endif
++#endif
+ #define MLX4_EN_NUM_SELF_TEST 5
+ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf);
+@@ -845,7 +862,9 @@ int mlx4_en_timestamp_config(struct net_device *dev,
+ /* Globals
+  */
+ extern const struct ethtool_ops mlx4_en_ethtool_ops;
+-
++#ifdef HAVE_ETHTOOL_OPS_EXT
++extern const struct ethtool_ops_ext mlx4_en_ethtool_ops_ext;
++#endif
+ /*
+diff --git a/include/linux/mlx4/cq.h b/include/linux/mlx4/cq.h
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/include/linux/mlx4/cq.h
++++ b/include/linux/mlx4/cq.h
+@@ -34,7 +34,12 @@
+ #define MLX4_CQ_H
+ #include <linux/types.h>
++#ifdef HAVE_UAPI_LINUX_IF_ETHER_H
+ #include <uapi/linux/if_ether.h>
++#else
++#include <linux/if_ether.h>
++#endif
++
+ #include <linux/mlx4/device.h>
+ #include <linux/mlx4/doorbell.h>
index 1c3b5e39480bd35f2281f5b41558dd8ac97a7342..fd522bfb26087a15ad01fac245ba230ae04eb15e 100644 (file)
@@ -12,14 +12,16 @@ Signed-off-by: Vladimir Sokolovsky <vlad@mellanox.com>
  drivers/infiniband/core/cma.c         |   45 ++++++++++++++
  drivers/infiniband/core/fmr_pool.c    |    7 ++
  drivers/infiniband/core/netlink.c     |   20 ++++++
- drivers/infiniband/core/sa_query.c    |   15 +++++
+ drivers/infiniband/core/sa_query.c    |   19 ++++++
  drivers/infiniband/core/ucm.c         |   38 ++++++++++++
  drivers/infiniband/core/ucma.c        |   76 ++++++++++++++++++++++++
  drivers/infiniband/core/umem.c        |   16 +++++
  drivers/infiniband/core/user_mad.c    |   16 +++++
  drivers/infiniband/core/uverbs_cmd.c  |   51 ++++++++++++++++
  drivers/infiniband/core/uverbs_main.c |   40 +++++++++++++
- 12 files changed, 452 insertions(+), 0 deletions(-)
+ include/rdma/ib_pack.h                |    4 +
+ include/rdma/ib_verbs.h               |    4 +
+ 14 files changed, 464 insertions(+), 0 deletions(-)
 
 diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
 index xxxxxxx..xxxxxxx xxxxxx
@@ -449,7 +451,19 @@ diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_que
 index xxxxxxx..xxxxxxx xxxxxx
 --- a/drivers/infiniband/core/sa_query.c
 +++ b/drivers/infiniband/core/sa_query.c
-@@ -618,10 +618,13 @@ static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
+@@ -42,7 +42,11 @@
+ #include <linux/kref.h>
+ #include <linux/idr.h>
+ #include <linux/workqueue.h>
++#ifdef HAVE_UAPI_LINUX_IF_ETHER_H
+ #include <uapi/linux/if_ether.h>
++#else
++#include <linux/if_ether.h>
++#endif
+ #include <rdma/ib_pack.h>
+ #include <rdma/ib_cache.h>
+ #include "sa.h"
+@@ -618,10 +622,13 @@ static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
  
  static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
  {
@@ -463,7 +477,7 @@ index xxxxxxx..xxxxxxx xxxxxx
        if (preload)
                idr_preload(gfp_mask);
        spin_lock_irqsave(&idr_lock, flags);
-@@ -633,6 +636,18 @@ static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
+@@ -633,6 +640,18 @@ static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
                idr_preload_end();
        if (id < 0)
                return id;
@@ -1064,3 +1078,35 @@ index xxxxxxx..xxxxxxx xxxxxx
        if (ret) {
                printk(KERN_ERR "user_verbs: couldn't create abi_version attribute\n");
                goto out_class;
+diff --git a/include/rdma/ib_pack.h b/include/rdma/ib_pack.h
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/include/rdma/ib_pack.h
++++ b/include/rdma/ib_pack.h
+@@ -34,7 +34,11 @@
+ #define IB_PACK_H
+ #include <rdma/ib_verbs.h>
++#ifdef HAVE_UAPI_LINUX_IF_ETHER_H
+ #include <uapi/linux/if_ether.h>
++#else
++#include <linux/if_ether.h>
++#endif
+ enum {
+       IB_LRH_BYTES  = 8,
+diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/include/rdma/ib_verbs.h
++++ b/include/rdma/ib_verbs.h
+@@ -48,7 +48,11 @@
+ #include <linux/rwsem.h>
+ #include <linux/scatterlist.h>
+ #include <linux/workqueue.h>
++#ifdef HAVE_UAPI_LINUX_IF_ETHER_H
+ #include <uapi/linux/if_ether.h>
++#else
++#include <linux/if_ether.h>
++#endif
+ #include <linux/atomic.h>
+ #include <asm/uaccess.h>