--- /dev/null
+From: Vladimir Sokolovsky <vlad@mellanox.com>
+Subject: [PATCH] BACKPORT: mlx5
+
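+Add compatibility ifdefs so the mlx5 core and mlx5_ib drivers build
+against older kernels. Newer kernel APIs are guarded by HAVE_* feature
+macros (e.g. HAVE_KTIME_GET_NS, HAVE_NDO_SET_VF_GUID); these macros are
+assumed to be provided by the backport's compat/autoconf step and are
+not defined in this patch. Where an older API must be used instead, a
+fallback is provided: the legacy ethtool get_settings/set_settings path
+with a u32-based PTYS-to-ethtool link-mode table, timespec-based
+command timestamps via ktime_get_ts(), page->_count accounting in
+place of page_ref_add()/page_ref_sub(), and pre-udp_tunnel VXLAN port
+notifiers. The eswitch/representor code is additionally made dependent
+on CONFIG_NET_SWITCHDEV.
+
+A typical guard, taken from the cmd.c hunk below:
+
+    #ifdef HAVE_KTIME_GET_NS
+            ent->ts1 = ktime_get_ns();
+    #else
+            ktime_get_ts(&ent->ts1);
+    #endif
+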
+Signed-off-by: Vladimir Sokolovsky <vlad@mellanox.com>
+---
+ drivers/infiniband/hw/mlx5/gsi.c | 37 +++
+ drivers/infiniband/hw/mlx5/ib_virt.c | 2 +
+ drivers/infiniband/hw/mlx5/main.c | 10 +
+ drivers/infiniband/hw/mlx5/mlx5_ib.h | 2 +
+ drivers/net/ethernet/mellanox/mlx5/core/Makefile | 7 +-
+ drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 32 ++
+ drivers/net/ethernet/mellanox/mlx5/core/en.h | 18 ++
+ .../net/ethernet/mellanox/mlx5/core/en_ethtool.c | 354 +++++++++++++++++++++
+ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 145 ++++++++-
+ drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 25 ++
+ drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 4 +
+ drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 6 +
+ drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 47 +++
+ drivers/net/ethernet/mellanox/mlx5/core/eq.c | 6 +
+ drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 4 +
+ .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 2 +
+ drivers/net/ethernet/mellanox/mlx5/core/main.c | 40 +++
+ drivers/net/ethernet/mellanox/mlx5/core/port.c | 22 ++
+ drivers/net/ethernet/mellanox/mlx5/core/sriov.c | 8 +
+ include/linux/mlx5/driver.h | 5 +
+ include/linux/mlx5/port.h | 5 +
+ 21 files changed, 778 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/infiniband/hw/mlx5/gsi.c b/drivers/infiniband/hw/mlx5/gsi.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/infiniband/hw/mlx5/gsi.c
++++ b/drivers/infiniband/hw/mlx5/gsi.c
+@@ -32,12 +32,14 @@
+
+ #include "mlx5_ib.h"
+
++#if defined(HAVE_IRQ_POLL_H) && IS_ENABLED(CONFIG_IRQ_POLL)
+ struct mlx5_ib_gsi_wr {
+ struct ib_cqe cqe;
+ struct ib_wc wc;
+ int send_flags;
+ bool completed:1;
+ };
++#endif
+
+ struct mlx5_ib_gsi_qp {
+ struct ib_qp ibqp;
+@@ -47,9 +49,11 @@ struct mlx5_ib_gsi_qp {
+ enum ib_sig_type sq_sig_type;
+ /* Serialize qp state modifications */
+ struct mutex mutex;
++#if defined(HAVE_IRQ_POLL_H) && IS_ENABLED(CONFIG_IRQ_POLL)
+ struct ib_cq *cq;
+ struct mlx5_ib_gsi_wr *outstanding_wrs;
+ u32 outstanding_pi, outstanding_ci;
++#endif
+ int num_qps;
+ /* Protects access to the tx_qps. Post send operations synchronize
+ * with tx_qp creation in setup_qp(). Also protects the
+@@ -69,6 +73,7 @@ static bool mlx5_ib_deth_sqpn_cap(struct mlx5_ib_dev *dev)
+ return MLX5_CAP_GEN(dev->mdev, set_deth_sqpn);
+ }
+
++#if defined(HAVE_IRQ_POLL_H) && IS_ENABLED(CONFIG_IRQ_POLL)
+ /* Call with gsi->lock locked */
+ static void generate_completions(struct mlx5_ib_gsi_qp *gsi)
+ {
+@@ -111,6 +116,7 @@ static void handle_single_completion(struct ib_cq *cq, struct ib_wc *wc)
+ generate_completions(gsi);
+ spin_unlock_irqrestore(&gsi->lock, flags);
+ }
++#endif
+
+ struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
+ struct ib_qp_init_attr *init_attr)
+@@ -142,6 +148,7 @@ struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
+ goto err_free;
+ }
+
++#if defined(HAVE_IRQ_POLL_H) && IS_ENABLED(CONFIG_IRQ_POLL)
+ gsi->outstanding_wrs = kcalloc(init_attr->cap.max_send_wr,
+ sizeof(*gsi->outstanding_wrs),
+ GFP_KERNEL);
+@@ -149,6 +156,7 @@ struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
+ ret = -ENOMEM;
+ goto err_free_tx;
+ }
++#endif
+
+ mutex_init(&gsi->mutex);
+
+@@ -158,7 +166,11 @@ struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
+ mlx5_ib_warn(dev, "GSI QP already exists on port %d\n",
+ port_num);
+ ret = -EBUSY;
++#if defined(HAVE_IRQ_POLL_H) && IS_ENABLED(CONFIG_IRQ_POLL)
+ goto err_free_wrs;
++#else
++ goto err_free_tx;
++#endif
+ }
+ gsi->num_qps = num_qps;
+ spin_lock_init(&gsi->lock);
+@@ -168,6 +180,7 @@ struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
+ gsi->ibqp.qp_num = 1;
+ gsi->port_num = port_num;
+
++#if defined(HAVE_IRQ_POLL_H) && IS_ENABLED(CONFIG_IRQ_POLL)
+ gsi->cq = ib_alloc_cq(pd->device, gsi, init_attr->cap.max_send_wr, 0,
+ IB_POLL_SOFTIRQ);
+ if (IS_ERR(gsi->cq)) {
+@@ -176,9 +189,12 @@ struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
+ ret = PTR_ERR(gsi->cq);
+ goto err_free_wrs;
+ }
++#endif
+
+ hw_init_attr.qp_type = MLX5_IB_QPT_HW_GSI;
++#if defined(HAVE_IRQ_POLL_H) && IS_ENABLED(CONFIG_IRQ_POLL)
+ hw_init_attr.send_cq = gsi->cq;
++#endif
+ if (num_qps) {
+ hw_init_attr.cap.max_send_wr = 0;
+ hw_init_attr.cap.max_send_sge = 0;
+@@ -189,7 +205,11 @@ struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
+ mlx5_ib_warn(dev, "unable to create hardware GSI QP. error %ld\n",
+ PTR_ERR(gsi->rx_qp));
+ ret = PTR_ERR(gsi->rx_qp);
++#if defined(HAVE_IRQ_POLL_H) && IS_ENABLED(CONFIG_IRQ_POLL)
+ goto err_destroy_cq;
++#else
++ goto err_free_tx;
++#endif
+ }
+
+ dev->devr.ports[init_attr->port_num - 1].gsi = gsi;
+@@ -198,12 +218,17 @@ struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
+
+ return &gsi->ibqp;
+
++#if defined(HAVE_IRQ_POLL_H) && IS_ENABLED(CONFIG_IRQ_POLL)
+ err_destroy_cq:
+ ib_free_cq(gsi->cq);
+ err_free_wrs:
+ mutex_unlock(&dev->devr.mutex);
+ kfree(gsi->outstanding_wrs);
+ err_free_tx:
++#else
++err_free_tx:
++ mutex_unlock(&dev->devr.mutex);
++#endif
+ kfree(gsi->tx_qps);
+ err_free:
+ kfree(gsi);
+@@ -239,9 +264,11 @@ int mlx5_ib_gsi_destroy_qp(struct ib_qp *qp)
+ gsi->tx_qps[qp_index] = NULL;
+ }
+
++#if defined(HAVE_IRQ_POLL_H) && IS_ENABLED(CONFIG_IRQ_POLL)
+ ib_free_cq(gsi->cq);
+
+ kfree(gsi->outstanding_wrs);
++#endif
+ kfree(gsi->tx_qps);
+ kfree(gsi);
+
+@@ -254,7 +281,11 @@ static struct ib_qp *create_gsi_ud_qp(struct mlx5_ib_gsi_qp *gsi)
+ struct ib_qp_init_attr init_attr = {
+ .event_handler = gsi->rx_qp->event_handler,
+ .qp_context = gsi->rx_qp->qp_context,
++#if defined(HAVE_IRQ_POLL_H) && IS_ENABLED(CONFIG_IRQ_POLL)
+ .send_cq = gsi->cq,
++#else
++ .send_cq = gsi->rx_qp->send_cq,
++#endif
+ .recv_cq = gsi->rx_qp->recv_cq,
+ .cap = {
+ .max_send_wr = gsi->cap.max_send_wr,
+@@ -410,6 +441,7 @@ int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
+ return ret;
+ }
+
++#if defined(HAVE_IRQ_POLL_H) && IS_ENABLED(CONFIG_IRQ_POLL)
+ /* Call with gsi->lock locked */
+ static int mlx5_ib_add_outstanding_wr(struct mlx5_ib_gsi_qp *gsi,
+ struct ib_ud_wr *wr, struct ib_wc *wc)
+@@ -476,11 +508,13 @@ static struct ib_qp *get_tx_qp(struct mlx5_ib_gsi_qp *gsi, struct ib_ud_wr *wr)
+
+ return gsi->tx_qps[qp_index];
+ }
++#endif
+
+ int mlx5_ib_gsi_post_send(struct ib_qp *qp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+ {
+ struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
++#if defined(HAVE_IRQ_POLL_H) && IS_ENABLED(CONFIG_IRQ_POLL)
+ struct ib_qp *tx_qp;
+ unsigned long flags;
+ int ret;
+@@ -520,6 +554,9 @@ err:
+ spin_unlock_irqrestore(&gsi->lock, flags);
+ *bad_wr = wr;
+ return ret;
++#else
++ return ib_post_send(gsi->rx_qp, wr, bad_wr);
++#endif
+ }
+
+ int mlx5_ib_gsi_post_recv(struct ib_qp *qp, struct ib_recv_wr *wr,
+diff --git a/drivers/infiniband/hw/mlx5/ib_virt.c b/drivers/infiniband/hw/mlx5/ib_virt.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/infiniband/hw/mlx5/ib_virt.c
++++ b/drivers/infiniband/hw/mlx5/ib_virt.c
+@@ -30,6 +30,7 @@
+ * SOFTWARE.
+ */
+
++#ifdef HAVE_NDO_SET_VF_GUID
+ #include <linux/module.h>
+ #include <linux/mlx5/vport.h>
+ #include "mlx5_ib.h"
+@@ -192,3 +193,4 @@ int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
+
+ return -EINVAL;
+ }
++#endif
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -1268,8 +1268,16 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
+ case MLX5_IB_MMAP_WC_PAGE:
+ /* Some architectures don't support WC memory */
+ #if defined(CONFIG_X86)
++#ifdef HAVE_PAT_ENABLED_EXPORTED
++#ifdef HAVE_PAT_ENABLED_FUNCTION_X86
+ if (!pat_enabled())
++#else
++ if (!pat_enabled)
++#endif
++ return -EPERM;
++#else /* HAVE_PAT_ENABLED_EXPORTED */
+ return -EPERM;
++#endif
+ #elif !(defined(CONFIG_PPC) || (defined(CONFIG_ARM) && defined(CONFIG_MMU)))
+ return -EPERM;
+ #endif
+@@ -2781,12 +2789,14 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
+ dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
+ dev->ib_dev.get_port_immutable = mlx5_port_immutable;
+ dev->ib_dev.get_dev_fw_str = get_dev_fw_str;
++#ifdef HAVE_NDO_SET_VF_GUID
+ if (mlx5_core_is_pf(mdev)) {
+ dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config;
+ dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state;
+ dev->ib_dev.get_vf_stats = mlx5_ib_get_vf_stats;
+ dev->ib_dev.set_vf_guid = mlx5_ib_set_vf_guid;
+ }
++#endif
+
+ dev->ib_dev.disassociate_ucontext = mlx5_ib_disassociate_ucontext;
+
+diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
++++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
+@@ -869,6 +869,7 @@ static inline void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp) {}
+
+ #endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
+
++#ifdef HAVE_NDO_SET_VF_GUID
+ int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
+ u8 port, struct ifla_vf_info *info);
+ int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
+@@ -877,6 +878,7 @@ int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
+ u8 port, struct ifla_vf_stats *stats);
+ int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
+ u64 guid, int type);
++#endif
+
+ __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
+ int index);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
++++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+@@ -5,9 +5,12 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
+ mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \
+ fs_counters.o rl.o
+
+-mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o eswitch_offloads.o \
++mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o \
+ en_main.o en_common.o en_fs.o en_ethtool.o en_tx.o \
+ en_rx.o en_rx_am.o en_txrx.o en_clock.o vxlan.o \
+- en_tc.o en_arfs.o en_rep.o en_fs_ethtool.o
++ en_tc.o en_arfs.o en_fs_ethtool.o
++
++mlx5_core-$(CONFIG_NET_SWITCHDEV) += eswitch.o eswitch_offloads.o \
++ en_rep.o
+
+ mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+@@ -687,7 +687,11 @@ static void cmd_work_handler(struct work_struct *work)
+ lay->status_own = CMD_OWNER_HW;
+ set_signature(ent, !cmd->checksum_disabled);
+ dump_command(dev, ent, 1);
++#ifdef HAVE_KTIME_GET_NS
+ ent->ts1 = ktime_get_ns();
++#else
++ ktime_get_ts(&ent->ts1);
++#endif
+
+ if (ent->callback)
+ schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
+@@ -785,6 +789,9 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
+ struct mlx5_cmd *cmd = &dev->cmd;
+ struct mlx5_cmd_work_ent *ent;
+ struct mlx5_cmd_stats *stats;
++#ifndef HAVE_KTIME_GET_NS
++ ktime_t t1, t2, delta;
++#endif
+ int err = 0;
+ s64 ds;
+ u16 op;
+@@ -819,7 +826,14 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
+ if (err == -ETIMEDOUT)
+ goto out_free;
+
++#ifdef HAVE_KTIME_GET_NS
+ ds = ent->ts2 - ent->ts1;
++#else
++ t1 = timespec_to_ktime(ent->ts1);
++ t2 = timespec_to_ktime(ent->ts2);
++ delta = ktime_sub(t2, t1);
++ ds = ktime_to_ns(delta);
++#endif
+ op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
+ if (op < ARRAY_SIZE(cmd->stats)) {
+ stats = &cmd->stats[op];
+@@ -1272,6 +1286,9 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
+ void *context;
+ int err;
+ int i;
++#ifndef HAVE_KTIME_GET_NS
++ ktime_t t1, t2, delta;
++#endif
+ s64 ds;
+ struct mlx5_cmd_stats *stats;
+ unsigned long flags;
+@@ -1285,12 +1302,20 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
+
+ ent = cmd->ent_arr[i];
+ if (ent->callback)
++#ifdef HAVE___CANCEL_DELAYED_WORK
++ __cancel_delayed_work(&ent->cb_timeout_work);
++#else
+ cancel_delayed_work(&ent->cb_timeout_work);
++#endif
+ if (ent->page_queue)
+ sem = &cmd->pages_sem;
+ else
+ sem = &cmd->sem;
++#ifdef HAVE_KTIME_GET_NS
+ ent->ts2 = ktime_get_ns();
++#else
++ ktime_get_ts(&ent->ts2);
++#endif
+ memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
+ dump_command(dev, ent, 0);
+ if (!ent->ret) {
+@@ -1309,7 +1334,14 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
+ free_ent(cmd, ent->idx);
+
+ if (ent->callback) {
++#ifdef HAVE_KTIME_GET_NS
+ ds = ent->ts2 - ent->ts1;
++#else
++ t1 = timespec_to_ktime(ent->ts1);
++ t2 = timespec_to_ktime(ent->ts2);
++ delta = ktime_sub(t2, t1);
++ ds = ktime_to_ns(delta);
++#endif
+ if (ent->op < ARRAY_SIZE(cmd->stats)) {
+ stats = &cmd->stats[ent->op];
+ spin_lock_irqsave(&stats->lock, flags);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+@@ -34,7 +34,11 @@
+
+ #include <linux/if_vlan.h>
+ #include <linux/etherdevice.h>
++#ifdef HAVE_TIMECOUNTER_H
+ #include <linux/timecounter.h>
++#else
++#include <linux/clocksource.h>
++#endif
+ #include <linux/net_tstamp.h>
+ #include <linux/ptp_clock_kernel.h>
+ #include <linux/mlx5/driver.h>
+@@ -44,7 +48,9 @@
+ #include <linux/mlx5/vport.h>
+ #include <linux/mlx5/transobj.h>
+ #include <linux/rhashtable.h>
++#ifdef HAVE_NET_SWITCHDEV_H
+ #include <net/switchdev.h>
++#endif
+ #include "wq.h"
+ #include "mlx5_core.h"
+ #include "en_stats.h"
+@@ -688,11 +694,21 @@ enum mlx5e_link_mode {
+ #define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
+
+
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ void mlx5e_build_ptys2ethtool_map(void);
++#endif
+
+ void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw);
++#if defined(NDO_SELECT_QUEUE_HAS_ACCEL_PRIV) || defined(HAVE_SELECT_QUEUE_FALLBACK_T)
+ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
++#ifdef HAVE_SELECT_QUEUE_FALLBACK_T
+ void *accel_priv, select_queue_fallback_t fallback);
++#else
++ void *accel_priv);
++#endif
++#else /* NDO_SELECT_QUEUE_HAS_ACCEL_PRIV || HAVE_SELECT_QUEUE_FALLBACK_T */
++u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb);
++#endif
+ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
+
+ void mlx5e_completion_event(struct mlx5_core_cq *mcq);
+@@ -857,6 +873,7 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
+ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
+ int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev);
+
++#if defined(CONFIG_NET_SWITCHDEV)
+ struct mlx5_eswitch_rep;
+ int mlx5e_vport_rep_load(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep);
+@@ -868,6 +885,7 @@ void mlx5e_nic_rep_unload(struct mlx5_eswitch *esw,
+ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv);
+ void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);
+ int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr);
++#endif
+
+ int mlx5e_create_direct_rqts(struct mlx5e_priv *priv);
+ void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+@@ -48,6 +48,7 @@ static void mlx5e_get_drvinfo(struct net_device *dev,
+ sizeof(drvinfo->bus_info));
+ }
+
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ struct ptys2ethtool_config {
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertised);
+@@ -126,6 +127,124 @@ void mlx5e_build_ptys2ethtool_map(void)
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_KR2, SPEED_50000,
+ ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT);
+ }
++#else
++static const struct {
++ u32 supported;
++ u32 advertised;
++ u32 speed;
++} ptys2ethtool_table[MLX5E_LINK_MODES_NUMBER] = {
++ [MLX5E_1000BASE_CX_SGMII] = {
++ .supported = SUPPORTED_1000baseKX_Full,
++ .advertised = ADVERTISED_1000baseKX_Full,
++ .speed = 1000,
++ },
++ [MLX5E_1000BASE_KX] = {
++ .supported = SUPPORTED_1000baseKX_Full,
++ .advertised = ADVERTISED_1000baseKX_Full,
++ .speed = 1000,
++ },
++ [MLX5E_10GBASE_CX4] = {
++ .supported = SUPPORTED_10000baseKX4_Full,
++ .advertised = ADVERTISED_10000baseKX4_Full,
++ .speed = 10000,
++ },
++ [MLX5E_10GBASE_KX4] = {
++ .supported = SUPPORTED_10000baseKX4_Full,
++ .advertised = ADVERTISED_10000baseKX4_Full,
++ .speed = 10000,
++ },
++ [MLX5E_10GBASE_KR] = {
++ .supported = SUPPORTED_10000baseKR_Full,
++ .advertised = ADVERTISED_10000baseKR_Full,
++ .speed = 10000,
++ },
++ [MLX5E_20GBASE_KR2] = {
++ .supported = SUPPORTED_20000baseKR2_Full,
++ .advertised = ADVERTISED_20000baseKR2_Full,
++ .speed = 20000,
++ },
++ [MLX5E_40GBASE_CR4] = {
++ .supported = SUPPORTED_40000baseCR4_Full,
++ .advertised = ADVERTISED_40000baseCR4_Full,
++ .speed = 40000,
++ },
++ [MLX5E_40GBASE_KR4] = {
++ .supported = SUPPORTED_40000baseKR4_Full,
++ .advertised = ADVERTISED_40000baseKR4_Full,
++ .speed = 40000,
++ },
++ [MLX5E_56GBASE_R4] = {
++ .supported = SUPPORTED_56000baseKR4_Full,
++ .advertised = ADVERTISED_56000baseKR4_Full,
++ .speed = 56000,
++ },
++ [MLX5E_10GBASE_CR] = {
++ .supported = SUPPORTED_10000baseKR_Full,
++ .advertised = ADVERTISED_10000baseKR_Full,
++ .speed = 10000,
++ },
++ [MLX5E_10GBASE_SR] = {
++ .supported = SUPPORTED_10000baseKR_Full,
++ .advertised = ADVERTISED_10000baseKR_Full,
++ .speed = 10000,
++ },
++ [MLX5E_10GBASE_ER] = {
++ .supported = SUPPORTED_10000baseKR_Full,
++ .advertised = ADVERTISED_10000baseKR_Full,
++ .speed = 10000,
++ },
++ [MLX5E_40GBASE_SR4] = {
++ .supported = SUPPORTED_40000baseSR4_Full,
++ .advertised = ADVERTISED_40000baseSR4_Full,
++ .speed = 40000,
++ },
++ [MLX5E_40GBASE_LR4] = {
++ .supported = SUPPORTED_40000baseLR4_Full,
++ .advertised = ADVERTISED_40000baseLR4_Full,
++ .speed = 40000,
++ },
++ [MLX5E_100GBASE_CR4] = {
++ .speed = 100000,
++ },
++ [MLX5E_100GBASE_SR4] = {
++ .speed = 100000,
++ },
++ [MLX5E_100GBASE_KR4] = {
++ .speed = 100000,
++ },
++ [MLX5E_100GBASE_LR4] = {
++ .speed = 100000,
++ },
++ [MLX5E_100BASE_TX] = {
++ .speed = 100,
++ },
++ [MLX5E_1000BASE_T] = {
++ .supported = SUPPORTED_1000baseT_Full,
++ .advertised = ADVERTISED_1000baseT_Full,
++ .speed = 1000,
++ },
++ [MLX5E_10GBASE_T] = {
++ .supported = SUPPORTED_10000baseT_Full,
++ .advertised = ADVERTISED_10000baseT_Full,
++ .speed = 10000,
++ },
++ [MLX5E_25GBASE_CR] = {
++ .speed = 25000,
++ },
++ [MLX5E_25GBASE_KR] = {
++ .speed = 25000,
++ },
++ [MLX5E_25GBASE_SR] = {
++ .speed = 25000,
++ },
++ [MLX5E_50GBASE_CR2] = {
++ .speed = 50000,
++ },
++ [MLX5E_50GBASE_KR2] = {
++ .speed = 50000,
++ },
++};
++#endif
+
+ static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
+ {
+@@ -258,13 +377,17 @@ static void mlx5e_get_strings(struct net_device *dev,
+ uint32_t stringset, uint8_t *data)
+ {
+ struct mlx5e_priv *priv = netdev_priv(dev);
++#ifdef HAVE_GET_SET_PRIV_FLAGS
+ int i;
++#endif
+
+ switch (stringset) {
++#ifdef HAVE_GET_SET_PRIV_FLAGS
+ case ETH_SS_PRIV_FLAGS:
+ for (i = 0; i < ARRAY_SIZE(mlx5e_priv_flags); i++)
+ strcpy(data + i * ETH_GSTRING_LEN, mlx5e_priv_flags[i]);
+ break;
++#endif
+
+ case ETH_SS_TEST:
+ break;
+@@ -505,6 +628,7 @@ static int mlx5e_set_ringparam(struct net_device *dev,
+ return err;
+ }
+
++#if defined(HAVE_GET_SET_CHANNELS) || defined(HAVE_GET_SET_CHANNELS_EXT)
+ static void mlx5e_get_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+ {
+@@ -583,6 +707,7 @@ out:
+
+ return err;
+ }
++#endif
+
+ static int mlx5e_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coal)
+@@ -656,6 +781,7 @@ out:
+ return err;
+ }
+
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ static void ptys2ethtool_supported_link(unsigned long *supported_modes,
+ u32 eth_proto_cap)
+ {
+@@ -700,6 +826,52 @@ static void ptys2ethtool_supported_port(struct ethtool_link_ksettings *link_kset
+ ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Backplane);
+ }
+ }
++#else
++static u32 ptys2ethtool_supported_link(u32 eth_proto_cap)
++{
++ int i;
++ u32 supported_modes = 0;
++
++ for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
++ if (eth_proto_cap & MLX5E_PROT_MASK(i))
++ supported_modes |= ptys2ethtool_table[i].supported;
++ }
++ return supported_modes;
++}
++
++static u32 ptys2ethtool_adver_link(u32 eth_proto_cap)
++{
++ int i;
++ u32 advertising_modes = 0;
++
++ for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
++ if (eth_proto_cap & MLX5E_PROT_MASK(i))
++ advertising_modes |= ptys2ethtool_table[i].advertised;
++ }
++ return advertising_modes;
++}
++
++static u32 ptys2ethtool_supported_port(u32 eth_proto_cap)
++{
++ if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
++ | MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
++ | MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
++ | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4)
++ | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4)
++ | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) {
++ return SUPPORTED_FIBRE;
++ }
++
++ if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_100GBASE_KR4)
++ | MLX5E_PROT_MASK(MLX5E_40GBASE_KR4)
++ | MLX5E_PROT_MASK(MLX5E_10GBASE_KR)
++ | MLX5E_PROT_MASK(MLX5E_10GBASE_KX4)
++ | MLX5E_PROT_MASK(MLX5E_1000BASE_KX))) {
++ return SUPPORTED_Backplane;
++ }
++ return 0;
++}
++#endif /* HAVE_ETHTOOL_xLINKSETTINGS */
+
+ int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
+ {
+@@ -722,7 +894,11 @@ int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
+
+ static void get_speed_duplex(struct net_device *netdev,
+ u32 eth_proto_oper,
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ struct ethtool_link_ksettings *link_ksettings)
++#else
++ struct ethtool_cmd *cmd)
++#endif
+ {
+ int i;
+ u32 speed = SPEED_UNKNOWN;
+@@ -739,10 +915,16 @@ static void get_speed_duplex(struct net_device *netdev,
+ }
+ }
+ out:
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ link_ksettings->base.speed = speed;
+ link_ksettings->base.duplex = duplex;
++#else
++ ethtool_cmd_speed_set(cmd, speed);
++ cmd->duplex = duplex;
++#endif
+ }
+
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ static void get_supported(u32 eth_proto_cap,
+ struct ethtool_link_ksettings *link_ksettings)
+ {
+@@ -799,7 +981,54 @@ static void get_lp_advertising(u32 eth_proto_lp,
+
+ ptys2ethtool_adver_link(lp_advertising, eth_proto_lp);
+ }
++#else
++static void get_supported(u32 eth_proto_cap, u32 *supported)
++{
++ *supported |= ptys2ethtool_supported_port(eth_proto_cap);
++ *supported |= ptys2ethtool_supported_link(eth_proto_cap);
++ *supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
++}
+
++static void get_advertising(u32 eth_proto_cap, u8 tx_pause,
++ u8 rx_pause, u32 *advertising)
++{
++ *advertising |= ptys2ethtool_adver_link(eth_proto_cap);
++ *advertising |= tx_pause ? ADVERTISED_Pause : 0;
++ *advertising |= (tx_pause ^ rx_pause) ? ADVERTISED_Asym_Pause : 0;
++}
++
++static u8 get_connector_port(u32 eth_proto)
++{
++ if (eth_proto & (MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
++ | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4)
++ | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4)
++ | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) {
++ return PORT_FIBRE;
++ }
++
++ if (eth_proto & (MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
++ | MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
++ | MLX5E_PROT_MASK(MLX5E_100GBASE_CR4))) {
++ return PORT_DA;
++ }
++
++ if (eth_proto & (MLX5E_PROT_MASK(MLX5E_10GBASE_KX4)
++ | MLX5E_PROT_MASK(MLX5E_10GBASE_KR)
++ | MLX5E_PROT_MASK(MLX5E_40GBASE_KR4)
++ | MLX5E_PROT_MASK(MLX5E_100GBASE_KR4))) {
++ return PORT_NONE;
++ }
++
++ return PORT_OTHER;
++}
++
++static void get_lp_advertising(u32 eth_proto_lp, u32 *lp_advertising)
++{
++ *lp_advertising = ptys2ethtool_adver_link(eth_proto_lp);
++}
++#endif
++
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ static int mlx5e_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *link_ksettings)
+ {
+@@ -942,6 +1171,122 @@ static int mlx5e_set_link_ksettings(struct net_device *netdev,
+ out:
+ return err;
+ }
++#else
++static int mlx5e_get_settings(struct net_device *netdev,
++ struct ethtool_cmd *cmd)
++{
++ struct mlx5e_priv *priv = netdev_priv(netdev);
++ struct mlx5_core_dev *mdev = priv->mdev;
++ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
++ u32 eth_proto_cap;
++ u32 eth_proto_admin;
++ u32 eth_proto_lp;
++ u32 eth_proto_oper;
++ int err;
++
++ err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
++
++ if (err) {
++ netdev_err(netdev, "%s: query port ptys failed: %d\n",
++ __func__, err);
++ goto err_query_ptys;
++ }
++
++ eth_proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability);
++ eth_proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin);
++ eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
++ eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise);
++
++ cmd->supported = 0;
++ cmd->advertising = 0;
++
++ get_supported(eth_proto_cap, &cmd->supported);
++ get_advertising(eth_proto_admin, 0, 0, &cmd->advertising);
++ get_speed_duplex(netdev, eth_proto_oper, cmd);
++
++ eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
++
++ cmd->port = get_connector_port(eth_proto_oper);
++ get_lp_advertising(eth_proto_lp, &cmd->lp_advertising);
++
++ cmd->transceiver = XCVR_INTERNAL;
++
++err_query_ptys:
++ return err;
++}
++
++static u32 mlx5e_ethtool2ptys_adver_link(u32 link_modes)
++{
++ u32 i, ptys_modes = 0;
++
++ for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
++ if (ptys2ethtool_table[i].advertised & link_modes)
++ ptys_modes |= MLX5E_PROT_MASK(i);
++ }
++
++ return ptys_modes;
++}
++
++static u32 mlx5e_ethtool2ptys_speed_link(u32 speed)
++{
++ u32 i, speed_links = 0;
++
++ for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
++ if (ptys2ethtool_table[i].speed == speed)
++ speed_links |= MLX5E_PROT_MASK(i);
++ }
++
++ return speed_links;
++}
++
++static int mlx5e_set_settings(struct net_device *netdev,
++ struct ethtool_cmd *cmd)
++{
++ struct mlx5e_priv *priv = netdev_priv(netdev);
++ struct mlx5_core_dev *mdev = priv->mdev;
++ u32 link_modes;
++ u32 speed;
++ u32 eth_proto_cap, eth_proto_admin;
++ int err;
++
++ speed = ethtool_cmd_speed(cmd);
++
++ link_modes = cmd->autoneg == AUTONEG_ENABLE ?
++ mlx5e_ethtool2ptys_adver_link(cmd->advertising) :
++ mlx5e_ethtool2ptys_speed_link(speed);
++
++ err = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN);
++ if (err) {
++ netdev_err(netdev, "%s: query port eth proto cap failed: %d\n",
++ __func__, err);
++ goto out;
++ }
++
++ link_modes = link_modes & eth_proto_cap;
++ if (!link_modes) {
++ netdev_err(netdev, "%s: Not supported link mode(s) requested",
++ __func__);
++ err = -EINVAL;
++ goto out;
++ }
++
++ err = mlx5_query_port_proto_admin(mdev, &eth_proto_admin, MLX5_PTYS_EN);
++ if (err) {
++ netdev_err(netdev, "%s: query port eth proto admin failed: %d\n",
++ __func__, err);
++ goto out;
++ }
++
++ if (link_modes == eth_proto_admin)
++ goto out;
++
++ mlx5_set_port_proto(mdev, link_modes, MLX5_PTYS_EN);
++ mlx5_toggle_port_link(mdev);
++
++out:
++ return err;
++}
++#endif
+
+ static u32 mlx5e_get_rxfh_key_size(struct net_device *netdev)
+ {
+@@ -1056,6 +1401,7 @@ static int mlx5e_get_rxnfc(struct net_device *netdev,
+ return err;
+ }
+
++#ifdef HAVE_GET_SET_TUNABLE
+ static int mlx5e_get_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna,
+ void *data)
+@@ -1113,6 +1459,7 @@ static int mlx5e_set_tunable(struct net_device *dev,
+
+ return err;
+ }
++#endif
+
+ static void mlx5e_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pauseparam)
+@@ -1517,16 +1864,23 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
+ .set_channels = mlx5e_set_channels,
+ .get_coalesce = mlx5e_get_coalesce,
+ .set_coalesce = mlx5e_set_coalesce,
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ .get_link_ksettings = mlx5e_get_link_ksettings,
+ .set_link_ksettings = mlx5e_set_link_ksettings,
++#else
++ .get_settings = mlx5e_get_settings,
++ .set_settings = mlx5e_set_settings,
++#endif
+ .get_rxfh_key_size = mlx5e_get_rxfh_key_size,
+ .get_rxfh_indir_size = mlx5e_get_rxfh_indir_size,
+ .get_rxfh = mlx5e_get_rxfh,
+ .set_rxfh = mlx5e_set_rxfh,
+ .get_rxnfc = mlx5e_get_rxnfc,
+ .set_rxnfc = mlx5e_set_rxnfc,
++#ifdef HAVE_GET_SET_TUNABLE
+ .get_tunable = mlx5e_get_tunable,
+ .set_tunable = mlx5e_set_tunable,
++#endif
+ .get_pauseparam = mlx5e_get_pauseparam,
+ .set_pauseparam = mlx5e_set_pauseparam,
+ .get_ts_info = mlx5e_get_ts_info,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -30,12 +30,16 @@
+ * SOFTWARE.
+ */
+
++#ifdef HAVE_TC_OFFLOAD
+ #include <net/tc_act/tc_gact.h>
+ #include <net/pkt_cls.h>
++#endif
+ #include <linux/mlx5/fs.h>
+ #include <net/vxlan.h>
+ #include "en.h"
++#ifdef HAVE_TC_OFFLOAD
+ #include "en_tc.h"
++#endif
+ #include "eswitch.h"
+ #include "vxlan.h"
+
+@@ -1120,6 +1124,7 @@ static int mlx5e_set_sq_maxrate(struct net_device *dev,
+ return 0;
+ }
+
++#ifdef HAVE_NDO_SET_TX_MAXRATE
+ static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
+ {
+ struct mlx5e_priv *priv = netdev_priv(dev);
+@@ -1150,6 +1155,7 @@ static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
+
+ return err;
+ }
++#endif
+
+ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
+ struct mlx5e_channel_param *cparam,
+@@ -1795,7 +1801,9 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev)
+ int mlx5e_open_locked(struct net_device *netdev)
+ {
+ struct mlx5e_priv *priv = netdev_priv(netdev);
++#ifdef CONFIG_NET_SWITCHDEV
+ struct mlx5_core_dev *mdev = priv->mdev;
++#endif
+ int num_txqs;
+ int err;
+
+@@ -1827,6 +1835,7 @@ int mlx5e_open_locked(struct net_device *netdev)
+ #ifdef CONFIG_RFS_ACCEL
+ priv->netdev->rx_cpu_rmap = priv->mdev->rmap;
+ #endif
++#ifdef CONFIG_NET_SWITCHDEV
+ if (priv->profile->update_stats)
+ queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
+
+@@ -1835,6 +1844,9 @@ int mlx5e_open_locked(struct net_device *netdev)
+ if (err)
+ goto err_close_channels;
+ }
++#else
++ queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
++#endif
+ return 0;
+
+ err_close_channels:
+@@ -1859,7 +1871,9 @@ int mlx5e_open(struct net_device *netdev)
+ int mlx5e_close_locked(struct net_device *netdev)
+ {
+ struct mlx5e_priv *priv = netdev_priv(netdev);
++#ifdef CONFIG_NET_SWITCHDEV
+ struct mlx5_core_dev *mdev = priv->mdev;
++#endif
+
+ /* May already be CLOSED in case a previous configuration operation
+ * (e.g RX/TX queue size change) that involves close&open failed.
+@@ -1869,8 +1883,10 @@ int mlx5e_close_locked(struct net_device *netdev)
+
+ clear_bit(MLX5E_STATE_OPENED, &priv->state);
+
++#ifdef CONFIG_NET_SWITCHDEV
+ if (MLX5_CAP_GEN(mdev, vport_group_manager))
+ mlx5e_remove_sqs_fwd_rules(priv);
++#endif
+
+ mlx5e_timestamp_cleanup(priv);
+ netif_carrier_off(priv->netdev);
+@@ -2274,6 +2290,7 @@ int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd)
+ return 0;
+ }
+
++#ifdef HAVE_NDO_SETUP_TC
+ static int mlx5e_setup_tc(struct net_device *netdev, u8 tc)
+ {
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+@@ -2298,16 +2315,18 @@ static int mlx5e_setup_tc(struct net_device *netdev, u8 tc)
+
+ return err;
+ }
+-
++#ifdef HAVE_NDO_SETUP_TC_4_PARAMS
+ static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle,
+ __be16 proto, struct tc_to_netdev *tc)
+ {
++#ifdef HAVE_TC_OFFLOAD
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
+ goto mqprio;
+
+ switch (tc->type) {
++#ifdef HAVE_HW_FLOWER_OFFLOAD_SUPPORT
+ case TC_SETUP_CLSFLOWER:
+ switch (tc->cls_flower->command) {
+ case TC_CLSFLOWER_REPLACE:
+@@ -2317,19 +2336,27 @@ static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle,
+ case TC_CLSFLOWER_STATS:
+ return mlx5e_stats_flower(priv, tc->cls_flower);
+ }
++#endif
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ mqprio:
++#endif /* HAVE_TC_OFFLOAD */
+ if (tc->type != TC_SETUP_MQPRIO)
+ return -EINVAL;
+
+ return mlx5e_setup_tc(dev, tc->tc);
+ }
++#endif /* HAVE_NDO_SETUP_TC_4_PARAMS */
++#endif /* HAVE_NDO_SETUP_TC */
+
++#ifdef HAVE_NDO_GET_STATS64
+ struct rtnl_link_stats64 *
+ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
++#else
++struct net_device_stats *mlx5e_get_stats(struct net_device *dev)
++#endif
+ {
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_sw_stats *sstats = &priv->stats.sw;
+@@ -2439,6 +2466,7 @@ static int set_feature_vlan_filter(struct net_device *netdev, bool enable)
+ return 0;
+ }
+
++#ifdef HAVE_NDO_SETUP_TC
+ static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
+ {
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+@@ -2451,6 +2479,7 @@ static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
+
+ return 0;
+ }
++#endif
+
+ static int set_feature_rx_all(struct net_device *netdev, bool enable)
+ {
+@@ -2525,8 +2554,10 @@ static int mlx5e_set_features(struct net_device *netdev,
+ err |= mlx5e_handle_feature(netdev, features,
+ NETIF_F_HW_VLAN_CTAG_FILTER,
+ set_feature_vlan_filter);
++#ifdef HAVE_TC_OFFLOAD
+ err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_TC,
+ set_feature_tc_num_filters);
++#endif
+ err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL,
+ set_feature_rx_all);
+ err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX,
+@@ -2597,6 +2628,7 @@ static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+ }
+ }
+
++#ifdef CONFIG_NET_SWITCHDEV
+ static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
+ {
+ struct mlx5e_priv *priv = netdev_priv(dev);
+@@ -2622,6 +2654,7 @@ static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
+ return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
+ }
+
++#if (defined(HAVE_NETDEV_OPS_NDO_SET_VF_TRUST) && !defined(HAVE_NET_DEVICE_OPS_EXT))
+ static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
+ {
+ struct mlx5e_priv *priv = netdev_priv(dev);
+@@ -2629,6 +2662,7 @@ static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
+
+ return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
+ }
++#endif
+ static int mlx5_vport_link2ifla(u8 esw_link)
+ {
+ switch (esw_link) {
+@@ -2684,7 +2718,9 @@ static int mlx5e_get_vf_stats(struct net_device *dev,
+ return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
+ vf_stats);
+ }
++#endif
+
++#ifdef HAVE_NDO_UDP_TUNNEL_ADD
+ static void mlx5e_add_vxlan_port(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
+ {
+@@ -2712,6 +2748,29 @@ static void mlx5e_del_vxlan_port(struct net_device *netdev,
+
+ mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 0);
+ }
++#elif defined(HAVE_VXLAN_ENABLED) && defined(HAVE_VXLAN_DYNAMIC_PORT)
++static void mlx5e_add_vxlan_port(struct net_device *netdev,
++ sa_family_t sa_family, __be16 port)
++{
++ struct mlx5e_priv *priv = netdev_priv(netdev);
++
++ if (!mlx5e_vxlan_allowed(priv->mdev))
++ return;
++
++ mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 1);
++}
++
++static void mlx5e_del_vxlan_port(struct net_device *netdev,
++ sa_family_t sa_family, __be16 port)
++{
++ struct mlx5e_priv *priv = netdev_priv(netdev);
++
++ if (!mlx5e_vxlan_allowed(priv->mdev))
++ return;
++
++ mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 0);
++}
++#endif
+
+ static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
+ struct sk_buff *skb,
+@@ -2790,20 +2849,36 @@ static const struct net_device_ops mlx5e_netdev_ops_basic = {
+ .ndo_open = mlx5e_open,
+ .ndo_stop = mlx5e_close,
+ .ndo_start_xmit = mlx5e_xmit,
++#ifdef HAVE_NDO_SETUP_TC
++#ifdef HAVE_NDO_SETUP_TC_4_PARAMS
+ .ndo_setup_tc = mlx5e_ndo_setup_tc,
++#else /* HAVE_NDO_SETUP_TC_4_PARAMS */
++ .ndo_setup_tc = mlx5e_setup_tc,
++#endif /* HAVE_NDO_SETUP_TC_4_PARAMS */
++#endif /* HAVE_NDO_SETUP_TC */
+ .ndo_select_queue = mlx5e_select_queue,
++#ifdef HAVE_NDO_GET_STATS64
+ .ndo_get_stats64 = mlx5e_get_stats,
++#else
++ .ndo_get_stats = mlx5e_get_stats,
++#endif
+ .ndo_set_rx_mode = mlx5e_set_rx_mode,
+ .ndo_set_mac_address = mlx5e_set_mac,
+ .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
++#if (defined(HAVE_NDO_SET_FEATURES) && !defined(HAVE_NET_DEVICE_OPS_EXT))
+ .ndo_set_features = mlx5e_set_features,
++#endif
+ .ndo_change_mtu = mlx5e_change_mtu,
+ .ndo_do_ioctl = mlx5e_ioctl,
++#ifdef HAVE_NDO_SET_TX_MAXRATE
+ .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate,
++#endif
++#ifdef HAVE_NDO_RX_FLOW_STEER
+ #ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = mlx5e_rx_flow_steer,
+ #endif
++#endif
+ .ndo_tx_timeout = mlx5e_tx_timeout,
+ };
+
+@@ -2811,7 +2886,13 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
+ .ndo_open = mlx5e_open,
+ .ndo_stop = mlx5e_close,
+ .ndo_start_xmit = mlx5e_xmit,
++#ifdef HAVE_NDO_SETUP_TC
++#ifdef HAVE_NDO_SETUP_TC_4_PARAMS
+ .ndo_setup_tc = mlx5e_ndo_setup_tc,
++#else /* HAVE_NDO_SETUP_TC_4_PARAMS */
++ .ndo_setup_tc = mlx5e_setup_tc,
++#endif /* HAVE_NDO_SETUP_TC_4_PARAMS */
++#endif /* HAVE_NDO_SETUP_TC */
+ .ndo_select_queue = mlx5e_select_queue,
+ .ndo_get_stats64 = mlx5e_get_stats,
+ .ndo_set_rx_mode = mlx5e_set_rx_mode,
+@@ -2821,20 +2902,33 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
+ .ndo_set_features = mlx5e_set_features,
+ .ndo_change_mtu = mlx5e_change_mtu,
+ .ndo_do_ioctl = mlx5e_ioctl,
++#ifdef HAVE_NDO_UDP_TUNNEL_ADD
+ .ndo_udp_tunnel_add = mlx5e_add_vxlan_port,
+ .ndo_udp_tunnel_del = mlx5e_del_vxlan_port,
++#elif defined(HAVE_VXLAN_ENABLED) && defined(HAVE_VXLAN_DYNAMIC_PORT)
++ .ndo_add_vxlan_port = mlx5e_add_vxlan_port,
++ .ndo_del_vxlan_port = mlx5e_del_vxlan_port,
++#endif
++#ifdef HAVE_NDO_SET_TX_MAXRATE
+ .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate,
++#endif
+ .ndo_features_check = mlx5e_features_check,
++#ifdef HAVE_NDO_RX_FLOW_STEER
+ #ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = mlx5e_rx_flow_steer,
+ #endif
++#endif
++#ifdef CONFIG_NET_SWITCHDEV
+ .ndo_set_vf_mac = mlx5e_set_vf_mac,
+ .ndo_set_vf_vlan = mlx5e_set_vf_vlan,
+ .ndo_set_vf_spoofchk = mlx5e_set_vf_spoofchk,
++#if (defined(HAVE_NETDEV_OPS_NDO_SET_VF_TRUST) && !defined(HAVE_NET_DEVICE_OPS_EXT))
+ .ndo_set_vf_trust = mlx5e_set_vf_trust,
++#endif
+ .ndo_get_vf_config = mlx5e_get_vf_config,
+ .ndo_set_vf_link_state = mlx5e_set_vf_link_state,
+ .ndo_get_vf_stats = mlx5e_get_vf_stats,
++#endif
+ .ndo_tx_timeout = mlx5e_tx_timeout,
+ };
+
+@@ -3096,9 +3190,11 @@ static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
+ }
+ }
+
++#ifdef CONFIG_NET_SWITCHDEV
+ static const struct switchdev_ops mlx5e_switchdev_ops = {
+ .switchdev_port_attr_get = mlx5e_attr_get,
+ };
++#endif
+
+ static void mlx5e_build_nic_netdev(struct net_device *netdev)
+ {
+@@ -3129,7 +3225,9 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
+ netdev->vlan_features |= NETIF_F_TSO;
+ netdev->vlan_features |= NETIF_F_TSO6;
+ netdev->vlan_features |= NETIF_F_RXCSUM;
++#ifdef HAVE_NETIF_F_RXHASH
+ netdev->vlan_features |= NETIF_F_RXHASH;
++#endif
+
+ if (!!MLX5_CAP_ETH(mdev, lro_cap))
+ netdev->vlan_features |= NETIF_F_LRO;
+@@ -3140,17 +3238,26 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ if (mlx5e_vxlan_allowed(mdev)) {
++#ifdef HAVE_NET_DEVICE_GSO_PARTIAL_FEATURES
+ netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL;
++#else
++ netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
++ NETIF_F_GSO_UDP_TUNNEL_CSUM;
++#endif
+ netdev->hw_enc_features |= NETIF_F_IP_CSUM;
+ netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
+ netdev->hw_enc_features |= NETIF_F_TSO;
+ netdev->hw_enc_features |= NETIF_F_TSO6;
+ netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
++#ifdef HAVE_NET_DEVICE_GSO_PARTIAL_FEATURES
+ netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL;
+ netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
++#else
++ netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
++#endif
+ }
+
+ mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
+@@ -3165,6 +3272,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
+ if (fcs_enabled)
+ netdev->features &= ~NETIF_F_RXALL;
+
++#ifdef HAVE_TC_OFFLOAD
+ #define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
+ if (FT_CAP(flow_modify_en) &&
+ FT_CAP(modify_root) &&
+@@ -3175,6 +3283,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
+ netdev->hw_features |= NETIF_F_NTUPLE;
+ #endif
+ }
++#endif
+
+ netdev->features |= NETIF_F_HIGHDMA;
+
+@@ -3259,13 +3368,17 @@ static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
+
+ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
+ {
++#ifdef CONFIG_NET_SWITCHDEV
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
++#endif
+
+ mlx5e_vxlan_cleanup(priv);
+
++#ifdef CONFIG_NET_SWITCHDEV
+ if (MLX5_CAP_GEN(mdev, vport_group_manager))
+ mlx5_eswitch_unregister_vport_rep(esw, 0);
++#endif
+ }
+
+ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
+@@ -3304,14 +3417,18 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
+ goto err_destroy_direct_tirs;
+ }
+
++#ifdef HAVE_TC_OFFLOAD
+ err = mlx5e_tc_init(priv);
+ if (err)
+ goto err_destroy_flow_steering;
++#endif
+
+ return 0;
+
++#ifdef HAVE_TC_OFFLOAD
+ err_destroy_flow_steering:
+ mlx5e_destroy_flow_steering(priv);
++#endif
+ err_destroy_direct_tirs:
+ mlx5e_destroy_direct_tirs(priv);
+ err_destroy_indirect_tirs:
+@@ -3328,7 +3445,9 @@ static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
+ {
+ int i;
+
++#ifdef HAVE_TC_OFFLOAD
+ mlx5e_tc_cleanup(priv);
++#endif
+ mlx5e_destroy_flow_steering(priv);
+ mlx5e_destroy_direct_tirs(priv);
+ mlx5e_destroy_indirect_tirs(priv);
+@@ -3355,20 +3474,29 @@ static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
+
+ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
+ {
++#ifdef HAVE_UDP_TUNNEL_GET_RX_INFO
+ struct net_device *netdev = priv->netdev;
++#endif
++#if defined(HAVE_UDP_TUNNEL_GET_RX_INFO) || defined(CONFIG_NET_SWITCHDEV)
+ struct mlx5_core_dev *mdev = priv->mdev;
++#endif
++#ifdef CONFIG_NET_SWITCHDEV
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ struct mlx5_eswitch_rep rep;
++#endif
+
++#ifdef HAVE_UDP_TUNNEL_GET_RX_INFO
+ if (mlx5e_vxlan_allowed(mdev)) {
+ rtnl_lock();
+ udp_tunnel_get_rx_info(netdev);
+ rtnl_unlock();
+ }
++#endif
+
+ mlx5e_enable_async_events(priv);
+ queue_work(priv->wq, &priv->set_rx_mode_work);
+
++#ifdef CONFIG_NET_SWITCHDEV
+ if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
+ mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
+ rep.load = mlx5e_nic_rep_load;
+@@ -3377,6 +3505,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
+ rep.priv_data = priv;
+ mlx5_eswitch_register_vport_rep(esw, &rep);
+ }
++#endif
+ }
+
+ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
+@@ -3484,6 +3613,7 @@ err_free_netdev:
+ return NULL;
+ }
+
++#ifdef CONFIG_NET_SWITCHDEV
+ static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev)
+ {
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+@@ -3506,10 +3636,13 @@ static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev)
+ mlx5_eswitch_register_vport_rep(esw, &rep);
+ }
+ }
++#endif
+
+ static void *mlx5e_add(struct mlx5_core_dev *mdev)
+ {
++#ifdef CONFIG_NET_SWITCHDEV
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
++#endif
+ void *ppriv = NULL;
+ void *ret;
+
+@@ -3519,10 +3652,12 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
+ if (mlx5e_create_mdev_resources(mdev))
+ return NULL;
+
++#ifdef CONFIG_NET_SWITCHDEV
+ mlx5e_register_vport_rep(mdev);
+
+ if (MLX5_CAP_GEN(mdev, vport_group_manager))
+ ppriv = &esw->offloads.vport_reps[0];
++#endif
+
+ ret = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, ppriv);
+ if (!ret) {
+@@ -3565,15 +3700,21 @@ void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv)
+
+ static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
+ {
++#ifdef CONFIG_NET_SWITCHDEV
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ int total_vfs = MLX5_TOTAL_VPORTS(mdev);
+ struct mlx5e_priv *priv = vpriv;
+ int vport;
++#else
++ struct mlx5e_priv *priv = vpriv;
++#endif
+
+ mlx5e_destroy_netdev(mdev, priv);
+
++#ifdef CONFIG_NET_SWITCHDEV
+ for (vport = 1; vport < total_vfs; vport++)
+ mlx5_eswitch_unregister_vport_rep(esw, vport);
++#endif
+
+ mlx5e_destroy_mdev_resources(mdev);
+ }
+@@ -3595,7 +3736,9 @@ static struct mlx5_interface mlx5e_interface = {
+
+ void mlx5e_init(void)
+ {
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ mlx5e_build_ptys2ethtool_map();
++#endif
+ mlx5_register_interface(&mlx5e_interface);
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -447,8 +447,13 @@ static int mlx5e_alloc_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
+ for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
+ if (unlikely(mlx5e_alloc_and_map_page(rq, wi, i)))
+ goto err_unmap;
++#ifdef HAVE_MM_PAGE__COUNT
++ atomic_add(mlx5e_mpwqe_strides_per_page(rq),
++ &wi->umr.dma_info[i].page->_count);
++#else
+ page_ref_add(wi->umr.dma_info[i].page,
+ mlx5e_mpwqe_strides_per_page(rq));
++#endif
+ wi->skbs_frags[i] = 0;
+ }
+
+@@ -466,8 +471,13 @@ err_unmap:
+ while (--i >= 0) {
+ dma_unmap_page(rq->pdev, wi->umr.dma_info[i].addr, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
++#ifdef HAVE_MM_PAGE__COUNT
++ atomic_sub(mlx5e_mpwqe_strides_per_page(rq),
++ &wi->umr.dma_info[i].page->_count);
++#else
+ page_ref_sub(wi->umr.dma_info[i].page,
+ mlx5e_mpwqe_strides_per_page(rq));
++#endif
+ put_page(wi->umr.dma_info[i].page);
+ }
+ dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz, PCI_DMA_TODEVICE);
+@@ -491,8 +501,13 @@ void mlx5e_free_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
+ for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
+ dma_unmap_page(rq->pdev, wi->umr.dma_info[i].addr, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
++#ifdef HAVE_MM_PAGE__COUNT
++ atomic_sub(mlx5e_mpwqe_strides_per_page(rq) - wi->skbs_frags[i],
++ &wi->umr.dma_info[i].page->_count);
++#else
+ page_ref_sub(wi->umr.dma_info[i].page,
+ mlx5e_mpwqe_strides_per_page(rq) - wi->skbs_frags[i]);
++#endif
+ put_page(wi->umr.dma_info[i].page);
+ }
+ dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz, PCI_DMA_TODEVICE);
+@@ -547,8 +562,13 @@ static int mlx5e_alloc_rx_linear_mpwqe(struct mlx5e_rq *rq,
+ */
+ split_page(wi->dma_info.page, MLX5_MPWRQ_WQE_PAGE_ORDER);
+ for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
++#ifdef HAVE_MM_PAGE__COUNT
++ atomic_add(mlx5e_mpwqe_strides_per_page(rq),
++ &wi->dma_info.page[i]._count);
++#else
+ page_ref_add(&wi->dma_info.page[i],
+ mlx5e_mpwqe_strides_per_page(rq));
++#endif
+ wi->skbs_frags[i] = 0;
+ }
+
+@@ -571,8 +591,13 @@ void mlx5e_free_rx_linear_mpwqe(struct mlx5e_rq *rq,
+ dma_unmap_page(rq->pdev, wi->dma_info.addr, rq->wqe_sz,
+ PCI_DMA_FROMDEVICE);
+ for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
++#ifdef HAVE_MM_PAGE__COUNT
++ atomic_sub(mlx5e_mpwqe_strides_per_page(rq) - wi->skbs_frags[i],
++ &wi->dma_info.page[i]._count);
++#else
+ page_ref_sub(&wi->dma_info.page[i],
+ mlx5e_mpwqe_strides_per_page(rq) - wi->skbs_frags[i]);
++#endif
+ put_page(&wi->dma_info.page[i]);
+ }
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -30,6 +30,7 @@
+ * SOFTWARE.
+ */
+
++#ifdef HAVE_TC_OFFLOAD
+ #include <net/flow_dissector.h>
+ #include <net/pkt_cls.h>
+ #include <net/tc_act/tc_gact.h>
+@@ -145,6 +146,7 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
+ }
+ }
+
++#ifdef HAVE_HW_FLOWER_OFFLOAD_SUPPORT
+ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
+ struct tc_cls_flower_offload *f)
+ {
+@@ -529,6 +531,7 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
+
+ return 0;
+ }
++#endif
+
+ static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
+ .head_offset = offsetof(struct mlx5e_tc_flow, node),
+@@ -565,3 +568,4 @@ void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
+ tc->t = NULL;
+ }
+ }
++#endif
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+@@ -38,6 +38,7 @@
+ int mlx5e_tc_init(struct mlx5e_priv *priv);
+ void mlx5e_tc_cleanup(struct mlx5e_priv *priv);
+
++#ifdef HAVE_HW_FLOWER_OFFLOAD_SUPPORT
+ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
+ struct tc_cls_flower_offload *f);
+ int mlx5e_delete_flower(struct mlx5e_priv *priv,
+@@ -45,10 +46,15 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv,
+
+ int mlx5e_stats_flower(struct mlx5e_priv *priv,
+ struct tc_cls_flower_offload *f);
++#endif
+
+ static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv)
+ {
++#ifdef HAVE_RHASHTABLE_NELEMS_ATOMIC_T
+ return atomic_read(&priv->fs.tc.ht.nelems);
++#else
++ return priv->fs.tc.ht.nelems;
++#endif
+ }
+
+ #endif /* __MLX5_EN_TC_H__ */
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+@@ -105,15 +105,29 @@ static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, u8 num_dma)
+ }
+ }
+
++#ifndef HAVE_SELECT_QUEUE_FALLBACK_T
++#define fallback(dev, skb) __netdev_pick_tx(dev, skb)
++#endif
++
++#if defined(NDO_SELECT_QUEUE_HAS_ACCEL_PRIV) || defined(HAVE_SELECT_QUEUE_FALLBACK_T)
+ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
++#ifdef HAVE_SELECT_QUEUE_FALLBACK_T
+ void *accel_priv, select_queue_fallback_t fallback)
++#else
++ void *accel_priv)
++#endif
++#else /* NDO_SELECT_QUEUE_HAS_ACCEL_PRIV || HAVE_SELECT_QUEUE_FALLBACK_T */
++u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb)
++#endif
+ {
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int channel_ix = fallback(dev, skb);
+ int up = 0;
+
++#ifdef HAVE_NETDEV_GET_NUM_TC
+ if (!netdev_get_num_tc(dev))
+ return channel_ix;
++#endif
+
+ if (skb_vlan_tag_present(skb))
+ up = skb->vlan_tci >> VLAN_PRIO_SHIFT;
+@@ -141,8 +155,13 @@ static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
+
+ if (skb_transport_header_was_set(skb))
+ return skb_transport_offset(skb);
++#ifdef HAVE_SKB_FLOW_DISSECT_FLOW_KEYS
+ else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
+ return keys.control.thoff;
++#else
++ else if (skb_flow_dissect(skb, &keys))
++ return keys.thoff;
++#endif
+ else
+ return mlx5e_skb_l2_header_offset(skb);
+ }
+@@ -209,7 +228,11 @@ static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs,
+
+ memcpy(vhdr, *skb_data, cpy1_sz);
+ mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy1_sz);
++#ifdef HAVE_NETIF_F_HW_VLAN_STAG_RX
+ vhdr->h_vlan_proto = skb->vlan_proto;
++#else
++ vhdr->h_vlan_proto = cpu_to_be16(ETH_P_8021Q);
++#endif
+ vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
+ memcpy(&vhdr->h_vlan_encapsulated_proto, *skb_data, cpy2_sz);
+ mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy2_sz);
+@@ -242,13 +265,17 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
+
+ if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+ eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
++#if defined(HAVE_SKB_INNER_TRANSPORT_HEADER)
+ if (skb->encapsulation) {
+ eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
+ MLX5_ETH_WQE_L4_INNER_CSUM;
+ sq->stats.csum_partial_inner++;
+ } else {
++#endif
+ eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
++#if defined(HAVE_SKB_INNER_TRANSPORT_HEADER)
+ }
++#endif
+ } else
+ sq->stats.csum_none++;
+
+@@ -261,20 +288,26 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
+ eseg->mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
+ opcode = MLX5_OPCODE_LSO;
+
++#if defined(HAVE_SKB_INNER_TRANSPORT_HEADER)
+ if (skb->encapsulation) {
+ ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
+ sq->stats.tso_inner_packets++;
+ sq->stats.tso_inner_bytes += skb->len - ihs;
+ } else {
++#endif
+ ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ sq->stats.tso_packets++;
+ sq->stats.tso_bytes += skb->len - ihs;
++#if defined(HAVE_SKB_INNER_TRANSPORT_HEADER)
+ }
++#endif
+
+ num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
+ } else {
+ bf = sq->bf_budget &&
++#ifdef HAVE_SK_BUFF_XMIT_MORE
+ !skb->xmit_more &&
++#endif
+ !skb_shinfo(skb)->nr_frags;
+ ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
+ num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
+@@ -349,15 +382,21 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
+ netdev_tx_sent_queue(sq->txq, wi->num_bytes);
+
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
++#ifndef HAVE_SKB_SHARED_INFO_UNION_TX_FLAGS
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
++#else
++ skb_shinfo(skb)->tx_flags.flags |= SKBTX_IN_PROGRESS;
++#endif
+
+ if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM))) {
+ netif_tx_stop_queue(sq->txq);
+ sq->stats.stopped++;
+ }
+
++#ifdef HAVE_SK_BUFF_XMIT_MORE
+ sq->stats.xmit_more += skb->xmit_more;
+ if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
++#endif
+ int bf_sz = 0;
+
+ if (bf && test_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state))
+@@ -365,7 +404,9 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
+
+ cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+ mlx5e_tx_notify_hw(sq, &wqe->ctrl, bf_sz);
++#ifdef HAVE_SK_BUFF_XMIT_MORE
+ }
++#endif
+
+ /* fill sq edge with nops to avoid wqe wrap around */
+ while ((sq->pc & wq->sz_m1) > sq->edge)
+@@ -469,7 +510,13 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
+ npkts++;
+ nbytes += wi->num_bytes;
+ sqcc += wi->num_wqebbs;
++#ifdef HAVE_NAPI_CONSUME_SKB
+ napi_consume_skb(skb, napi_budget);
++#elif defined(HAVE_DEV_CONSUME_SKB_ANY)
++ dev_consume_skb_any(skb);
++#else
++ dev_kfree_skb_any(skb);
++#endif
+ } while (!last_wqe);
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+@@ -35,9 +35,11 @@
+ #include <linux/mlx5/driver.h>
+ #include <linux/mlx5/cmd.h>
+ #include "mlx5_core.h"
++#ifdef CONFIG_NET_SWITCHDEV
+ #ifdef CONFIG_MLX5_CORE_EN
+ #include "eswitch.h"
+ #endif
++#endif
+
+ enum {
+ MLX5_EQE_SIZE = sizeof(struct mlx5_eqe),
+@@ -291,11 +293,13 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
+ break;
+ #endif
+
++#ifdef CONFIG_NET_SWITCHDEV
+ #ifdef CONFIG_MLX5_CORE_EN
+ case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
+ mlx5_eswitch_vport_event(dev->priv.eswitch, eqe);
+ break;
+ #endif
++#endif
+ default:
+ mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
+ eqe->type, eq->eqn);
+@@ -483,10 +487,12 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
+ if (MLX5_CAP_GEN(dev, pg))
+ async_event_mask |= (1ull << MLX5_EVENT_TYPE_PAGE_FAULT);
+
++#ifdef CONFIG_NET_SWITCHDEV
+ if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH &&
+ MLX5_CAP_GEN(dev, vport_group_manager) &&
+ mlx5_core_is_pf(dev))
+ async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);
++#endif
+
+ err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
+ MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+@@ -35,7 +35,9 @@
+
+ #include <linux/if_ether.h>
+ #include <linux/if_link.h>
++#ifdef HAVE_NET_DEVLINK_H
+ #include <net/devlink.h>
++#endif
+ #include <linux/mlx5/device.h>
+
+ #define MLX5_MAX_UC_PER_VPORT(dev) \
+@@ -238,8 +240,10 @@ int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
+ void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep);
+
++#ifdef HAVE_NET_DEVLINK_H
+ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode);
+ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
++#endif
+ void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep);
+ void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+@@ -543,6 +543,7 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
+ esw_destroy_offloads_fdb_table(esw);
+ }
+
++#ifdef HAVE_NET_DEVLINK_H
+ static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
+ {
+ switch (mode) {
+@@ -618,6 +619,7 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
+
+ return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
+ }
++#endif
+
+ void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -51,12 +51,16 @@
+ #ifdef CONFIG_RFS_ACCEL
+ #include <linux/cpu_rmap.h>
+ #endif
++#ifdef HAVE_NET_DEVLINK_H
+ #include <net/devlink.h>
++#endif
+ #include "mlx5_core.h"
+ #include "fs_core.h"
++#ifdef CONFIG_NET_SWITCHDEV
+ #ifdef CONFIG_MLX5_CORE_EN
+ #include "eswitch.h"
+ #endif
++#endif
+
+ MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
+ MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
+@@ -1152,6 +1156,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
+ goto err_rl;
+ }
+
++#ifdef CONFIG_NET_SWITCHDEV
+ #ifdef CONFIG_MLX5_CORE_EN
+ err = mlx5_eswitch_init(dev);
+ if (err) {
+@@ -1159,6 +1164,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
+ goto err_reg_dev;
+ }
+ #endif
++#endif
+
+ err = mlx5_sriov_init(dev);
+ if (err) {
+@@ -1187,9 +1193,11 @@ err_sriov:
+ if (mlx5_sriov_cleanup(dev))
+ dev_err(&dev->pdev->dev, "sriov cleanup failed\n");
+
++#ifdef CONFIG_NET_SWITCHDEV
+ #ifdef CONFIG_MLX5_CORE_EN
+ mlx5_eswitch_cleanup(dev->priv.eswitch);
+ #endif
++#endif
+ err_reg_dev:
+ mlx5_cleanup_rl_table(dev);
+ err_rl:
+@@ -1259,9 +1267,11 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
+ goto out;
+ }
+ mlx5_unregister_device(dev);
++#ifdef CONFIG_NET_SWITCHDEV
+ #ifdef CONFIG_MLX5_CORE_EN
+ mlx5_eswitch_cleanup(dev->priv.eswitch);
+ #endif
++#endif
+
+ mlx5_cleanup_rl_table(dev);
+ mlx5_cleanup_fs(dev);
+@@ -1316,21 +1326,30 @@ struct mlx5_core_event_handler {
+ void *data);
+ };
+
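++/* devlink appeared in v4.6; without it the ops table is omitted and
++ * init_one() falls back to a plain kzalloc() of the device. */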
++#ifdef HAVE_NET_DEVLINK_H
+ static const struct devlink_ops mlx5_devlink_ops = {
++#ifdef CONFIG_NET_SWITCHDEV
+ #ifdef CONFIG_MLX5_CORE_EN
+ .eswitch_mode_set = mlx5_devlink_eswitch_mode_set,
+ .eswitch_mode_get = mlx5_devlink_eswitch_mode_get,
+ #endif
++#endif
+ };
++#endif
+
+ static int init_one(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+ {
+ struct mlx5_core_dev *dev;
++#ifdef HAVE_NET_DEVLINK_H
+ struct devlink *devlink;
++#endif
+ struct mlx5_priv *priv;
+ int err;
+
++#ifdef HAVE_NET_DEVLINK_H
+ devlink = devlink_alloc(&mlx5_devlink_ops, sizeof(*dev));
+ if (!devlink) {
+ dev_err(&pdev->dev, "kzalloc failed\n");
+@@ -1338,6 +1357,15 @@ static int init_one(struct pci_dev *pdev,
+ }
+
+ dev = devlink_priv(devlink);
++#else
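++ /* No devlink on this kernel: allocate the bare mlx5_core_dev and
++  * free it with kfree() instead of devlink_free(). */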
++ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
++ if (!dev) {
++ dev_err(&pdev->dev, "kzalloc failed\n");
++ return -ENOMEM;
++ }
++#endif
+ priv = &dev->priv;
+ priv->pci_dev_data = id->driver_data;
+
+@@ -1374,21 +1402,29 @@ static int init_one(struct pci_dev *pdev,
+ goto clean_health;
+ }
+
++#ifdef HAVE_NET_DEVLINK_H
+ err = devlink_register(devlink, &pdev->dev);
+ if (err)
+ goto clean_load;
++#endif
+
+ return 0;
+
++#ifdef HAVE_NET_DEVLINK_H
+ clean_load:
+ mlx5_unload_one(dev, priv);
++#endif
+ clean_health:
+ mlx5_health_cleanup(dev);
+ close_pci:
+ mlx5_pci_close(dev, priv);
+ clean_dev:
+ pci_set_drvdata(pdev, NULL);
++#ifdef HAVE_NET_DEVLINK_H
+ devlink_free(devlink);
++#else
++ kfree(dev);
++#endif
+
+ return err;
+ }
+@@ -1396,10 +1432,14 @@ clean_dev:
+ static void remove_one(struct pci_dev *pdev)
+ {
+ struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
++#ifdef HAVE_NET_DEVLINK_H
+ struct devlink *devlink = priv_to_devlink(dev);
++#endif
+ struct mlx5_priv *priv = &dev->priv;
+
++#ifdef HAVE_NET_DEVLINK_H
+ devlink_unregister(devlink);
++#endif
+ if (mlx5_unload_one(dev, priv)) {
+ dev_err(&dev->pdev->dev, "mlx5_unload_one failed\n");
+ mlx5_health_cleanup(dev);
+@@ -1408,7 +1444,11 @@ static void remove_one(struct pci_dev *pdev)
+ mlx5_health_cleanup(dev);
+ mlx5_pci_close(dev, priv);
+ pci_set_drvdata(pdev, NULL);
++#ifdef HAVE_NET_DEVLINK_H
+ devlink_free(devlink);
++#else
++ kfree(dev);
++#endif
+ }
+
+ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
+@@ -202,6 +202,7 @@ int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
+ }
+ EXPORT_SYMBOL_GPL(mlx5_query_port_proto_oper);
+
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable,
+ u32 proto_admin, int proto_mask)
+ {
+@@ -230,6 +231,29 @@ int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable,
+ sizeof(out), MLX5_REG_PTYS, 0, 1);
+ }
+ EXPORT_SYMBOL_GPL(mlx5_set_port_ptys);
++#else
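++/* Kernels without the ETHTOOL_xLINKSETTINGS API (v4.6+) cannot toggle
++ * autoneg, so keep the older helper that only sets proto_admin. */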
++int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
++ int proto_mask)
++{
++ u32 in[MLX5_ST_SZ_DW(ptys_reg)];
++ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
++
++ memset(in, 0, sizeof(in));
++
++ MLX5_SET(ptys_reg, in, local_port, 1);
++ MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
++ if (proto_mask == MLX5_PTYS_EN)
++ MLX5_SET(ptys_reg, in, eth_proto_admin, proto_admin);
++ else
++ MLX5_SET(ptys_reg, in, ib_proto_admin, proto_admin);
++
++ return mlx5_core_access_reg(dev, in, sizeof(in), out,
++ sizeof(out), MLX5_REG_PTYS, 0, 1);
++}
++EXPORT_SYMBOL_GPL(mlx5_set_port_proto);
++#endif
+
+ /* This function should be used after setting a port register only */
+ void mlx5_toggle_port_link(struct mlx5_core_dev *dev)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+@@ -33,9 +33,11 @@
+ #include <linux/pci.h>
+ #include <linux/mlx5/driver.h>
+ #include "mlx5_core.h"
++#ifdef CONFIG_NET_SWITCHDEV
+ #ifdef CONFIG_MLX5_CORE_EN
+ #include "eswitch.h"
+ #endif
++#endif
+
+ static void enable_vfs(struct mlx5_core_dev *dev, int num_vfs)
+ {
+@@ -147,9 +149,13 @@ int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
+ mlx5_core_cleanup_vfs(dev);
+
+ if (!num_vfs) {
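++ /* The eswitch is built only on switchdev-capable kernels; see the
++  * matching guards in eq.c and main.c. */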
++#ifdef CONFIG_NET_SWITCHDEV
+ #ifdef CONFIG_MLX5_CORE_EN
+ mlx5_eswitch_disable_sriov(dev->priv.eswitch);
+ #endif
++#endif
+ kfree(sriov->vfs_ctx);
+ sriov->vfs_ctx = NULL;
+ if (!pci_vfs_assigned(pdev))
+@@ -166,9 +172,11 @@ int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
+ }
+
+ mlx5_core_init_vfs(dev, num_vfs);
++#ifdef CONFIG_NET_SWITCHDEV
+ #ifdef CONFIG_MLX5_CORE_EN
+ mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY);
+ #endif
++#endif
+
+ return num_vfs;
+ }
+@@ -207,11 +215,13 @@ int mlx5_sriov_init(struct mlx5_core_dev *dev)
+ sriov->enabled_vfs = cur_vfs;
+
+ mlx5_core_init_vfs(dev, cur_vfs);
++#ifdef CONFIG_NET_SWITCHDEV
+ #ifdef CONFIG_MLX5_CORE_EN
+ if (cur_vfs)
+ mlx5_eswitch_enable_sriov(dev->priv.eswitch, cur_vfs,
+ SRIOV_LEGACY);
+ #endif
++#endif
+
+ enable_vfs(dev, cur_vfs);
+
+diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -666,8 +666,15 @@ struct mlx5_cmd_work_ent {
+ int page_queue;
+ u8 status;
+ u8 token;
++#ifdef HAVE_KTIME_GET_NS
+ u64 ts1;
+ u64 ts2;
++#else
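++ /* ktime_get_ns() appeared in v3.17; fall back to timespec
++  * command timestamps on older kernels. */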
++ struct timespec ts1;
++ struct timespec ts2;
++#endif
+ u16 op;
+ };
+
+diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
+index xxxxxxx..xxxxxxx xxxxxx
+--- a/include/linux/mlx5/port.h
++++ b/include/linux/mlx5/port.h
+@@ -73,8 +73,13 @@ int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
+ int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
+ u8 *proto_oper, int proto_mask,
+ u8 local_port);
++#ifdef HAVE_ETHTOOL_xLINKSETTINGS
+ int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable,
+ u32 proto_admin, int proto_mask);
++#else
++int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
++ int proto_mask);
++#endif
+ void mlx5_toggle_port_link(struct mlx5_core_dev *dev);
+ int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status status);