]> git.openfabrics.org - compat-rdma/compat-rdma.git/commitdiff
Added RHEL7.4 support for cxgb4 and iw_cxgb4
authorArjun Vynipadath <arjun@chelsio.com>
Tue, 28 Aug 2018 06:34:48 +0000 (12:04 +0530)
committerArjun Vynipadath <arjun@chelsio.com>
Tue, 28 Aug 2018 07:13:42 +0000 (12:43 +0530)
patches/0024-BACKPORT-RHEL7.4-iw_cxgb4.patch [new file with mode: 0644]
patches/0025-BACKPORT-RHEL7.4-cxgb4.patch [new file with mode: 0644]

diff --git a/patches/0024-BACKPORT-RHEL7.4-iw_cxgb4.patch b/patches/0024-BACKPORT-RHEL7.4-iw_cxgb4.patch
new file mode 100644 (file)
index 0000000..c973efd
--- /dev/null
@@ -0,0 +1,383 @@
+From ba20bcb90a159b2a3a8bdab6b43436b6ac086bbc Mon Sep 17 00:00:00 2001
+From: Arjun Vynipadath <arjun@chelsio.com>
+Date: Fri, 10 Aug 2018 16:30:14 +0530
+Subject: [PATCH 2/4] iw_cxgb4: Compilation fixes for RHEL7.4
+
+- Fix compilation issues due to skb_put*()/skb_push() return
+  pointers
+- Check if kref_read() exists.
+---
+ drivers/infiniband/hw/cxgb4/cm.c                  | 41 ++++++++++++++++++-----
+ drivers/infiniband/hw/cxgb4/cq.c                  | 10 ++++++
+ drivers/infiniband/hw/cxgb4/iw_cxgb4.h            | 27 +++++++++++++++
+ drivers/infiniband/hw/cxgb4/mem.c                 | 10 ++++++
+ drivers/infiniband/hw/cxgb4/qp.c                  | 11 ++++--
+ drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h | 25 ++++++++++++++
+ 6 files changed, 113 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
+index d5fb5af..a924b05 100644
+--- a/drivers/infiniband/hw/cxgb4/cm.c
++++ b/drivers/infiniband/hw/cxgb4/cm.c
+@@ -607,7 +607,7 @@ static int send_flowc(struct c4iw_ep *ep)
+       else
+               nparams = 9;
+-      flowc = __skb_put(skb, FLOWC_LEN);
++      flowc = (struct fw_flowc_wr *)__skb_put(skb, FLOWC_LEN);
+       flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
+                                          FW_FLOWC_WR_NPARAMS_V(nparams));
+@@ -797,16 +797,16 @@ static int send_connect(struct c4iw_ep *ep)
+       if (ep->com.remote_addr.ss_family == AF_INET) {
+               switch (CHELSIO_CHIP_VERSION(adapter_type)) {
+               case CHELSIO_T4:
+-                      req = skb_put(skb, wrlen);
++                      req = (struct cpl_act_open_req *)skb_put(skb, wrlen);
+                       INIT_TP_WR(req, 0);
+                       break;
+               case CHELSIO_T5:
+-                      t5req = skb_put(skb, wrlen);
++                      t5req = (struct cpl_t5_act_open_req *)skb_put(skb, wrlen);
+                       INIT_TP_WR(t5req, 0);
+                       req = (struct cpl_act_open_req *)t5req;
+                       break;
+               case CHELSIO_T6:
+-                      t6req = skb_put(skb, wrlen);
++                      t6req = (struct cpl_t6_act_open_req *)skb_put(skb, wrlen);
+                       INIT_TP_WR(t6req, 0);
+                       req = (struct cpl_act_open_req *)t6req;
+                       t5req = (struct cpl_t5_act_open_req *)t6req;
+@@ -847,16 +847,16 @@ static int send_connect(struct c4iw_ep *ep)
+       } else {
+               switch (CHELSIO_CHIP_VERSION(adapter_type)) {
+               case CHELSIO_T4:
+-                      req6 = skb_put(skb, wrlen);
++                      req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen);
+                       INIT_TP_WR(req6, 0);
+                       break;
+               case CHELSIO_T5:
+-                      t5req6 = skb_put(skb, wrlen);
++                      t5req6 = (struct cpl_t5_act_open_req6 *)skb_put(skb, wrlen);
+                       INIT_TP_WR(t5req6, 0);
+                       req6 = (struct cpl_act_open_req6 *)t5req6;
+                       break;
+               case CHELSIO_T6:
+-                      t6req6 = skb_put(skb, wrlen);
++                      t6req6 = (struct cpl_t6_act_open_req6 *)skb_put(skb, wrlen);
+                       INIT_TP_WR(t6req6, 0);
+                       req6 = (struct cpl_act_open_req6 *)t6req6;
+                       t5req6 = (struct cpl_t5_act_open_req6 *)t6req6;
+@@ -931,7 +931,12 @@ static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
+       }
+       set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
++#ifdef HAVE_SKB_PUT_ZERO
+       req = skb_put_zero(skb, wrlen);
++#else
++      req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
++      memset(req, 0, wrlen);
++#endif
+       req->op_to_immdlen = cpu_to_be32(
+               FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
+               FW_WR_COMPL_F |
+@@ -1036,7 +1041,12 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
+       }
+       set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
++#ifdef HAVE_SKB_PUT_ZERO
+       req = skb_put_zero(skb, wrlen);
++#else
++      req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
++      memset(req, 0, wrlen);
++#endif
+       req->op_to_immdlen = cpu_to_be32(
+               FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
+               FW_WR_COMPL_F |
+@@ -1115,7 +1125,12 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
+       }
+       set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
++#ifdef HAVE_SKB_PUT_ZERO
+       req = skb_put_zero(skb, wrlen);
++#else
++      req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
++      memset(req, 0, wrlen);
++#endif
+       req->op_to_immdlen = cpu_to_be32(
+               FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
+               FW_WR_COMPL_F |
+@@ -1903,7 +1918,12 @@ static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
+       int win;
+       skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
++#ifdef HAVE_SKB_PUT_ZERO
+       req = __skb_put_zero(skb, sizeof(*req));
++#else
++      req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
++      memset(req, 0, sizeof(*req));
++#endif
+       req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR));
+       req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
+       req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
+@@ -3770,7 +3790,7 @@ static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
+       tcp_parse_options(skb, &tmp_opt, 0, NULL);
+ #endif
+-      req = __skb_push(skb, sizeof(*req));
++      req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
+       memset(req, 0, sizeof(*req));
+       req->l2info = cpu_to_be16(SYN_INTF_V(intf) |
+                        SYN_MAC_IDX_V(RX_MACIDX_G(
+@@ -3822,7 +3842,12 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
+       req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
+       if (!req_skb)
+               return;
++#ifdef HAVE_SKB_PUT_ZERO
+       req = __skb_put_zero(req_skb, sizeof(*req));
++#else
++      req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
++      memset(req, 0, sizeof(*req));
++#endif
+       req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
+       req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
+       req->le.version_cpl = htonl(FW_OFLD_CONNECTION_WR_CPL_F);
+diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
+index 2be2e1a..e55d1ef 100644
+--- a/drivers/infiniband/hw/cxgb4/cq.c
++++ b/drivers/infiniband/hw/cxgb4/cq.c
+@@ -44,7 +44,12 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
+       wr_len = sizeof *res_wr + sizeof *res;
+       set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
++#ifdef HAVE_SKB_PUT_ZERO
+       res_wr = __skb_put_zero(skb, wr_len);
++#else
++      res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
++      memset(res_wr, 0, wr_len);
++#endif
+       res_wr->op_nres = cpu_to_be32(
+                       FW_WR_OP_V(FW_RI_RES_WR) |
+                       FW_RI_RES_WR_NRES_V(1) |
+@@ -110,7 +115,12 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
+       }
+       set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
++#ifdef HAVE_SKB_PUT_ZERO
+       res_wr = __skb_put_zero(skb, wr_len);
++#else
++      res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
++      memset(res_wr, 0, wr_len);
++#endif
+       res_wr->op_nres = cpu_to_be32(
+                       FW_WR_OP_V(FW_RI_RES_WR) |
+                       FW_RI_RES_WR_NRES_V(1) |
+diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+index 1cb8760..884bc53 100644
+--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
++++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+@@ -213,17 +213,29 @@ void _c4iw_free_wr_wait(struct kref *kref);
+ static inline void c4iw_put_wr_wait(struct c4iw_wr_wait *wr_waitp)
+ {
++#ifdef HAVE_KREF_READ
+       pr_debug("wr_wait %p ref before put %u\n", wr_waitp,
+                kref_read(&wr_waitp->kref));
+       WARN_ON(kref_read(&wr_waitp->kref) == 0);
++#else
++      pr_debug("wr_wait %p ref before put %u\n", wr_waitp,
++               atomic_read(&wr_waitp->kref.refcount));
++      WARN_ON(atomic_read(&wr_waitp->kref.refcount) == 0);
++#endif
+       kref_put(&wr_waitp->kref, _c4iw_free_wr_wait);
+ }
+ static inline void c4iw_get_wr_wait(struct c4iw_wr_wait *wr_waitp)
+ {
++#ifdef HAVE_KREF_READ
+       pr_debug("wr_wait %p ref before get %u\n", wr_waitp,
+                kref_read(&wr_waitp->kref));
+       WARN_ON(kref_read(&wr_waitp->kref) == 0);
++#else
++      pr_debug("wr_wait %p ref before get %u\n", wr_waitp,
++               atomic_read(&wr_waitp->kref.refcount));
++      WARN_ON(atomic_read(&wr_waitp->kref.refcount) == 0);
++#endif
+       kref_get(&wr_waitp->kref);
+ }
+@@ -737,6 +749,7 @@ enum c4iw_mmid_state {
+ #define MPA_V2_RDMA_READ_RTR            0x4000
+ #define MPA_V2_IRD_ORD_MASK             0x3FFF
++#ifdef HAVE_KREF_READ
+ #define c4iw_put_ep(ep) {                                             \
+       pr_debug("put_ep ep %p refcnt %d\n",            \
+                ep, kref_read(&((ep)->kref)));                         \
+@@ -749,6 +762,20 @@ enum c4iw_mmid_state {
+                ep, kref_read(&((ep)->kref)));                         \
+       kref_get(&((ep)->kref));                                        \
+ }
++#else
++#define c4iw_put_ep(ep) {                                             \
++      pr_debug("put_ep ep %p refcnt %d\n",            \
++               ep, atomic_read(&((ep)->kref.refcount)));                              \
++      WARN_ON(atomic_read(&((ep)->kref.refcount)) < 1);                               \
++      kref_put(&((ep)->kref), _c4iw_free_ep);                         \
++}
++
++#define c4iw_get_ep(ep) {                                             \
++      pr_debug("get_ep ep %p, refcnt %d\n",           \
++               ep, atomic_read(&((ep)->kref.refcount)));                              \
++      kref_get(&((ep)->kref));                                        \
++}
++#endif
+ void _c4iw_free_ep(struct kref *kref);
+ struct mpa_message {
+diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
+index 1445918..6874cf2 100644
+--- a/drivers/infiniband/hw/cxgb4/mem.c
++++ b/drivers/infiniband/hw/cxgb4/mem.c
+@@ -81,7 +81,12 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
+       }
+       set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
++#ifdef HAVE_SKB_PUT_ZERO
+       req = __skb_put_zero(skb, wr_len);
++#else
++      req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
++      memset(req, 0, wr_len);
++#endif
+       INIT_ULPTX_WR(req, wr_len, 0, 0);
+       req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
+                       (wr_waitp ? FW_WR_COMPL_F : 0));
+@@ -140,7 +145,12 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
+               }
+               set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
++#ifdef HAVE_SKB_PUT_ZERO
+               req = __skb_put_zero(skb, wr_len);
++#else
++              req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
++              memset(req, 0, wr_len);
++#endif
+               INIT_ULPTX_WR(req, wr_len, 0, 0);
+               if (i == (num_wqe-1)) {
+diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
+index 30bdab6..f3f13fc 100644
+--- a/drivers/infiniband/hw/cxgb4/qp.c
++++ b/drivers/infiniband/hw/cxgb4/qp.c
+@@ -293,7 +293,12 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
+       }
+       set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
++#ifdef HAVE_SKB_PUT_ZERO
+       res_wr = __skb_put_zero(skb, wr_len);
++#else
++      res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
++      memset(res_wr, 0, wr_len);
++#endif
+       res_wr->op_nres = cpu_to_be32(
+                       FW_WR_OP_V(FW_RI_RES_WR) |
+                       FW_RI_RES_WR_NRES_V(2) |
+@@ -1297,7 +1302,7 @@ static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
+       set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
+-      wqe = __skb_put(skb, sizeof(*wqe));
++      wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
+       memset(wqe, 0, sizeof *wqe);
+       wqe->op_compl = cpu_to_be32(FW_WR_OP_V(FW_RI_INIT_WR));
+       wqe->flowid_len16 = cpu_to_be32(
+@@ -1421,7 +1426,7 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
+       set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
+-      wqe = __skb_put(skb, sizeof(*wqe));
++      wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
+       memset(wqe, 0, sizeof *wqe);
+       wqe->op_compl = cpu_to_be32(
+               FW_WR_OP_V(FW_RI_INIT_WR) |
+@@ -1487,7 +1492,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
+       }
+       set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
+-      wqe = __skb_put(skb, sizeof(*wqe));
++      wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
+       memset(wqe, 0, sizeof *wqe);
+       wqe->op_compl = cpu_to_be32(
+               FW_WR_OP_V(FW_RI_INIT_WR) |
+diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h
+index 240ba9d..bff8853 100644
+--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h
++++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h
+@@ -90,7 +90,12 @@ cxgb_mk_tid_release(struct sk_buff *skb, u32 len, u32 tid, u16 chan)
+ {
+       struct cpl_tid_release *req;
++#ifdef HAVE_SKB_PUT_ZERO
+       req = __skb_put_zero(skb, len);
++#else
++      req = (struct cpl_tid_release *)__skb_put(skb, len);
++      memset(req, 0, len);
++#endif
+       INIT_TP_WR(req, tid);
+       OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
+@@ -103,7 +108,12 @@ cxgb_mk_close_con_req(struct sk_buff *skb, u32 len, u32 tid, u16 chan,
+ {
+       struct cpl_close_con_req *req;
++#ifdef HAVE_SKB_PUT_ZERO
+       req = __skb_put_zero(skb, len);
++#else
++      req = (struct cpl_close_con_req *)__skb_put(skb, len);
++      memset(req, 0, len);
++#endif
+       INIT_TP_WR(req, tid);
+       OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
+@@ -117,7 +127,12 @@ cxgb_mk_abort_req(struct sk_buff *skb, u32 len, u32 tid, u16 chan,
+ {
+       struct cpl_abort_req *req;
++#ifdef HAVE_SKB_PUT_ZERO
+       req = __skb_put_zero(skb, len);
++#else
++      req = (struct cpl_abort_req *)__skb_put(skb, len);
++      memset(req, 0, len);
++#endif
+       INIT_TP_WR(req, tid);
+       OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
+@@ -131,7 +146,12 @@ cxgb_mk_abort_rpl(struct sk_buff *skb, u32 len, u32 tid, u16 chan)
+ {
+       struct cpl_abort_rpl *rpl;
++#ifdef HAVE_SKB_PUT_ZERO
+       rpl = __skb_put_zero(skb, len);
++#else
++      rpl = (struct cpl_abort_rpl *)__skb_put(skb, len);
++      memset(rpl, 0, len);
++#endif
+       INIT_TP_WR(rpl, tid);
+       OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
+@@ -145,7 +165,12 @@ cxgb_mk_rx_data_ack(struct sk_buff *skb, u32 len, u32 tid, u16 chan,
+ {
+       struct cpl_rx_data_ack *req;
++#ifdef HAVE_SKB_PUT_ZERO
+       req = __skb_put_zero(skb, len);
++#else
++      req = (struct cpl_rx_data_ack *)__skb_put(skb, len);
++      memset(req, 0, len);
++#endif
+       INIT_TP_WR(req, tid);
+       OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, tid));
+-- 
+2.9.5
+
diff --git a/patches/0025-BACKPORT-RHEL7.4-cxgb4.patch b/patches/0025-BACKPORT-RHEL7.4-cxgb4.patch
new file mode 100644 (file)
index 0000000..87a0c97
--- /dev/null
@@ -0,0 +1,355 @@
+From 30fa12d380c4fd8c4551c12d80654144cdd1a9a8 Mon Sep 17 00:00:00 2001
+From: Arjun Vynipadath <arjun@chelsio.com>
+Date: Fri, 10 Aug 2018 17:24:41 +0530
+Subject: [PATCH 4/4] cxgb4: Compilation fixes for RHEL7.4
+
+- Check if net_device has {min/max}_mtu
+- Fix compilation issues due to skb_put*()/skb_push() return
+  pointers.
+- Don't compile tc_u32 and tc_flower for RHEL7.4. This will
+  require too many conditions and is not guaranteed to work
+- Check if pcie_relaxed_ordering_enabled() is present.
+- Check if ethtool ops have {get/set}_fec_params().
+---
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4.h         |  8 ++++
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c |  4 ++
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c  |  7 ++-
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c    | 50 +++++++++++++++++++---
+ .../net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c   |  3 ++
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c  |  3 ++
+ drivers/net/ethernet/chelsio/cxgb4/l2t.c           |  2 +-
+ 7 files changed, 70 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+index 63b2861..7b933a0 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+@@ -56,6 +56,7 @@
+ #ifdef HAVE_LINUX_RHASHTABLE_H
+ #include <linux/rhashtable.h>
+ #endif
++#include <linux/version.h>
+ #define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
+ extern struct list_head adapter_list;
+@@ -68,6 +69,13 @@ extern struct mutex uld_mutex;
+ #define ETHTXQ_STOP_THRES \
+       (1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
++#if defined(RHEL_RELEASE_CODE)
++ #if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) && \
++      (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5)))
++  #define RHEL_RELEASE_7_4
++ #endif
++#endif
++
+ enum {
+       MAX_NPORTS      = 4,     /* max # of ports */
+       SERNUM_LEN      = 24,    /* Serial # length */
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+index 51e43b8..8828a56 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+@@ -842,6 +842,7 @@ static int set_link_ksettings(struct net_device *dev,
+       return ret;
+ }
++#ifdef HAVE_GET_SET_FEC
+ /* Translate the Firmware FEC value into the ethtool value. */
+ static inline unsigned int fwcap_to_eth_fec(unsigned int fw_fec)
+ {
+@@ -939,6 +940,7 @@ static int set_fecparam(struct net_device *dev, struct ethtool_fecparam *fec)
+               *lc = old_lc;
+       return ret;
+ }
++#endif
+ static void get_pauseparam(struct net_device *dev,
+                          struct ethtool_pauseparam *epause)
+@@ -1510,8 +1512,10 @@ static int cxgb4_get_module_eeprom(struct net_device *dev,
+ static const struct ethtool_ops cxgb_ethtool_ops = {
+       .get_link_ksettings = get_link_ksettings,
+       .set_link_ksettings = set_link_ksettings,
++#ifdef HAVE_GET_SET_FEC
+       .get_fecparam      = get_fecparam,
+       .set_fecparam      = set_fecparam,
++#endif
+       .get_drvinfo       = get_drvinfo,
+       .get_msglevel      = get_msglevel,
+       .set_msglevel      = set_msglevel,
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+index b76447b..05b4385 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+@@ -515,7 +515,7 @@ static int del_filter_wr(struct adapter *adapter, int fidx)
+       if (!skb)
+               return -ENOMEM;
+-      fwr = __skb_put(skb, len);
++      fwr = (struct fw_filter_wr *)__skb_put(skb, len);
+       t4_mk_filtdelwr(f->tid, fwr, (adapter->flags & SHUTTING_DOWN) ? -1
+                       : adapter->sge.fw_evtq.abs_id);
+@@ -572,7 +572,12 @@ int set_filter_wr(struct adapter *adapter, int fidx)
+               }
+       }
++#ifdef HAVE_SKB_PUT_ZERO
+       fwr = __skb_put_zero(skb, sizeof(*fwr));
++#else
++      fwr = (struct fw_filter2_wr *)__skb_put(skb, sizeof(*fwr));
++      memset(fwr, 0, sizeof(*fwr));
++#endif
+       /* It would be nice to put most of the following in t4_hw.c but most
+        * of the work is translating the cxgbtool ch_filter_specification
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+index 46dda4f..3722f68 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+@@ -1223,7 +1223,7 @@ static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
+       struct cpl_tid_release *req;
+       set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
+-      req = __skb_put(skb, sizeof(*req));
++      req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
+       INIT_TP_WR(req, tid);
+       OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
+ }
+@@ -1407,7 +1407,7 @@ int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
+               return -ENOMEM;
+       adap = netdev2adap(dev);
+-      req = __skb_put(skb, sizeof(*req));
++      req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
+       INIT_TP_WR(req, 0);
+       OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
+       req->local_port = sport;
+@@ -1448,7 +1448,7 @@ int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
+               return -ENOMEM;
+       adap = netdev2adap(dev);
+-      req = __skb_put(skb, sizeof(*req));
++      req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
+       INIT_TP_WR(req, 0);
+       OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
+       req->local_port = sport;
+@@ -1480,7 +1480,7 @@ int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
+       if (!skb)
+               return -ENOMEM;
+-      req = __skb_put(skb, sizeof(*req));
++      req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
+       INIT_TP_WR(req, 0);
+       OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
+       req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
+@@ -1856,7 +1856,11 @@ static void check_neigh_update(struct neighbour *neigh)
+       const struct device *parent;
+       const struct net_device *netdev = neigh->dev;
++#ifdef HAVE_IS_VLAN_DEV_CONST
+       if (is_vlan_dev(netdev))
++#else
++      if (netdev->priv_flags & IFF_802_1Q_VLAN)
++#endif
+               netdev = vlan_dev_real_dev(netdev);
+       parent = netdev->dev.parent;
+       if (parent && parent->driver == &cxgb4_driver.driver)
+@@ -2442,7 +2446,11 @@ int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
+ }
+ EXPORT_SYMBOL(cxgb4_remove_server_filter);
++#ifdef HAVE_NDO_GET_STATS64_RET_VOID
+ static void cxgb_get_stats(struct net_device *dev,
++#else
++static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
++#endif
+                          struct rtnl_link_stats64 *ns)
+ {
+       struct port_stats stats;
+@@ -2456,7 +2464,11 @@ static void cxgb_get_stats(struct net_device *dev,
+       spin_lock(&adapter->stats_lock);
+       if (!netif_device_present(dev)) {
+               spin_unlock(&adapter->stats_lock);
++#ifdef HAVE_NDO_GET_STATS64_RET_VOID
+               return;
++#else
++              return ns;
++#endif
+       }
+       t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
+                                &p->stats_base);
+@@ -2490,6 +2502,10 @@ static void cxgb_get_stats(struct net_device *dev,
+       ns->tx_errors = stats.tx_error_frames;
+       ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
+               ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
++
++#ifndef HAVE_NDO_GET_STATS64_RET_VOID
++      return ns;
++#endif
+ }
+ static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+@@ -2618,6 +2634,11 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
+       int ret;
+       struct port_info *pi = netdev_priv(dev);
++#ifndef HAVE_NET_DEVICE_MIN_MAX_MTU
++      /* accommodate SACK */
++      if (new_mtu < 81)
++              return -EINVAL;
++#endif
+       ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1,
+                           -1, -1, -1, true);
+       if (!ret)
+@@ -2948,6 +2969,7 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
+       return err;
+ }
++#ifndef RHEL_RELEASE_7_4
+ static int cxgb_setup_tc_flower(struct net_device *dev,
+                               struct tc_cls_flower_offload *cls_flower)
+ {
+@@ -3062,6 +3084,7 @@ static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
+       }
+ }
+ #endif
++#endif /* RHEL_RELEASE_7_4 */
+ static void cxgb_del_udp_tunnel(struct net_device *netdev,
+                               struct udp_tunnel_info *ti)
+@@ -3269,11 +3292,13 @@ static const struct net_device_ops cxgb4_netdev_ops = {
+ #else
+       .ndo_set_tx_maxrate = cxgb_set_tx_maxrate,
+ #endif
++#ifndef RHEL_RELEASE_7_4
+ #ifdef HAVE_NDO_SETUP_TC_RH_EXTENDED
+       .extended.ndo_setup_tc_rh       = cxgb_setup_tc,
+ #else
+       .ndo_setup_tc         = cxgb_setup_tc,
+ #endif
++#endif
+ #ifdef HAVE_NDO_UDP_TUNNEL_ADD_EXTENDED
+       .extended.ndo_udp_tunnel_add   = cxgb_add_udp_tunnel,
+       .extended.ndo_udp_tunnel_del   = cxgb_del_udp_tunnel,
+@@ -5274,8 +5299,10 @@ static void free_some_resources(struct adapter *adapter)
+       kvfree(adapter->srq);
+       t4_cleanup_sched(adapter);
+       kvfree(adapter->tids.tid_tab);
++#ifndef RHEL_RELEASE_7_4
+       cxgb4_cleanup_tc_flower(adapter);
+       cxgb4_cleanup_tc_u32(adapter);
++#endif
+       kfree(adapter->sge.egr_map);
+       kfree(adapter->sge.ingr_map);
+       kfree(adapter->sge.starving_fl);
+@@ -5509,6 +5536,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+       u32 whoami, pl_rev;
+       enum chip_type chip;
+       static int adap_idx = 1;
++#ifndef HAVE_PCIE_RELAXED_ORDERING_ENABLED
++      u16 v;
++#endif
+       printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
+@@ -5621,7 +5651,12 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+        * PCIe configuration space to see if it's flagged with advice against
+        * using Relaxed Ordering.
+        */
++#ifdef HAVE_PCIE_RELAXED_ORDERING_ENABLED
+       if (!pcie_relaxed_ordering_enabled(pdev))
++#else
++      pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &v);
++      if (!(v & PCI_EXP_DEVCTL_RELAX_EN))
++#endif
+               adapter->flags |= ROOT_NO_RELAXED_ORDERING;
+       spin_lock_init(&adapter->stats_lock);
+@@ -5715,6 +5750,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+               netdev->priv_flags |= IFF_UNICAST_FLT;
+               /* MTU range: 81 - 9600 */
++#ifdef HAVE_NET_DEVICE_MIN_MAX_MTU
+ #ifdef HAVE_NDO_CHANGE_MTU_EXTENDED
+               netdev->extended->min_mtu = 81;
+               netdev->extended->max_mtu = MAX_MTU;
+@@ -5722,6 +5758,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+               netdev->min_mtu = 81;              /* accommodate SACK */
+               netdev->max_mtu = MAX_MTU;
+ #endif
++#endif
+               netdev->netdev_ops = &cxgb4_netdev_ops;
+ #ifdef CONFIG_CHELSIO_T4_DCB
+@@ -5811,7 +5848,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+               dev_warn(&pdev->dev, "could not allocate TID table, "
+                        "continuing\n");
+               adapter->params.offload = 0;
+-      } else {
++      }
++#ifndef RHEL_RELEASE_7_4
++      else {
+               adapter->tc_u32 = cxgb4_init_tc_u32(adapter);
+               if (!adapter->tc_u32)
+                       dev_warn(&pdev->dev,
+@@ -5821,6 +5860,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+                       dev_warn(&pdev->dev,
+                                "could not offload tc flower, continuing\n");
+       }
++#endif
+       if (is_offload(adapter) || is_hashfilter(adapter)) {
+               if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+index 45705a5..4b0ab63 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+@@ -41,6 +41,8 @@
+ #include "cxgb4_filter.h"
+ #include "cxgb4_tc_flower.h"
++#ifndef RHEL_RELEASE_7_4
++
+ #define STATS_CHECK_PERIOD (HZ / 2)
+ static struct ch_tc_pedit_fields pedits[] = {
+@@ -912,3 +914,4 @@ void cxgb4_cleanup_tc_flower(struct adapter *adap)
+       cancel_work_sync(&adap->flower_stats_work);
+       rhashtable_destroy(&adap->flower_tbl);
+ }
++#endif /* RHEL_RELEASE_7_4 */
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
+index 973881b..326fc95 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
+@@ -39,6 +39,8 @@
+ #include "cxgb4_tc_u32_parse.h"
+ #include "cxgb4_tc_u32.h"
++#ifndef RHEL_RELEASE_7_4
++
+ /* Fill ch_filter_specification with parsed match value/mask pair. */
+ static int fill_match_fields(struct adapter *adap,
+                            struct ch_filter_specification *fs,
+@@ -491,3 +493,4 @@ out_no_mem:
+       return NULL;
+ }
++#endif /* RHEL_RELEASE_7_4 */
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+index 1817a03..abfc856 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+@@ -146,7 +146,7 @@ static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
+       if (!skb)
+               return -ENOMEM;
+-      req = __skb_put(skb, sizeof(*req));
++      req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
+       INIT_TP_WR(req, 0);
+       OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
+-- 
+2.9.5
+