git.openfabrics.org - compat-rdma/compat-rdma.git/commitdiff
qed*: Fixes and features for qed that have been committed prior to 4.17
author     Michal Kalderon <michal.kalderon@cavium.com>
           Sun, 22 Jul 2018 18:23:45 +0000 (21:23 +0300)
committer  Michal Kalderon <michal.kalderon@cavium.com>
           Wed, 1 Aug 2018 14:09:28 +0000 (17:09 +0300)
Signed-off-by: Michal Kalderon <michal.kalderon@cavium.com>
37 files changed:
linux-next-cherry-picks/0001-qed-use-trust-mode-to-allow-VF-to-override-forced-MA.patch [new file with mode: 0644]
linux-next-cherry-picks/0002-qed-Add-new-TLV-to-request-PF-to-update-MAC-in-bulle.patch [new file with mode: 0644]
linux-next-cherry-picks/0003-qed-Delete-unused-parameter-p_ptt-from-mcp-APIs.patch [new file with mode: 0644]
linux-next-cherry-picks/0004-qed-Add-configuration-information-to-register-dump-a.patch [new file with mode: 0644]
linux-next-cherry-picks/0006-RDMA-qedr-fix-spelling-mistake-failes-fails.patch [new file with mode: 0644]
linux-next-cherry-picks/0007-qed-Refactor-mf_mode-to-consist-of-bits.patch [new file with mode: 0644]
linux-next-cherry-picks/0008-qed-Remove-unused-data-member-is_mf_default.patch [new file with mode: 0644]
linux-next-cherry-picks/0009-qed-Add-support-for-multi-function-mode-with-802.1ad.patch [new file with mode: 0644]
linux-next-cherry-picks/0010-qed-Add-support-for-Unified-Fabric-Port.patch [new file with mode: 0644]
linux-next-cherry-picks/0014-qede-Add-build_skb-support.patch [new file with mode: 0644]
linux-next-cherry-picks/0015-qed-Add-MFW-interfaces-for-TLV-request-support.patch [new file with mode: 0644]
linux-next-cherry-picks/0016-qed-Add-support-for-tlv-request-processing.patch [new file with mode: 0644]
linux-next-cherry-picks/0017-qed-Add-support-for-processing-fcoe-tlv-request.patch [new file with mode: 0644]
linux-next-cherry-picks/0018-qed-Add-support-for-processing-iscsi-tlv-request.patch [new file with mode: 0644]
linux-next-cherry-picks/0019-qed-Add-driver-infrastucture-for-handling-mfw-reques.patch [new file with mode: 0644]
linux-next-cherry-picks/0020-qede-Add-support-for-populating-ethernet-TLVs.patch [new file with mode: 0644]
linux-next-cherry-picks/0021-qede-Refactor-ethtool-rx-classification-flow.patch [new file with mode: 0644]
linux-next-cherry-picks/0022-qede-Validate-unsupported-configurations.patch [new file with mode: 0644]
linux-next-cherry-picks/0023-qed-Support-other-classification-modes.patch [new file with mode: 0644]
linux-next-cherry-picks/0024-qede-Support-flow-classification-to-the-VFs.patch [new file with mode: 0644]
linux-next-cherry-picks/0025-qed-Support-drop-action-classification.patch [new file with mode: 0644]
linux-next-cherry-picks/0026-drivers-net-Fix-various-unnecessary-characters-after.patch [new file with mode: 0644]
linux-next-cherry-picks/0027-RDMA-qedr-fix-spelling-mistake-adrresses-addresses.patch [new file with mode: 0644]
linux-next-cherry-picks/0028-qed-Add-link-change-count-value-to-ethtool-statistic.patch [new file with mode: 0644]
linux-next-cherry-picks/0029-qed-Fix-shared-memory-inconsistency-between-driver-a.patch [new file with mode: 0644]
linux-next-cherry-picks/0030-qed-Fix-use-of-incorrect-shmem-address.patch [new file with mode: 0644]
linux-next-cherry-picks/0031-qed-Add-srq-core-support-for-RoCE-and-iWARP.patch [new file with mode: 0644]
linux-next-cherry-picks/0032-qed-use-dma_zalloc_coherent-instead-of-allocator-mem.patch [new file with mode: 0644]
linux-next-cherry-picks/0033-qed-Utilize-FW-8.37.2.0.patch [new file with mode: 0644]
linux-next-cherry-picks/0034-RDMA-qedr-Fix-NULL-pointer-dereference-when-running-.patch [new file with mode: 0644]
linux-next-cherry-picks/0038-qed-Fix-possible-memory-leak-in-Rx-error-path-handli.patch [new file with mode: 0644]
linux-next-cherry-picks/0039-qed-Add-sanity-check-for-SIMD-fastpath-handler.patch [new file with mode: 0644]
linux-next-cherry-picks/0040-qed-Do-not-advertise-DCBX_LLD_MANAGED-capability.patch [new file with mode: 0644]
linux-next-cherry-picks/0041-qed-Limit-msix-vectors-in-kdump-kernel-to-the-minimu.patch [new file with mode: 0644]
linux-next-cherry-picks/0042-qed-Fix-setting-of-incorrect-eswitch-mode.patch [new file with mode: 0644]
linux-next-cherry-picks/0043-qed-Fix-use-of-incorrect-size-in-memcpy-call.patch [new file with mode: 0644]
linux-next-cherry-picks/0044-qede-Adverstise-software-timestamp-caps-when-PHC-is-.patch [new file with mode: 0644]

diff --git a/linux-next-cherry-picks/0001-qed-use-trust-mode-to-allow-VF-to-override-forced-MA.patch b/linux-next-cherry-picks/0001-qed-use-trust-mode-to-allow-VF-to-override-forced-MA.patch
new file mode 100644 (file)
index 0000000..7b1e364
--- /dev/null
@@ -0,0 +1,352 @@
+From 7425d8220f8d0c2127aec75677f652a26f86bc95 Mon Sep 17 00:00:00 2001
+From: Shahed Shaikh <shahed.shaikh@cavium.com>
+Date: Thu, 19 Apr 2018 05:50:11 -0700
+Subject: [PATCH 01/44] qed*: use trust mode to allow VF to override forced
+ MAC
+
+Under the existing behavior, when the PF sets a MAC address for a VF
+(also called a forced MAC), the VF is not allowed to change its
+MAC address afterwards.
+This limits a few use cases, such as bonding of VFs, where the
+bonding driver asks the VF to change its MAC address.
+
+This patch uses the VF trust mode to allow a VF to change its MAC
+address even though the PF has set a forced MAC for that VF.
+
+Signed-off-by: Shahed Shaikh <shahed.shaikh@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/qlogic/qed/qed_sriov.c    | 210 +++++++++++++++++++++++--
+ drivers/net/ethernet/qlogic/qede/qede_filter.c |   3 +-
+ 2 files changed, 195 insertions(+), 18 deletions(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+index 5acb91b..77376fd 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+@@ -48,7 +48,7 @@ static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
+                              u8 opcode,
+                              __le16 echo,
+                              union event_ring_data *data, u8 fw_return_code);
+-
++static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid);
+ static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf)
+ {
+@@ -1790,7 +1790,8 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
+       if (!p_vf->vport_instance)
+               return -EINVAL;
+-      if (events & BIT(MAC_ADDR_FORCED)) {
++      if ((events & BIT(MAC_ADDR_FORCED)) ||
++          p_vf->p_vf_info.is_trusted_configured) {
+               /* Since there's no way [currently] of removing the MAC,
+                * we can always assume this means we need to force it.
+                */
+@@ -1809,8 +1810,12 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
+                                 "PF failed to configure MAC for VF\n");
+                       return rc;
+               }
+-
+-              p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
++              if (p_vf->p_vf_info.is_trusted_configured)
++                      p_vf->configured_features |=
++                              BIT(VFPF_BULLETIN_MAC_ADDR);
++              else
++                      p_vf->configured_features |=
++                              BIT(MAC_ADDR_FORCED);
+       }
+       if (events & BIT(VLAN_ADDR_FORCED)) {
+@@ -3170,6 +3175,10 @@ static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
+       if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))
+               return 0;
++      /* Don't keep track of shadow copy since we don't intend to restore. */
++      if (p_vf->p_vf_info.is_trusted_configured)
++              return 0;
++
+       /* First remove entries and then add new ones */
+       if (p_params->opcode == QED_FILTER_REMOVE) {
+               for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
+@@ -3244,9 +3253,17 @@ static int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
+       /* No real decision to make; Store the configured MAC */
+       if (params->type == QED_FILTER_MAC ||
+-          params->type == QED_FILTER_MAC_VLAN)
++          params->type == QED_FILTER_MAC_VLAN) {
+               ether_addr_copy(vf->mac, params->mac);
++              if (vf->is_trusted_configured) {
++                      qed_iov_bulletin_set_mac(hwfn, vf->mac, vfid);
++
++                      /* Update and post bulletin again */
++                      qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
++              }
++      }
++
+       return 0;
+ }
+@@ -4081,16 +4098,60 @@ static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
+               return;
+       }
+-      feature = 1 << MAC_ADDR_FORCED;
++      if (vf_info->p_vf_info.is_trusted_configured) {
++              feature = BIT(VFPF_BULLETIN_MAC_ADDR);
++              /* Trust mode will disable Forced MAC */
++              vf_info->bulletin.p_virt->valid_bitmap &=
++                      ~BIT(MAC_ADDR_FORCED);
++      } else {
++              feature = BIT(MAC_ADDR_FORCED);
++              /* Forced MAC will disable MAC_ADDR */
++              vf_info->bulletin.p_virt->valid_bitmap &=
++                      ~BIT(VFPF_BULLETIN_MAC_ADDR);
++      }
++
+       memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
+       vf_info->bulletin.p_virt->valid_bitmap |= feature;
+-      /* Forced MAC will disable MAC_ADDR */
+-      vf_info->bulletin.p_virt->valid_bitmap &= ~BIT(VFPF_BULLETIN_MAC_ADDR);
+       qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
+ }
++static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid)
++{
++      struct qed_vf_info *vf_info;
++      u64 feature;
++
++      vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
++      if (!vf_info) {
++              DP_NOTICE(p_hwfn->cdev, "Can not set MAC, invalid vfid [%d]\n",
++                        vfid);
++              return -EINVAL;
++      }
++
++      if (vf_info->b_malicious) {
++              DP_NOTICE(p_hwfn->cdev, "Can't set MAC to malicious VF [%d]\n",
++                        vfid);
++              return -EINVAL;
++      }
++
++      if (vf_info->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)) {
++              DP_VERBOSE(p_hwfn, QED_MSG_IOV,
++                         "Can not set MAC, Forced MAC is configured\n");
++              return -EINVAL;
++      }
++
++      feature = BIT(VFPF_BULLETIN_MAC_ADDR);
++      ether_addr_copy(vf_info->bulletin.p_virt->mac, mac);
++
++      vf_info->bulletin.p_virt->valid_bitmap |= feature;
++
++      if (vf_info->p_vf_info.is_trusted_configured)
++              qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
++
++      return 0;
++}
++
+ static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
+                                            u16 pvid, int vfid)
+ {
+@@ -4204,6 +4265,21 @@ static int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
+       return rc;
+ }
++static u8 *qed_iov_bulletin_get_mac(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
++{
++      struct qed_vf_info *p_vf;
++
++      p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
++      if (!p_vf || !p_vf->bulletin.p_virt)
++              return NULL;
++
++      if (!(p_vf->bulletin.p_virt->valid_bitmap &
++            BIT(VFPF_BULLETIN_MAC_ADDR)))
++              return NULL;
++
++      return p_vf->bulletin.p_virt->mac;
++}
++
+ static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
+                                          u16 rel_vf_id)
+ {
+@@ -4493,8 +4569,12 @@ static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid)
+               if (!vf_info)
+                       continue;
+-              /* Set the forced MAC, and schedule the IOV task */
+-              ether_addr_copy(vf_info->forced_mac, mac);
++              /* Set the MAC, and schedule the IOV task */
++              if (vf_info->is_trusted_configured)
++                      ether_addr_copy(vf_info->mac, mac);
++              else
++                      ether_addr_copy(vf_info->forced_mac, mac);
++
+               qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
+       }
+@@ -4802,6 +4882,33 @@ static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
+       qed_ptt_release(hwfn, ptt);
+ }
++static bool qed_pf_validate_req_vf_mac(struct qed_hwfn *hwfn,
++                                     u8 *mac,
++                                     struct qed_public_vf_info *info)
++{
++      if (info->is_trusted_configured) {
++              if (is_valid_ether_addr(info->mac) &&
++                  (!mac || !ether_addr_equal(mac, info->mac)))
++                      return true;
++      } else {
++              if (is_valid_ether_addr(info->forced_mac) &&
++                  (!mac || !ether_addr_equal(mac, info->forced_mac)))
++                      return true;
++      }
++
++      return false;
++}
++
++static void qed_set_bulletin_mac(struct qed_hwfn *hwfn,
++                               struct qed_public_vf_info *info,
++                               int vfid)
++{
++      if (info->is_trusted_configured)
++              qed_iov_bulletin_set_mac(hwfn, info->mac, vfid);
++      else
++              qed_iov_bulletin_set_forced_mac(hwfn, info->forced_mac, vfid);
++}
++
+ static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn)
+ {
+       int i;
+@@ -4816,18 +4923,20 @@ static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn)
+                       continue;
+               /* Update data on bulletin board */
+-              mac = qed_iov_bulletin_get_forced_mac(hwfn, i);
+-              if (is_valid_ether_addr(info->forced_mac) &&
+-                  (!mac || !ether_addr_equal(mac, info->forced_mac))) {
++              if (info->is_trusted_configured)
++                      mac = qed_iov_bulletin_get_mac(hwfn, i);
++              else
++                      mac = qed_iov_bulletin_get_forced_mac(hwfn, i);
++
++              if (qed_pf_validate_req_vf_mac(hwfn, mac, info)) {
+                       DP_VERBOSE(hwfn,
+                                  QED_MSG_IOV,
+                                  "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n",
+                                  i,
+                                  hwfn->cdev->p_iov_info->first_vf_in_pf + i);
+-                      /* Update bulletin board with forced MAC */
+-                      qed_iov_bulletin_set_forced_mac(hwfn,
+-                                                      info->forced_mac, i);
++                      /* Update bulletin board with MAC */
++                      qed_set_bulletin_mac(hwfn, info, i);
+                       update = true;
+               }
+@@ -4867,6 +4976,72 @@ static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
+       qed_ptt_release(hwfn, ptt);
+ }
++static void qed_update_mac_for_vf_trust_change(struct qed_hwfn *hwfn, int vf_id)
++{
++      struct qed_public_vf_info *vf_info;
++      struct qed_vf_info *vf;
++      u8 *force_mac;
++      int i;
++
++      vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);
++      vf = qed_iov_get_vf_info(hwfn, vf_id, true);
++
++      if (!vf_info || !vf)
++              return;
++
++      /* Force MAC converted to generic MAC in case of VF trust on */
++      if (vf_info->is_trusted_configured &&
++          (vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))) {
++              force_mac = qed_iov_bulletin_get_forced_mac(hwfn, vf_id);
++
++              if (force_mac) {
++                      /* Clear existing shadow copy of MAC to have a clean
++                       * slate.
++                       */
++                      for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
++                              if (ether_addr_equal(vf->shadow_config.macs[i],
++                                                   vf_info->mac)) {
++                                      memset(vf->shadow_config.macs[i], 0,
++                                             ETH_ALEN);
++                                      DP_VERBOSE(hwfn, QED_MSG_IOV,
++                                                 "Shadow MAC %pM removed for VF 0x%02x, VF trust mode is ON\n",
++                                                  vf_info->mac, vf_id);
++                                      break;
++                              }
++                      }
++
++                      ether_addr_copy(vf_info->mac, force_mac);
++                      memset(vf_info->forced_mac, 0, ETH_ALEN);
++                      vf->bulletin.p_virt->valid_bitmap &=
++                                      ~BIT(MAC_ADDR_FORCED);
++                      qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
++              }
++      }
++
++      /* Update shadow copy with VF MAC when trust mode is turned off */
++      if (!vf_info->is_trusted_configured) {
++              u8 empty_mac[ETH_ALEN];
++
++              memset(empty_mac, 0, ETH_ALEN);
++              for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
++                      if (ether_addr_equal(vf->shadow_config.macs[i],
++                                           empty_mac)) {
++                              ether_addr_copy(vf->shadow_config.macs[i],
++                                              vf_info->mac);
++                              DP_VERBOSE(hwfn, QED_MSG_IOV,
++                                         "Shadow is updated with %pM for VF 0x%02x, VF trust mode is OFF\n",
++                                          vf_info->mac, vf_id);
++                              break;
++                      }
++              }
++              /* Clear bulletin when trust mode is turned off,
++               * to have a clean slate for next (normal) operations.
++               */
++              qed_iov_bulletin_set_mac(hwfn, empty_mac, vf_id);
++              qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
++      }
++}
++
+ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
+ {
+       struct qed_sp_vport_update_params params;
+@@ -4890,6 +5065,9 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
+                       continue;
+               vf_info->is_trusted_configured = vf_info->is_trusted_request;
++              /* Handle forced MAC mode */
++              qed_update_mac_for_vf_trust_change(hwfn, i);
++
+               /* Validate that the VF has a configured vport */
+               vf = qed_iov_get_vf_info(hwfn, i, true);
+               if (!vf->vport_instance)
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
+index 6687e04..8094f03 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
+@@ -550,8 +550,7 @@ void qede_force_mac(void *dev, u8 *mac, bool forced)
+       __qede_lock(edev);
+-      /* MAC hints take effect only if we haven't set one already */
+-      if (is_valid_ether_addr(edev->ndev->dev_addr) && !forced) {
++      if (!is_valid_ether_addr(mac)) {
+               __qede_unlock(edev);
+               return;
+       }
+-- 
+2.9.5
+
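A condensed, standalone sketch of the selection rule this patch introduces: a trusted VF gets an advisory bulletin MAC (which it may later override), while an untrusted VF gets a forced MAC, and setting one bulletin bit clears the other. The struct and helper below are illustrative stand-ins, not the driver's real qed_vf_info/bulletin types.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ETH_ALEN               6
    #define MAC_ADDR_FORCED        0   /* bulletin bit: PF-enforced MAC */
    #define VFPF_BULLETIN_MAC_ADDR 1   /* bulletin bit: advisory MAC */

    struct bulletin {                  /* stand-in for the bulletin board */
        uint64_t valid_bitmap;
        uint8_t  mac[ETH_ALEN];
    };

    /* Trusted VF: advisory bulletin MAC; untrusted VF: forced MAC.
     * Setting one bit clears the other, as in the patched
     * qed_iov_bulletin_set_forced_mac().
     */
    static void set_bulletin_mac(struct bulletin *b, const uint8_t *mac,
                                 bool trusted)
    {
        uint64_t feature;

        if (trusted) {
            feature = 1ULL << VFPF_BULLETIN_MAC_ADDR;
            b->valid_bitmap &= ~(1ULL << MAC_ADDR_FORCED);
        } else {
            feature = 1ULL << MAC_ADDR_FORCED;
            b->valid_bitmap &= ~(1ULL << VFPF_BULLETIN_MAC_ADDR);
        }
        memcpy(b->mac, mac, ETH_ALEN);
        b->valid_bitmap |= feature;
    }

    int main(void)
    {
        const uint8_t mac[ETH_ALEN] = { 0x00, 0x0e, 0x1e, 0x11, 0x22, 0x33 };
        struct bulletin b = { 0 };

        set_bulletin_mac(&b, mac, false);   /* forced-MAC bit set */
        printf("untrusted: bitmap=0x%llx\n", (unsigned long long)b.valid_bitmap);
        set_bulletin_mac(&b, mac, true);    /* forced bit cleared, advisory set */
        printf("trusted:   bitmap=0x%llx\n", (unsigned long long)b.valid_bitmap);
        return 0;
    }
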
diff --git a/linux-next-cherry-picks/0002-qed-Add-new-TLV-to-request-PF-to-update-MAC-in-bulle.patch b/linux-next-cherry-picks/0002-qed-Add-new-TLV-to-request-PF-to-update-MAC-in-bulle.patch
new file mode 100644 (file)
index 0000000..8069b66
--- /dev/null
@@ -0,0 +1,251 @@
+From 809c45a091d93e05c6e9b5d53bb3f1185273286b Mon Sep 17 00:00:00 2001
+From: Shahed Shaikh <shahed.shaikh@cavium.com>
+Date: Thu, 19 Apr 2018 05:50:12 -0700
+Subject: [PATCH 02/44] qed*: Add new TLV to request PF to update MAC in
+ bulletin board
+
+The VF driver may need to request that the PF explicitly update its
+bulletin board with a MAC address.
+For example, when the user assigns a MAC address to a VF while the VF is
+still down and the PF's bulletin board contains a different MAC address,
+the VF interface gets loaded with the MAC address from the bulletin
+board when it is brought up, which is not desirable.
+
+To handle this corner case, we need a new TLV to request that the PF
+update its bulletin board with the suggested MAC.
+
+This request will be honored only for trusted VFs.
+
+Signed-off-by: Shahed Shaikh <shahed.shaikh@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/qlogic/qed/qed_l2.c       | 19 +++++++++++++
+ drivers/net/ethernet/qlogic/qed/qed_sriov.c    | 37 ++++++++++++++++++++++++++
+ drivers/net/ethernet/qlogic/qed/qed_vf.c       | 29 ++++++++++++++++++++
+ drivers/net/ethernet/qlogic/qed/qed_vf.h       | 21 +++++++++++++++
+ drivers/net/ethernet/qlogic/qede/qede_filter.c |  4 +++
+ include/linux/qed/qed_eth_if.h                 |  1 +
+ 6 files changed, 111 insertions(+)
+
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
+index e874504..8b1b7e8 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
+@@ -2850,6 +2850,24 @@ static int qed_fp_cqe_completion(struct qed_dev *dev,
+                                     cqe);
+ }
++static int qed_req_bulletin_update_mac(struct qed_dev *cdev, u8 *mac)
++{
++      int i, ret;
++
++      if (IS_PF(cdev))
++              return 0;
++
++      for_each_hwfn(cdev, i) {
++              struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
++
++              ret = qed_vf_pf_bulletin_update_mac(p_hwfn, mac);
++              if (ret)
++                      return ret;
++      }
++
++      return 0;
++}
++
+ #ifdef CONFIG_QED_SRIOV
+ extern const struct qed_iov_hv_ops qed_iov_ops_pass;
+ #endif
+@@ -2887,6 +2905,7 @@ static const struct qed_eth_ops qed_eth_ops_pass = {
+       .ntuple_filter_config = &qed_ntuple_arfs_filter_config,
+       .configure_arfs_searcher = &qed_configure_arfs_searcher,
+       .get_coalesce = &qed_get_coalesce,
++      .req_bulletin_update_mac = &qed_req_bulletin_update_mac,
+ };
+ const struct qed_eth_ops *qed_get_eth_ops(void)
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+index 77376fd..f01bf52 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+@@ -3820,6 +3820,40 @@ static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
+               __qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
+ }
++static int
++qed_iov_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn,
++                                struct qed_ptt *p_ptt,
++                                struct qed_vf_info *p_vf)
++{
++      struct qed_bulletin_content *p_bulletin = p_vf->bulletin.p_virt;
++      struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
++      struct vfpf_bulletin_update_mac_tlv *p_req;
++      u8 status = PFVF_STATUS_SUCCESS;
++      int rc = 0;
++
++      if (!p_vf->p_vf_info.is_trusted_configured) {
++              DP_VERBOSE(p_hwfn,
++                         QED_MSG_IOV,
++                         "Blocking bulletin update request from untrusted VF[%d]\n",
++                         p_vf->abs_vf_id);
++              status = PFVF_STATUS_NOT_SUPPORTED;
++              rc = -EINVAL;
++              goto send_status;
++      }
++
++      p_req = &mbx->req_virt->bulletin_update_mac;
++      ether_addr_copy(p_bulletin->mac, p_req->mac);
++      DP_VERBOSE(p_hwfn, QED_MSG_IOV,
++                 "Updated bulletin of VF[%d] with requested MAC[%pM]\n",
++                 p_vf->abs_vf_id, p_req->mac);
++
++send_status:
++      qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
++                           CHANNEL_TLV_BULLETIN_UPDATE_MAC,
++                           sizeof(struct pfvf_def_resp_tlv), status);
++      return rc;
++}
++
+ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
+                                   struct qed_ptt *p_ptt, int vfid)
+ {
+@@ -3899,6 +3933,9 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
+               case CHANNEL_TLV_COALESCE_READ:
+                       qed_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf);
+                       break;
++              case CHANNEL_TLV_BULLETIN_UPDATE_MAC:
++                      qed_iov_vf_pf_bulletin_update_mac(p_hwfn, p_ptt, p_vf);
++                      break;
+               }
+       } else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
+               DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
+index 91b5e9f..2d7fcd6 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
+@@ -1375,6 +1375,35 @@ int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn,
+ }
+ int
++qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn,
++                            u8 *p_mac)
++{
++      struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
++      struct vfpf_bulletin_update_mac_tlv *p_req;
++      struct pfvf_def_resp_tlv *p_resp;
++      int rc;
++
++      if (!p_mac)
++              return -EINVAL;
++
++      /* clear mailbox and prep header tlv */
++      p_req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_BULLETIN_UPDATE_MAC,
++                             sizeof(*p_req));
++      ether_addr_copy(p_req->mac, p_mac);
++      DP_VERBOSE(p_hwfn, QED_MSG_IOV,
++                 "Requesting bulletin update for MAC[%pM]\n", p_mac);
++
++      /* add list termination tlv */
++      qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
++                  sizeof(struct channel_list_end_tlv));
++
++      p_resp = &p_iov->pf2vf_reply->default_resp;
++      rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));
++      qed_vf_pf_req_end(p_hwfn, rc);
++      return rc;
++}
++
++int
+ qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
+                      u16 rx_coal, u16 tx_coal, struct qed_queue_cid *p_cid)
+ {
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h
+index 97d44df..4f05d5e 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_vf.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h
+@@ -518,6 +518,12 @@ struct pfvf_read_coal_resp_tlv {
+       u8 padding[6];
+ };
++struct vfpf_bulletin_update_mac_tlv {
++      struct vfpf_first_tlv first_tlv;
++      u8 mac[ETH_ALEN];
++      u8 padding[2];
++};
++
+ union vfpf_tlvs {
+       struct vfpf_first_tlv first_tlv;
+       struct vfpf_acquire_tlv acquire;
+@@ -532,6 +538,7 @@ union vfpf_tlvs {
+       struct vfpf_update_tunn_param_tlv tunn_param_update;
+       struct vfpf_update_coalesce update_coalesce;
+       struct vfpf_read_coal_req_tlv read_coal_req;
++      struct vfpf_bulletin_update_mac_tlv bulletin_update_mac;
+       struct tlv_buffer_size tlv_buf_size;
+ };
+@@ -650,6 +657,7 @@ enum {
+       CHANNEL_TLV_COALESCE_UPDATE,
+       CHANNEL_TLV_QID,
+       CHANNEL_TLV_COALESCE_READ,
++      CHANNEL_TLV_BULLETIN_UPDATE_MAC,
+       CHANNEL_TLV_MAX,
+       /* Required for iterating over vport-update tlvs.
+@@ -1042,6 +1050,13 @@ int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
+                                 struct qed_tunnel_info *p_tunn);
+ u32 qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id);
++/**
++ * @brief - Ask PF to update the MAC address in its bulletin board
++ *
++ * @param p_mac - mac address to be updated in bulletin board
++ */
++int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, u8 *p_mac);
++
+ #else
+ static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+                                         struct qed_mcp_link_params *params)
+@@ -1228,6 +1243,12 @@ static inline int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
+       return -EINVAL;
+ }
++static inline int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn,
++                                              u8 *p_mac)
++{
++      return -EINVAL;
++}
++
+ static inline u32
+ qed_vf_hw_bar_size(struct qed_hwfn  *p_hwfn,
+                  enum BAR_ID bar_id)
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
+index 8094f03..43569b1 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
+@@ -1160,6 +1160,10 @@ int qede_set_mac_addr(struct net_device *ndev, void *p)
+       if (edev->state != QEDE_STATE_OPEN) {
+               DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
+                          "The device is currently down\n");
++              /* Ask PF to explicitly update a copy in bulletin board */
++              if (IS_VF(edev) && edev->ops->req_bulletin_update_mac)
++                      edev->ops->req_bulletin_update_mac(edev->cdev,
++                                                         ndev->dev_addr);
+               goto out;
+       }
+diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
+index 147d08c..7f9756f 100644
+--- a/include/linux/qed/qed_eth_if.h
++++ b/include/linux/qed/qed_eth_if.h
+@@ -352,6 +352,7 @@ struct qed_eth_ops {
+       int (*configure_arfs_searcher)(struct qed_dev *cdev,
+                                      enum qed_filter_config_mode mode);
+       int (*get_coalesce)(struct qed_dev *cdev, u16 *coal, void *handle);
++      int (*req_bulletin_update_mac)(struct qed_dev *cdev, u8 *mac);
+ };
+ const struct qed_eth_ops *qed_get_eth_ops(void);
+-- 
+2.9.5
+
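A standalone sketch of the exchange the new TLV enables, with the messaging reduced to plain structs: the VF submits a bulletin-update-MAC request, and the PF honors it only when the VF is trusted, mirroring the gate in qed_iov_vf_pf_bulletin_update_mac(). The type and status names are illustrative stand-ins.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ETH_ALEN                  6
    #define PFVF_STATUS_SUCCESS       1
    #define PFVF_STATUS_NOT_SUPPORTED 2

    struct bulletin_update_mac_req {   /* stand-in for the new request TLV */
        uint8_t mac[ETH_ALEN];
    };

    struct pf_vf_state {               /* stand-in for the PF's per-VF state */
        bool    is_trusted_configured;
        uint8_t bulletin_mac[ETH_ALEN];
    };

    /* PF side: honor the request only for trusted VFs, as in
     * qed_iov_vf_pf_bulletin_update_mac(). */
    static int pf_handle_bulletin_update(struct pf_vf_state *vf,
                                         const struct bulletin_update_mac_req *req)
    {
        if (!vf->is_trusted_configured)
            return PFVF_STATUS_NOT_SUPPORTED;   /* request blocked */

        memcpy(vf->bulletin_mac, req->mac, ETH_ALEN);
        return PFVF_STATUS_SUCCESS;
    }

    int main(void)
    {
        struct bulletin_update_mac_req req = {
            .mac = { 0x00, 0x0e, 0x1e, 0xaa, 0xbb, 0xcc }
        };
        struct pf_vf_state vf = { .is_trusted_configured = false };

        printf("untrusted VF -> status %d\n", pf_handle_bulletin_update(&vf, &req));
        vf.is_trusted_configured = true;
        printf("trusted VF   -> status %d\n", pf_handle_bulletin_update(&vf, &req));
        return 0;
    }
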
diff --git a/linux-next-cherry-picks/0003-qed-Delete-unused-parameter-p_ptt-from-mcp-APIs.patch b/linux-next-cherry-picks/0003-qed-Delete-unused-parameter-p_ptt-from-mcp-APIs.patch
new file mode 100644 (file)
index 0000000..abc3415
--- /dev/null
@@ -0,0 +1,90 @@
+From b60bfdfec5b8ec88552e75c8bd99f1ebfa66a6e0 Mon Sep 17 00:00:00 2001
+From: Denis Bolotin <denis.bolotin@cavium.com>
+Date: Mon, 23 Apr 2018 14:56:04 +0300
+Subject: [PATCH 03/44] qed: Delete unused parameter p_ptt from mcp APIs
+
+Since NVM image attributes are cached during driver load, acquiring a PTT
+is not needed when calling qed_mcp_get_nvm_image().
+
+Signed-off-by: Denis Bolotin <denis.bolotin@cavium.com>
+Signed-off-by: Ariel Elior <ariel.elior@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/qlogic/qed/qed_main.c | 9 +--------
+ drivers/net/ethernet/qlogic/qed/qed_mcp.c  | 4 +---
+ drivers/net/ethernet/qlogic/qed/qed_mcp.h  | 2 --
+ 3 files changed, 2 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
+index 9854aa9..d1d3787 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
+@@ -1894,15 +1894,8 @@ static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type,
+                            u8 *buf, u16 len)
+ {
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+-      struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
+-      int rc;
+-
+-      if (!ptt)
+-              return -EAGAIN;
+-      rc = qed_mcp_get_nvm_image(hwfn, ptt, type, buf, len);
+-      qed_ptt_release(hwfn, ptt);
+-      return rc;
++      return qed_mcp_get_nvm_image(hwfn, type, buf, len);
+ }
+ static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+index ec0d425..1377ad1 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+@@ -2531,7 +2531,6 @@ int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
+ static int
+ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
+-                        struct qed_ptt *p_ptt,
+                         enum qed_nvm_images image_id,
+                         struct qed_nvm_image_att *p_image_att)
+ {
+@@ -2569,7 +2568,6 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
+ }
+ int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn,
+-                        struct qed_ptt *p_ptt,
+                         enum qed_nvm_images image_id,
+                         u8 *p_buffer, u32 buffer_len)
+ {
+@@ -2578,7 +2576,7 @@ int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn,
+       memset(p_buffer, 0, buffer_len);
+-      rc = qed_mcp_get_nvm_image_att(p_hwfn, p_ptt, image_id, &image_att);
++      rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att);
+       if (rc)
+               return rc;
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+index 8a5c988..dd62c38 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+@@ -486,7 +486,6 @@ struct qed_nvm_image_att {
+  * @brief Allows reading a whole nvram image
+  *
+  * @param p_hwfn
+- * @param p_ptt
+  * @param image_id - image requested for reading
+  * @param p_buffer - allocated buffer into which to fill data
+  * @param buffer_len - length of the allocated buffer.
+@@ -494,7 +493,6 @@ struct qed_nvm_image_att {
+  * @return 0 iff p_buffer now contains the nvram image.
+  */
+ int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn,
+-                        struct qed_ptt *p_ptt,
+                         enum qed_nvm_images image_id,
+                         u8 *p_buffer, u32 buffer_len);
+-- 
+2.9.5
+
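A small sketch of the pattern this cleanup relies on: NVM image attributes are captured once at load time into a cache, so later lookups are plain memory reads and no longer need a hardware-access handle such as p_ptt. All names and values here are illustrative, not the driver's.

    #include <stdint.h>
    #include <stdio.h>

    struct nvm_image_att {                 /* illustrative attribute record */
        uint32_t start_addr;
        uint32_t length;
    };

    static struct nvm_image_att nvm_cache[4];   /* filled once at probe time */

    /* Runs once during driver load, while hardware access is available. */
    static void nvm_info_populate(void)
    {
        nvm_cache[0] = (struct nvm_image_att){ .start_addr = 0x1000, .length = 512 };
        nvm_cache[1] = (struct nvm_image_att){ .start_addr = 0x2000, .length = 256 };
    }

    /* Later callers just read the cache; no hardware handle parameter. */
    static int get_nvm_image_att(unsigned int id, struct nvm_image_att *out)
    {
        if (id >= 4 || nvm_cache[id].length == 0)
            return -1;
        *out = nvm_cache[id];
        return 0;
    }

    int main(void)
    {
        struct nvm_image_att att;

        nvm_info_populate();
        if (get_nvm_image_att(1, &att) == 0)
            printf("image 1: addr=0x%x len=%u\n",
                   (unsigned)att.start_addr, (unsigned)att.length);
        return 0;
    }
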
diff --git a/linux-next-cherry-picks/0004-qed-Add-configuration-information-to-register-dump-a.patch b/linux-next-cherry-picks/0004-qed-Add-configuration-information-to-register-dump-a.patch
new file mode 100644 (file)
index 0000000..730d128
--- /dev/null
@@ -0,0 +1,254 @@
+From 1ac4329a1cff2e0bb12b71c13ad53a0e05bc87a6 Mon Sep 17 00:00:00 2001
+From: Denis Bolotin <denis.bolotin@cavium.com>
+Date: Mon, 23 Apr 2018 14:56:05 +0300
+Subject: [PATCH 04/44] qed: Add configuration information to register dump and
+ debug data
+
+Configuration information is added to the debug data collection, in
+addition to the register dump.
+Add qed_dbg_nvm_image(), which receives an image type, allocates a
+buffer, and reads the image. The images are saved in the buffers and the
+dump size is updated.
+
+Signed-off-by: Denis Bolotin <denis.bolotin@cavium.com>
+Signed-off-by: Ariel Elior <ariel.elior@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/qlogic/qed/qed_debug.c | 113 +++++++++++++++++++++++++++-
+ drivers/net/ethernet/qlogic/qed/qed_mcp.c   |  14 +++-
+ drivers/net/ethernet/qlogic/qed/qed_mcp.h   |  14 ++++
+ include/linux/qed/qed_if.h                  |   3 +
+ 4 files changed, 139 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
+index 4926c55..b3211c7 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
+@@ -7778,6 +7778,57 @@ int qed_dbg_igu_fifo_size(struct qed_dev *cdev)
+       return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO);
+ }
++int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn,
++                           enum qed_nvm_images image_id, u32 *length)
++{
++      struct qed_nvm_image_att image_att;
++      int rc;
++
++      *length = 0;
++      rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att);
++      if (rc)
++              return rc;
++
++      *length = image_att.length;
++
++      return rc;
++}
++
++int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer,
++                    u32 *num_dumped_bytes, enum qed_nvm_images image_id)
++{
++      struct qed_hwfn *p_hwfn =
++              &cdev->hwfns[cdev->dbg_params.engine_for_debug];
++      u32 len_rounded, i;
++      __be32 val;
++      int rc;
++
++      *num_dumped_bytes = 0;
++      rc = qed_dbg_nvm_image_length(p_hwfn, image_id, &len_rounded);
++      if (rc)
++              return rc;
++
++      DP_NOTICE(p_hwfn->cdev,
++                "Collecting a debug feature [\"nvram image %d\"]\n",
++                image_id);
++
++      len_rounded = roundup(len_rounded, sizeof(u32));
++      rc = qed_mcp_get_nvm_image(p_hwfn, image_id, buffer, len_rounded);
++      if (rc)
++              return rc;
++
++      /* QED_NVM_IMAGE_NVM_META image is not swapped like other images */
++      if (image_id != QED_NVM_IMAGE_NVM_META)
++              for (i = 0; i < len_rounded; i += 4) {
++                      val = cpu_to_be32(*(u32 *)(buffer + i));
++                      *(u32 *)(buffer + i) = val;
++              }
++
++      *num_dumped_bytes = len_rounded;
++
++      return rc;
++}
++
+ int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer,
+                               u32 *num_dumped_bytes)
+ {
+@@ -7831,6 +7882,9 @@ enum debug_print_features {
+       IGU_FIFO = 6,
+       PHY = 7,
+       FW_ASSERTS = 8,
++      NVM_CFG1 = 9,
++      DEFAULT_CFG = 10,
++      NVM_META = 11,
+ };
+ static u32 qed_calc_regdump_header(enum debug_print_features feature,
+@@ -7965,13 +8019,61 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
+               DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
+       }
++      /* nvm cfg1 */
++      rc = qed_dbg_nvm_image(cdev,
++                             (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
++                             &feature_size, QED_NVM_IMAGE_NVM_CFG1);
++      if (!rc) {
++              *(u32 *)((u8 *)buffer + offset) =
++                  qed_calc_regdump_header(NVM_CFG1, cur_engine,
++                                          feature_size, omit_engine);
++              offset += (feature_size + REGDUMP_HEADER_SIZE);
++      } else if (rc != -ENOENT) {
++              DP_ERR(cdev,
++                     "qed_dbg_nvm_image failed for image  %d (%s), rc = %d\n",
++                     QED_NVM_IMAGE_NVM_CFG1, "QED_NVM_IMAGE_NVM_CFG1", rc);
++      }
++
++      /* nvm default */
++      rc = qed_dbg_nvm_image(cdev,
++                             (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
++                             &feature_size, QED_NVM_IMAGE_DEFAULT_CFG);
++      if (!rc) {
++              *(u32 *)((u8 *)buffer + offset) =
++                  qed_calc_regdump_header(DEFAULT_CFG, cur_engine,
++                                          feature_size, omit_engine);
++              offset += (feature_size + REGDUMP_HEADER_SIZE);
++      } else if (rc != -ENOENT) {
++              DP_ERR(cdev,
++                     "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
++                     QED_NVM_IMAGE_DEFAULT_CFG, "QED_NVM_IMAGE_DEFAULT_CFG",
++                     rc);
++      }
++
++      /* nvm meta */
++      rc = qed_dbg_nvm_image(cdev,
++                             (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
++                             &feature_size, QED_NVM_IMAGE_NVM_META);
++      if (!rc) {
++              *(u32 *)((u8 *)buffer + offset) =
++                  qed_calc_regdump_header(NVM_META, cur_engine,
++                                          feature_size, omit_engine);
++              offset += (feature_size + REGDUMP_HEADER_SIZE);
++      } else if (rc != -ENOENT) {
++              DP_ERR(cdev,
++                     "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
++                     QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META", rc);
++      }
++
+       return 0;
+ }
+ int qed_dbg_all_data_size(struct qed_dev *cdev)
+ {
++      struct qed_hwfn *p_hwfn =
++              &cdev->hwfns[cdev->dbg_params.engine_for_debug];
++      u32 regs_len = 0, image_len = 0;
+       u8 cur_engine, org_engine;
+-      u32 regs_len = 0;
+       org_engine = qed_get_debug_engine(cdev);
+       for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
+@@ -7993,6 +8095,15 @@ int qed_dbg_all_data_size(struct qed_dev *cdev)
+       /* Engine common */
+       regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev);
++      qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_CFG1, &image_len);
++      if (image_len)
++              regs_len += REGDUMP_HEADER_SIZE + image_len;
++      qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_DEFAULT_CFG, &image_len);
++      if (image_len)
++              regs_len += REGDUMP_HEADER_SIZE + image_len;
++      qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_META, &image_len);
++      if (image_len)
++              regs_len += REGDUMP_HEADER_SIZE + image_len;
+       return regs_len;
+ }
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+index 1377ad1..0550f0e 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+@@ -2529,7 +2529,7 @@ int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
+       return rc;
+ }
+-static int
++int
+ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
+                         enum qed_nvm_images image_id,
+                         struct qed_nvm_image_att *p_image_att)
+@@ -2545,6 +2545,15 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
+       case QED_NVM_IMAGE_FCOE_CFG:
+               type = NVM_TYPE_FCOE_CFG;
+               break;
++      case QED_NVM_IMAGE_NVM_CFG1:
++              type = NVM_TYPE_NVM_CFG1;
++              break;
++      case QED_NVM_IMAGE_DEFAULT_CFG:
++              type = NVM_TYPE_DEFAULT_CFG;
++              break;
++      case QED_NVM_IMAGE_NVM_META:
++              type = NVM_TYPE_META;
++              break;
+       default:
+               DP_NOTICE(p_hwfn, "Unknown request of image_id %08x\n",
+                         image_id);
+@@ -2588,9 +2597,6 @@ int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn,
+               return -EINVAL;
+       }
+-      /* Each NVM image is suffixed by CRC; Upper-layer has no need for it */
+-      image_att.length -= 4;
+-
+       if (image_att.length > buffer_len) {
+               DP_VERBOSE(p_hwfn,
+                          QED_MSG_STORAGE,
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+index dd62c38..3af3896 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+@@ -486,6 +486,20 @@ struct qed_nvm_image_att {
+  * @brief Allows reading a whole nvram image
+  *
+  * @param p_hwfn
++ * @param image_id - image to get attributes for
++ * @param p_image_att - image attributes structure into which to fill data
++ *
++ * @return int - 0 - operation was successful.
++ */
++int
++qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
++                        enum qed_nvm_images image_id,
++                        struct qed_nvm_image_att *p_image_att);
++
++/**
++ * @brief Allows reading a whole nvram image
++ *
++ * @param p_hwfn
+  * @param image_id - image requested for reading
+  * @param p_buffer - allocated buffer into which to fill data
+  * @param buffer_len - length of the allocated buffer.
+diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
+index b5b2bc9..e53f9c7 100644
+--- a/include/linux/qed/qed_if.h
++++ b/include/linux/qed/qed_if.h
+@@ -159,6 +159,9 @@ struct qed_dcbx_get {
+ enum qed_nvm_images {
+       QED_NVM_IMAGE_ISCSI_CFG,
+       QED_NVM_IMAGE_FCOE_CFG,
++      QED_NVM_IMAGE_NVM_CFG1,
++      QED_NVM_IMAGE_DEFAULT_CFG,
++      QED_NVM_IMAGE_NVM_META,
+ };
+ struct qed_link_eee_params {
+-- 
+2.9.5
+
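A standalone sketch of the dump layout this patch extends: each feature, now including the NVM images, is appended to the dump as a 32-bit header word followed by its data, and the size calculation adds one REGDUMP_HEADER_SIZE per image. The header packing below is illustrative; the real qed_calc_regdump_header() encoding differs.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define REGDUMP_HEADER_SIZE sizeof(uint32_t)

    enum debug_feature { NVM_CFG1 = 9, DEFAULT_CFG = 10, NVM_META = 11 };

    /* Illustrative packing: feature id in the low byte, size above it. */
    static uint32_t calc_regdump_header(uint32_t feature, uint32_t size_dwords)
    {
        return (size_dwords << 8) | (feature & 0xff);
    }

    /* Append one [header][data] record to the dump buffer. */
    static size_t dump_append(uint8_t *buf, size_t offset, uint32_t feature,
                              const void *data, uint32_t len)
    {
        uint32_t hdr = calc_regdump_header(feature, len / 4);

        memcpy(buf + offset, &hdr, REGDUMP_HEADER_SIZE);
        memcpy(buf + offset + REGDUMP_HEADER_SIZE, data, len);
        return offset + REGDUMP_HEADER_SIZE + len;
    }

    int main(void)
    {
        uint8_t dump[256], cfg1[16] = { 0 }, meta[8] = { 0 };
        size_t offset = 0;

        offset = dump_append(dump, offset, NVM_CFG1, cfg1, sizeof(cfg1));
        offset = dump_append(dump, offset, NVM_META, meta, sizeof(meta));
        printf("dump size: %zu bytes\n", offset);
        return 0;
    }
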
diff --git a/linux-next-cherry-picks/0006-RDMA-qedr-fix-spelling-mistake-failes-fails.patch b/linux-next-cherry-picks/0006-RDMA-qedr-fix-spelling-mistake-failes-fails.patch
new file mode 100644 (file)
index 0000000..4f91be6
--- /dev/null
@@ -0,0 +1,29 @@
+From ffab8c89ba59c4e01f9c277f1baaad12bd5a3c0c Mon Sep 17 00:00:00 2001
+From: Colin Ian King <colin.king@canonical.com>
+Date: Tue, 1 May 2018 09:25:49 +0100
+Subject: [PATCH 06/44] RDMA/qedr: fix spelling mistake: "failes" -> "fails"
+
+Trivial fix to spelling mistake in DP_ERR error message
+
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/qedr/verbs.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
+index 7d3763b..35f3b6f 100644
+--- a/drivers/infiniband/hw/qedr/verbs.c
++++ b/drivers/infiniband/hw/qedr/verbs.c
+@@ -2579,7 +2579,7 @@ static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
+       u32 pbes_in_page;
+       if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
+-              DP_ERR(mr->dev, "qedr_set_page failes when %d\n", mr->npages);
++              DP_ERR(mr->dev, "qedr_set_page fails when %d\n", mr->npages);
+               return -ENOMEM;
+       }
+-- 
+2.9.5
+
diff --git a/linux-next-cherry-picks/0007-qed-Refactor-mf_mode-to-consist-of-bits.patch b/linux-next-cherry-picks/0007-qed-Refactor-mf_mode-to-consist-of-bits.patch
new file mode 100644 (file)
index 0000000..3bb396f
--- /dev/null
@@ -0,0 +1,330 @@
+From 0bc5fe857274133ca028ebb15ff2e8549a369916 Mon Sep 17 00:00:00 2001
+From: Sudarsana Reddy Kalluru <sudarsana.kalluru@cavium.com>
+Date: Sat, 5 May 2018 18:42:59 -0700
+Subject: [PATCH 07/44] qed*: Refactor mf_mode to consist of bits.
+
+The `mf_mode' field indicates the multi-partitioning mode the device is
+configured to. This method doesn't scale very well: adding a new MF mode
+requires going over all the existing conditions and deciding whether each
+is needed for the new mode or not.
+The patch defines a set of bit-fields for modes, derived from the mode
+info shared by the MFW, and all configuration is made according to those.
+To add a new mode, there is a single place where we need to choose which
+bits apply and which don't.
+
+Signed-off-by: Sudarsana Reddy Kalluru <Sudarsana.Kalluru@cavium.com>
+Signed-off-by: Ariel Elior <ariel.elior@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/qlogic/qed/qed.h             | 41 ++++++++++++++++++++---
+ drivers/net/ethernet/qlogic/qed/qed_dev.c         | 39 +++++++++++----------
+ drivers/net/ethernet/qlogic/qed/qed_ll2.c         |  6 ++--
+ drivers/net/ethernet/qlogic/qed/qed_main.c        |  6 ++--
+ drivers/net/ethernet/qlogic/qed/qed_sp.h          |  3 +-
+ drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 16 +++------
+ drivers/net/ethernet/qlogic/qede/qede_main.c      |  4 +--
+ include/linux/qed/qed_if.h                        |  2 +-
+ 8 files changed, 71 insertions(+), 46 deletions(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
+index e07460a..c8f3507 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed.h
++++ b/drivers/net/ethernet/qlogic/qed/qed.h
+@@ -439,6 +439,41 @@ struct qed_fw_data {
+       u32                     init_ops_size;
+ };
++enum qed_mf_mode_bit {
++      /* Supports PF-classification based on tag */
++      QED_MF_OVLAN_CLSS,
++
++      /* Supports PF-classification based on MAC */
++      QED_MF_LLH_MAC_CLSS,
++
++      /* Supports PF-classification based on protocol type */
++      QED_MF_LLH_PROTO_CLSS,
++
++      /* Requires a default PF to be set */
++      QED_MF_NEED_DEF_PF,
++
++      /* Allow LL2 to multicast/broadcast */
++      QED_MF_LL2_NON_UNICAST,
++
++      /* Allow Cross-PF [& child VFs] Tx-switching */
++      QED_MF_INTER_PF_SWITCH,
++
++      /* Unified Fabric Port support enabled */
++      QED_MF_UFP_SPECIFIC,
++
++      /* Disable Accelerated Receive Flow Steering (aRFS) */
++      QED_MF_DISABLE_ARFS,
++
++      /* Use vlan for steering */
++      QED_MF_8021Q_TAGGING,
++
++      /* Use stag for steering */
++      QED_MF_8021AD_TAGGING,
++
++      /* Allow DSCP to TC mapping */
++      QED_MF_DSCP_TO_TC_MAP,
++};
++
+ enum BAR_ID {
+       BAR_ID_0,               /* used for GRC */
+       BAR_ID_1                /* Used for doorbells */
+@@ -669,10 +704,8 @@ struct qed_dev {
+       u8                              num_funcs_in_port;
+       u8                              path_id;
+-      enum qed_mf_mode                mf_mode;
+-#define IS_MF_DEFAULT(_p_hwfn)  (((_p_hwfn)->cdev)->mf_mode == QED_MF_DEFAULT)
+-#define IS_MF_SI(_p_hwfn)       (((_p_hwfn)->cdev)->mf_mode == QED_MF_NPAR)
+-#define IS_MF_SD(_p_hwfn)       (((_p_hwfn)->cdev)->mf_mode == QED_MF_OVLAN)
++
++      unsigned long                   mf_bits;
+       int                             pcie_width;
+       int                             pcie_speed;
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
+index d2ad5e9..9b07d7f 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
+@@ -1149,18 +1149,10 @@ static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
+               return -EINVAL;
+       }
+-      switch (p_hwfn->cdev->mf_mode) {
+-      case QED_MF_DEFAULT:
+-      case QED_MF_NPAR:
+-              hw_mode |= 1 << MODE_MF_SI;
+-              break;
+-      case QED_MF_OVLAN:
++      if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits))
+               hw_mode |= 1 << MODE_MF_SD;
+-              break;
+-      default:
+-              DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
++      else
+               hw_mode |= 1 << MODE_MF_SI;
+-      }
+       hw_mode |= 1 << MODE_ASIC;
+@@ -1557,7 +1549,6 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
+               /* send function start command */
+               rc = qed_sp_pf_start(p_hwfn, p_ptt, p_tunn,
+-                                   p_hwfn->cdev->mf_mode,
+                                    allow_npar_tx_switch);
+               if (rc) {
+                       DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
+@@ -2651,17 +2642,25 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+       switch (mf_mode) {
+       case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
+-              p_hwfn->cdev->mf_mode = QED_MF_OVLAN;
++              p_hwfn->cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS);
+               break;
+       case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
+-              p_hwfn->cdev->mf_mode = QED_MF_NPAR;
++              p_hwfn->cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) |
++                                      BIT(QED_MF_LLH_PROTO_CLSS) |
++                                      BIT(QED_MF_LL2_NON_UNICAST) |
++                                      BIT(QED_MF_INTER_PF_SWITCH);
+               break;
+       case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
+-              p_hwfn->cdev->mf_mode = QED_MF_DEFAULT;
++              p_hwfn->cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) |
++                                      BIT(QED_MF_LLH_PROTO_CLSS) |
++                                      BIT(QED_MF_LL2_NON_UNICAST);
++              if (QED_IS_BB(p_hwfn->cdev))
++                      p_hwfn->cdev->mf_bits |= BIT(QED_MF_NEED_DEF_PF);
+               break;
+       }
+-      DP_INFO(p_hwfn, "Multi function mode is %08x\n",
+-              p_hwfn->cdev->mf_mode);
++
++      DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n",
++              p_hwfn->cdev->mf_bits);
+       /* Read Multi-function information from shmem */
+       addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+@@ -3462,7 +3461,7 @@ int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn,
+       u32 high = 0, low = 0, en;
+       int i;
+-      if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
++      if (!test_bit(QED_MF_LLH_MAC_CLSS, &p_hwfn->cdev->mf_bits))
+               return 0;
+       qed_llh_mac_to_filter(&high, &low, p_filter);
+@@ -3507,7 +3506,7 @@ void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn,
+       u32 high = 0, low = 0;
+       int i;
+-      if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
++      if (!test_bit(QED_MF_LLH_MAC_CLSS, &p_hwfn->cdev->mf_bits))
+               return;
+       qed_llh_mac_to_filter(&high, &low, p_filter);
+@@ -3549,7 +3548,7 @@ qed_llh_add_protocol_filter(struct qed_hwfn *p_hwfn,
+       u32 high = 0, low = 0, en;
+       int i;
+-      if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
++      if (!test_bit(QED_MF_LLH_PROTO_CLSS, &p_hwfn->cdev->mf_bits))
+               return 0;
+       switch (type) {
+@@ -3647,7 +3646,7 @@ qed_llh_remove_protocol_filter(struct qed_hwfn *p_hwfn,
+       u32 high = 0, low = 0;
+       int i;
+-      if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
++      if (!test_bit(QED_MF_LLH_PROTO_CLSS, &p_hwfn->cdev->mf_bits))
+               return;
+       switch (type) {
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+index 3850281..6c942c1 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+@@ -922,9 +922,9 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
+       p_ramrod->queue_id = p_ll2_conn->queue_id;
+       p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 1 : 0;
+-      if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) &&
+-          p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE) &&
+-          (conn_type != QED_LL2_TYPE_IWARP)) {
++      if (test_bit(QED_MF_LL2_NON_UNICAST, &p_hwfn->cdev->mf_bits) &&
++          p_ramrod->main_func_queue && conn_type != QED_LL2_TYPE_ROCE &&
++          conn_type != QED_LL2_TYPE_IWARP) {
+               p_ramrod->mf_si_bcast_accept_all = 1;
+               p_ramrod->mf_si_mcast_accept_all = 1;
+       } else {
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
+index d1d3787..307fe33 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
+@@ -264,7 +264,8 @@ int qed_fill_dev_info(struct qed_dev *cdev,
+       dev_info->pci_mem_end = cdev->pci_params.mem_end;
+       dev_info->pci_irq = cdev->pci_params.irq;
+       dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn);
+-      dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
++      dev_info->is_mf_default = !test_bit(QED_MF_LLH_MAC_CLSS,
++                                          &cdev->mf_bits);
+       dev_info->dev_type = cdev->type;
+       ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);
+@@ -273,7 +274,8 @@ int qed_fill_dev_info(struct qed_dev *cdev,
+               dev_info->fw_minor = FW_MINOR_VERSION;
+               dev_info->fw_rev = FW_REVISION_VERSION;
+               dev_info->fw_eng = FW_ENGINEERING_VERSION;
+-              dev_info->mf_mode = cdev->mf_mode;
++              dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH,
++                                                     &cdev->mf_bits);
+               dev_info->tx_switching = true;
+               if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
+index ab4ad8a..7680222 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
+@@ -416,7 +416,6 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
+  * @param p_hwfn
+  * @param p_ptt
+  * @param p_tunn
+- * @param mode
+  * @param allow_npar_tx_switch
+  *
+  * @return int
+@@ -425,7 +424,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
+ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
+                   struct qed_ptt *p_ptt,
+                   struct qed_tunnel_info *p_tunn,
+-                  enum qed_mf_mode mode, bool allow_npar_tx_switch);
++                  bool allow_npar_tx_switch);
+ /**
+  * @brief qed_sp_pf_update - PF Function Update Ramrod
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+index 5e927b6..fbb3172 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+@@ -306,7 +306,7 @@ qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn,
+ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
+                   struct qed_ptt *p_ptt,
+                   struct qed_tunnel_info *p_tunn,
+-                  enum qed_mf_mode mode, bool allow_npar_tx_switch)
++                  bool allow_npar_tx_switch)
+ {
+       struct pf_start_ramrod_data *p_ramrod = NULL;
+       u16 sb = qed_int_get_sp_sb_id(p_hwfn);
+@@ -339,18 +339,10 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
+       p_ramrod->dont_log_ramrods      = 0;
+       p_ramrod->log_type_mask         = cpu_to_le16(0xf);
+-      switch (mode) {
+-      case QED_MF_DEFAULT:
+-      case QED_MF_NPAR:
+-              p_ramrod->mf_mode = MF_NPAR;
+-              break;
+-      case QED_MF_OVLAN:
++      if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits))
+               p_ramrod->mf_mode = MF_OVLAN;
+-              break;
+-      default:
+-              DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
++      else
+               p_ramrod->mf_mode = MF_NPAR;
+-      }
+       p_ramrod->outer_tag_config.outer_tag.tci =
+               cpu_to_le16(p_hwfn->hw_info.ovlan);
+@@ -365,7 +357,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
+       qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config);
+-      if (IS_MF_SI(p_hwfn))
++      if (test_bit(QED_MF_INTER_PF_SWITCH, &p_hwfn->cdev->mf_bits))
+               p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;
+       switch (p_hwfn->hw_info.personality) {
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
+index a01e7d6..89c581c 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
+@@ -199,7 +199,7 @@ static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
+       /* Enable/Disable Tx switching for PF */
+       if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
+-          qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) {
++          !qed_info->b_inter_pf_switch && qed_info->tx_switching) {
+               vport_params->vport_id = 0;
+               vport_params->update_tx_switching_flg = 1;
+               vport_params->tx_switching_flg = num_vfs_param ? 1 : 0;
+@@ -1928,7 +1928,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
+       vport_update_params->update_vport_active_flg = 1;
+       vport_update_params->vport_active_flg = 1;
+-      if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) &&
++      if ((qed_info->b_inter_pf_switch || pci_num_vf(edev->pdev)) &&
+           qed_info->tx_switching) {
+               vport_update_params->update_tx_switching_flg = 1;
+               vport_update_params->tx_switching_flg = 1;
+diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
+index e53f9c7..5dac561 100644
+--- a/include/linux/qed/qed_if.h
++++ b/include/linux/qed/qed_if.h
+@@ -359,7 +359,7 @@ struct qed_dev_info {
+ #define QED_MFW_VERSION_3_OFFSET      24
+       u32             flash_size;
+-      u8              mf_mode;
++      bool            b_inter_pf_switch;
+       bool            tx_switching;
+       bool            rdma_supported;
+       u16             mtu;
+-- 
+2.9.5
+
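The hunks above complete the mf_mode-to-mf_bits refactor: instead of switching on a single enum, each call site now tests only the capability bit it needs. A minimal standalone C sketch of the pattern follows; the bit names echo the QED_MF_* flags, but everything else here is illustrative, not the driver's code:

    #include <stdbool.h>

    enum mf_bit {
            MF_LLH_MAC_CLSS,        /* MAC-based classification */
            MF_OVLAN_CLSS,          /* outer-vlan classification */
            MF_INTER_PF_SWITCH,     /* inter-PF tx switching */
    };

    static bool mf_test_bit(unsigned long bits, enum mf_bit b)
    {
            return bits & (1UL << b);
    }

    /* Call sites ask a capability question rather than matching a mode: */
    static int ramrod_mf_mode(unsigned long mf_bits)
    {
            return mf_test_bit(mf_bits, MF_OVLAN_CLSS) ? 1 /* MF_OVLAN */
                                                       : 0 /* MF_NPAR  */;
    }

This is why the qed_sp_pf_start() hunk can drop the switch statement: a new mode only has to set the right bits, and no consumer needs updating.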
diff --git a/linux-next-cherry-picks/0008-qed-Remove-unused-data-member-is_mf_default.patch b/linux-next-cherry-picks/0008-qed-Remove-unused-data-member-is_mf_default.patch
new file mode 100644 (file)
index 0000000..501a0fc
--- /dev/null
@@ -0,0 +1,44 @@
+From 27bf96e32c92599dc7523b36d6c761fc8312c8c0 Mon Sep 17 00:00:00 2001
+From: Sudarsana Reddy Kalluru <sudarsana.kalluru@cavium.com>
+Date: Sat, 5 May 2018 18:43:00 -0700
+Subject: [PATCH 08/44] qed: Remove unused data member 'is_mf_default'.
+
+The data member 'is_mf_default' is not used by the qed/qede drivers,
+so remove it.
+
+Signed-off-by: Sudarsana Reddy Kalluru <Sudarsana.Kalluru@cavium.com>
+Signed-off-by: Ariel Elior <ariel.elior@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/qlogic/qed/qed_main.c | 2 --
+ include/linux/qed/qed_if.h                 | 1 -
+ 2 files changed, 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
+index 307fe33..70bc563 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
+@@ -264,8 +264,6 @@ int qed_fill_dev_info(struct qed_dev *cdev,
+       dev_info->pci_mem_end = cdev->pci_params.mem_end;
+       dev_info->pci_irq = cdev->pci_params.irq;
+       dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn);
+-      dev_info->is_mf_default = !test_bit(QED_MF_LLH_MAC_CLSS,
+-                                          &cdev->mf_bits);
+       dev_info->dev_type = cdev->type;
+       ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);
+diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
+index 5dac561..907976f 100644
+--- a/include/linux/qed/qed_if.h
++++ b/include/linux/qed/qed_if.h
+@@ -339,7 +339,6 @@ struct qed_dev_info {
+       u8              num_hwfns;
+       u8              hw_mac[ETH_ALEN];
+-      bool            is_mf_default;
+       /* FW version */
+       u16             fw_major;
+-- 
+2.9.5
+
diff --git a/linux-next-cherry-picks/0009-qed-Add-support-for-multi-function-mode-with-802.1ad.patch b/linux-next-cherry-picks/0009-qed-Add-support-for-multi-function-mode-with-802.1ad.patch
new file mode 100644 (file)
index 0000000..ba18a0c
--- /dev/null
@@ -0,0 +1,132 @@
+From b51bdfb9cbe2ecf99a4c45c48c6286963344786c Mon Sep 17 00:00:00 2001
+From: Sudarsana Reddy Kalluru <sudarsana.kalluru@cavium.com>
+Date: Sat, 5 May 2018 18:43:01 -0700
+Subject: [PATCH 09/44] qed: Add support for multi function mode with 802.1ad
+ tagging.
+
+The patch adds support for a new multi-function mode in which traffic
+classification is based on 802.1ad tagging and the outer vlan tag
+provided by the management firmware.
+
+Signed-off-by: Sudarsana Reddy Kalluru <Sudarsana.Kalluru@cavium.com>
+Signed-off-by: Ariel Elior <ariel.elior@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/qlogic/qed/qed_dev.c         | 64 ++++++++++++++++-------
+ drivers/net/ethernet/qlogic/qed/qed_sp_commands.c |  5 ++
+ 2 files changed, 49 insertions(+), 20 deletions(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
+index 9b07d7f..95d00cb 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
+@@ -1668,6 +1668,18 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
+               if (rc)
+                       return rc;
++              if (IS_PF(cdev) && test_bit(QED_MF_8021AD_TAGGING,
++                                          &cdev->mf_bits)) {
++                      STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET,
++                                   ETH_P_8021AD);
++                      STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET,
++                                   ETH_P_8021AD);
++                      STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET,
++                                   ETH_P_8021AD);
++                      STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET,
++                                   ETH_P_8021AD);
++              }
++
+               qed_fill_load_req_params(&load_req_params,
+                                        p_params->p_drv_load_params);
+               rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
+@@ -2630,39 +2642,51 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+                  link->pause.autoneg,
+                  p_caps->default_eee, p_caps->eee_lpi_timer);
+-      /* Read Multi-function information from shmem */
+-      addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+-             offsetof(struct nvm_cfg1, glob) +
+-             offsetof(struct nvm_cfg1_glob, generic_cont0);
++      if (IS_LEAD_HWFN(p_hwfn)) {
++              struct qed_dev *cdev = p_hwfn->cdev;
+-      generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);
++              /* Read Multi-function information from shmem */
++              addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
++                     offsetof(struct nvm_cfg1, glob) +
++                     offsetof(struct nvm_cfg1_glob, generic_cont0);
+-      mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
+-                NVM_CFG1_GLOB_MF_MODE_OFFSET;
++              generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);
+-      switch (mf_mode) {
+-      case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
+-              p_hwfn->cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS);
+-              break;
+-      case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
+-              p_hwfn->cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) |
++              mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
++                        NVM_CFG1_GLOB_MF_MODE_OFFSET;
++
++              switch (mf_mode) {
++              case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
++                      cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS);
++                      break;
++              case NVM_CFG1_GLOB_MF_MODE_BD:
++                      cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) |
++                                      BIT(QED_MF_LLH_PROTO_CLSS) |
++                                      BIT(QED_MF_8021AD_TAGGING);
++                      break;
++              case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
++                      cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) |
+                                       BIT(QED_MF_LLH_PROTO_CLSS) |
+                                       BIT(QED_MF_LL2_NON_UNICAST) |
+                                       BIT(QED_MF_INTER_PF_SWITCH);
+-              break;
+-      case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
+-              p_hwfn->cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) |
++                      break;
++              case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
++                      cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) |
+                                       BIT(QED_MF_LLH_PROTO_CLSS) |
+                                       BIT(QED_MF_LL2_NON_UNICAST);
+-              if (QED_IS_BB(p_hwfn->cdev))
+-                      p_hwfn->cdev->mf_bits |= BIT(QED_MF_NEED_DEF_PF);
+-              break;
++                      if (QED_IS_BB(p_hwfn->cdev))
++                              cdev->mf_bits |= BIT(QED_MF_NEED_DEF_PF);
++                      break;
++              }
++
++              DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n",
++                      cdev->mf_bits);
+       }
+       DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n",
+               p_hwfn->cdev->mf_bits);
+-      /* Read Multi-function information from shmem */
++      /* Read device capabilities information from shmem */
+       addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+               offsetof(struct nvm_cfg1, glob) +
+               offsetof(struct nvm_cfg1_glob, device_capabilities);
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+index fbb3172..26bed26 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+@@ -346,6 +346,11 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
+       p_ramrod->outer_tag_config.outer_tag.tci =
+               cpu_to_le16(p_hwfn->hw_info.ovlan);
++      if (test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits)) {
++              p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021AD;
++              p_ramrod->outer_tag_config.enable_stag_pri_change = 1;
++      }
++
+       /* Place EQ address in RAMROD */
+       DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
+-- 
+2.9.5
+
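A side note on the decode in qed_hw_get_nvm_info() above: the mode is extracted from a packed NVM config word with the usual mask/offset idiom and then fanned out into independent capability bits, so later code never consults the raw mode again. A reduced sketch of that flow; the mask, offset and mode values here are invented for illustration and are not the real NVM_CFG1_GLOB_* definitions:

    #define MF_MODE_MASK    0x00000f00      /* illustrative values */
    #define MF_MODE_OFFSET  8
    #define BIT(n)          (1UL << (n))

    enum { MODE_MF_ALLOWED, MODE_BD, MODE_NPAR1_0, MODE_DEFAULT };
    enum { OVLAN_CLSS, LLH_PROTO_CLSS, TAG_8021AD, LLH_MAC_CLSS };

    static unsigned long decode_mf_bits(unsigned int generic_cont0)
    {
            switch ((generic_cont0 & MF_MODE_MASK) >> MF_MODE_OFFSET) {
            case MODE_BD:           /* 802.1ad mode added by this patch */
                    return BIT(OVLAN_CLSS) | BIT(LLH_PROTO_CLSS) |
                           BIT(TAG_8021AD);
            case MODE_MF_ALLOWED:
                    return BIT(OVLAN_CLSS);
            default:
                    return BIT(LLH_MAC_CLSS) | BIT(LLH_PROTO_CLSS);
            }
    }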
diff --git a/linux-next-cherry-picks/0010-qed-Add-support-for-Unified-Fabric-Port.patch b/linux-next-cherry-picks/0010-qed-Add-support-for-Unified-Fabric-Port.patch
new file mode 100644 (file)
index 0000000..9dad6d7
--- /dev/null
@@ -0,0 +1,621 @@
+From cac6f691546b9efd50c31c0db97fe50d0357104a Mon Sep 17 00:00:00 2001
+From: Sudarsana Reddy Kalluru <sudarsana.kalluru@cavium.com>
+Date: Sat, 5 May 2018 18:43:02 -0700
+Subject: [PATCH 10/44] qed: Add support for Unified Fabric Port.
+
+This patch adds driver changes for supporting the Unified Fabric Port
+(UFP). This is a new partitioning mode wherein the MFW provides the set
+of parameters to be used by the device, such as traffic class, outer-vlan
+tag value, priority type etc. The driver receives this info via
+notifications from the MFW and configures the hardware accordingly.
+
+Signed-off-by: Sudarsana Reddy Kalluru <Sudarsana.Kalluru@cavium.com>
+Signed-off-by: Ariel Elior <ariel.elior@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/qlogic/qed/qed.h             | 20 ++++++
+ drivers/net/ethernet/qlogic/qed/qed_dcbx.c        | 14 +++-
+ drivers/net/ethernet/qlogic/qed/qed_dev.c         | 32 ++++++++--
+ drivers/net/ethernet/qlogic/qed/qed_fcoe.c        |  3 +
+ drivers/net/ethernet/qlogic/qed/qed_hsi.h         | 28 ++++++++
+ drivers/net/ethernet/qlogic/qed/qed_ll2.c         | 40 ++++++++----
+ drivers/net/ethernet/qlogic/qed/qed_mcp.c         | 78 +++++++++++++++++++++++
+ drivers/net/ethernet/qlogic/qed/qed_mcp.h         |  8 +++
+ drivers/net/ethernet/qlogic/qed/qed_sp.h          |  9 +++
+ drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 57 ++++++++++++++++-
+ include/linux/qed/qed_ll2_if.h                    | 10 ++-
+ 11 files changed, 276 insertions(+), 23 deletions(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
+index c8f3507..adcff49 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed.h
++++ b/drivers/net/ethernet/qlogic/qed/qed.h
+@@ -474,6 +474,24 @@ enum qed_mf_mode_bit {
+       QED_MF_DSCP_TO_TC_MAP,
+ };
++enum qed_ufp_mode {
++      QED_UFP_MODE_ETS,
++      QED_UFP_MODE_VNIC_BW,
++      QED_UFP_MODE_UNKNOWN
++};
++
++enum qed_ufp_pri_type {
++      QED_UFP_PRI_OS,
++      QED_UFP_PRI_VNIC,
++      QED_UFP_PRI_UNKNOWN
++};
++
++struct qed_ufp_info {
++      enum qed_ufp_pri_type pri_type;
++      enum qed_ufp_mode mode;
++      u8 tc;
++};
++
+ enum BAR_ID {
+       BAR_ID_0,               /* used for GRC */
+       BAR_ID_1                /* Used for doorbells */
+@@ -582,6 +600,8 @@ struct qed_hwfn {
+       struct qed_dcbx_info            *p_dcbx_info;
++      struct qed_ufp_info             ufp_info;
++
+       struct qed_dmae_info            dmae_info;
+       /* QM init */
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+index 449777f..8f31406 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+@@ -274,8 +274,8 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
+                    u32 pri_tc_tbl, int count, u8 dcbx_version)
+ {
+       enum dcbx_protocol_type type;
++      bool enable, ieee, eth_tlv;
+       u8 tc, priority_map;
+-      bool enable, ieee;
+       u16 protocol_id;
+       int priority;
+       int i;
+@@ -283,6 +283,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
+       DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Num APP entries = %d\n", count);
+       ieee = (dcbx_version == DCBX_CONFIG_VERSION_IEEE);
++      eth_tlv = false;
+       /* Parse APP TLV */
+       for (i = 0; i < count; i++) {
+               protocol_id = QED_MFW_GET_FIELD(p_tbl[i].entry,
+@@ -304,13 +305,22 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
+                        * indication, but we only got here if there was an
+                        * app tlv for the protocol, so dcbx must be enabled.
+                        */
+-                      enable = !(type == DCBX_PROTOCOL_ETH);
++                      if (type == DCBX_PROTOCOL_ETH) {
++                              enable = false;
++                              eth_tlv = true;
++                      } else {
++                              enable = true;
++                      }
+                       qed_dcbx_update_app_info(p_data, p_hwfn, enable,
+                                                priority, tc, type);
+               }
+       }
++      /* If Eth TLV is not detected, use UFP TC as default TC */
++      if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) && !eth_tlv)
++              p_data->arr[DCBX_PROTOCOL_ETH].tc = p_hwfn->ufp_info.tc;
++
+       /* Update ramrod protocol data and hw_info fields
+        * with default info when corresponding APP TLV's are not detected.
+        * The enabled field has a different logic for ethernet as only for
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
+index 95d00cb..5605289 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
+@@ -1499,6 +1499,11 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
+               STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
+               STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
+                            p_hwfn->hw_info.ovlan);
++
++              DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
++                         "Configuring LLH_FUNC_FILTER_HDR_SEL\n");
++              STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET,
++                           1);
+       }
+       /* Enable classification by MAC if needed */
+@@ -1635,6 +1640,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
+       bool b_default_mtu = true;
+       struct qed_hwfn *p_hwfn;
+       int rc = 0, mfw_rc, i;
++      u16 ether_type;
+       if ((p_params->int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
+               DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
+@@ -1668,16 +1674,22 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
+               if (rc)
+                       return rc;
+-              if (IS_PF(cdev) && test_bit(QED_MF_8021AD_TAGGING,
+-                                          &cdev->mf_bits)) {
++              if (IS_PF(cdev) && (test_bit(QED_MF_8021Q_TAGGING,
++                                           &cdev->mf_bits) ||
++                                  test_bit(QED_MF_8021AD_TAGGING,
++                                           &cdev->mf_bits))) {
++                      if (test_bit(QED_MF_8021Q_TAGGING, &cdev->mf_bits))
++                              ether_type = ETH_P_8021Q;
++                      else
++                              ether_type = ETH_P_8021AD;
+                       STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET,
+-                                   ETH_P_8021AD);
++                                   ether_type);
+                       STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET,
+-                                   ETH_P_8021AD);
++                                   ether_type);
+                       STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET,
+-                                   ETH_P_8021AD);
++                                   ether_type);
+                       STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET,
+-                                   ETH_P_8021AD);
++                                   ether_type);
+               }
+               qed_fill_load_req_params(&load_req_params,
+@@ -2659,6 +2671,12 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+               case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
+                       cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS);
+                       break;
++              case NVM_CFG1_GLOB_MF_MODE_UFP:
++                      cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) |
++                                      BIT(QED_MF_LLH_PROTO_CLSS) |
++                                      BIT(QED_MF_UFP_SPECIFIC) |
++                                      BIT(QED_MF_8021Q_TAGGING);
++                      break;
+               case NVM_CFG1_GLOB_MF_MODE_BD:
+                       cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) |
+                                       BIT(QED_MF_LLH_PROTO_CLSS) |
+@@ -2879,6 +2897,8 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
+               qed_mcp_cmd_port_init(p_hwfn, p_ptt);
+               qed_get_eee_caps(p_hwfn, p_ptt);
++
++              qed_mcp_read_ufp_config(p_hwfn, p_ptt);
+       }
+       if (qed_mcp_is_init(p_hwfn)) {
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
+index 2dc9b31..cc1b373 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
+@@ -313,6 +313,9 @@ qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn,
+       p_data->d_id.addr_mid = p_conn->d_id.addr_mid;
+       p_data->d_id.addr_lo = p_conn->d_id.addr_lo;
+       p_data->flags = p_conn->flags;
++      if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
++              SET_FIELD(p_data->flags,
++                        FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN, 1);
+       p_data->def_q_idx = p_conn->def_q_idx;
+       return qed_spq_post(p_hwfn, p_ent, NULL);
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+index 7f5ec42..b5f70ef 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+@@ -11993,6 +11993,16 @@ struct public_port {
+ #define EEE_REMOTE_TW_TX_OFFSET 0
+ #define EEE_REMOTE_TW_RX_MASK   0xffff0000
+ #define EEE_REMOTE_TW_RX_OFFSET 16
++
++      u32 oem_cfg_port;
++#define OEM_CFG_CHANNEL_TYPE_MASK                       0x00000003
++#define OEM_CFG_CHANNEL_TYPE_OFFSET                     0
++#define OEM_CFG_CHANNEL_TYPE_VLAN_PARTITION             0x1
++#define OEM_CFG_CHANNEL_TYPE_STAGGED                    0x2
++#define OEM_CFG_SCHED_TYPE_MASK                         0x0000000C
++#define OEM_CFG_SCHED_TYPE_OFFSET                       2
++#define OEM_CFG_SCHED_TYPE_ETS                          0x1
++#define OEM_CFG_SCHED_TYPE_VNIC_BW                      0x2
+ };
+ struct public_func {
+@@ -12069,6 +12079,23 @@ struct public_func {
+ #define DRV_ID_DRV_INIT_HW_MASK               0x80000000
+ #define DRV_ID_DRV_INIT_HW_SHIFT      31
+ #define DRV_ID_DRV_INIT_HW_FLAG               (1 << DRV_ID_DRV_INIT_HW_SHIFT)
++
++      u32 oem_cfg_func;
++#define OEM_CFG_FUNC_TC_MASK                    0x0000000F
++#define OEM_CFG_FUNC_TC_OFFSET                  0
++#define OEM_CFG_FUNC_TC_0                       0x0
++#define OEM_CFG_FUNC_TC_1                       0x1
++#define OEM_CFG_FUNC_TC_2                       0x2
++#define OEM_CFG_FUNC_TC_3                       0x3
++#define OEM_CFG_FUNC_TC_4                       0x4
++#define OEM_CFG_FUNC_TC_5                       0x5
++#define OEM_CFG_FUNC_TC_6                       0x6
++#define OEM_CFG_FUNC_TC_7                       0x7
++
++#define OEM_CFG_FUNC_HOST_PRI_CTRL_MASK         0x00000030
++#define OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET       4
++#define OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC         0x1
++#define OEM_CFG_FUNC_HOST_PRI_CTRL_OS           0x2
+ };
+ struct mcp_mac {
+@@ -12495,6 +12522,7 @@ enum MFW_DRV_MSG_TYPE {
+       MFW_DRV_MSG_BW_UPDATE10,
+       MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
+       MFW_DRV_MSG_BW_UPDATE11,
++      MFW_DRV_MSG_OEM_CFG_UPDATE,
+       MFW_DRV_MSG_MAX
+ };
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+index 6c942c1..c3c1a99 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+@@ -919,6 +919,10 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
+       p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg;
+       p_ramrod->inner_vlan_stripping_en =
+               p_ll2_conn->input.rx_vlan_removal_en;
++
++      if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) &&
++          p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE)
++              p_ramrod->report_outer_vlan = 1;
+       p_ramrod->queue_id = p_ll2_conn->queue_id;
+       p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 1 : 0;
+@@ -1493,11 +1497,12 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
+       qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);
+       if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
++              if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
++                      qed_llh_add_protocol_filter(p_hwfn, p_ptt,
++                                                  ETH_P_FCOE, 0,
++                                                  QED_LLH_FILTER_ETHERTYPE);
+               qed_llh_add_protocol_filter(p_hwfn, p_ptt,
+-                                          0x8906, 0,
+-                                          QED_LLH_FILTER_ETHERTYPE);
+-              qed_llh_add_protocol_filter(p_hwfn, p_ptt,
+-                                          0x8914, 0,
++                                          ETH_P_FIP, 0,
+                                           QED_LLH_FILTER_ETHERTYPE);
+       }
+@@ -1653,11 +1658,16 @@ qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
+       start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
+       if (QED_IS_IWARP_PERSONALITY(p_hwfn) &&
+-          p_ll2->input.conn_type == QED_LL2_TYPE_OOO)
++          p_ll2->input.conn_type == QED_LL2_TYPE_OOO) {
+               start_bd->nw_vlan_or_lb_echo =
+                   cpu_to_le16(IWARP_LL2_IN_ORDER_TX_QUEUE);
+-      else
++      } else {
+               start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan);
++              if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) &&
++                  p_ll2->input.conn_type == QED_LL2_TYPE_FCOE)
++                      pkt->remove_stag = true;
++      }
++
+       SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
+                 cpu_to_le16(pkt->l4_hdr_offset_w));
+       SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
+@@ -1668,6 +1678,9 @@ qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
+       SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_CSUM, !!(pkt->enable_ip_cksum));
+       SET_FIELD(bd_data, CORE_TX_BD_DATA_L4_CSUM, !!(pkt->enable_l4_cksum));
+       SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_LEN, !!(pkt->calc_ip_len));
++      SET_FIELD(bd_data, CORE_TX_BD_DATA_DISABLE_STAG_INSERTION,
++                !!(pkt->remove_stag));
++
+       start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
+       DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag);
+       start_bd->nbytes = cpu_to_le16(pkt->first_frag_len);
+@@ -1884,11 +1897,12 @@ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
+               qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
+       if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
++              if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
++                      qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
++                                                     ETH_P_FCOE, 0,
++                                                    QED_LLH_FILTER_ETHERTYPE);
+               qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
+-                                             0x8906, 0,
+-                                             QED_LLH_FILTER_ETHERTYPE);
+-              qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
+-                                             0x8914, 0,
++                                             ETH_P_FIP, 0,
+                                              QED_LLH_FILTER_ETHERTYPE);
+       }
+@@ -2360,7 +2374,8 @@ static int qed_ll2_stop(struct qed_dev *cdev)
+       return -EINVAL;
+ }
+-static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
++static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
++                            unsigned long xmit_flags)
+ {
+       struct qed_ll2_tx_pkt_info pkt;
+       const skb_frag_t *frag;
+@@ -2405,6 +2420,9 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
+       pkt.first_frag = mapping;
+       pkt.first_frag_len = skb->len;
+       pkt.cookie = skb;
++      if (test_bit(QED_MF_UFP_SPECIFIC, &cdev->mf_bits) &&
++          test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags))
++              pkt.remove_stag = true;
+       rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle,
+                                      &pkt, 1);
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+index 0550f0e..e80f5e7 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+@@ -40,6 +40,7 @@
+ #include <linux/string.h>
+ #include <linux/etherdevice.h>
+ #include "qed.h"
++#include "qed_cxt.h"
+ #include "qed_dcbx.h"
+ #include "qed_hsi.h"
+ #include "qed_hw.h"
+@@ -1486,6 +1487,80 @@ static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+                   &resp, &param);
+ }
++void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
++{
++      struct public_func shmem_info;
++      u32 port_cfg, val;
++
++      if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
++              return;
++
++      memset(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info));
++      port_cfg = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
++                        offsetof(struct public_port, oem_cfg_port));
++      val = (port_cfg & OEM_CFG_CHANNEL_TYPE_MASK) >>
++              OEM_CFG_CHANNEL_TYPE_OFFSET;
++      if (val != OEM_CFG_CHANNEL_TYPE_STAGGED)
++              DP_NOTICE(p_hwfn, "Incorrect UFP Channel type  %d\n", val);
++
++      val = (port_cfg & OEM_CFG_SCHED_TYPE_MASK) >> OEM_CFG_SCHED_TYPE_OFFSET;
++      if (val == OEM_CFG_SCHED_TYPE_ETS) {
++              p_hwfn->ufp_info.mode = QED_UFP_MODE_ETS;
++      } else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW) {
++              p_hwfn->ufp_info.mode = QED_UFP_MODE_VNIC_BW;
++      } else {
++              p_hwfn->ufp_info.mode = QED_UFP_MODE_UNKNOWN;
++              DP_NOTICE(p_hwfn, "Unknown UFP scheduling mode %d\n", val);
++      }
++
++      qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
++      val = (port_cfg & OEM_CFG_FUNC_TC_MASK) >> OEM_CFG_FUNC_TC_OFFSET;
++      p_hwfn->ufp_info.tc = (u8)val;
++      val = (port_cfg & OEM_CFG_FUNC_HOST_PRI_CTRL_MASK) >>
++              OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET;
++      if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC) {
++              p_hwfn->ufp_info.pri_type = QED_UFP_PRI_VNIC;
++      } else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS) {
++              p_hwfn->ufp_info.pri_type = QED_UFP_PRI_OS;
++      } else {
++              p_hwfn->ufp_info.pri_type = QED_UFP_PRI_UNKNOWN;
++              DP_NOTICE(p_hwfn, "Unknown Host priority control %d\n", val);
++      }
++
++      DP_NOTICE(p_hwfn,
++                "UFP shmem config: mode = %d tc = %d pri_type = %d\n",
++                p_hwfn->ufp_info.mode,
++                p_hwfn->ufp_info.tc, p_hwfn->ufp_info.pri_type);
++}
++
++static int
++qed_mcp_handle_ufp_event(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
++{
++      qed_mcp_read_ufp_config(p_hwfn, p_ptt);
++
++      if (p_hwfn->ufp_info.mode == QED_UFP_MODE_VNIC_BW) {
++              p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc;
++              p_hwfn->hw_info.offload_tc = p_hwfn->ufp_info.tc;
++
++              qed_qm_reconf(p_hwfn, p_ptt);
++      } else if (p_hwfn->ufp_info.mode == QED_UFP_MODE_ETS) {
++              /* Merge UFP TC with the dcbx TC data */
++              qed_dcbx_mib_update_event(p_hwfn, p_ptt,
++                                        QED_DCBX_OPERATIONAL_MIB);
++      } else {
++              DP_ERR(p_hwfn, "Invalid sched type, discard the UFP config\n");
++              return -EINVAL;
++      }
++
++      /* update storm FW with negotiation results */
++      qed_sp_pf_update_ufp(p_hwfn);
++
++      /* update stag pcp value */
++      qed_sp_pf_update_stag(p_hwfn);
++
++      return 0;
++}
++
+ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
+                         struct qed_ptt *p_ptt)
+ {
+@@ -1529,6 +1604,9 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
+                       qed_dcbx_mib_update_event(p_hwfn, p_ptt,
+                                                 QED_DCBX_OPERATIONAL_MIB);
+                       break;
++              case MFW_DRV_MSG_OEM_CFG_UPDATE:
++                      qed_mcp_handle_ufp_event(p_hwfn, p_ptt);
++                      break;
+               case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
+                       qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
+                       break;
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+index 3af3896..250579b 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+@@ -1005,6 +1005,14 @@ int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+ int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+ /**
++ * @brief Read ufp config from the shared memory.
++ *
++ * @param p_hwfn
++ * @param p_ptt
++ */
++void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
++
++/**
+  * @brief Populate the nvm info shadow in the given hardware function
+  *
+  * @param p_hwfn
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
+index 7680222..e95431f 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
+@@ -462,6 +462,15 @@ int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn);
+  * @return int
+  */
++/**
++ * @brief qed_sp_pf_update_ufp - PF ufp update Ramrod
++ *
++ * @param p_hwfn
++ *
++ * @return int
++ */
++int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn);
++
+ int qed_sp_pf_stop(struct qed_hwfn *p_hwfn);
+ int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+index 26bed26..8de644b4 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+@@ -314,7 +314,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
+       struct qed_spq_entry *p_ent = NULL;
+       struct qed_sp_init_data init_data;
+       int rc = -EINVAL;
+-      u8 page_cnt;
++      u8 page_cnt, i;
+       /* update initial eq producer */
+       qed_eq_prod_update(p_hwfn,
+@@ -345,12 +345,30 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
+               p_ramrod->mf_mode = MF_NPAR;
+       p_ramrod->outer_tag_config.outer_tag.tci =
+-              cpu_to_le16(p_hwfn->hw_info.ovlan);
+-      if (test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits)) {
++                              cpu_to_le16(p_hwfn->hw_info.ovlan);
++      if (test_bit(QED_MF_8021Q_TAGGING, &p_hwfn->cdev->mf_bits)) {
++              p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021Q;
++      } else if (test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits)) {
+               p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021AD;
+               p_ramrod->outer_tag_config.enable_stag_pri_change = 1;
+       }
++      p_ramrod->outer_tag_config.pri_map_valid = 1;
++      for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
++              p_ramrod->outer_tag_config.inner_to_outer_pri_map[i] = i;
++
++      /* enable_stag_pri_change should be set if port is in BD mode or,
++       * UFP with Host Control mode.
++       */
++      if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) {
++              if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_OS)
++                      p_ramrod->outer_tag_config.enable_stag_pri_change = 1;
++              else
++                      p_ramrod->outer_tag_config.enable_stag_pri_change = 0;
++
++              p_ramrod->outer_tag_config.outer_tag.tci |=
++                  cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13));
++      }
+       /* Place EQ address in RAMROD */
+       DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
+@@ -431,6 +449,39 @@ int qed_sp_pf_update(struct qed_hwfn *p_hwfn)
+       return qed_spq_post(p_hwfn, p_ent, NULL);
+ }
++int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn)
++{
++      struct qed_spq_entry *p_ent = NULL;
++      struct qed_sp_init_data init_data;
++      int rc = -EOPNOTSUPP;
++
++      if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_UNKNOWN) {
++              DP_INFO(p_hwfn, "Invalid priority type %d\n",
++                      p_hwfn->ufp_info.pri_type);
++              return -EINVAL;
++      }
++
++      /* Get SPQ entry */
++      memset(&init_data, 0, sizeof(init_data));
++      init_data.cid = qed_spq_get_cid(p_hwfn);
++      init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
++      init_data.comp_mode = QED_SPQ_MODE_CB;
++
++      rc = qed_sp_init_request(p_hwfn, &p_ent,
++                               COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
++                               &init_data);
++      if (rc)
++              return rc;
++
++      p_ent->ramrod.pf_update.update_enable_stag_pri_change = true;
++      if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_OS)
++              p_ent->ramrod.pf_update.enable_stag_pri_change = 1;
++      else
++              p_ent->ramrod.pf_update.enable_stag_pri_change = 0;
++
++      return qed_spq_post(p_hwfn, p_ent, NULL);
++}
++
+ /* Set pf update ramrod command params */
+ int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
+                             struct qed_ptt *p_ptt,
+diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h
+index 266c1fb..5eb0229 100644
+--- a/include/linux/qed/qed_ll2_if.h
++++ b/include/linux/qed/qed_ll2_if.h
+@@ -202,6 +202,7 @@ struct qed_ll2_tx_pkt_info {
+       bool enable_ip_cksum;
+       bool enable_l4_cksum;
+       bool calc_ip_len;
++      bool remove_stag;
+ };
+ #define QED_LL2_UNUSED_HANDLE   (0xff)
+@@ -220,6 +221,11 @@ struct qed_ll2_params {
+       u8 ll2_mac_address[ETH_ALEN];
+ };
++enum qed_ll2_xmit_flags {
++      /* FIP discovery packet */
++      QED_LL2_XMIT_FLAGS_FIP_DISCOVERY
++};
++
+ struct qed_ll2_ops {
+ /**
+  * @brief start - initializes ll2
+@@ -245,10 +251,12 @@ struct qed_ll2_ops {
+  *
+  * @param cdev
+  * @param skb
++ * @param xmit_flags - Transmit options defined by the enum qed_ll2_xmit_flags.
+  *
+  * @return 0 on success, otherwise error value.
+  */
+-      int (*start_xmit)(struct qed_dev *cdev, struct sk_buff *skb);
++      int (*start_xmit)(struct qed_dev *cdev, struct sk_buff *skb,
++                        unsigned long xmit_flags);
+ /**
+  * @brief register_cb_ops - protocol driver register the callback for Rx/Tx
+-- 
+2.9.5
+
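One detail worth spelling out from the qed_sp_pf_start() hunk above: a VLAN TCI is laid out as PCP(3) | DEI(1) | VID(12), so OR-ing `tc << 13` into the outer tag places the UFP traffic class in the PCP (priority) bits on top of the outer vlan id. A standalone sketch of that composition; the helper itself is hypothetical:

    #include <stdint.h>

    /* TCI = PCP[15:13] | DEI[12] | VID[11:0]; tc << 13 lands in PCP. */
    static uint16_t ufp_outer_tci(uint16_t ovlan_vid, uint8_t tc)
    {
            uint16_t tci = ovlan_vid & 0x0fff;      /* 12-bit vlan id */

            tci |= (uint16_t)(tc & 0x7) << 13;      /* 3-bit priority */
            return tci;
    }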
diff --git a/linux-next-cherry-picks/0014-qede-Add-build_skb-support.patch b/linux-next-cherry-picks/0014-qede-Add-build_skb-support.patch
new file mode 100644 (file)
index 0000000..7fdc71e
--- /dev/null
@@ -0,0 +1,531 @@
+From 8a8633978b842c88fbcfe00d4e5dde96048f630e Mon Sep 17 00:00:00 2001
+From: Manish Chopra <manish.chopra@cavium.com>
+Date: Thu, 17 May 2018 12:05:00 -0700
+Subject: [PATCH 14/44] qede: Add build_skb() support.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This patch makes use of build_skb() throughout the driver's receive
+data path [HW gro flow and non HW gro flow]. With this, the driver can
+build the skb directly from the page segments which are already mapped
+to the hardware, instead of allocating a new SKB via netdev_alloc_skb()
+and memcpy'ing the data, which is quite costly.
+
+This really improves performance (keeping the same or a slight gain in
+rx throughput) in terms of CPU utilization, which is significantly
+reduced [almost halved] in the non HW gro flow where, for every incoming
+MTU-sized packet, the driver had to allocate an skb and memcpy headers.
+Additionally, in that flow, it also gets rid of a bunch of further
+overheads [eth_get_headlen() etc.] to split headers and data in the skb.
+
+Tested with:
+system: 2 sockets, 4 cores per socket, hyperthreading, 2x4x2=16 cores
+iperf [server]: iperf -s
+iperf [client]: iperf -c <server_ip> -t 500 -i 10 -P 32
+
+HW GRO off - w/o build_skb(), throughput: 36.8 Gbits/sec
+
+Average:     CPU    %usr   %nice    %sys %iowait    %irq   %soft  %steal  %guest   %idle
+Average:     all    0.59    0.00   32.93    0.00    0.00   43.07    0.00    0.00   23.42
+
+HW GRO off - with build_skb(), throughput: 36.9 Gbits/sec
+
+Average:     CPU    %usr   %nice    %sys %iowait    %irq   %soft  %steal  %guest   %idle
+Average:     all    0.70    0.00   31.70    0.00    0.00   25.68    0.00    0.00   41.92
+
+HW GRO on - w/o build_skb(), throughput: 36.9 Gbits/sec
+
+Average:     CPU    %usr   %nice    %sys %iowait    %irq   %soft  %steal  %guest   %idle
+Average:     all    0.86    0.00   24.14    0.00    0.00    6.59    0.00    0.00   68.41
+
+HW GRO on - with build_skb(), throughput: 37.5 Gbits/sec
+
+Average:     CPU    %usr   %nice    %sys %iowait    %irq   %soft  %steal  %guest   %idle
+Average:     all    0.87    0.00   23.75    0.00    0.00    6.19    0.00    0.00   69.19
+
+Signed-off-by: Ariel Elior <ariel.elior@cavium.com>
+Signed-off-by: Manish Chopra <manish.chopra@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/qlogic/qede/qede.h         |   5 +-
+ drivers/net/ethernet/qlogic/qede/qede_ethtool.c |   3 +-
+ drivers/net/ethernet/qlogic/qede/qede_fp.c      | 227 +++++++++++++-----------
+ drivers/net/ethernet/qlogic/qede/qede_main.c    |  76 ++------
+ 4 files changed, 137 insertions(+), 174 deletions(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
+index 9935978c..2d3f09e 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede.h
++++ b/drivers/net/ethernet/qlogic/qede/qede.h
+@@ -290,15 +290,12 @@ struct qede_agg_info {
+        * aggregation.
+        */
+       struct sw_rx_data buffer;
+-      dma_addr_t buffer_mapping;
+-
+       struct sk_buff *skb;
+       /* We need some structs from the start cookie until termination */
+       u16 vlan_tag;
+-      u16 start_cqe_bd_len;
+-      u8 start_cqe_placement_offset;
++      bool tpa_start_fail;
+       u8 state;
+       u8 frag_id;
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+index ecbf1de..8c6fdad 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+@@ -1508,7 +1508,8 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
+               len =  le16_to_cpu(fp_cqe->len_on_first_bd);
+               data_ptr = (u8 *)(page_address(sw_rx_data->data) +
+                                 fp_cqe->placement_offset +
+-                                sw_rx_data->page_offset);
++                                sw_rx_data->page_offset +
++                                rxq->rx_headroom);
+               if (ether_addr_equal(data_ptr,  edev->ndev->dev_addr) &&
+                   ether_addr_equal(data_ptr + ETH_ALEN,
+                                    edev->ndev->dev_addr)) {
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
+index 1494130..6c70239 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
+@@ -660,7 +660,8 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
+       /* Add one frag and update the appropriate fields in the skb */
+       skb_fill_page_desc(skb, tpa_info->frag_id++,
+-                         current_bd->data, current_bd->page_offset,
++                         current_bd->data,
++                         current_bd->page_offset + rxq->rx_headroom,
+                          len_on_bd);
+       if (unlikely(qede_realloc_rx_buffer(rxq, current_bd))) {
+@@ -671,8 +672,7 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
+               goto out;
+       }
+-      qed_chain_consume(&rxq->rx_bd_ring);
+-      rxq->sw_rx_cons++;
++      qede_rx_bd_ring_consume(rxq);
+       skb->data_len += len_on_bd;
+       skb->truesize += rxq->rx_buf_seg_size;
+@@ -721,64 +721,129 @@ static u8 qede_check_tunn_csum(u16 flag)
+       return QEDE_CSUM_UNNECESSARY | tcsum;
+ }
++static inline struct sk_buff *
++qede_build_skb(struct qede_rx_queue *rxq,
++             struct sw_rx_data *bd, u16 len, u16 pad)
++{
++      struct sk_buff *skb;
++      void *buf;
++
++      buf = page_address(bd->data) + bd->page_offset;
++      skb = build_skb(buf, rxq->rx_buf_seg_size);
++
++      skb_reserve(skb, pad);
++      skb_put(skb, len);
++
++      return skb;
++}
++
++static struct sk_buff *
++qede_tpa_rx_build_skb(struct qede_dev *edev,
++                    struct qede_rx_queue *rxq,
++                    struct sw_rx_data *bd, u16 len, u16 pad,
++                    bool alloc_skb)
++{
++      struct sk_buff *skb;
++
++      skb = qede_build_skb(rxq, bd, len, pad);
++      bd->page_offset += rxq->rx_buf_seg_size;
++
++      if (bd->page_offset == PAGE_SIZE) {
++              if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
++                      DP_NOTICE(edev,
++                                "Failed to allocate RX buffer for tpa start\n");
++                      bd->page_offset -= rxq->rx_buf_seg_size;
++                      page_ref_inc(bd->data);
++                      dev_kfree_skb_any(skb);
++                      return NULL;
++              }
++      } else {
++              page_ref_inc(bd->data);
++              qede_reuse_page(rxq, bd);
++      }
++
++      /* We've consumed the first BD and prepared an SKB */
++      qede_rx_bd_ring_consume(rxq);
++
++      return skb;
++}
++
++static struct sk_buff *
++qede_rx_build_skb(struct qede_dev *edev,
++                struct qede_rx_queue *rxq,
++                struct sw_rx_data *bd, u16 len, u16 pad)
++{
++      struct sk_buff *skb = NULL;
++
++      /* For smaller frames still need to allocate skb, memcpy
++       * data and benefit in reusing the page segment instead of
++       * un-mapping it.
++       */
++      if ((len + pad <= edev->rx_copybreak)) {
++              unsigned int offset = bd->page_offset + pad;
++
++              skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
++              if (unlikely(!skb))
++                      return NULL;
++
++              skb_reserve(skb, pad);
++              memcpy(skb_put(skb, len),
++                     page_address(bd->data) + offset, len);
++              qede_reuse_page(rxq, bd);
++              goto out;
++      }
++
++      skb = qede_build_skb(rxq, bd, len, pad);
++
++      if (unlikely(qede_realloc_rx_buffer(rxq, bd))) {
++              /* Incr page ref count to reuse on allocation failure so
++               * that it doesn't get freed while freeing SKB [as its
++               * already mapped there].
++               */
++              page_ref_inc(bd->data);
++              dev_kfree_skb_any(skb);
++              return NULL;
++      }
++out:
++      /* We've consumed the first BD and prepared an SKB */
++      qede_rx_bd_ring_consume(rxq);
++
++      return skb;
++}
++
+ static void qede_tpa_start(struct qede_dev *edev,
+                          struct qede_rx_queue *rxq,
+                          struct eth_fast_path_rx_tpa_start_cqe *cqe)
+ {
+       struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
+-      struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
+-      struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
+-      struct sw_rx_data *replace_buf = &tpa_info->buffer;
+-      dma_addr_t mapping = tpa_info->buffer_mapping;
+       struct sw_rx_data *sw_rx_data_cons;
+-      struct sw_rx_data *sw_rx_data_prod;
++      u16 pad;
+       sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
+-      sw_rx_data_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
++      pad = cqe->placement_offset + rxq->rx_headroom;
+-      /* Use pre-allocated replacement buffer - we can't release the agg.
+-       * start until its over and we don't want to risk allocation failing
+-       * here, so re-allocate when aggregation will be over.
+-       */
+-      sw_rx_data_prod->mapping = replace_buf->mapping;
+-
+-      sw_rx_data_prod->data = replace_buf->data;
+-      rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(mapping));
+-      rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(mapping));
+-      sw_rx_data_prod->page_offset = replace_buf->page_offset;
+-
+-      rxq->sw_rx_prod++;
++      tpa_info->skb = qede_tpa_rx_build_skb(edev, rxq, sw_rx_data_cons,
++                                            le16_to_cpu(cqe->len_on_first_bd),
++                                            pad, false);
++      tpa_info->buffer.page_offset = sw_rx_data_cons->page_offset;
++      tpa_info->buffer.mapping = sw_rx_data_cons->mapping;
+-      /* move partial skb from cons to pool (don't unmap yet)
+-       * save mapping, incase we drop the packet later on.
+-       */
+-      tpa_info->buffer = *sw_rx_data_cons;
+-      mapping = HILO_U64(le32_to_cpu(rx_bd_cons->addr.hi),
+-                         le32_to_cpu(rx_bd_cons->addr.lo));
+-
+-      tpa_info->buffer_mapping = mapping;
+-      rxq->sw_rx_cons++;
+-
+-      /* set tpa state to start only if we are able to allocate skb
+-       * for this aggregation, otherwise mark as error and aggregation will
+-       * be dropped
+-       */
+-      tpa_info->skb = netdev_alloc_skb(edev->ndev,
+-                                       le16_to_cpu(cqe->len_on_first_bd));
+       if (unlikely(!tpa_info->skb)) {
+               DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
++
++              /* Consume from ring but do not produce since
++               * this might be used by FW still, it will be re-used
++               * at TPA end.
++               */
++              tpa_info->tpa_start_fail = true;
++              qede_rx_bd_ring_consume(rxq);
+               tpa_info->state = QEDE_AGG_STATE_ERROR;
+               goto cons_buf;
+       }
+-      /* Start filling in the aggregation info */
+-      skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
+       tpa_info->frag_id = 0;
+       tpa_info->state = QEDE_AGG_STATE_START;
+-      /* Store some information from first CQE */
+-      tpa_info->start_cqe_placement_offset = cqe->placement_offset;
+-      tpa_info->start_cqe_bd_len = le16_to_cpu(cqe->len_on_first_bd);
+       if ((le16_to_cpu(cqe->pars_flags.flags) >>
+            PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) &
+           PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK)
+@@ -899,6 +964,10 @@ static int qede_tpa_end(struct qede_dev *edev,
+       tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
+       skb = tpa_info->skb;
++      if (tpa_info->buffer.page_offset == PAGE_SIZE)
++              dma_unmap_page(rxq->dev, tpa_info->buffer.mapping,
++                             PAGE_SIZE, rxq->data_direction);
++
+       for (i = 0; cqe->len_list[i]; i++)
+               qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
+                                  le16_to_cpu(cqe->len_list[i]));
+@@ -919,11 +988,6 @@ static int qede_tpa_end(struct qede_dev *edev,
+                      "Strange - total packet len [cqe] is %4x but SKB has len %04x\n",
+                      le16_to_cpu(cqe->total_packet_len), skb->len);
+-      memcpy(skb->data,
+-             page_address(tpa_info->buffer.data) +
+-             tpa_info->start_cqe_placement_offset +
+-             tpa_info->buffer.page_offset, tpa_info->start_cqe_bd_len);
+-
+       /* Finalize the SKB */
+       skb->protocol = eth_type_trans(skb, edev->ndev);
+       skb->ip_summed = CHECKSUM_UNNECESSARY;
+@@ -940,6 +1004,12 @@ static int qede_tpa_end(struct qede_dev *edev,
+       return 1;
+ err:
+       tpa_info->state = QEDE_AGG_STATE_NONE;
++
++      if (tpa_info->tpa_start_fail) {
++              qede_reuse_page(rxq, &tpa_info->buffer);
++              tpa_info->tpa_start_fail = false;
++      }
++
+       dev_kfree_skb_any(tpa_info->skb);
+       tpa_info->skb = NULL;
+       return 0;
+@@ -1058,65 +1128,6 @@ static bool qede_rx_xdp(struct qede_dev *edev,
+       return false;
+ }
+-static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev,
+-                                          struct qede_rx_queue *rxq,
+-                                          struct sw_rx_data *bd, u16 len,
+-                                          u16 pad)
+-{
+-      unsigned int offset = bd->page_offset + pad;
+-      struct skb_frag_struct *frag;
+-      struct page *page = bd->data;
+-      unsigned int pull_len;
+-      struct sk_buff *skb;
+-      unsigned char *va;
+-
+-      /* Allocate a new SKB with a sufficient large header len */
+-      skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
+-      if (unlikely(!skb))
+-              return NULL;
+-
+-      /* Copy data into SKB - if it's small, we can simply copy it and
+-       * re-use the already allcoated & mapped memory.
+-       */
+-      if (len + pad <= edev->rx_copybreak) {
+-              skb_put_data(skb, page_address(page) + offset, len);
+-              qede_reuse_page(rxq, bd);
+-              goto out;
+-      }
+-
+-      frag = &skb_shinfo(skb)->frags[0];
+-
+-      skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+-                      page, offset, len, rxq->rx_buf_seg_size);
+-
+-      va = skb_frag_address(frag);
+-      pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE);
+-
+-      /* Align the pull_len to optimize memcpy */
+-      memcpy(skb->data, va, ALIGN(pull_len, sizeof(long)));
+-
+-      /* Correct the skb & frag sizes offset after the pull */
+-      skb_frag_size_sub(frag, pull_len);
+-      frag->page_offset += pull_len;
+-      skb->data_len -= pull_len;
+-      skb->tail += pull_len;
+-
+-      if (unlikely(qede_realloc_rx_buffer(rxq, bd))) {
+-              /* Incr page ref count to reuse on allocation failure so
+-               * that it doesn't get freed while freeing SKB [as its
+-               * already mapped there].
+-               */
+-              page_ref_inc(page);
+-              dev_kfree_skb_any(skb);
+-              return NULL;
+-      }
+-
+-out:
+-      /* We've consumed the first BD and prepared an SKB */
+-      qede_rx_bd_ring_consume(rxq);
+-      return skb;
+-}
+-
+ static int qede_rx_build_jumbo(struct qede_dev *edev,
+                              struct qede_rx_queue *rxq,
+                              struct sk_buff *skb,
+@@ -1157,7 +1168,7 @@ static int qede_rx_build_jumbo(struct qede_dev *edev,
+                              PAGE_SIZE, DMA_FROM_DEVICE);
+               skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
+-                                 bd->data, 0, cur_size);
++                                 bd->data, rxq->rx_headroom, cur_size);
+               skb->truesize += PAGE_SIZE;
+               skb->data_len += cur_size;
+@@ -1256,7 +1267,7 @@ static int qede_rx_process_cqe(struct qede_dev *edev,
+       /* Basic validation passed; Need to prepare an SKB. This would also
+        * guarantee to finally consume the first BD upon success.
+        */
+-      skb = qede_rx_allocate_skb(edev, rxq, bd, len, pad);
++      skb = qede_rx_build_skb(edev, rxq, bd, len, pad);
+       if (!skb) {
+               rxq->rx_alloc_errors++;
+               qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
+index 89c581c..40e2b92 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
+@@ -1197,30 +1197,8 @@ static void qede_free_rx_buffers(struct qede_dev *edev,
+       }
+ }
+-static void qede_free_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
+-{
+-      int i;
+-
+-      if (edev->gro_disable)
+-              return;
+-
+-      for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
+-              struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
+-              struct sw_rx_data *replace_buf = &tpa_info->buffer;
+-
+-              if (replace_buf->data) {
+-                      dma_unmap_page(&edev->pdev->dev,
+-                                     replace_buf->mapping,
+-                                     PAGE_SIZE, DMA_FROM_DEVICE);
+-                      __free_page(replace_buf->data);
+-              }
+-      }
+-}
+-
+ static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
+ {
+-      qede_free_sge_mem(edev, rxq);
+-
+       /* Free rx buffers */
+       qede_free_rx_buffers(edev, rxq);
+@@ -1232,45 +1210,15 @@ static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
+       edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
+ }
+-static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
++static void qede_set_tpa_param(struct qede_rx_queue *rxq)
+ {
+-      dma_addr_t mapping;
+       int i;
+-      if (edev->gro_disable)
+-              return 0;
+-
+       for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
+               struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
+-              struct sw_rx_data *replace_buf = &tpa_info->buffer;
+-
+-              replace_buf->data = alloc_pages(GFP_ATOMIC, 0);
+-              if (unlikely(!replace_buf->data)) {
+-                      DP_NOTICE(edev,
+-                                "Failed to allocate TPA skb pool [replacement buffer]\n");
+-                      goto err;
+-              }
+-
+-              mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0,
+-                                     PAGE_SIZE, DMA_FROM_DEVICE);
+-              if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
+-                      DP_NOTICE(edev,
+-                                "Failed to map TPA replacement buffer\n");
+-                      goto err;
+-              }
+-              replace_buf->mapping = mapping;
+-              tpa_info->buffer.page_offset = 0;
+-              tpa_info->buffer_mapping = mapping;
+               tpa_info->state = QEDE_AGG_STATE_NONE;
+       }
+-
+-      return 0;
+-err:
+-      qede_free_sge_mem(edev, rxq);
+-      edev->gro_disable = 1;
+-      edev->ndev->features &= ~NETIF_F_GRO_HW;
+-      return -ENOMEM;
+ }
+ /* This function allocates all memory needed per Rx queue */
+@@ -1281,19 +1229,24 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
+       rxq->num_rx_buffers = edev->q_num_rx_buffers;
+       rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;
+-      rxq->rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : 0;
++
++      rxq->rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : NET_SKB_PAD;
++      size = rxq->rx_headroom +
++             SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+       /* Make sure that the headroom and  payload fit in a single page */
+-      if (rxq->rx_buf_size + rxq->rx_headroom > PAGE_SIZE)
+-              rxq->rx_buf_size = PAGE_SIZE - rxq->rx_headroom;
++      if (rxq->rx_buf_size + size > PAGE_SIZE)
++              rxq->rx_buf_size = PAGE_SIZE - size;
+-      /* Segment size to spilt a page in multiple equal parts,
++      /* Segment size to split a page in multiple equal parts,
+        * unless XDP is used in which case we'd use the entire page.
+        */
+-      if (!edev->xdp_prog)
+-              rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size);
+-      else
++      if (!edev->xdp_prog) {
++              size = size + rxq->rx_buf_size;
++              rxq->rx_buf_seg_size = roundup_pow_of_two(size);
++      } else {
+               rxq->rx_buf_seg_size = PAGE_SIZE;
++      }
+       /* Allocate the parallel driver ring for Rx buffers */
+       size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
+@@ -1337,7 +1290,8 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
+               }
+       }
+-      rc = qede_alloc_sge_mem(edev, rxq);
++      if (!edev->gro_disable)
++              qede_set_tpa_param(rxq);
+ err:
+       return rc;
+ }
+-- 
+2.9.5
+
diff --git a/linux-next-cherry-picks/0015-qed-Add-MFW-interfaces-for-TLV-request-support.patch b/linux-next-cherry-picks/0015-qed-Add-MFW-interfaces-for-TLV-request-support.patch
new file mode 100644 (file)
index 0000000..5a20a7f
--- /dev/null
@@ -0,0 +1,281 @@
+From dd006921d67f4a96f3d1fa763aad4d5dcd86959b Mon Sep 17 00:00:00 2001
+From: Sudarsana Reddy Kalluru <sudarsana.kalluru@cavium.com>
+Date: Tue, 22 May 2018 00:28:37 -0700
+Subject: [PATCH 15/44] qed: Add MFW interfaces for TLV request support.
+
+The patch adds the required management firmware (MFW) interfaces, such as
+mailbox commands and TLV types.
+
+Signed-off-by: Sudarsana Reddy Kalluru <Sudarsana.Kalluru@cavium.com>
+Signed-off-by: Ariel Elior <ariel.elior@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/qlogic/qed/qed_hsi.h | 231 ++++++++++++++++++++++++++++++
+ 1 file changed, 231 insertions(+)
+
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+index b5f70ef..8e1e6e1 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+@@ -11863,6 +11863,8 @@ struct public_global {
+       u32 running_bundle_id;
+       s32 external_temperature;
+       u32 mdump_reason;
++      u32 data_ptr;
++      u32 data_size;
+ };
+ struct fw_flr_mb {
+@@ -12322,6 +12324,7 @@ struct public_drv_mb {
+ #define DRV_MSG_CODE_BIST_TEST                        0x001e0000
+ #define DRV_MSG_CODE_SET_LED_MODE             0x00200000
+ #define DRV_MSG_CODE_RESOURCE_CMD     0x00230000
++#define DRV_MSG_CODE_GET_TLV_DONE             0x002f0000
+ #define RESOURCE_CMD_REQ_RESC_MASK            0x0000001F
+ #define RESOURCE_CMD_REQ_RESC_SHIFT           0
+@@ -12523,6 +12526,7 @@ enum MFW_DRV_MSG_TYPE {
+       MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
+       MFW_DRV_MSG_BW_UPDATE11,
+       MFW_DRV_MSG_OEM_CFG_UPDATE,
++      MFW_DRV_MSG_GET_TLV_REQ,
+       MFW_DRV_MSG_MAX
+ };
+@@ -12558,6 +12562,233 @@ struct mcp_public_data {
+       struct public_func func[MCP_GLOB_FUNC_MAX];
+ };
++/* OCBB definitions */
++enum tlvs {
++      /* Category 1: Device Properties */
++      DRV_TLV_CLP_STR,
++      DRV_TLV_CLP_STR_CTD,
++      /* Category 6: Device Configuration */
++      DRV_TLV_SCSI_TO,
++      DRV_TLV_R_T_TOV,
++      DRV_TLV_R_A_TOV,
++      DRV_TLV_E_D_TOV,
++      DRV_TLV_CR_TOV,
++      DRV_TLV_BOOT_TYPE,
++      /* Category 8: Port Configuration */
++      DRV_TLV_NPIV_ENABLED,
++      /* Category 10: Function Configuration */
++      DRV_TLV_FEATURE_FLAGS,
++      DRV_TLV_LOCAL_ADMIN_ADDR,
++      DRV_TLV_ADDITIONAL_MAC_ADDR_1,
++      DRV_TLV_ADDITIONAL_MAC_ADDR_2,
++      DRV_TLV_LSO_MAX_OFFLOAD_SIZE,
++      DRV_TLV_LSO_MIN_SEGMENT_COUNT,
++      DRV_TLV_PROMISCUOUS_MODE,
++      DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE,
++      DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE,
++      DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG,
++      DRV_TLV_FLEX_NIC_OUTER_VLAN_ID,
++      DRV_TLV_OS_DRIVER_STATES,
++      DRV_TLV_PXE_BOOT_PROGRESS,
++      /* Category 12: FC/FCoE Configuration */
++      DRV_TLV_NPIV_STATE,
++      DRV_TLV_NUM_OF_NPIV_IDS,
++      DRV_TLV_SWITCH_NAME,
++      DRV_TLV_SWITCH_PORT_NUM,
++      DRV_TLV_SWITCH_PORT_ID,
++      DRV_TLV_VENDOR_NAME,
++      DRV_TLV_SWITCH_MODEL,
++      DRV_TLV_SWITCH_FW_VER,
++      DRV_TLV_QOS_PRIORITY_PER_802_1P,
++      DRV_TLV_PORT_ALIAS,
++      DRV_TLV_PORT_STATE,
++      DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE,
++      DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE,
++      DRV_TLV_LINK_FAILURE_COUNT,
++      DRV_TLV_FCOE_BOOT_PROGRESS,
++      /* Category 13: iSCSI Configuration */
++      DRV_TLV_TARGET_LLMNR_ENABLED,
++      DRV_TLV_HEADER_DIGEST_FLAG_ENABLED,
++      DRV_TLV_DATA_DIGEST_FLAG_ENABLED,
++      DRV_TLV_AUTHENTICATION_METHOD,
++      DRV_TLV_ISCSI_BOOT_TARGET_PORTAL,
++      DRV_TLV_MAX_FRAME_SIZE,
++      DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE,
++      DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE,
++      DRV_TLV_ISCSI_BOOT_PROGRESS,
++      /* Category 20: Device Data */
++      DRV_TLV_PCIE_BUS_RX_UTILIZATION,
++      DRV_TLV_PCIE_BUS_TX_UTILIZATION,
++      DRV_TLV_DEVICE_CPU_CORES_UTILIZATION,
++      DRV_TLV_LAST_VALID_DCC_TLV_RECEIVED,
++      DRV_TLV_NCSI_RX_BYTES_RECEIVED,
++      DRV_TLV_NCSI_TX_BYTES_SENT,
++      /* Category 22: Base Port Data */
++      DRV_TLV_RX_DISCARDS,
++      DRV_TLV_RX_ERRORS,
++      DRV_TLV_TX_ERRORS,
++      DRV_TLV_TX_DISCARDS,
++      DRV_TLV_RX_FRAMES_RECEIVED,
++      DRV_TLV_TX_FRAMES_SENT,
++      /* Category 23: FC/FCoE Port Data */
++      DRV_TLV_RX_BROADCAST_PACKETS,
++      DRV_TLV_TX_BROADCAST_PACKETS,
++      /* Category 28: Base Function Data */
++      DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4,
++      DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6,
++      DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH,
++      DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH,
++      DRV_TLV_PF_RX_FRAMES_RECEIVED,
++      DRV_TLV_RX_BYTES_RECEIVED,
++      DRV_TLV_PF_TX_FRAMES_SENT,
++      DRV_TLV_TX_BYTES_SENT,
++      DRV_TLV_IOV_OFFLOAD,
++      DRV_TLV_PCI_ERRORS_CAP_ID,
++      DRV_TLV_UNCORRECTABLE_ERROR_STATUS,
++      DRV_TLV_UNCORRECTABLE_ERROR_MASK,
++      DRV_TLV_CORRECTABLE_ERROR_STATUS,
++      DRV_TLV_CORRECTABLE_ERROR_MASK,
++      DRV_TLV_PCI_ERRORS_AECC_REGISTER,
++      DRV_TLV_TX_QUEUES_EMPTY,
++      DRV_TLV_RX_QUEUES_EMPTY,
++      DRV_TLV_TX_QUEUES_FULL,
++      DRV_TLV_RX_QUEUES_FULL,
++      /* Category 29: FC/FCoE Function Data */
++      DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH,
++      DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH,
++      DRV_TLV_FCOE_RX_FRAMES_RECEIVED,
++      DRV_TLV_FCOE_RX_BYTES_RECEIVED,
++      DRV_TLV_FCOE_TX_FRAMES_SENT,
++      DRV_TLV_FCOE_TX_BYTES_SENT,
++      DRV_TLV_CRC_ERROR_COUNT,
++      DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID,
++      DRV_TLV_CRC_ERROR_1_TIMESTAMP,
++      DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID,
++      DRV_TLV_CRC_ERROR_2_TIMESTAMP,
++      DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID,
++      DRV_TLV_CRC_ERROR_3_TIMESTAMP,
++      DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID,
++      DRV_TLV_CRC_ERROR_4_TIMESTAMP,
++      DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID,
++      DRV_TLV_CRC_ERROR_5_TIMESTAMP,
++      DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT,
++      DRV_TLV_LOSS_OF_SIGNAL_ERRORS,
++      DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT,
++      DRV_TLV_DISPARITY_ERROR_COUNT,
++      DRV_TLV_CODE_VIOLATION_ERROR_COUNT,
++      DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1,
++      DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2,
++      DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3,
++      DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4,
++      DRV_TLV_LAST_FLOGI_TIMESTAMP,
++      DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1,
++      DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2,
++      DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3,
++      DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4,
++      DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP,
++      DRV_TLV_LAST_FLOGI_RJT,
++      DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP,
++      DRV_TLV_FDISCS_SENT_COUNT,
++      DRV_TLV_FDISC_ACCS_RECEIVED,
++      DRV_TLV_FDISC_RJTS_RECEIVED,
++      DRV_TLV_PLOGI_SENT_COUNT,
++      DRV_TLV_PLOGI_ACCS_RECEIVED,
++      DRV_TLV_PLOGI_RJTS_RECEIVED,
++      DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID,
++      DRV_TLV_PLOGI_1_TIMESTAMP,
++      DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID,
++      DRV_TLV_PLOGI_2_TIMESTAMP,
++      DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID,
++      DRV_TLV_PLOGI_3_TIMESTAMP,
++      DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID,
++      DRV_TLV_PLOGI_4_TIMESTAMP,
++      DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID,
++      DRV_TLV_PLOGI_5_TIMESTAMP,
++      DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID,
++      DRV_TLV_PLOGI_1_ACC_TIMESTAMP,
++      DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID,
++      DRV_TLV_PLOGI_2_ACC_TIMESTAMP,
++      DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID,
++      DRV_TLV_PLOGI_3_ACC_TIMESTAMP,
++      DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID,
++      DRV_TLV_PLOGI_4_ACC_TIMESTAMP,
++      DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID,
++      DRV_TLV_PLOGI_5_ACC_TIMESTAMP,
++      DRV_TLV_LOGOS_ISSUED,
++      DRV_TLV_LOGO_ACCS_RECEIVED,
++      DRV_TLV_LOGO_RJTS_RECEIVED,
++      DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID,
++      DRV_TLV_LOGO_1_TIMESTAMP,
++      DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID,
++      DRV_TLV_LOGO_2_TIMESTAMP,
++      DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID,
++      DRV_TLV_LOGO_3_TIMESTAMP,
++      DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID,
++      DRV_TLV_LOGO_4_TIMESTAMP,
++      DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID,
++      DRV_TLV_LOGO_5_TIMESTAMP,
++      DRV_TLV_LOGOS_RECEIVED,
++      DRV_TLV_ACCS_ISSUED,
++      DRV_TLV_PRLIS_ISSUED,
++      DRV_TLV_ACCS_RECEIVED,
++      DRV_TLV_ABTS_SENT_COUNT,
++      DRV_TLV_ABTS_ACCS_RECEIVED,
++      DRV_TLV_ABTS_RJTS_RECEIVED,
++      DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID,
++      DRV_TLV_ABTS_1_TIMESTAMP,
++      DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID,
++      DRV_TLV_ABTS_2_TIMESTAMP,
++      DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID,
++      DRV_TLV_ABTS_3_TIMESTAMP,
++      DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID,
++      DRV_TLV_ABTS_4_TIMESTAMP,
++      DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID,
++      DRV_TLV_ABTS_5_TIMESTAMP,
++      DRV_TLV_RSCNS_RECEIVED,
++      DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1,
++      DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2,
++      DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3,
++      DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4,
++      DRV_TLV_LUN_RESETS_ISSUED,
++      DRV_TLV_ABORT_TASK_SETS_ISSUED,
++      DRV_TLV_TPRLOS_SENT,
++      DRV_TLV_NOS_SENT_COUNT,
++      DRV_TLV_NOS_RECEIVED_COUNT,
++      DRV_TLV_OLS_COUNT,
++      DRV_TLV_LR_COUNT,
++      DRV_TLV_LRR_COUNT,
++      DRV_TLV_LIP_SENT_COUNT,
++      DRV_TLV_LIP_RECEIVED_COUNT,
++      DRV_TLV_EOFA_COUNT,
++      DRV_TLV_EOFNI_COUNT,
++      DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT,
++      DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT,
++      DRV_TLV_SCSI_STATUS_BUSY_COUNT,
++      DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT,
++      DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT,
++      DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT,
++      DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT,
++      DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT,
++      DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT,
++      DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ,
++      DRV_TLV_SCSI_CHECK_1_TIMESTAMP,
++      DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ,
++      DRV_TLV_SCSI_CHECK_2_TIMESTAMP,
++      DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ,
++      DRV_TLV_SCSI_CHECK_3_TIMESTAMP,
++      DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ,
++      DRV_TLV_SCSI_CHECK_4_TIMESTAMP,
++      DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ,
++      DRV_TLV_SCSI_CHECK_5_TIMESTAMP,
++      /* Category 30: iSCSI Function Data */
++      DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH,
++      DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH,
++      DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED,
++      DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED,
++      DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT,
++      DRV_TLV_ISCSI_PDU_TX_BYTES_SENT
++};
++
+ struct nvm_cfg_mac_address {
+       u32 mac_addr_hi;
+ #define NVM_CFG_MAC_ADDRESS_HI_MASK   0x0000FFFF
+-- 
+2.9.5
+
diff --git a/linux-next-cherry-picks/0016-qed-Add-support-for-tlv-request-processing.patch b/linux-next-cherry-picks/0016-qed-Add-support-for-tlv-request-processing.patch
new file mode 100644 (file)
index 0000000..466cfe9
--- /dev/null
@@ -0,0 +1,585 @@
+From 2528c389936efbbece25088426fe7c3c91ff355f Mon Sep 17 00:00:00 2001
+From: Sudarsana Reddy Kalluru <sudarsana.kalluru@cavium.com>
+Date: Tue, 22 May 2018 00:28:38 -0700
+Subject: [PATCH 16/44] qed: Add support for tlv request processing.
+
+The patch adds driver support for processing TLV requests/responses
+from the MFW and the upper driver layers, respectively. The implementation
+reads the requested TLVs from the shared memory, requests the values
+from the upper layer drivers, populates this info (TLVs) in the shared
+memory, and notifies the MFW about the TLV values.
+
+Signed-off-by: Sudarsana Reddy Kalluru <Sudarsana.Kalluru@cavium.com>
+Signed-off-by: Ariel Elior <ariel.elior@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/qlogic/qed/Makefile      |   2 +-
+ drivers/net/ethernet/qlogic/qed/qed.h         |   5 +
+ drivers/net/ethernet/qlogic/qed/qed_main.c    |   6 +
+ drivers/net/ethernet/qlogic/qed/qed_mcp.h     |  53 ++++
+ drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c | 378 ++++++++++++++++++++++++++
+ include/linux/qed/qed_if.h                    |  37 +++
+ 6 files changed, 480 insertions(+), 1 deletion(-)
+ create mode 100644 drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
+
+diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
+index c70cf2a..a0acb94 100644
+--- a/drivers/net/ethernet/qlogic/qed/Makefile
++++ b/drivers/net/ethernet/qlogic/qed/Makefile
+@@ -3,7 +3,7 @@ obj-$(CONFIG_QED) := qed.o
+ qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
+        qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o \
+-       qed_selftest.o qed_dcbx.o qed_debug.o qed_ptp.o
++       qed_selftest.o qed_dcbx.o qed_debug.o qed_ptp.o qed_mng_tlv.o
+ qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
+ qed-$(CONFIG_QED_LL2) += qed_ll2.o
+ qed-$(CONFIG_QED_RDMA) += qed_roce.o qed_rdma.o qed_iwarp.o
+diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
+index adcff49..dfdbe52 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed.h
++++ b/drivers/net/ethernet/qlogic/qed/qed.h
+@@ -92,6 +92,8 @@ struct qed_eth_cb_ops;
+ struct qed_dev_info;
+ union qed_mcp_protocol_stats;
+ enum qed_mcp_protocol_type;
++enum qed_mfw_tlv_type;
++union qed_mfw_tlv_data;
+ /* helpers */
+ #define QED_MFW_GET_FIELD(name, field) \
+@@ -907,4 +909,7 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
+ int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
+ void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn);
++int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn,
++                        enum qed_mfw_tlv_type type,
++                        union qed_mfw_tlv_data *tlv_data);
+ #endif /* _QED_H */
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
+index 9feed3b..cbf0ea9 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
+@@ -2088,3 +2088,9 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
+               return;
+       }
+ }
++
++int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
++                        union qed_mfw_tlv_data *tlv_buf)
++{
++      return -EINVAL;
++}
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+index 250579b..591877f 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+@@ -213,6 +213,40 @@ enum qed_ov_wol {
+       QED_OV_WOL_ENABLED
+ };
++enum qed_mfw_tlv_type {
++      QED_MFW_TLV_GENERIC = 0x1,      /* Core driver TLVs */
++      QED_MFW_TLV_ETH = 0x2,          /* L2 driver TLVs */
++      QED_MFW_TLV_MAX = 0x4,
++};
++
++struct qed_mfw_tlv_generic {
++#define QED_MFW_TLV_FLAGS_SIZE        2
++      struct {
++              u8 ipv4_csum_offload;
++              u8 lso_supported;
++              bool b_set;
++      } flags;
++
++#define QED_MFW_TLV_MAC_COUNT 3
++      /* First entry for primary MAC, 2 secondary MACs possible */
++      u8 mac[QED_MFW_TLV_MAC_COUNT][6];
++      bool mac_set[QED_MFW_TLV_MAC_COUNT];
++
++      u64 rx_frames;
++      bool rx_frames_set;
++      u64 rx_bytes;
++      bool rx_bytes_set;
++      u64 tx_frames;
++      bool tx_frames_set;
++      u64 tx_bytes;
++      bool tx_bytes_set;
++};
++
++union qed_mfw_tlv_data {
++      struct qed_mfw_tlv_generic generic;
++      struct qed_mfw_tlv_eth eth;
++};
++
+ /**
+  * @brief - returns the link params of the hw function
+  *
+@@ -561,6 +595,17 @@ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
+                                  struct bist_nvm_image_att *p_image_att,
+                                  u32 image_index);
++/**
++ * @brief - Processes the TLV request from the MFW, i.e., gets the required
++ *          TLV info from the qed client and sends it to the MFW.
++ *
++ * @param p_hwfn
++ * @param p_ptt
++ *
++ * @return 0 upon success.
++ */
++int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
++
+ /* Using hwfn number (and not pf_num) is required since in CMT mode,
+  * same pf_num may be used by two different hwfn
+  * TODO - this shouldn't really be in .h file, but until all fields
+@@ -621,6 +666,14 @@ struct qed_mcp_mb_params {
+       u32                     mcp_param;
+ };
++struct qed_drv_tlv_hdr {
++      u8 tlv_type;
++      u8 tlv_length;  /* In dwords - not including this header */
++      u8 tlv_reserved;
++#define QED_DRV_TLV_FLAGS_CHANGED 0x01
++      u8 tlv_flags;
++};
++
+ /**
+  * @brief Initialize the interface with the MCP
+  *
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
+new file mode 100644
+index 0000000..d58a714
+--- /dev/null
++++ b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
+@@ -0,0 +1,378 @@
++// SPDX-License-Identifier: GPL-2.0
++#include <linux/types.h>
++#include <asm/byteorder.h>
++#include <linux/bug.h>
++#include <linux/errno.h>
++#include <linux/kernel.h>
++#include <linux/slab.h>
++#include <linux/string.h>
++#include <linux/vmalloc.h>
++#include "qed.h"
++#include "qed_hw.h"
++#include "qed_mcp.h"
++#include "qed_reg_addr.h"
++
++#define TLV_TYPE(p)     (p[0])
++#define TLV_LENGTH(p)   (p[1])
++#define TLV_FLAGS(p)    (p[3])
++
++#define QED_TLV_DATA_MAX (14)
++struct qed_tlv_parsed_buf {
++      /* To be filled with the address to set in Value field */
++      void *p_val;
++
++      /* To be used internally in case the value has to be modified */
++      u8 data[QED_TLV_DATA_MAX];
++};
++
++static int qed_mfw_get_tlv_group(u8 tlv_type, u8 *tlv_group)
++{
++      switch (tlv_type) {
++      case DRV_TLV_FEATURE_FLAGS:
++      case DRV_TLV_LOCAL_ADMIN_ADDR:
++      case DRV_TLV_ADDITIONAL_MAC_ADDR_1:
++      case DRV_TLV_ADDITIONAL_MAC_ADDR_2:
++      case DRV_TLV_OS_DRIVER_STATES:
++      case DRV_TLV_PXE_BOOT_PROGRESS:
++      case DRV_TLV_RX_FRAMES_RECEIVED:
++      case DRV_TLV_RX_BYTES_RECEIVED:
++      case DRV_TLV_TX_FRAMES_SENT:
++      case DRV_TLV_TX_BYTES_SENT:
++      case DRV_TLV_NPIV_ENABLED:
++      case DRV_TLV_PCIE_BUS_RX_UTILIZATION:
++      case DRV_TLV_PCIE_BUS_TX_UTILIZATION:
++      case DRV_TLV_DEVICE_CPU_CORES_UTILIZATION:
++      case DRV_TLV_LAST_VALID_DCC_TLV_RECEIVED:
++      case DRV_TLV_NCSI_RX_BYTES_RECEIVED:
++      case DRV_TLV_NCSI_TX_BYTES_SENT:
++              *tlv_group |= QED_MFW_TLV_GENERIC;
++              break;
++      case DRV_TLV_LSO_MAX_OFFLOAD_SIZE:
++      case DRV_TLV_LSO_MIN_SEGMENT_COUNT:
++      case DRV_TLV_PROMISCUOUS_MODE:
++      case DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE:
++      case DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE:
++      case DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG:
++      case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4:
++      case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6:
++      case DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
++      case DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
++      case DRV_TLV_IOV_OFFLOAD:
++      case DRV_TLV_TX_QUEUES_EMPTY:
++      case DRV_TLV_RX_QUEUES_EMPTY:
++      case DRV_TLV_TX_QUEUES_FULL:
++      case DRV_TLV_RX_QUEUES_FULL:
++              *tlv_group |= QED_MFW_TLV_ETH;
++              break;
++      default:
++              return -EINVAL;
++      }
++
++      return 0;
++}
++
++/* Returns size of the data buffer or -1 in case TLV data is not available. */
++static int
++qed_mfw_get_gen_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
++                        struct qed_mfw_tlv_generic *p_drv_buf,
++                        struct qed_tlv_parsed_buf *p_buf)
++{
++      switch (p_tlv->tlv_type) {
++      case DRV_TLV_FEATURE_FLAGS:
++              if (p_drv_buf->flags.b_set) {
++                      memset(p_buf->data, 0, sizeof(u8) * QED_TLV_DATA_MAX);
++                      p_buf->data[0] = p_drv_buf->flags.ipv4_csum_offload ?
++                          1 : 0;
++                      p_buf->data[0] |= (p_drv_buf->flags.lso_supported ?
++                                         1 : 0) << 1;
++                      p_buf->p_val = p_buf->data;
++                      return QED_MFW_TLV_FLAGS_SIZE;
++              }
++              break;
++
++      case DRV_TLV_LOCAL_ADMIN_ADDR:
++      case DRV_TLV_ADDITIONAL_MAC_ADDR_1:
++      case DRV_TLV_ADDITIONAL_MAC_ADDR_2:
++              {
++                      int idx = p_tlv->tlv_type - DRV_TLV_LOCAL_ADMIN_ADDR;
++
++                      if (p_drv_buf->mac_set[idx]) {
++                              p_buf->p_val = p_drv_buf->mac[idx];
++                              return ETH_ALEN;
++                      }
++                      break;
++              }
++
++      case DRV_TLV_RX_FRAMES_RECEIVED:
++              if (p_drv_buf->rx_frames_set) {
++                      p_buf->p_val = &p_drv_buf->rx_frames;
++                      return sizeof(p_drv_buf->rx_frames);
++              }
++              break;
++      case DRV_TLV_RX_BYTES_RECEIVED:
++              if (p_drv_buf->rx_bytes_set) {
++                      p_buf->p_val = &p_drv_buf->rx_bytes;
++                      return sizeof(p_drv_buf->rx_bytes);
++              }
++              break;
++      case DRV_TLV_TX_FRAMES_SENT:
++              if (p_drv_buf->tx_frames_set) {
++                      p_buf->p_val = &p_drv_buf->tx_frames;
++                      return sizeof(p_drv_buf->tx_frames);
++              }
++              break;
++      case DRV_TLV_TX_BYTES_SENT:
++              if (p_drv_buf->tx_bytes_set) {
++                      p_buf->p_val = &p_drv_buf->tx_bytes;
++                      return sizeof(p_drv_buf->tx_bytes);
++              }
++              break;
++      default:
++              break;
++      }
++
++      return -1;
++}
++
++static int
++qed_mfw_get_eth_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
++                        struct qed_mfw_tlv_eth *p_drv_buf,
++                        struct qed_tlv_parsed_buf *p_buf)
++{
++      switch (p_tlv->tlv_type) {
++      case DRV_TLV_LSO_MAX_OFFLOAD_SIZE:
++              if (p_drv_buf->lso_maxoff_size_set) {
++                      p_buf->p_val = &p_drv_buf->lso_maxoff_size;
++                      return sizeof(p_drv_buf->lso_maxoff_size);
++              }
++              break;
++      case DRV_TLV_LSO_MIN_SEGMENT_COUNT:
++              if (p_drv_buf->lso_minseg_size_set) {
++                      p_buf->p_val = &p_drv_buf->lso_minseg_size;
++                      return sizeof(p_drv_buf->lso_minseg_size);
++              }
++              break;
++      case DRV_TLV_PROMISCUOUS_MODE:
++              if (p_drv_buf->prom_mode_set) {
++                      p_buf->p_val = &p_drv_buf->prom_mode;
++                      return sizeof(p_drv_buf->prom_mode);
++              }
++              break;
++      case DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE:
++              if (p_drv_buf->tx_descr_size_set) {
++                      p_buf->p_val = &p_drv_buf->tx_descr_size;
++                      return sizeof(p_drv_buf->tx_descr_size);
++              }
++              break;
++      case DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE:
++              if (p_drv_buf->rx_descr_size_set) {
++                      p_buf->p_val = &p_drv_buf->rx_descr_size;
++                      return sizeof(p_drv_buf->rx_descr_size);
++              }
++              break;
++      case DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG:
++              if (p_drv_buf->netq_count_set) {
++                      p_buf->p_val = &p_drv_buf->netq_count;
++                      return sizeof(p_drv_buf->netq_count);
++              }
++              break;
++      case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4:
++              if (p_drv_buf->tcp4_offloads_set) {
++                      p_buf->p_val = &p_drv_buf->tcp4_offloads;
++                      return sizeof(p_drv_buf->tcp4_offloads);
++              }
++              break;
++      case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6:
++              if (p_drv_buf->tcp6_offloads_set) {
++                      p_buf->p_val = &p_drv_buf->tcp6_offloads;
++                      return sizeof(p_drv_buf->tcp6_offloads);
++              }
++              break;
++      case DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
++              if (p_drv_buf->tx_descr_qdepth_set) {
++                      p_buf->p_val = &p_drv_buf->tx_descr_qdepth;
++                      return sizeof(p_drv_buf->tx_descr_qdepth);
++              }
++              break;
++      case DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
++              if (p_drv_buf->rx_descr_qdepth_set) {
++                      p_buf->p_val = &p_drv_buf->rx_descr_qdepth;
++                      return sizeof(p_drv_buf->rx_descr_qdepth);
++              }
++              break;
++      case DRV_TLV_IOV_OFFLOAD:
++              if (p_drv_buf->iov_offload_set) {
++                      p_buf->p_val = &p_drv_buf->iov_offload;
++                      return sizeof(p_drv_buf->iov_offload);
++              }
++              break;
++      case DRV_TLV_TX_QUEUES_EMPTY:
++              if (p_drv_buf->txqs_empty_set) {
++                      p_buf->p_val = &p_drv_buf->txqs_empty;
++                      return sizeof(p_drv_buf->txqs_empty);
++              }
++              break;
++      case DRV_TLV_RX_QUEUES_EMPTY:
++              if (p_drv_buf->rxqs_empty_set) {
++                      p_buf->p_val = &p_drv_buf->rxqs_empty;
++                      return sizeof(p_drv_buf->rxqs_empty);
++              }
++              break;
++      case DRV_TLV_TX_QUEUES_FULL:
++              if (p_drv_buf->num_txqs_full_set) {
++                      p_buf->p_val = &p_drv_buf->num_txqs_full;
++                      return sizeof(p_drv_buf->num_txqs_full);
++              }
++              break;
++      case DRV_TLV_RX_QUEUES_FULL:
++              if (p_drv_buf->num_rxqs_full_set) {
++                      p_buf->p_val = &p_drv_buf->num_rxqs_full;
++                      return sizeof(p_drv_buf->num_rxqs_full);
++              }
++              break;
++      default:
++              break;
++      }
++
++      return -1;
++}
++
++static int qed_mfw_update_tlvs(struct qed_hwfn *p_hwfn,
++                             u8 tlv_group, u8 *p_mfw_buf, u32 size)
++{
++      union qed_mfw_tlv_data *p_tlv_data;
++      struct qed_tlv_parsed_buf buffer;
++      struct qed_drv_tlv_hdr tlv;
++      int len = 0;
++      u32 offset;
++      u8 *p_tlv;
++
++      p_tlv_data = vzalloc(sizeof(*p_tlv_data));
++      if (!p_tlv_data)
++              return -ENOMEM;
++
++      if (qed_mfw_fill_tlv_data(p_hwfn, tlv_group, p_tlv_data)) {
++              vfree(p_tlv_data);
++              return -EINVAL;
++      }
++
++      memset(&tlv, 0, sizeof(tlv));
++      for (offset = 0; offset < size;
++           offset += sizeof(tlv) + sizeof(u32) * tlv.tlv_length) {
++              p_tlv = &p_mfw_buf[offset];
++              tlv.tlv_type = TLV_TYPE(p_tlv);
++              tlv.tlv_length = TLV_LENGTH(p_tlv);
++              tlv.tlv_flags = TLV_FLAGS(p_tlv);
++
++              DP_VERBOSE(p_hwfn, QED_MSG_SP,
++                         "Type %d length = %d flags = 0x%x\n", tlv.tlv_type,
++                         tlv.tlv_length, tlv.tlv_flags);
++
++              if (tlv_group == QED_MFW_TLV_GENERIC)
++                      len = qed_mfw_get_gen_tlv_value(&tlv,
++                                                      &p_tlv_data->generic,
++                                                      &buffer);
++              else if (tlv_group == QED_MFW_TLV_ETH)
++                      len = qed_mfw_get_eth_tlv_value(&tlv,
++                                                      &p_tlv_data->eth,
++                                                      &buffer);
++
++              if (len > 0) {
++                      WARN(len > 4 * tlv.tlv_length,
++                           "Incorrect MFW TLV length %d, it shouldn't be greater than %d\n",
++                           len, 4 * tlv.tlv_length);
++                      len = min_t(int, len, 4 * tlv.tlv_length);
++                      tlv.tlv_flags |= QED_DRV_TLV_FLAGS_CHANGED;
++                      TLV_FLAGS(p_tlv) = tlv.tlv_flags;
++                      memcpy(p_mfw_buf + offset + sizeof(tlv),
++                             buffer.p_val, len);
++              }
++      }
++
++      vfree(p_tlv_data);
++
++      return 0;
++}
++
++int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
++{
++      u32 addr, size, offset, resp, param, val, global_offsize, global_addr;
++      u8 tlv_group = 0, id, *p_mfw_buf = NULL, *p_temp;
++      struct qed_drv_tlv_hdr tlv;
++      int rc;
++
++      addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
++                                  PUBLIC_GLOBAL);
++      global_offsize = qed_rd(p_hwfn, p_ptt, addr);
++      global_addr = SECTION_ADDR(global_offsize, 0);
++      addr = global_addr + offsetof(struct public_global, data_ptr);
++      addr = qed_rd(p_hwfn, p_ptt, addr);
++      size = qed_rd(p_hwfn, p_ptt, global_addr +
++                    offsetof(struct public_global, data_size));
++
++      if (!size) {
++              DP_NOTICE(p_hwfn, "Invalid TLV req size = %d\n", size);
++              goto drv_done;
++      }
++
++      p_mfw_buf = vzalloc(size);
++      if (!p_mfw_buf) {
++              DP_NOTICE(p_hwfn, "Failed to allocate memory for p_mfw_buf\n");
++              goto drv_done;
++      }
++
++      /* Read the TLV request into a local buffer. The MFW represents the
++       * TLV in little-endian format, but the mcp read returns it in
++       * big-endian format. Hence the driver needs to convert the data to
++       * little endian first, and then memcpy it so as to preserve the MFW
++       * TLV format in the driver buffer.
++       */
++      for (offset = 0; offset < size; offset += sizeof(u32)) {
++              val = qed_rd(p_hwfn, p_ptt, addr + offset);
++              val = be32_to_cpu(val);
++              memcpy(&p_mfw_buf[offset], &val, sizeof(u32));
++      }
++
++      /* Parse the headers to enumerate the requested TLV groups */
++      for (offset = 0; offset < size;
++           offset += sizeof(tlv) + sizeof(u32) * tlv.tlv_length) {
++              p_temp = &p_mfw_buf[offset];
++              tlv.tlv_type = TLV_TYPE(p_temp);
++              tlv.tlv_length = TLV_LENGTH(p_temp);
++              if (qed_mfw_get_tlv_group(tlv.tlv_type, &tlv_group))
++                      DP_VERBOSE(p_hwfn, NETIF_MSG_DRV,
++                                 "Unrecognized TLV %d\n", tlv.tlv_type);
++      }
++
++      /* Sanitize the TLV groups according to personality */
++      if ((tlv_group & QED_MFW_TLV_ETH) && !QED_IS_L2_PERSONALITY(p_hwfn)) {
++              DP_VERBOSE(p_hwfn, QED_MSG_SP,
++                         "Skipping L2 TLVs for non-L2 function\n");
++              tlv_group &= ~QED_MFW_TLV_ETH;
++      }
++
++      /* Update the TLV values in the local buffer */
++      for (id = QED_MFW_TLV_GENERIC; id < QED_MFW_TLV_MAX; id <<= 1) {
++              if (tlv_group & id)
++                      if (qed_mfw_update_tlvs(p_hwfn, id, p_mfw_buf, size))
++                              goto drv_done;
++      }
++
++      /* Write the TLV data to shared memory. Each stream of 4 bytes first
++       * needs to be mem-copied to a u32 element to put it in LSB format,
++       * and then converted to big endian as required by the mcp write.
++       */
++      for (offset = 0; offset < size; offset += sizeof(u32)) {
++              memcpy(&val, &p_mfw_buf[offset], sizeof(u32));
++              val = cpu_to_be32(val);
++              qed_wr(p_hwfn, p_ptt, addr + offset, val);
++      }
++
++drv_done:
++      rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_TLV_DONE, 0, &resp,
++                       &param);
++
++      vfree(p_mfw_buf);
++
++      return rc;
++}
+diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
+index 907976f..8e4fad4 100644
+--- a/include/linux/qed/qed_if.h
++++ b/include/linux/qed/qed_if.h
+@@ -182,6 +182,43 @@ enum qed_led_mode {
+       QED_LED_MODE_RESTORE
+ };
++struct qed_mfw_tlv_eth {
++      u16 lso_maxoff_size;
++      bool lso_maxoff_size_set;
++      u16 lso_minseg_size;
++      bool lso_minseg_size_set;
++      u8 prom_mode;
++      bool prom_mode_set;
++      u16 tx_descr_size;
++      bool tx_descr_size_set;
++      u16 rx_descr_size;
++      bool rx_descr_size_set;
++      u16 netq_count;
++      bool netq_count_set;
++      u32 tcp4_offloads;
++      bool tcp4_offloads_set;
++      u32 tcp6_offloads;
++      bool tcp6_offloads_set;
++      u16 tx_descr_qdepth;
++      bool tx_descr_qdepth_set;
++      u16 rx_descr_qdepth;
++      bool rx_descr_qdepth_set;
++      u8 iov_offload;
++#define QED_MFW_TLV_IOV_OFFLOAD_NONE            (0)
++#define QED_MFW_TLV_IOV_OFFLOAD_MULTIQUEUE      (1)
++#define QED_MFW_TLV_IOV_OFFLOAD_VEB             (2)
++#define QED_MFW_TLV_IOV_OFFLOAD_VEPA            (3)
++      bool iov_offload_set;
++      u8 txqs_empty;
++      bool txqs_empty_set;
++      u8 rxqs_empty;
++      bool rxqs_empty_set;
++      u8 num_txqs_full;
++      bool num_txqs_full_set;
++      u8 num_rxqs_full;
++      bool num_rxqs_full_set;
++};
++
+ #define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \
+                                           (void __iomem *)(reg_addr))
+-- 
+2.9.5
+
diff --git a/linux-next-cherry-picks/0017-qed-Add-support-for-processing-fcoe-tlv-request.patch b/linux-next-cherry-picks/0017-qed-Add-support-for-processing-fcoe-tlv-request.patch
new file mode 100644 (file)
index 0000000..e0aeabe
--- /dev/null
@@ -0,0 +1,1103 @@
+From f240b6882211aae7155a9839dff1426e2853fe30 Mon Sep 17 00:00:00 2001
+From: Sudarsana Reddy Kalluru <sudarsana.kalluru@cavium.com>
+Date: Tue, 22 May 2018 00:28:39 -0700
+Subject: [PATCH 17/44] qed: Add support for processing fcoe tlv request.
+
+Signed-off-by: Sudarsana Reddy Kalluru <Sudarsana.Kalluru@cavium.com>
+Signed-off-by: Ariel Elior <ariel.elior@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/qlogic/qed/qed_mcp.h     |   4 +-
+ drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c | 828 ++++++++++++++++++++++++++
+ include/linux/qed/qed_if.h                    | 193 ++++++
+ 3 files changed, 1024 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+index 591877f..b31f5d8 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+@@ -216,7 +216,8 @@ enum qed_ov_wol {
+ enum qed_mfw_tlv_type {
+       QED_MFW_TLV_GENERIC = 0x1,      /* Core driver TLVs */
+       QED_MFW_TLV_ETH = 0x2,          /* L2 driver TLVs */
+-      QED_MFW_TLV_MAX = 0x4,
++      QED_MFW_TLV_FCOE = 0x4,         /* FCoE protocol TLVs */
++      QED_MFW_TLV_MAX = 0x8,
+ };
+ struct qed_mfw_tlv_generic {
+@@ -245,6 +246,7 @@ struct qed_mfw_tlv_generic {
+ union qed_mfw_tlv_data {
+       struct qed_mfw_tlv_generic generic;
+       struct qed_mfw_tlv_eth eth;
++      struct qed_mfw_tlv_fcoe fcoe;
+ };
+ /**
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
+index d58a714..1873cfc 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
+@@ -64,6 +64,157 @@ static int qed_mfw_get_tlv_group(u8 tlv_type, u8 *tlv_group)
+       case DRV_TLV_RX_QUEUES_FULL:
+               *tlv_group |= QED_MFW_TLV_ETH;
+               break;
++      case DRV_TLV_SCSI_TO:
++      case DRV_TLV_R_T_TOV:
++      case DRV_TLV_R_A_TOV:
++      case DRV_TLV_E_D_TOV:
++      case DRV_TLV_CR_TOV:
++      case DRV_TLV_BOOT_TYPE:
++      case DRV_TLV_NPIV_STATE:
++      case DRV_TLV_NUM_OF_NPIV_IDS:
++      case DRV_TLV_SWITCH_NAME:
++      case DRV_TLV_SWITCH_PORT_NUM:
++      case DRV_TLV_SWITCH_PORT_ID:
++      case DRV_TLV_VENDOR_NAME:
++      case DRV_TLV_SWITCH_MODEL:
++      case DRV_TLV_SWITCH_FW_VER:
++      case DRV_TLV_QOS_PRIORITY_PER_802_1P:
++      case DRV_TLV_PORT_ALIAS:
++      case DRV_TLV_PORT_STATE:
++      case DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE:
++      case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE:
++      case DRV_TLV_LINK_FAILURE_COUNT:
++      case DRV_TLV_FCOE_BOOT_PROGRESS:
++      case DRV_TLV_RX_BROADCAST_PACKETS:
++      case DRV_TLV_TX_BROADCAST_PACKETS:
++      case DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
++      case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
++      case DRV_TLV_FCOE_RX_FRAMES_RECEIVED:
++      case DRV_TLV_FCOE_RX_BYTES_RECEIVED:
++      case DRV_TLV_FCOE_TX_FRAMES_SENT:
++      case DRV_TLV_FCOE_TX_BYTES_SENT:
++      case DRV_TLV_CRC_ERROR_COUNT:
++      case DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID:
++      case DRV_TLV_CRC_ERROR_1_TIMESTAMP:
++      case DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID:
++      case DRV_TLV_CRC_ERROR_2_TIMESTAMP:
++      case DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID:
++      case DRV_TLV_CRC_ERROR_3_TIMESTAMP:
++      case DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID:
++      case DRV_TLV_CRC_ERROR_4_TIMESTAMP:
++      case DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID:
++      case DRV_TLV_CRC_ERROR_5_TIMESTAMP:
++      case DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT:
++      case DRV_TLV_LOSS_OF_SIGNAL_ERRORS:
++      case DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT:
++      case DRV_TLV_DISPARITY_ERROR_COUNT:
++      case DRV_TLV_CODE_VIOLATION_ERROR_COUNT:
++      case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1:
++      case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2:
++      case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3:
++      case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4:
++      case DRV_TLV_LAST_FLOGI_TIMESTAMP:
++      case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1:
++      case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2:
++      case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3:
++      case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4:
++      case DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP:
++      case DRV_TLV_LAST_FLOGI_RJT:
++      case DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP:
++      case DRV_TLV_FDISCS_SENT_COUNT:
++      case DRV_TLV_FDISC_ACCS_RECEIVED:
++      case DRV_TLV_FDISC_RJTS_RECEIVED:
++      case DRV_TLV_PLOGI_SENT_COUNT:
++      case DRV_TLV_PLOGI_ACCS_RECEIVED:
++      case DRV_TLV_PLOGI_RJTS_RECEIVED:
++      case DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID:
++      case DRV_TLV_PLOGI_1_TIMESTAMP:
++      case DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID:
++      case DRV_TLV_PLOGI_2_TIMESTAMP:
++      case DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID:
++      case DRV_TLV_PLOGI_3_TIMESTAMP:
++      case DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID:
++      case DRV_TLV_PLOGI_4_TIMESTAMP:
++      case DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID:
++      case DRV_TLV_PLOGI_5_TIMESTAMP:
++      case DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID:
++      case DRV_TLV_PLOGI_1_ACC_TIMESTAMP:
++      case DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID:
++      case DRV_TLV_PLOGI_2_ACC_TIMESTAMP:
++      case DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID:
++      case DRV_TLV_PLOGI_3_ACC_TIMESTAMP:
++      case DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID:
++      case DRV_TLV_PLOGI_4_ACC_TIMESTAMP:
++      case DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID:
++      case DRV_TLV_PLOGI_5_ACC_TIMESTAMP:
++      case DRV_TLV_LOGOS_ISSUED:
++      case DRV_TLV_LOGO_ACCS_RECEIVED:
++      case DRV_TLV_LOGO_RJTS_RECEIVED:
++      case DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID:
++      case DRV_TLV_LOGO_1_TIMESTAMP:
++      case DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID:
++      case DRV_TLV_LOGO_2_TIMESTAMP:
++      case DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID:
++      case DRV_TLV_LOGO_3_TIMESTAMP:
++      case DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID:
++      case DRV_TLV_LOGO_4_TIMESTAMP:
++      case DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID:
++      case DRV_TLV_LOGO_5_TIMESTAMP:
++      case DRV_TLV_LOGOS_RECEIVED:
++      case DRV_TLV_ACCS_ISSUED:
++      case DRV_TLV_PRLIS_ISSUED:
++      case DRV_TLV_ACCS_RECEIVED:
++      case DRV_TLV_ABTS_SENT_COUNT:
++      case DRV_TLV_ABTS_ACCS_RECEIVED:
++      case DRV_TLV_ABTS_RJTS_RECEIVED:
++      case DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID:
++      case DRV_TLV_ABTS_1_TIMESTAMP:
++      case DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID:
++      case DRV_TLV_ABTS_2_TIMESTAMP:
++      case DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID:
++      case DRV_TLV_ABTS_3_TIMESTAMP:
++      case DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID:
++      case DRV_TLV_ABTS_4_TIMESTAMP:
++      case DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID:
++      case DRV_TLV_ABTS_5_TIMESTAMP:
++      case DRV_TLV_RSCNS_RECEIVED:
++      case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1:
++      case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2:
++      case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3:
++      case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4:
++      case DRV_TLV_LUN_RESETS_ISSUED:
++      case DRV_TLV_ABORT_TASK_SETS_ISSUED:
++      case DRV_TLV_TPRLOS_SENT:
++      case DRV_TLV_NOS_SENT_COUNT:
++      case DRV_TLV_NOS_RECEIVED_COUNT:
++      case DRV_TLV_OLS_COUNT:
++      case DRV_TLV_LR_COUNT:
++      case DRV_TLV_LRR_COUNT:
++      case DRV_TLV_LIP_SENT_COUNT:
++      case DRV_TLV_LIP_RECEIVED_COUNT:
++      case DRV_TLV_EOFA_COUNT:
++      case DRV_TLV_EOFNI_COUNT:
++      case DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT:
++      case DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT:
++      case DRV_TLV_SCSI_STATUS_BUSY_COUNT:
++      case DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT:
++      case DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT:
++      case DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT:
++      case DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT:
++      case DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT:
++      case DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT:
++      case DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ:
++      case DRV_TLV_SCSI_CHECK_1_TIMESTAMP:
++      case DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ:
++      case DRV_TLV_SCSI_CHECK_2_TIMESTAMP:
++      case DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ:
++      case DRV_TLV_SCSI_CHECK_3_TIMESTAMP:
++      case DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ:
++      case DRV_TLV_SCSI_CHECK_4_TIMESTAMP:
++      case DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ:
++      case DRV_TLV_SCSI_CHECK_5_TIMESTAMP:
++              *tlv_group |= QED_MFW_TLV_FCOE;
++              break;
+       default:
+               return -EINVAL;
+       }
+@@ -237,6 +388,672 @@ qed_mfw_get_eth_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
+       return -1;
+ }
++static int
++qed_mfw_get_tlv_time_value(struct qed_mfw_tlv_time *p_time,
++                         struct qed_tlv_parsed_buf *p_buf)
++{
++      if (!p_time->b_set)
++              return -1;
++
++      /* Validate numbers */
++      if (p_time->month > 12)
++              p_time->month = 0;
++      if (p_time->day > 31)
++              p_time->day = 0;
++      if (p_time->hour > 23)
++              p_time->hour = 0;
++      if (p_time->min > 59)
++              p_time->min = 0;
++      if (p_time->msec > 999)
++              p_time->msec = 0;
++      if (p_time->usec > 999)
++              p_time->usec = 0;
++
++      memset(p_buf->data, 0, sizeof(u8) * QED_TLV_DATA_MAX);
++      snprintf(p_buf->data, 14, "%d%d%d%d%d%d",
++               p_time->month, p_time->day,
++               p_time->hour, p_time->min, p_time->msec, p_time->usec);
++
++      p_buf->p_val = p_buf->data;
++
++      return QED_MFW_TLV_TIME_SIZE;
++}
++
++static int
++qed_mfw_get_fcoe_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
++                         struct qed_mfw_tlv_fcoe *p_drv_buf,
++                         struct qed_tlv_parsed_buf *p_buf)
++{
++      struct qed_mfw_tlv_time *p_time;
++      u8 idx;
++
++      switch (p_tlv->tlv_type) {
++      case DRV_TLV_SCSI_TO:
++              if (p_drv_buf->scsi_timeout_set) {
++                      p_buf->p_val = &p_drv_buf->scsi_timeout;
++                      return sizeof(p_drv_buf->scsi_timeout);
++              }
++              break;
++      case DRV_TLV_R_T_TOV:
++              if (p_drv_buf->rt_tov_set) {
++                      p_buf->p_val = &p_drv_buf->rt_tov;
++                      return sizeof(p_drv_buf->rt_tov);
++              }
++              break;
++      case DRV_TLV_R_A_TOV:
++              if (p_drv_buf->ra_tov_set) {
++                      p_buf->p_val = &p_drv_buf->ra_tov;
++                      return sizeof(p_drv_buf->ra_tov);
++              }
++              break;
++      case DRV_TLV_E_D_TOV:
++              if (p_drv_buf->ed_tov_set) {
++                      p_buf->p_val = &p_drv_buf->ed_tov;
++                      return sizeof(p_drv_buf->ed_tov);
++              }
++              break;
++      case DRV_TLV_CR_TOV:
++              if (p_drv_buf->cr_tov_set) {
++                      p_buf->p_val = &p_drv_buf->cr_tov;
++                      return sizeof(p_drv_buf->cr_tov);
++              }
++              break;
++      case DRV_TLV_BOOT_TYPE:
++              if (p_drv_buf->boot_type_set) {
++                      p_buf->p_val = &p_drv_buf->boot_type;
++                      return sizeof(p_drv_buf->boot_type);
++              }
++              break;
++      case DRV_TLV_NPIV_STATE:
++              if (p_drv_buf->npiv_state_set) {
++                      p_buf->p_val = &p_drv_buf->npiv_state;
++                      return sizeof(p_drv_buf->npiv_state);
++              }
++              break;
++      case DRV_TLV_NUM_OF_NPIV_IDS:
++              if (p_drv_buf->num_npiv_ids_set) {
++                      p_buf->p_val = &p_drv_buf->num_npiv_ids;
++                      return sizeof(p_drv_buf->num_npiv_ids);
++              }
++              break;
++      case DRV_TLV_SWITCH_NAME:
++              if (p_drv_buf->switch_name_set) {
++                      p_buf->p_val = &p_drv_buf->switch_name;
++                      return sizeof(p_drv_buf->switch_name);
++              }
++              break;
++      case DRV_TLV_SWITCH_PORT_NUM:
++              if (p_drv_buf->switch_portnum_set) {
++                      p_buf->p_val = &p_drv_buf->switch_portnum;
++                      return sizeof(p_drv_buf->switch_portnum);
++              }
++              break;
++      case DRV_TLV_SWITCH_PORT_ID:
++              if (p_drv_buf->switch_portid_set) {
++                      p_buf->p_val = &p_drv_buf->switch_portid;
++                      return sizeof(p_drv_buf->switch_portid);
++              }
++              break;
++      case DRV_TLV_VENDOR_NAME:
++              if (p_drv_buf->vendor_name_set) {
++                      p_buf->p_val = &p_drv_buf->vendor_name;
++                      return sizeof(p_drv_buf->vendor_name);
++              }
++              break;
++      case DRV_TLV_SWITCH_MODEL:
++              if (p_drv_buf->switch_model_set) {
++                      p_buf->p_val = &p_drv_buf->switch_model;
++                      return sizeof(p_drv_buf->switch_model);
++              }
++              break;
++      case DRV_TLV_SWITCH_FW_VER:
++              if (p_drv_buf->switch_fw_version_set) {
++                      p_buf->p_val = &p_drv_buf->switch_fw_version;
++                      return sizeof(p_drv_buf->switch_fw_version);
++              }
++              break;
++      case DRV_TLV_QOS_PRIORITY_PER_802_1P:
++              if (p_drv_buf->qos_pri_set) {
++                      p_buf->p_val = &p_drv_buf->qos_pri;
++                      return sizeof(p_drv_buf->qos_pri);
++              }
++              break;
++      case DRV_TLV_PORT_ALIAS:
++              if (p_drv_buf->port_alias_set) {
++                      p_buf->p_val = &p_drv_buf->port_alias;
++                      return sizeof(p_drv_buf->port_alias);
++              }
++              break;
++      case DRV_TLV_PORT_STATE:
++              if (p_drv_buf->port_state_set) {
++                      p_buf->p_val = &p_drv_buf->port_state;
++                      return sizeof(p_drv_buf->port_state);
++              }
++              break;
++      case DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE:
++              if (p_drv_buf->fip_tx_descr_size_set) {
++                      p_buf->p_val = &p_drv_buf->fip_tx_descr_size;
++                      return sizeof(p_drv_buf->fip_tx_descr_size);
++              }
++              break;
++      case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE:
++              if (p_drv_buf->fip_rx_descr_size_set) {
++                      p_buf->p_val = &p_drv_buf->fip_rx_descr_size;
++                      return sizeof(p_drv_buf->fip_rx_descr_size);
++              }
++              break;
++      case DRV_TLV_LINK_FAILURE_COUNT:
++              if (p_drv_buf->link_failures_set) {
++                      p_buf->p_val = &p_drv_buf->link_failures;
++                      return sizeof(p_drv_buf->link_failures);
++              }
++              break;
++      case DRV_TLV_FCOE_BOOT_PROGRESS:
++              if (p_drv_buf->fcoe_boot_progress_set) {
++                      p_buf->p_val = &p_drv_buf->fcoe_boot_progress;
++                      return sizeof(p_drv_buf->fcoe_boot_progress);
++              }
++              break;
++      case DRV_TLV_RX_BROADCAST_PACKETS:
++              if (p_drv_buf->rx_bcast_set) {
++                      p_buf->p_val = &p_drv_buf->rx_bcast;
++                      return sizeof(p_drv_buf->rx_bcast);
++              }
++              break;
++      case DRV_TLV_TX_BROADCAST_PACKETS:
++              if (p_drv_buf->tx_bcast_set) {
++                      p_buf->p_val = &p_drv_buf->tx_bcast;
++                      return sizeof(p_drv_buf->tx_bcast);
++              }
++              break;
++      case DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
++              if (p_drv_buf->fcoe_txq_depth_set) {
++                      p_buf->p_val = &p_drv_buf->fcoe_txq_depth;
++                      return sizeof(p_drv_buf->fcoe_txq_depth);
++              }
++              break;
++      case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
++              if (p_drv_buf->fcoe_rxq_depth_set) {
++                      p_buf->p_val = &p_drv_buf->fcoe_rxq_depth;
++                      return sizeof(p_drv_buf->fcoe_rxq_depth);
++              }
++              break;
++      case DRV_TLV_FCOE_RX_FRAMES_RECEIVED:
++              if (p_drv_buf->fcoe_rx_frames_set) {
++                      p_buf->p_val = &p_drv_buf->fcoe_rx_frames;
++                      return sizeof(p_drv_buf->fcoe_rx_frames);
++              }
++              break;
++      case DRV_TLV_FCOE_RX_BYTES_RECEIVED:
++              if (p_drv_buf->fcoe_rx_bytes_set) {
++                      p_buf->p_val = &p_drv_buf->fcoe_rx_bytes;
++                      return sizeof(p_drv_buf->fcoe_rx_bytes);
++              }
++              break;
++      case DRV_TLV_FCOE_TX_FRAMES_SENT:
++              if (p_drv_buf->fcoe_tx_frames_set) {
++                      p_buf->p_val = &p_drv_buf->fcoe_tx_frames;
++                      return sizeof(p_drv_buf->fcoe_tx_frames);
++              }
++              break;
++      case DRV_TLV_FCOE_TX_BYTES_SENT:
++              if (p_drv_buf->fcoe_tx_bytes_set) {
++                      p_buf->p_val = &p_drv_buf->fcoe_tx_bytes;
++                      return sizeof(p_drv_buf->fcoe_tx_bytes);
++              }
++              break;
++      case DRV_TLV_CRC_ERROR_COUNT:
++              if (p_drv_buf->crc_count_set) {
++                      p_buf->p_val = &p_drv_buf->crc_count;
++                      return sizeof(p_drv_buf->crc_count);
++              }
++              break;
++      case DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID:
++      case DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID:
++      case DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID:
++      case DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID:
++      case DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID:
++              idx = (p_tlv->tlv_type -
++                     DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID) / 2;
++
++              if (p_drv_buf->crc_err_src_fcid_set[idx]) {
++                      p_buf->p_val = &p_drv_buf->crc_err_src_fcid[idx];
++                      return sizeof(p_drv_buf->crc_err_src_fcid[idx]);
++              }
++              break;
++      case DRV_TLV_CRC_ERROR_1_TIMESTAMP:
++      case DRV_TLV_CRC_ERROR_2_TIMESTAMP:
++      case DRV_TLV_CRC_ERROR_3_TIMESTAMP:
++      case DRV_TLV_CRC_ERROR_4_TIMESTAMP:
++      case DRV_TLV_CRC_ERROR_5_TIMESTAMP:
++              idx = (p_tlv->tlv_type - DRV_TLV_CRC_ERROR_1_TIMESTAMP) / 2;
++
++              return qed_mfw_get_tlv_time_value(&p_drv_buf->crc_err[idx],
++                                                p_buf);
++      case DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT:
++              if (p_drv_buf->losync_err_set) {
++                      p_buf->p_val = &p_drv_buf->losync_err;
++                      return sizeof(p_drv_buf->losync_err);
++              }
++              break;
++      case DRV_TLV_LOSS_OF_SIGNAL_ERRORS:
++              if (p_drv_buf->losig_err_set) {
++                      p_buf->p_val = &p_drv_buf->losig_err;
++                      return sizeof(p_drv_buf->losig_err);
++              }
++              break;
++      case DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT:
++              if (p_drv_buf->primtive_err_set) {
++                      p_buf->p_val = &p_drv_buf->primtive_err;
++                      return sizeof(p_drv_buf->primtive_err);
++              }
++              break;
++      case DRV_TLV_DISPARITY_ERROR_COUNT:
++              if (p_drv_buf->disparity_err_set) {
++                      p_buf->p_val = &p_drv_buf->disparity_err;
++                      return sizeof(p_drv_buf->disparity_err);
++              }
++              break;
++      case DRV_TLV_CODE_VIOLATION_ERROR_COUNT:
++              if (p_drv_buf->code_violation_err_set) {
++                      p_buf->p_val = &p_drv_buf->code_violation_err;
++                      return sizeof(p_drv_buf->code_violation_err);
++              }
++              break;
++      case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1:
++      case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2:
++      case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3:
++      case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4:
++              idx = p_tlv->tlv_type -
++                      DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1;
++              if (p_drv_buf->flogi_param_set[idx]) {
++                      p_buf->p_val = &p_drv_buf->flogi_param[idx];
++                      return sizeof(p_drv_buf->flogi_param[idx]);
++              }
++              break;
++      case DRV_TLV_LAST_FLOGI_TIMESTAMP:
++              return qed_mfw_get_tlv_time_value(&p_drv_buf->flogi_tstamp,
++                                                p_buf);
++      case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1:
++      case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2:
++      case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3:
++      case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4:
++              idx = p_tlv->tlv_type -
++                      DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1;
++
++              if (p_drv_buf->flogi_acc_param_set[idx]) {
++                      p_buf->p_val = &p_drv_buf->flogi_acc_param[idx];
++                      return sizeof(p_drv_buf->flogi_acc_param[idx]);
++              }
++              break;
++      case DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP:
++              return qed_mfw_get_tlv_time_value(&p_drv_buf->flogi_acc_tstamp,
++                                                p_buf);
++      case DRV_TLV_LAST_FLOGI_RJT:
++              if (p_drv_buf->flogi_rjt_set) {
++                      p_buf->p_val = &p_drv_buf->flogi_rjt;
++                      return sizeof(p_drv_buf->flogi_rjt);
++              }
++              break;
++      case DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP:
++              return qed_mfw_get_tlv_time_value(&p_drv_buf->flogi_rjt_tstamp,
++                                                p_buf);
++      case DRV_TLV_FDISCS_SENT_COUNT:
++              if (p_drv_buf->fdiscs_set) {
++                      p_buf->p_val = &p_drv_buf->fdiscs;
++                      return sizeof(p_drv_buf->fdiscs);
++              }
++              break;
++      case DRV_TLV_FDISC_ACCS_RECEIVED:
++              if (p_drv_buf->fdisc_acc_set) {
++                      p_buf->p_val = &p_drv_buf->fdisc_acc;
++                      return sizeof(p_drv_buf->fdisc_acc);
++              }
++              break;
++      case DRV_TLV_FDISC_RJTS_RECEIVED:
++              if (p_drv_buf->fdisc_rjt_set) {
++                      p_buf->p_val = &p_drv_buf->fdisc_rjt;
++                      return sizeof(p_drv_buf->fdisc_rjt);
++              }
++              break;
++      case DRV_TLV_PLOGI_SENT_COUNT:
++              if (p_drv_buf->plogi_set) {
++                      p_buf->p_val = &p_drv_buf->plogi;
++                      return sizeof(p_drv_buf->plogi);
++              }
++              break;
++      case DRV_TLV_PLOGI_ACCS_RECEIVED:
++              if (p_drv_buf->plogi_acc_set) {
++                      p_buf->p_val = &p_drv_buf->plogi_acc;
++                      return sizeof(p_drv_buf->plogi_acc);
++              }
++              break;
++      case DRV_TLV_PLOGI_RJTS_RECEIVED:
++              if (p_drv_buf->plogi_rjt_set) {
++                      p_buf->p_val = &p_drv_buf->plogi_rjt;
++                      return sizeof(p_drv_buf->plogi_rjt);
++              }
++              break;
++      case DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID:
++      case DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID:
++      case DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID:
++      case DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID:
++      case DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID:
++              idx = (p_tlv->tlv_type -
++                     DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID) / 2;
++
++              if (p_drv_buf->plogi_dst_fcid_set[idx]) {
++                      p_buf->p_val = &p_drv_buf->plogi_dst_fcid[idx];
++                      return sizeof(p_drv_buf->plogi_dst_fcid[idx]);
++              }
++              break;
++      case DRV_TLV_PLOGI_1_TIMESTAMP:
++      case DRV_TLV_PLOGI_2_TIMESTAMP:
++      case DRV_TLV_PLOGI_3_TIMESTAMP:
++      case DRV_TLV_PLOGI_4_TIMESTAMP:
++      case DRV_TLV_PLOGI_5_TIMESTAMP:
++              idx = (p_tlv->tlv_type - DRV_TLV_PLOGI_1_TIMESTAMP) / 2;
++
++              return qed_mfw_get_tlv_time_value(&p_drv_buf->plogi_tstamp[idx],
++                                                p_buf);
++      case DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID:
++      case DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID:
++      case DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID:
++      case DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID:
++      case DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID:
++              idx = (p_tlv->tlv_type -
++                     DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID) / 2;
++
++              if (p_drv_buf->plogi_acc_src_fcid_set[idx]) {
++                      p_buf->p_val = &p_drv_buf->plogi_acc_src_fcid[idx];
++                      return sizeof(p_drv_buf->plogi_acc_src_fcid[idx]);
++              }
++              break;
++      case DRV_TLV_PLOGI_1_ACC_TIMESTAMP:
++      case DRV_TLV_PLOGI_2_ACC_TIMESTAMP:
++      case DRV_TLV_PLOGI_3_ACC_TIMESTAMP:
++      case DRV_TLV_PLOGI_4_ACC_TIMESTAMP:
++      case DRV_TLV_PLOGI_5_ACC_TIMESTAMP:
++              idx = (p_tlv->tlv_type - DRV_TLV_PLOGI_1_ACC_TIMESTAMP) / 2;
++              p_time = &p_drv_buf->plogi_acc_tstamp[idx];
++
++              return qed_mfw_get_tlv_time_value(p_time, p_buf);
++      case DRV_TLV_LOGOS_ISSUED:
++              if (p_drv_buf->tx_plogos_set) {
++                      p_buf->p_val = &p_drv_buf->tx_plogos;
++                      return sizeof(p_drv_buf->tx_plogos);
++              }
++              break;
++      case DRV_TLV_LOGO_ACCS_RECEIVED:
++              if (p_drv_buf->plogo_acc_set) {
++                      p_buf->p_val = &p_drv_buf->plogo_acc;
++                      return sizeof(p_drv_buf->plogo_acc);
++              }
++              break;
++      case DRV_TLV_LOGO_RJTS_RECEIVED:
++              if (p_drv_buf->plogo_rjt_set) {
++                      p_buf->p_val = &p_drv_buf->plogo_rjt;
++                      return sizeof(p_drv_buf->plogo_rjt);
++              }
++              break;
++      case DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID:
++      case DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID:
++      case DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID:
++      case DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID:
++      case DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID:
++              idx = (p_tlv->tlv_type - DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID) /
++                      2;
++
++              if (p_drv_buf->plogo_src_fcid_set[idx]) {
++                      p_buf->p_val = &p_drv_buf->plogo_src_fcid[idx];
++                      return sizeof(p_drv_buf->plogo_src_fcid[idx]);
++              }
++              break;
++      case DRV_TLV_LOGO_1_TIMESTAMP:
++      case DRV_TLV_LOGO_2_TIMESTAMP:
++      case DRV_TLV_LOGO_3_TIMESTAMP:
++      case DRV_TLV_LOGO_4_TIMESTAMP:
++      case DRV_TLV_LOGO_5_TIMESTAMP:
++              idx = (p_tlv->tlv_type - DRV_TLV_LOGO_1_TIMESTAMP) / 2;
++
++              return qed_mfw_get_tlv_time_value(&p_drv_buf->plogo_tstamp[idx],
++                                                p_buf);
++      case DRV_TLV_LOGOS_RECEIVED:
++              if (p_drv_buf->rx_logos_set) {
++                      p_buf->p_val = &p_drv_buf->rx_logos;
++                      return sizeof(p_drv_buf->rx_logos);
++              }
++              break;
++      case DRV_TLV_ACCS_ISSUED:
++              if (p_drv_buf->tx_accs_set) {
++                      p_buf->p_val = &p_drv_buf->tx_accs;
++                      return sizeof(p_drv_buf->tx_accs);
++              }
++              break;
++      case DRV_TLV_PRLIS_ISSUED:
++              if (p_drv_buf->tx_prlis_set) {
++                      p_buf->p_val = &p_drv_buf->tx_prlis;
++                      return sizeof(p_drv_buf->tx_prlis);
++              }
++              break;
++      case DRV_TLV_ACCS_RECEIVED:
++              if (p_drv_buf->rx_accs_set) {
++                      p_buf->p_val = &p_drv_buf->rx_accs;
++                      return sizeof(p_drv_buf->rx_accs);
++              }
++              break;
++      case DRV_TLV_ABTS_SENT_COUNT:
++              if (p_drv_buf->tx_abts_set) {
++                      p_buf->p_val = &p_drv_buf->tx_abts;
++                      return sizeof(p_drv_buf->tx_abts);
++              }
++              break;
++      case DRV_TLV_ABTS_ACCS_RECEIVED:
++              if (p_drv_buf->rx_abts_acc_set) {
++                      p_buf->p_val = &p_drv_buf->rx_abts_acc;
++                      return sizeof(p_drv_buf->rx_abts_acc);
++              }
++              break;
++      case DRV_TLV_ABTS_RJTS_RECEIVED:
++              if (p_drv_buf->rx_abts_rjt_set) {
++                      p_buf->p_val = &p_drv_buf->rx_abts_rjt;
++                      return sizeof(p_drv_buf->rx_abts_rjt);
++              }
++              break;
++      case DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID:
++      case DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID:
++      case DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID:
++      case DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID:
++      case DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID:
++              idx = (p_tlv->tlv_type -
++                     DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID) / 2;
++
++              if (p_drv_buf->abts_dst_fcid_set[idx]) {
++                      p_buf->p_val = &p_drv_buf->abts_dst_fcid[idx];
++                      return sizeof(p_drv_buf->abts_dst_fcid[idx]);
++              }
++              break;
++      case DRV_TLV_ABTS_1_TIMESTAMP:
++      case DRV_TLV_ABTS_2_TIMESTAMP:
++      case DRV_TLV_ABTS_3_TIMESTAMP:
++      case DRV_TLV_ABTS_4_TIMESTAMP:
++      case DRV_TLV_ABTS_5_TIMESTAMP:
++              idx = (p_tlv->tlv_type - DRV_TLV_ABTS_1_TIMESTAMP) / 2;
++
++              return qed_mfw_get_tlv_time_value(&p_drv_buf->abts_tstamp[idx],
++                                                p_buf);
++      case DRV_TLV_RSCNS_RECEIVED:
++              if (p_drv_buf->rx_rscn_set) {
++                      p_buf->p_val = &p_drv_buf->rx_rscn;
++                      return sizeof(p_drv_buf->rx_rscn);
++              }
++              break;
++      case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1:
++      case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2:
++      case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3:
++      case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4:
++              idx = p_tlv->tlv_type - DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1;
++
++              if (p_drv_buf->rx_rscn_nport_set[idx]) {
++                      p_buf->p_val = &p_drv_buf->rx_rscn_nport[idx];
++                      return sizeof(p_drv_buf->rx_rscn_nport[idx]);
++              }
++              break;
++      case DRV_TLV_LUN_RESETS_ISSUED:
++              if (p_drv_buf->tx_lun_rst_set) {
++                      p_buf->p_val = &p_drv_buf->tx_lun_rst;
++                      return sizeof(p_drv_buf->tx_lun_rst);
++              }
++              break;
++      case DRV_TLV_ABORT_TASK_SETS_ISSUED:
++              if (p_drv_buf->abort_task_sets_set) {
++                      p_buf->p_val = &p_drv_buf->abort_task_sets;
++                      return sizeof(p_drv_buf->abort_task_sets);
++              }
++              break;
++      case DRV_TLV_TPRLOS_SENT:
++              if (p_drv_buf->tx_tprlos_set) {
++                      p_buf->p_val = &p_drv_buf->tx_tprlos;
++                      return sizeof(p_drv_buf->tx_tprlos);
++              }
++              break;
++      case DRV_TLV_NOS_SENT_COUNT:
++              if (p_drv_buf->tx_nos_set) {
++                      p_buf->p_val = &p_drv_buf->tx_nos;
++                      return sizeof(p_drv_buf->tx_nos);
++              }
++              break;
++      case DRV_TLV_NOS_RECEIVED_COUNT:
++              if (p_drv_buf->rx_nos_set) {
++                      p_buf->p_val = &p_drv_buf->rx_nos;
++                      return sizeof(p_drv_buf->rx_nos);
++              }
++              break;
++      case DRV_TLV_OLS_COUNT:
++              if (p_drv_buf->ols_set) {
++                      p_buf->p_val = &p_drv_buf->ols;
++                      return sizeof(p_drv_buf->ols);
++              }
++              break;
++      case DRV_TLV_LR_COUNT:
++              if (p_drv_buf->lr_set) {
++                      p_buf->p_val = &p_drv_buf->lr;
++                      return sizeof(p_drv_buf->lr);
++              }
++              break;
++      case DRV_TLV_LRR_COUNT:
++              if (p_drv_buf->lrr_set) {
++                      p_buf->p_val = &p_drv_buf->lrr;
++                      return sizeof(p_drv_buf->lrr);
++              }
++              break;
++      case DRV_TLV_LIP_SENT_COUNT:
++              if (p_drv_buf->tx_lip_set) {
++                      p_buf->p_val = &p_drv_buf->tx_lip;
++                      return sizeof(p_drv_buf->tx_lip);
++              }
++              break;
++      case DRV_TLV_LIP_RECEIVED_COUNT:
++              if (p_drv_buf->rx_lip_set) {
++                      p_buf->p_val = &p_drv_buf->rx_lip;
++                      return sizeof(p_drv_buf->rx_lip);
++              }
++              break;
++      case DRV_TLV_EOFA_COUNT:
++              if (p_drv_buf->eofa_set) {
++                      p_buf->p_val = &p_drv_buf->eofa;
++                      return sizeof(p_drv_buf->eofa);
++              }
++              break;
++      case DRV_TLV_EOFNI_COUNT:
++              if (p_drv_buf->eofni_set) {
++                      p_buf->p_val = &p_drv_buf->eofni;
++                      return sizeof(p_drv_buf->eofni);
++              }
++              break;
++      case DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT:
++              if (p_drv_buf->scsi_chks_set) {
++                      p_buf->p_val = &p_drv_buf->scsi_chks;
++                      return sizeof(p_drv_buf->scsi_chks);
++              }
++              break;
++      case DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT:
++              if (p_drv_buf->scsi_cond_met_set) {
++                      p_buf->p_val = &p_drv_buf->scsi_cond_met;
++                      return sizeof(p_drv_buf->scsi_cond_met);
++              }
++              break;
++      case DRV_TLV_SCSI_STATUS_BUSY_COUNT:
++              if (p_drv_buf->scsi_busy_set) {
++                      p_buf->p_val = &p_drv_buf->scsi_busy;
++                      return sizeof(p_drv_buf->scsi_busy);
++              }
++              break;
++      case DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT:
++              if (p_drv_buf->scsi_inter_set) {
++                      p_buf->p_val = &p_drv_buf->scsi_inter;
++                      return sizeof(p_drv_buf->scsi_inter);
++              }
++              break;
++      case DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT:
++              if (p_drv_buf->scsi_inter_cond_met_set) {
++                      p_buf->p_val = &p_drv_buf->scsi_inter_cond_met;
++                      return sizeof(p_drv_buf->scsi_inter_cond_met);
++              }
++              break;
++      case DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT:
++              if (p_drv_buf->scsi_rsv_conflicts_set) {
++                      p_buf->p_val = &p_drv_buf->scsi_rsv_conflicts;
++                      return sizeof(p_drv_buf->scsi_rsv_conflicts);
++              }
++              break;
++      case DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT:
++              if (p_drv_buf->scsi_tsk_full_set) {
++                      p_buf->p_val = &p_drv_buf->scsi_tsk_full;
++                      return sizeof(p_drv_buf->scsi_tsk_full);
++              }
++              break;
++      case DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT:
++              if (p_drv_buf->scsi_aca_active_set) {
++                      p_buf->p_val = &p_drv_buf->scsi_aca_active;
++                      return sizeof(p_drv_buf->scsi_aca_active);
++              }
++              break;
++      case DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT:
++              if (p_drv_buf->scsi_tsk_abort_set) {
++                      p_buf->p_val = &p_drv_buf->scsi_tsk_abort;
++                      return sizeof(p_drv_buf->scsi_tsk_abort);
++              }
++              break;
++      case DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ:
++      case DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ:
++      case DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ:
++      case DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ:
++      case DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ:
++              idx = (p_tlv->tlv_type -
++                     DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ) / 2;
++
++              if (p_drv_buf->scsi_rx_chk_set[idx]) {
++                      p_buf->p_val = &p_drv_buf->scsi_rx_chk[idx];
++                      return sizeof(p_drv_buf->scsi_rx_chk[idx]);
++              }
++              break;
++      case DRV_TLV_SCSI_CHECK_1_TIMESTAMP:
++      case DRV_TLV_SCSI_CHECK_2_TIMESTAMP:
++      case DRV_TLV_SCSI_CHECK_3_TIMESTAMP:
++      case DRV_TLV_SCSI_CHECK_4_TIMESTAMP:
++      case DRV_TLV_SCSI_CHECK_5_TIMESTAMP:
++              idx = (p_tlv->tlv_type - DRV_TLV_SCSI_CHECK_1_TIMESTAMP) / 2;
++              p_time = &p_drv_buf->scsi_chk_tstamp[idx];
++
++              return qed_mfw_get_tlv_time_value(p_time, p_buf);
++      default:
++              break;
++      }
++
++      return -1;
++}
++
+ static int qed_mfw_update_tlvs(struct qed_hwfn *p_hwfn,
+                              u8 tlv_group, u8 *p_mfw_buf, u32 size)
+ {
+@@ -276,6 +1093,10 @@ static int qed_mfw_update_tlvs(struct qed_hwfn *p_hwfn,
+                       len = qed_mfw_get_eth_tlv_value(&tlv,
+                                                       &p_tlv_data->eth,
+                                                       &buffer);
++              else if (tlv_group == QED_MFW_TLV_FCOE)
++                      len = qed_mfw_get_fcoe_tlv_value(&tlv,
++                                                       &p_tlv_data->fcoe,
++                                                       &buffer);
+               if (len > 0) {
+                       WARN(len > 4 * tlv.tlv_length,
+@@ -351,6 +1172,13 @@ int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+               tlv_group &= ~QED_MFW_TLV_ETH;
+       }
++      if ((tlv_group & QED_MFW_TLV_FCOE) &&
++          p_hwfn->hw_info.personality != QED_PCI_FCOE) {
++              DP_VERBOSE(p_hwfn, QED_MSG_SP,
++                         "Skipping FCoE TLVs for non-FCoE function\n");
++              tlv_group &= ~QED_MFW_TLV_FCOE;
++      }
++
+       /* Update the TLV values in the local buffer */
+       for (id = QED_MFW_TLV_GENERIC; id < QED_MFW_TLV_MAX; id <<= 1) {
+               if (tlv_group & id)
+diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
+index 8e4fad4..74c2b9a 100644
+--- a/include/linux/qed/qed_if.h
++++ b/include/linux/qed/qed_if.h
+@@ -219,6 +219,199 @@ struct qed_mfw_tlv_eth {
+       bool num_rxqs_full_set;
+ };
++#define QED_MFW_TLV_TIME_SIZE 14
++struct qed_mfw_tlv_time {
++      bool b_set;
++      u8 month;
++      u8 day;
++      u8 hour;
++      u8 min;
++      u16 msec;
++      u16 usec;
++};
++
++struct qed_mfw_tlv_fcoe {
++      u8 scsi_timeout;
++      bool scsi_timeout_set;
++      u32 rt_tov;
++      bool rt_tov_set;
++      u32 ra_tov;
++      bool ra_tov_set;
++      u32 ed_tov;
++      bool ed_tov_set;
++      u32 cr_tov;
++      bool cr_tov_set;
++      u8 boot_type;
++      bool boot_type_set;
++      u8 npiv_state;
++      bool npiv_state_set;
++      u32 num_npiv_ids;
++      bool num_npiv_ids_set;
++      u8 switch_name[8];
++      bool switch_name_set;
++      u16 switch_portnum;
++      bool switch_portnum_set;
++      u8 switch_portid[3];
++      bool switch_portid_set;
++      u8 vendor_name[8];
++      bool vendor_name_set;
++      u8 switch_model[8];
++      bool switch_model_set;
++      u8 switch_fw_version[8];
++      bool switch_fw_version_set;
++      u8 qos_pri;
++      bool qos_pri_set;
++      u8 port_alias[3];
++      bool port_alias_set;
++      u8 port_state;
++#define QED_MFW_TLV_PORT_STATE_OFFLINE  (0)
++#define QED_MFW_TLV_PORT_STATE_LOOP             (1)
++#define QED_MFW_TLV_PORT_STATE_P2P              (2)
++#define QED_MFW_TLV_PORT_STATE_FABRIC           (3)
++      bool port_state_set;
++      u16 fip_tx_descr_size;
++      bool fip_tx_descr_size_set;
++      u16 fip_rx_descr_size;
++      bool fip_rx_descr_size_set;
++      u16 link_failures;
++      bool link_failures_set;
++      u8 fcoe_boot_progress;
++      bool fcoe_boot_progress_set;
++      u64 rx_bcast;
++      bool rx_bcast_set;
++      u64 tx_bcast;
++      bool tx_bcast_set;
++      u16 fcoe_txq_depth;
++      bool fcoe_txq_depth_set;
++      u16 fcoe_rxq_depth;
++      bool fcoe_rxq_depth_set;
++      u64 fcoe_rx_frames;
++      bool fcoe_rx_frames_set;
++      u64 fcoe_rx_bytes;
++      bool fcoe_rx_bytes_set;
++      u64 fcoe_tx_frames;
++      bool fcoe_tx_frames_set;
++      u64 fcoe_tx_bytes;
++      bool fcoe_tx_bytes_set;
++      u16 crc_count;
++      bool crc_count_set;
++      u32 crc_err_src_fcid[5];
++      bool crc_err_src_fcid_set[5];
++      struct qed_mfw_tlv_time crc_err[5];
++      u16 losync_err;
++      bool losync_err_set;
++      u16 losig_err;
++      bool losig_err_set;
++      u16 primtive_err;
++      bool primtive_err_set;
++      u16 disparity_err;
++      bool disparity_err_set;
++      u16 code_violation_err;
++      bool code_violation_err_set;
++      u32 flogi_param[4];
++      bool flogi_param_set[4];
++      struct qed_mfw_tlv_time flogi_tstamp;
++      u32 flogi_acc_param[4];
++      bool flogi_acc_param_set[4];
++      struct qed_mfw_tlv_time flogi_acc_tstamp;
++      u32 flogi_rjt;
++      bool flogi_rjt_set;
++      struct qed_mfw_tlv_time flogi_rjt_tstamp;
++      u32 fdiscs;
++      bool fdiscs_set;
++      u8 fdisc_acc;
++      bool fdisc_acc_set;
++      u8 fdisc_rjt;
++      bool fdisc_rjt_set;
++      u8 plogi;
++      bool plogi_set;
++      u8 plogi_acc;
++      bool plogi_acc_set;
++      u8 plogi_rjt;
++      bool plogi_rjt_set;
++      u32 plogi_dst_fcid[5];
++      bool plogi_dst_fcid_set[5];
++      struct qed_mfw_tlv_time plogi_tstamp[5];
++      u32 plogi_acc_src_fcid[5];
++      bool plogi_acc_src_fcid_set[5];
++      struct qed_mfw_tlv_time plogi_acc_tstamp[5];
++      u8 tx_plogos;
++      bool tx_plogos_set;
++      u8 plogo_acc;
++      bool plogo_acc_set;
++      u8 plogo_rjt;
++      bool plogo_rjt_set;
++      u32 plogo_src_fcid[5];
++      bool plogo_src_fcid_set[5];
++      struct qed_mfw_tlv_time plogo_tstamp[5];
++      u8 rx_logos;
++      bool rx_logos_set;
++      u8 tx_accs;
++      bool tx_accs_set;
++      u8 tx_prlis;
++      bool tx_prlis_set;
++      u8 rx_accs;
++      bool rx_accs_set;
++      u8 tx_abts;
++      bool tx_abts_set;
++      u8 rx_abts_acc;
++      bool rx_abts_acc_set;
++      u8 rx_abts_rjt;
++      bool rx_abts_rjt_set;
++      u32 abts_dst_fcid[5];
++      bool abts_dst_fcid_set[5];
++      struct qed_mfw_tlv_time abts_tstamp[5];
++      u8 rx_rscn;
++      bool rx_rscn_set;
++      u32 rx_rscn_nport[4];
++      bool rx_rscn_nport_set[4];
++      u8 tx_lun_rst;
++      bool tx_lun_rst_set;
++      u8 abort_task_sets;
++      bool abort_task_sets_set;
++      u8 tx_tprlos;
++      bool tx_tprlos_set;
++      u8 tx_nos;
++      bool tx_nos_set;
++      u8 rx_nos;
++      bool rx_nos_set;
++      u8 ols;
++      bool ols_set;
++      u8 lr;
++      bool lr_set;
++      u8 lrr;
++      bool lrr_set;
++      u8 tx_lip;
++      bool tx_lip_set;
++      u8 rx_lip;
++      bool rx_lip_set;
++      u8 eofa;
++      bool eofa_set;
++      u8 eofni;
++      bool eofni_set;
++      u8 scsi_chks;
++      bool scsi_chks_set;
++      u8 scsi_cond_met;
++      bool scsi_cond_met_set;
++      u8 scsi_busy;
++      bool scsi_busy_set;
++      u8 scsi_inter;
++      bool scsi_inter_set;
++      u8 scsi_inter_cond_met;
++      bool scsi_inter_cond_met_set;
++      u8 scsi_rsv_conflicts;
++      bool scsi_rsv_conflicts_set;
++      u8 scsi_tsk_full;
++      bool scsi_tsk_full_set;
++      u8 scsi_aca_active;
++      bool scsi_aca_active_set;
++      u8 scsi_tsk_abort;
++      bool scsi_tsk_abort_set;
++      u32 scsi_rx_chk[5];
++      bool scsi_rx_chk_set[5];
++      struct qed_mfw_tlv_time scsi_chk_tstamp[5];
++};
++
+ #define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \
+                                           (void __iomem *)(reg_addr))
+-- 
+2.9.5
+
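The FCoE hunk above follows one pattern throughout: every statistic in struct qed_mfw_tlv_fcoe is paired with a *_set validity flag, and the five-slot history TLVs interleave an ID type with a timestamp type, so the slot index is (tlv_type - FIRST_ID_TYPE) / 2. A minimal standalone sketch of that lookup, using hypothetical demo_* names rather than the driver's and compilable in userspace:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

enum demo_tlv {
	DEMO_TLV_CRC_1_FCID = 10,	/* ID and timestamp types alternate */
	DEMO_TLV_CRC_1_TSTAMP,
	DEMO_TLV_CRC_2_FCID,
	DEMO_TLV_CRC_2_TSTAMP,
};

struct demo_buf {
	uint32_t crc_src_fcid[5];
	bool crc_src_fcid_set[5];
};

/* Returns the value size on success, -1 when the field was never set. */
static int demo_get_tlv(struct demo_buf *b, int type, void **p_val)
{
	int idx;

	switch (type) {
	case DEMO_TLV_CRC_1_FCID:
	case DEMO_TLV_CRC_2_FCID:
		idx = (type - DEMO_TLV_CRC_1_FCID) / 2;	/* stride-2 slots */
		if (b->crc_src_fcid_set[idx]) {
			*p_val = &b->crc_src_fcid[idx];
			return sizeof(b->crc_src_fcid[idx]);
		}
		break;
	default:
		break;
	}
	return -1;
}

int main(void)
{
	struct demo_buf b = { .crc_src_fcid = { 0, 0xabcdef },
			      .crc_src_fcid_set = { false, true } };
	void *val;
	int len = demo_get_tlv(&b, DEMO_TLV_CRC_2_FCID, &val);

	if (len > 0)
		printf("slot 1 fcid=0x%x (%d bytes)\n",
		       *(uint32_t *)val, len);
	return 0;
}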
diff --git a/linux-next-cherry-picks/0018-qed-Add-support-for-processing-iscsi-tlv-request.patch b/linux-next-cherry-picks/0018-qed-Add-support-for-processing-iscsi-tlv-request.patch
new file mode 100644 (file)
index 0000000..17cf92b
--- /dev/null
@@ -0,0 +1,249 @@
+From 77a509e4f6d14c2e09acbab5a89a769740bda62c Mon Sep 17 00:00:00 2001
+From: Sudarsana Reddy Kalluru <sudarsana.kalluru@cavium.com>
+Date: Tue, 22 May 2018 00:28:40 -0700
+Subject: [PATCH 18/44] qed: Add support for processing iscsi tlv request.
+
+Signed-off-by: Sudarsana Reddy Kalluru <Sudarsana.Kalluru@cavium.com>
+Signed-off-by: Ariel Elior <ariel.elior@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/qlogic/qed/qed_mcp.h     |   4 +-
+ drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c | 131 ++++++++++++++++++++++++++
+ include/linux/qed/qed_if.h                    |  36 +++++++
+ 3 files changed, 170 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+index b31f5d8..632a838 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+@@ -217,7 +217,8 @@ enum qed_mfw_tlv_type {
+       QED_MFW_TLV_GENERIC = 0x1,      /* Core driver TLVs */
+       QED_MFW_TLV_ETH = 0x2,          /* L2 driver TLVs */
+       QED_MFW_TLV_FCOE = 0x4,         /* FCoE protocol TLVs */
+-      QED_MFW_TLV_MAX = 0x8,
++      QED_MFW_TLV_ISCSI = 0x8,        /* iSCSI protocol TLVs */
++      QED_MFW_TLV_MAX = 0x16,
+ };
+ struct qed_mfw_tlv_generic {
+@@ -247,6 +248,7 @@ union qed_mfw_tlv_data {
+       struct qed_mfw_tlv_generic generic;
+       struct qed_mfw_tlv_eth eth;
+       struct qed_mfw_tlv_fcoe fcoe;
++      struct qed_mfw_tlv_iscsi iscsi;
+ };
+ /**
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
+index 1873cfc..6c16158 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
+@@ -215,6 +215,23 @@ static int qed_mfw_get_tlv_group(u8 tlv_type, u8 *tlv_group)
+       case DRV_TLV_SCSI_CHECK_5_TIMESTAMP:
+               *tlv_group = QED_MFW_TLV_FCOE;
+               break;
++      case DRV_TLV_TARGET_LLMNR_ENABLED:
++      case DRV_TLV_HEADER_DIGEST_FLAG_ENABLED:
++      case DRV_TLV_DATA_DIGEST_FLAG_ENABLED:
++      case DRV_TLV_AUTHENTICATION_METHOD:
++      case DRV_TLV_ISCSI_BOOT_TARGET_PORTAL:
++      case DRV_TLV_MAX_FRAME_SIZE:
++      case DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE:
++      case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE:
++      case DRV_TLV_ISCSI_BOOT_PROGRESS:
++      case DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
++      case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
++      case DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED:
++      case DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED:
++      case DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT:
++      case DRV_TLV_ISCSI_PDU_TX_BYTES_SENT:
++              *tlv_group |= QED_MFW_TLV_ISCSI;
++              break;
+       default:
+               return -EINVAL;
+       }
+@@ -1054,6 +1071,109 @@ qed_mfw_get_fcoe_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
+       return -1;
+ }
++static int
++qed_mfw_get_iscsi_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
++                          struct qed_mfw_tlv_iscsi *p_drv_buf,
++                          struct qed_tlv_parsed_buf *p_buf)
++{
++      switch (p_tlv->tlv_type) {
++      case DRV_TLV_TARGET_LLMNR_ENABLED:
++              if (p_drv_buf->target_llmnr_set) {
++                      p_buf->p_val = &p_drv_buf->target_llmnr;
++                      return sizeof(p_drv_buf->target_llmnr);
++              }
++              break;
++      case DRV_TLV_HEADER_DIGEST_FLAG_ENABLED:
++              if (p_drv_buf->header_digest_set) {
++                      p_buf->p_val = &p_drv_buf->header_digest;
++                      return sizeof(p_drv_buf->header_digest);
++              }
++              break;
++      case DRV_TLV_DATA_DIGEST_FLAG_ENABLED:
++              if (p_drv_buf->data_digest_set) {
++                      p_buf->p_val = &p_drv_buf->data_digest;
++                      return sizeof(p_drv_buf->data_digest);
++              }
++              break;
++      case DRV_TLV_AUTHENTICATION_METHOD:
++              if (p_drv_buf->auth_method_set) {
++                      p_buf->p_val = &p_drv_buf->auth_method;
++                      return sizeof(p_drv_buf->auth_method);
++              }
++              break;
++      case DRV_TLV_ISCSI_BOOT_TARGET_PORTAL:
++              if (p_drv_buf->boot_taget_portal_set) {
++                      p_buf->p_val = &p_drv_buf->boot_taget_portal;
++                      return sizeof(p_drv_buf->boot_taget_portal);
++              }
++              break;
++      case DRV_TLV_MAX_FRAME_SIZE:
++              if (p_drv_buf->frame_size_set) {
++                      p_buf->p_val = &p_drv_buf->frame_size;
++                      return sizeof(p_drv_buf->frame_size);
++              }
++              break;
++      case DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE:
++              if (p_drv_buf->tx_desc_size_set) {
++                      p_buf->p_val = &p_drv_buf->tx_desc_size;
++                      return sizeof(p_drv_buf->tx_desc_size);
++              }
++              break;
++      case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE:
++              if (p_drv_buf->rx_desc_size_set) {
++                      p_buf->p_val = &p_drv_buf->rx_desc_size;
++                      return sizeof(p_drv_buf->rx_desc_size);
++              }
++              break;
++      case DRV_TLV_ISCSI_BOOT_PROGRESS:
++              if (p_drv_buf->boot_progress_set) {
++                      p_buf->p_val = &p_drv_buf->boot_progress;
++                      return sizeof(p_drv_buf->boot_progress);
++              }
++              break;
++      case DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
++              if (p_drv_buf->tx_desc_qdepth_set) {
++                      p_buf->p_val = &p_drv_buf->tx_desc_qdepth;
++                      return sizeof(p_drv_buf->tx_desc_qdepth);
++              }
++              break;
++      case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
++              if (p_drv_buf->rx_desc_qdepth_set) {
++                      p_buf->p_val = &p_drv_buf->rx_desc_qdepth;
++                      return sizeof(p_drv_buf->rx_desc_qdepth);
++              }
++              break;
++      case DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED:
++              if (p_drv_buf->rx_frames_set) {
++                      p_buf->p_val = &p_drv_buf->rx_frames;
++                      return sizeof(p_drv_buf->rx_frames);
++              }
++              break;
++      case DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED:
++              if (p_drv_buf->rx_bytes_set) {
++                      p_buf->p_val = &p_drv_buf->rx_bytes;
++                      return sizeof(p_drv_buf->rx_bytes);
++              }
++              break;
++      case DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT:
++              if (p_drv_buf->tx_frames_set) {
++                      p_buf->p_val = &p_drv_buf->tx_frames;
++                      return sizeof(p_drv_buf->tx_frames);
++              }
++              break;
++      case DRV_TLV_ISCSI_PDU_TX_BYTES_SENT:
++              if (p_drv_buf->tx_bytes_set) {
++                      p_buf->p_val = &p_drv_buf->tx_bytes;
++                      return sizeof(p_drv_buf->tx_bytes);
++              }
++              break;
++      default:
++              break;
++      }
++
++      return -1;
++}
++
+ static int qed_mfw_update_tlvs(struct qed_hwfn *p_hwfn,
+                              u8 tlv_group, u8 *p_mfw_buf, u32 size)
+ {
+@@ -1097,6 +1217,10 @@ static int qed_mfw_update_tlvs(struct qed_hwfn *p_hwfn,
+                       len = qed_mfw_get_fcoe_tlv_value(&tlv,
+                                                        &p_tlv_data->fcoe,
+                                                        &buffer);
++              else
++                      len = qed_mfw_get_iscsi_tlv_value(&tlv,
++                                                        &p_tlv_data->iscsi,
++                                                        &buffer);
+               if (len > 0) {
+                       WARN(len > 4 * tlv.tlv_length,
+@@ -1179,6 +1303,13 @@ int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+               tlv_group &= ~QED_MFW_TLV_FCOE;
+       }
++      if ((tlv_group & QED_MFW_TLV_ISCSI) &&
++          p_hwfn->hw_info.personality != QED_PCI_ISCSI) {
++              DP_VERBOSE(p_hwfn, QED_MSG_SP,
++                         "Skipping iSCSI TLVs for non-iSCSI function\n");
++              tlv_group &= ~QED_MFW_TLV_ISCSI;
++      }
++
+       /* Update the TLV values in the local buffer */
+       for (id = QED_MFW_TLV_GENERIC; id < QED_MFW_TLV_MAX; id <<= 1) {
+               if (tlv_group & id)
+diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
+index 74c2b9a..92b5352 100644
+--- a/include/linux/qed/qed_if.h
++++ b/include/linux/qed/qed_if.h
+@@ -412,6 +412,42 @@ struct qed_mfw_tlv_fcoe {
+       struct qed_mfw_tlv_time scsi_chk_tstamp[5];
+ };
++struct qed_mfw_tlv_iscsi {
++      u8 target_llmnr;
++      bool target_llmnr_set;
++      u8 header_digest;
++      bool header_digest_set;
++      u8 data_digest;
++      bool data_digest_set;
++      u8 auth_method;
++#define QED_MFW_TLV_AUTH_METHOD_NONE            (1)
++#define QED_MFW_TLV_AUTH_METHOD_CHAP            (2)
++#define QED_MFW_TLV_AUTH_METHOD_MUTUAL_CHAP     (3)
++      bool auth_method_set;
++      u16 boot_taget_portal;
++      bool boot_taget_portal_set;
++      u16 frame_size;
++      bool frame_size_set;
++      u16 tx_desc_size;
++      bool tx_desc_size_set;
++      u16 rx_desc_size;
++      bool rx_desc_size_set;
++      u8 boot_progress;
++      bool boot_progress_set;
++      u16 tx_desc_qdepth;
++      bool tx_desc_qdepth_set;
++      u16 rx_desc_qdepth;
++      bool rx_desc_qdepth_set;
++      u64 rx_frames;
++      bool rx_frames_set;
++      u64 rx_bytes;
++      bool rx_bytes_set;
++      u64 tx_frames;
++      bool tx_frames_set;
++      u64 tx_bytes;
++      bool tx_bytes_set;
++};
++
+ #define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \
+                                           (void __iomem *)(reg_addr))
+-- 
+2.9.5
+
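As with the FCoE patch, each iSCSI statistic travels with a *_set validity flag, and qed_mfw_get_iscsi_tlv_value() reports only the fields whose flag is raised. A hedged, userspace-compilable sketch of how a protocol driver's callback might fill such a buffer; the demo_* types and counters are hypothetical stand-ins, not qedi code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_tlv_iscsi {			/* trimmed value/flag pairs */
	uint16_t frame_size;
	bool frame_size_set;
	uint64_t rx_frames;
	bool rx_frames_set;
	uint64_t tx_frames;
	bool tx_frames_set;
};

struct demo_iscsi_stats {		/* hypothetical driver counters */
	uint64_t rx_pdus;
	uint64_t tx_pdus;
	uint16_t mtu;
};

/* Fill only fields we can vouch for; each flag marks its value valid. */
static void demo_fill_iscsi_tlvs(const struct demo_iscsi_stats *s,
				 struct demo_tlv_iscsi *tlv)
{
	tlv->frame_size = s->mtu;
	tlv->frame_size_set = true;
	tlv->rx_frames = s->rx_pdus;
	tlv->rx_frames_set = true;
	tlv->tx_frames = s->tx_pdus;
	tlv->tx_frames_set = true;
}

int main(void)
{
	struct demo_iscsi_stats s = { .rx_pdus = 42, .tx_pdus = 17,
				      .mtu = 9000 };
	struct demo_tlv_iscsi tlv = { 0 };

	demo_fill_iscsi_tlvs(&s, &tlv);
	printf("rx=%llu tx=%llu mtu=%u\n",
	       (unsigned long long)tlv.rx_frames,
	       (unsigned long long)tlv.tx_frames, tlv.frame_size);
	return 0;
}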
diff --git a/linux-next-cherry-picks/0019-qed-Add-driver-infrastucture-for-handling-mfw-reques.patch b/linux-next-cherry-picks/0019-qed-Add-driver-infrastucture-for-handling-mfw-reques.patch
new file mode 100644 (file)
index 0000000..ef40a07
--- /dev/null
@@ -0,0 +1,290 @@
+From 59ccf86fe69a6a77afebe706913d6b551d84d5bc Mon Sep 17 00:00:00 2001
+From: Sudarsana Reddy Kalluru <sudarsana.kalluru@cavium.com>
+Date: Tue, 22 May 2018 00:28:41 -0700
+Subject: [PATCH 19/44] qed: Add driver infrastructure for handling mfw
+ requests.
+
+MFW requests the TLVs in interrupt context. Extracting the required
+data from the upper layers and populating the TLVs require process
+context. The patch adds work-queues for processing the TLV requests. It
+also adds the implementation for requesting the TLV values from the
+appropriate protocol driver.
+
+Signed-off-by: Sudarsana Reddy Kalluru <Sudarsana.Kalluru@cavium.com>
+Signed-off-by: Ariel Elior <ariel.elior@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/qlogic/qed/qed.h      |   8 ++
+ drivers/net/ethernet/qlogic/qed/qed_main.c | 151 ++++++++++++++++++++++++++++-
+ drivers/net/ethernet/qlogic/qed/qed_mcp.c  |   2 +
+ include/linux/qed/qed_if.h                 |  10 ++
+ 4 files changed, 170 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
+index dfdbe52..00db340 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed.h
++++ b/drivers/net/ethernet/qlogic/qed/qed.h
+@@ -515,6 +515,10 @@ struct qed_simd_fp_handler {
+       void    (*func)(void *);
+ };
++enum qed_slowpath_wq_flag {
++      QED_SLOWPATH_MFW_TLV_REQ,
++};
++
+ struct qed_hwfn {
+       struct qed_dev                  *cdev;
+       u8                              my_id;          /* ID inside the PF */
+@@ -644,6 +648,9 @@ struct qed_hwfn {
+ #endif
+       struct z_stream_s               *stream;
++      struct workqueue_struct *slowpath_wq;
++      struct delayed_work slowpath_task;
++      unsigned long slowpath_task_flags;
+ };
+ struct pci_params {
+@@ -908,6 +915,7 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
+                           union qed_mcp_protocol_stats *stats);
+ int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
+ void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn);
++int qed_mfw_tlv_req(struct qed_hwfn *hwfn);
+ int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn,
+                         enum qed_mfw_tlv_type type,
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
+index cbf0ea9..68c4399 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
+@@ -946,6 +946,68 @@ static void qed_update_pf_params(struct qed_dev *cdev,
+       }
+ }
++static void qed_slowpath_wq_stop(struct qed_dev *cdev)
++{
++      int i;
++
++      if (IS_VF(cdev))
++              return;
++
++      for_each_hwfn(cdev, i) {
++              if (!cdev->hwfns[i].slowpath_wq)
++                      continue;
++
++              flush_workqueue(cdev->hwfns[i].slowpath_wq);
++              destroy_workqueue(cdev->hwfns[i].slowpath_wq);
++      }
++}
++
++static void qed_slowpath_task(struct work_struct *work)
++{
++      struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
++                                           slowpath_task.work);
++      struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
++
++      if (!ptt) {
++              queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);
++              return;
++      }
++
++      if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ,
++                             &hwfn->slowpath_task_flags))
++              qed_mfw_process_tlv_req(hwfn, ptt);
++
++      qed_ptt_release(hwfn, ptt);
++}
++
++static int qed_slowpath_wq_start(struct qed_dev *cdev)
++{
++      struct qed_hwfn *hwfn;
++      char name[NAME_SIZE];
++      int i;
++
++      if (IS_VF(cdev))
++              return 0;
++
++      for_each_hwfn(cdev, i) {
++              hwfn = &cdev->hwfns[i];
++
++              snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x",
++                       cdev->pdev->bus->number,
++                       PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
++
++              hwfn->slowpath_wq = alloc_workqueue(name, 0, 0);
++              if (!hwfn->slowpath_wq) {
++                      DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n");
++                      return -ENOMEM;
++              }
++
++              INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task);
++      }
++
++      return 0;
++}
++
+ static int qed_slowpath_start(struct qed_dev *cdev,
+                             struct qed_slowpath_params *params)
+ {
+@@ -961,6 +1023,9 @@ static int qed_slowpath_start(struct qed_dev *cdev,
+       if (qed_iov_wq_start(cdev))
+               goto err;
++      if (qed_slowpath_wq_start(cdev))
++              goto err;
++
+       if (IS_PF(cdev)) {
+               rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
+                                     &cdev->pdev->dev);
+@@ -1095,6 +1160,8 @@ static int qed_slowpath_start(struct qed_dev *cdev,
+       qed_iov_wq_stop(cdev, false);
++      qed_slowpath_wq_stop(cdev);
++
+       return rc;
+ }
+@@ -1103,6 +1170,8 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
+       if (!cdev)
+               return -ENODEV;
++      qed_slowpath_wq_stop(cdev);
++
+       qed_ll2_dealloc_if(cdev);
+       if (IS_PF(cdev)) {
+@@ -2089,8 +2158,88 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
+       }
+ }
++int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
++{
++      DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
++                 "Scheduling slowpath task [Flag: %d]\n",
++                 QED_SLOWPATH_MFW_TLV_REQ);
++      smp_mb__before_atomic();
++      set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
++      smp_mb__after_atomic();
++      queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);
++
++      return 0;
++}
++
++static void
++qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv)
++{
++      struct qed_common_cb_ops *op = cdev->protocol_ops.common;
++      struct qed_eth_stats_common *p_common;
++      struct qed_generic_tlvs gen_tlvs;
++      struct qed_eth_stats stats;
++      int i;
++
++      memset(&gen_tlvs, 0, sizeof(gen_tlvs));
++      op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs);
++
++      if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM)
++              tlv->flags.ipv4_csum_offload = true;
++      if (gen_tlvs.feat_flags & QED_TLV_LSO)
++              tlv->flags.lso_supported = true;
++      tlv->flags.b_set = true;
++
++      for (i = 0; i < QED_TLV_MAC_COUNT; i++) {
++              if (is_valid_ether_addr(gen_tlvs.mac[i])) {
++                      ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]);
++                      tlv->mac_set[i] = true;
++              }
++      }
++
++      qed_get_vport_stats(cdev, &stats);
++      p_common = &stats.common;
++      tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
++                       p_common->rx_bcast_pkts;
++      tlv->rx_frames_set = true;
++      tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
++                      p_common->rx_bcast_bytes;
++      tlv->rx_bytes_set = true;
++      tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
++                       p_common->tx_bcast_pkts;
++      tlv->tx_frames_set = true;
++      tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
++                      p_common->tx_bcast_bytes;
++      tlv->tx_bytes_set = true;
++}
++
+ int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
+                         union qed_mfw_tlv_data *tlv_buf)
+ {
+-      return -EINVAL;
++      struct qed_dev *cdev = hwfn->cdev;
++      struct qed_common_cb_ops *ops;
++
++      ops = cdev->protocol_ops.common;
++      if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) {
++              DP_NOTICE(hwfn, "Can't collect TLV management info\n");
++              return -EINVAL;
++      }
++
++      switch (type) {
++      case QED_MFW_TLV_GENERIC:
++              qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic);
++              break;
++      case QED_MFW_TLV_ETH:
++              ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth);
++              break;
++      case QED_MFW_TLV_FCOE:
++              ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe);
++              break;
++      case QED_MFW_TLV_ISCSI:
++              ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi);
++              break;
++      default:
++              break;
++      }
++
++      return 0;
+ }
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+index e80f5e7..2612e3e 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+@@ -1622,6 +1622,8 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
+               case MFW_DRV_MSG_S_TAG_UPDATE:
+                       qed_mcp_update_stag(p_hwfn, p_ptt);
+                       break;
++              case MFW_DRV_MSG_GET_TLV_REQ:
++                      qed_mfw_tlv_req(p_hwfn);
+                       break;
+               default:
+                       DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
+diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
+index 92b5352..44af652 100644
+--- a/include/linux/qed/qed_if.h
++++ b/include/linux/qed/qed_if.h
+@@ -751,6 +751,14 @@ struct qed_int_info {
+       u8                      used_cnt;
+ };
++struct qed_generic_tlvs {
++#define QED_TLV_IP_CSUM         BIT(0)
++#define QED_TLV_LSO             BIT(1)
++      u16 feat_flags;
++#define QED_TLV_MAC_COUNT     3
++      u8 mac[QED_TLV_MAC_COUNT][ETH_ALEN];
++};
++
+ #define QED_NVM_SIGNATURE 0x12435687
+ enum qed_nvm_flash_cmd {
+@@ -765,6 +773,8 @@ struct qed_common_cb_ops {
+       void    (*link_update)(void                     *dev,
+                              struct qed_link_output   *link);
+       void    (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type);
++      void (*get_generic_tlv_data)(void *dev, struct qed_generic_tlvs *data);
++      void (*get_protocol_tlv_data)(void *dev, void *data);
+ };
+ struct qed_selftest_ops {
+-- 
+2.9.5
+
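The core move in the patch above is deferring work out of interrupt context: the MFW message handler only sets a flag bit and queues the slowpath task, while the work function runs in process context where it is free to sleep (for instance while acquiring a PTT window). A condensed sketch of that pattern with abbreviated, hypothetical names; illustrative, not a drop-in for the driver code:

#include <linux/workqueue.h>
#include <linux/bitops.h>

struct demo_hwfn {
	struct workqueue_struct *wq;
	struct delayed_work task;
	unsigned long flags;
#define DEMO_FLAG_TLV_REQ 0
};

/* Interrupt context: atomic flag + (re)queue, nothing that can sleep. */
static void demo_notify_tlv_req(struct demo_hwfn *hwfn)
{
	set_bit(DEMO_FLAG_TLV_REQ, &hwfn->flags);
	queue_delayed_work(hwfn->wq, &hwfn->task, 0);
}

/* Process context: may sleep; consumes every flag set since last run. */
static void demo_task(struct work_struct *work)
{
	struct demo_hwfn *hwfn = container_of(work, struct demo_hwfn,
					      task.work);

	if (test_and_clear_bit(DEMO_FLAG_TLV_REQ, &hwfn->flags)) {
		/* collect TLVs and answer the management firmware here */
	}
}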
diff --git a/linux-next-cherry-picks/0020-qede-Add-support-for-populating-ethernet-TLVs.patch b/linux-next-cherry-picks/0020-qede-Add-support-for-populating-ethernet-TLVs.patch
new file mode 100644 (file)
index 0000000..9b78945
--- /dev/null
@@ -0,0 +1,140 @@
+From d25b859ccd614d2397569b833491372d129d1982 Mon Sep 17 00:00:00 2001
+From: Sudarsana Reddy Kalluru <sudarsana.kalluru@cavium.com>
+Date: Tue, 22 May 2018 00:28:42 -0700
+Subject: [PATCH 20/44] qede: Add support for populating ethernet TLVs.
+
+This patch adds callbacks for providing the ethernet protocol driver TLVs.
+
+Signed-off-by: Sudarsana Reddy Kalluru <Sudarsana.Kalluru@cavium.com>
+Signed-off-by: Ariel Elior <ariel.elior@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/qlogic/qede/qede_main.c | 101 +++++++++++++++++++++++++++
+ 1 file changed, 101 insertions(+)
+
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
+index 7abaf27..9e70f71 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
+@@ -133,6 +133,9 @@ static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
+ static void qede_remove(struct pci_dev *pdev);
+ static void qede_shutdown(struct pci_dev *pdev);
+ static void qede_link_update(void *dev, struct qed_link_output *link);
++static void qede_get_eth_tlv_data(void *edev, void *data);
++static void qede_get_generic_tlv_data(void *edev,
++                                    struct qed_generic_tlvs *data);
+ /* The qede lock is used to protect driver state change and driver flows that
+  * are not reentrant.
+@@ -228,6 +231,8 @@ static struct qed_eth_cb_ops qede_ll_ops = {
+               .arfs_filter_op = qede_arfs_filter_op,
+ #endif
+               .link_update = qede_link_update,
++              .get_generic_tlv_data = qede_get_generic_tlv_data,
++              .get_protocol_tlv_data = qede_get_eth_tlv_data,
+       },
+       .force_mac = qede_force_mac,
+       .ports_update = qede_udp_ports_update,
+@@ -2131,3 +2136,99 @@ static void qede_link_update(void *dev, struct qed_link_output *link)
+               }
+       }
+ }
++
++static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq)
++{
++      struct netdev_queue *netdev_txq;
++
++      netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);
++      if (netif_xmit_stopped(netdev_txq))
++              return true;
++
++      return false;
++}
++
++static void qede_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
++{
++      struct qede_dev *edev = dev;
++      struct netdev_hw_addr *ha;
++      int i;
++
++      if (edev->ndev->features & NETIF_F_IP_CSUM)
++              data->feat_flags |= QED_TLV_IP_CSUM;
++      if (edev->ndev->features & NETIF_F_TSO)
++              data->feat_flags |= QED_TLV_LSO;
++
++      ether_addr_copy(data->mac[0], edev->ndev->dev_addr);
++      memset(data->mac[1], 0, ETH_ALEN);
++      memset(data->mac[2], 0, ETH_ALEN);
++      /* Copy the first two UC macs */
++      netif_addr_lock_bh(edev->ndev);
++      i = 1;
++      netdev_for_each_uc_addr(ha, edev->ndev) {
++              ether_addr_copy(data->mac[i++], ha->addr);
++              if (i == QED_TLV_MAC_COUNT)
++                      break;
++      }
++
++      netif_addr_unlock_bh(edev->ndev);
++}
++
++static void qede_get_eth_tlv_data(void *dev, void *data)
++{
++      struct qed_mfw_tlv_eth *etlv = data;
++      struct qede_dev *edev = dev;
++      struct qede_fastpath *fp;
++      int i;
++
++      etlv->lso_maxoff_size = 0XFFFF;
++      etlv->lso_maxoff_size_set = true;
++      etlv->lso_minseg_size = (u16)ETH_TX_LSO_WINDOW_MIN_LEN;
++      etlv->lso_minseg_size_set = true;
++      etlv->prom_mode = !!(edev->ndev->flags & IFF_PROMISC);
++      etlv->prom_mode_set = true;
++      etlv->tx_descr_size = QEDE_TSS_COUNT(edev);
++      etlv->tx_descr_size_set = true;
++      etlv->rx_descr_size = QEDE_RSS_COUNT(edev);
++      etlv->rx_descr_size_set = true;
++      etlv->iov_offload = QED_MFW_TLV_IOV_OFFLOAD_VEB;
++      etlv->iov_offload_set = true;
++
++      /* Fill information regarding queues; Should be done under the qede
++       * lock to guarantee those don't change beneath our feet.
++       */
++      etlv->txqs_empty = true;
++      etlv->rxqs_empty = true;
++      etlv->num_txqs_full = 0;
++      etlv->num_rxqs_full = 0;
++
++      __qede_lock(edev);
++      for_each_queue(i) {
++              fp = &edev->fp_array[i];
++              if (fp->type & QEDE_FASTPATH_TX) {
++                      if (fp->txq->sw_tx_cons != fp->txq->sw_tx_prod)
++                              etlv->txqs_empty = false;
++                      if (qede_is_txq_full(edev, fp->txq))
++                              etlv->num_txqs_full++;
++              }
++              if (fp->type & QEDE_FASTPATH_RX) {
++                      if (qede_has_rx_work(fp->rxq))
++                              etlv->rxqs_empty = false;
++
++                      /* This one is a bit tricky; Firmware might stop
++                       * placing packets if ring is not yet full.
++                       * Give an approximation.
++                       */
++                      if (le16_to_cpu(*fp->rxq->hw_cons_ptr) -
++                          qed_chain_get_cons_idx(&fp->rxq->rx_comp_ring) >
++                          RX_RING_SIZE - 100)
++                              etlv->num_rxqs_full++;
++              }
++      }
++      __qede_unlock(edev);
++
++      etlv->txqs_empty_set = true;
++      etlv->rxqs_empty_set = true;
++      etlv->num_txqs_full_set = true;
++      etlv->num_rxqs_full_set = true;
++}
+-- 
+2.9.5
+
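qede_get_eth_tlv_data() above reports a receive queue as "full" by comparing the distance between the hardware producer index and the driver's consumer index against the ring size minus a margin of roughly 100 slots. Since ring indices are 16-bit and wrap, one wrap-safe way to compute that distance is plain u16 subtraction, as in this standalone sketch; the ring size and indices here are made up, and this is not the driver's exact expression:

#include <stdio.h>
#include <stdint.h>

#define DEMO_RING_SIZE 8192

static int demo_rxq_nearly_full(uint16_t hw_prod, uint16_t sw_cons)
{
	/* u16 subtraction wraps the same way the ring indices do */
	uint16_t used = (uint16_t)(hw_prod - sw_cons);

	return used > DEMO_RING_SIZE - 100;
}

int main(void)
{
	/* wrapped case: producer has passed 0xffff and is back at 0x0050 */
	printf("%d\n", demo_rxq_nearly_full(0x0050, 0x0100));	/* -> 1 */
	printf("%d\n", demo_rxq_nearly_full(0x2000, 0x1f00));	/* -> 0 */
	return 0;
}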
diff --git a/linux-next-cherry-picks/0021-qede-Refactor-ethtool-rx-classification-flow.patch b/linux-next-cherry-picks/0021-qede-Refactor-ethtool-rx-classification-flow.patch
new file mode 100644 (file)
index 0000000..7803748
--- /dev/null
@@ -0,0 +1,680 @@
+From 87885310c199be78a144dff4fec8a94f081920b8 Mon Sep 17 00:00:00 2001
+From: Manish Chopra <manish.chopra@cavium.com>
+Date: Thu, 24 May 2018 09:54:49 -0700
+Subject: [PATCH 21/44] qede: Refactor ethtool rx classification flow.
+
+This patch simplifies the ethtool rx flow configuration
+[via ethtool -U/-N] flow code base by dividing it logically
+into various APIs based on given protocols. It also separates
+various validations and calculations done along the flow
+in their own APIs.
+
+Signed-off-by: Manish Chopra <manish.chopra@cavium.com>
+Signed-off-by: Shahed Shaikh <shahed.shaikh@cavium.com>
+Signed-off-by: Ariel Elior <ariel.elior@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/qlogic/qede/qede_filter.c | 512 ++++++++++++++++---------
+ 1 file changed, 330 insertions(+), 182 deletions(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
+index 43569b1..bd5b4e4 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
+@@ -38,6 +38,7 @@
+ #include <linux/qed/qed_if.h>
+ #include "qede.h"
++#define QEDE_FILTER_PRINT_MAX_LEN     (64)
+ struct qede_arfs_tuple {
+       union {
+               __be32 src_ipv4;
+@@ -51,6 +52,18 @@ struct qede_arfs_tuple {
+       __be16  dst_port;
+       __be16  eth_proto;
+       u8      ip_proto;
++
++      /* Describe filtering mode needed for this kind of filter */
++      enum qed_filter_config_mode mode;
++
++      /* Used to compare new/old filters. Return true if IPs match */
++      bool (*ip_comp)(struct qede_arfs_tuple *a, struct qede_arfs_tuple *b);
++
++      /* Given an address into ethhdr build a header from tuple info */
++      void (*build_hdr)(struct qede_arfs_tuple *t, void *header);
++
++      /* Stringify the tuple for a print into the provided buffer */
++      void (*stringify)(struct qede_arfs_tuple *t, void *buffer);
+ };
+ struct qede_arfs_fltr_node {
+@@ -90,7 +103,9 @@ struct qede_arfs {
+       spinlock_t              arfs_list_lock;
+       unsigned long           *arfs_fltr_bmap;
+       int                     filter_count;
+-      bool                    enable;
++
++      /* Currently configured filtering mode */
++      enum qed_filter_config_mode mode;
+ };
+ static void qede_configure_arfs_fltr(struct qede_dev *edev,
+@@ -110,11 +125,15 @@ static void qede_configure_arfs_fltr(struct qede_dev *edev,
+       params.qid = rxq_id;
+       params.b_is_add = add_fltr;
+-      DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
+-                 "%s arfs filter flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
+-                 add_fltr ? "Adding" : "Deleting",
+-                 n->flow_id, n->sw_id, ntohs(n->tuple.src_port),
+-                 ntohs(n->tuple.dst_port), rxq_id);
++      if (n->tuple.stringify) {
++              char tuple_buffer[QEDE_FILTER_PRINT_MAX_LEN];
++
++              n->tuple.stringify(&n->tuple, tuple_buffer);
++              DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
++                         "%s sw_id[0x%x]: %s [queue %d]\n",
++                         add_fltr ? "Adding" : "Deleting",
++                         n->sw_id, tuple_buffer, rxq_id);
++      }
+       n->used = true;
+       n->filter_op = add_fltr;
+@@ -145,14 +164,13 @@ qede_enqueue_fltr_and_config_searcher(struct qede_dev *edev,
+       INIT_HLIST_NODE(&fltr->node);
+       hlist_add_head(&fltr->node,
+                      QEDE_ARFS_BUCKET_HEAD(edev, bucket_idx));
+-      edev->arfs->filter_count++;
+-
+-      if (edev->arfs->filter_count == 1 && !edev->arfs->enable) {
+-              enum qed_filter_config_mode mode;
+-              mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
+-              edev->ops->configure_arfs_searcher(edev->cdev, mode);
+-              edev->arfs->enable = true;
++      edev->arfs->filter_count++;
++      if (edev->arfs->filter_count == 1 &&
++          edev->arfs->mode == QED_FILTER_CONFIG_MODE_DISABLE) {
++              edev->ops->configure_arfs_searcher(edev->cdev,
++                                                 fltr->tuple.mode);
++              edev->arfs->mode = fltr->tuple.mode;
+       }
+       return 0;
+@@ -167,14 +185,15 @@ qede_dequeue_fltr_and_config_searcher(struct qede_dev *edev,
+                        fltr->buf_len, DMA_TO_DEVICE);
+       qede_free_arfs_filter(edev, fltr);
+-      edev->arfs->filter_count--;
+-      if (!edev->arfs->filter_count && edev->arfs->enable) {
++      edev->arfs->filter_count--;
++      if (!edev->arfs->filter_count &&
++          edev->arfs->mode != QED_FILTER_CONFIG_MODE_DISABLE) {
+               enum qed_filter_config_mode mode;
+               mode = QED_FILTER_CONFIG_MODE_DISABLE;
+-              edev->arfs->enable = false;
+               edev->ops->configure_arfs_searcher(edev->cdev, mode);
++              edev->arfs->mode = QED_FILTER_CONFIG_MODE_DISABLE;
+       }
+ }
+@@ -264,25 +283,17 @@ void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr)
+               }
+       }
++#ifdef CONFIG_RFS_ACCEL
+       spin_lock_bh(&edev->arfs->arfs_list_lock);
+-      if (!edev->arfs->filter_count) {
+-              if (edev->arfs->enable) {
+-                      enum qed_filter_config_mode mode;
+-
+-                      mode = QED_FILTER_CONFIG_MODE_DISABLE;
+-                      edev->arfs->enable = false;
+-                      edev->ops->configure_arfs_searcher(edev->cdev, mode);
+-              }
+-#ifdef CONFIG_RFS_ACCEL
+-      } else {
++      if (edev->arfs->filter_count) {
+               set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
+               schedule_delayed_work(&edev->sp_task,
+                                     QEDE_SP_TASK_POLL_DELAY);
+-#endif
+       }
+       spin_unlock_bh(&edev->arfs->arfs_list_lock);
++#endif
+ }
+ /* This function waits until all aRFS filters get deleted and freed.
+@@ -512,6 +523,7 @@ int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+       eth->h_proto = skb->protocol;
+       n->tuple.eth_proto = skb->protocol;
+       n->tuple.ip_proto = ip_proto;
++      n->tuple.mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
+       memcpy(n->data + ETH_HLEN, skb->data, skb_headlen(skb));
+       rc = qede_enqueue_fltr_and_config_searcher(edev, n, tbl_idx);
+@@ -1339,38 +1351,6 @@ qede_get_arfs_fltr_by_loc(struct hlist_head *head, u32 location)
+       return NULL;
+ }
+-static bool
+-qede_compare_user_flow_ips(struct qede_arfs_fltr_node *tpos,
+-                         struct ethtool_rx_flow_spec *fsp,
+-                         __be16 proto)
+-{
+-      if (proto == htons(ETH_P_IP)) {
+-              struct ethtool_tcpip4_spec *ip;
+-
+-              ip = &fsp->h_u.tcp_ip4_spec;
+-
+-              if (tpos->tuple.src_ipv4 == ip->ip4src &&
+-                  tpos->tuple.dst_ipv4 == ip->ip4dst)
+-                      return true;
+-              else
+-                      return false;
+-      } else {
+-              struct ethtool_tcpip6_spec *ip6;
+-              struct in6_addr *src;
+-
+-              ip6 = &fsp->h_u.tcp_ip6_spec;
+-              src = &tpos->tuple.src_ipv6;
+-
+-              if (!memcmp(src, &ip6->ip6src, sizeof(struct in6_addr)) &&
+-                  !memcmp(&tpos->tuple.dst_ipv6, &ip6->ip6dst,
+-                          sizeof(struct in6_addr)))
+-                      return true;
+-              else
+-                      return false;
+-      }
+-      return false;
+-}
+-
+ int qede_get_cls_rule_all(struct qede_dev *edev, struct ethtool_rxnfc *info,
+                         u32 *rule_locs)
+ {
+@@ -1461,96 +1441,306 @@ int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd)
+ }
+ static int
+-qede_validate_and_check_flow_exist(struct qede_dev *edev,
+-                                 struct ethtool_rx_flow_spec *fsp,
+-                                 int *min_hlen)
++qede_poll_arfs_filter_config(struct qede_dev *edev,
++                           struct qede_arfs_fltr_node *fltr)
+ {
+-      __be16 src_port = 0x0, dst_port = 0x0;
+-      struct qede_arfs_fltr_node *fltr;
+-      struct hlist_node *temp;
+-      struct hlist_head *head;
+-      __be16 eth_proto;
+-      u8 ip_proto;
++      int count = QEDE_ARFS_POLL_COUNT;
+-      if (fsp->location >= QEDE_RFS_MAX_FLTR ||
+-          fsp->ring_cookie >= QEDE_RSS_COUNT(edev))
+-              return -EINVAL;
++      while (fltr->used && count) {
++              msleep(20);
++              count--;
++      }
++
++      if (count == 0 || fltr->fw_rc) {
++              DP_NOTICE(edev, "Timeout in polling filter config\n");
++              qede_dequeue_fltr_and_config_searcher(edev, fltr);
++              return -EIO;
++      }
++
++      return fltr->fw_rc;
++}
++
++static int qede_flow_get_min_header_size(struct qede_arfs_tuple *t)
++{
++      int size = ETH_HLEN;
++
++      if (t->eth_proto == htons(ETH_P_IP))
++              size += sizeof(struct iphdr);
++      else
++              size += sizeof(struct ipv6hdr);
++
++      if (t->ip_proto == IPPROTO_TCP)
++              size += sizeof(struct tcphdr);
++      else
++              size += sizeof(struct udphdr);
++
++      return size;
++}
++
++static bool qede_flow_spec_ipv4_cmp(struct qede_arfs_tuple *a,
++                                  struct qede_arfs_tuple *b)
++{
++      if (a->eth_proto != htons(ETH_P_IP) ||
++          b->eth_proto != htons(ETH_P_IP))
++              return false;
++
++      return (a->src_ipv4 == b->src_ipv4) &&
++             (a->dst_ipv4 == b->dst_ipv4);
++}
++
++static void qede_flow_build_ipv4_hdr(struct qede_arfs_tuple *t,
++                                   void *header)
++{
++      __be16 *ports = (__be16 *)(header + ETH_HLEN + sizeof(struct iphdr));
++      struct iphdr *ip = (struct iphdr *)(header + ETH_HLEN);
++      struct ethhdr *eth = (struct ethhdr *)header;
++
++      eth->h_proto = t->eth_proto;
++      ip->saddr = t->src_ipv4;
++      ip->daddr = t->dst_ipv4;
++      ip->version = 0x4;
++      ip->ihl = 0x5;
++      ip->protocol = t->ip_proto;
++      ip->tot_len = cpu_to_be16(qede_flow_get_min_header_size(t) - ETH_HLEN);
++
++      /* ports is weakly typed to suit both TCP and UDP ports */
++      ports[0] = t->src_port;
++      ports[1] = t->dst_port;
++}
++
++static void qede_flow_stringify_ipv4_hdr(struct qede_arfs_tuple *t,
++                                       void *buffer)
++{
++      const char *prefix = t->ip_proto == IPPROTO_TCP ? "TCP" : "UDP";
++
++      snprintf(buffer, QEDE_FILTER_PRINT_MAX_LEN,
++               "%s %pI4 (%04x) -> %pI4 (%04x)",
++               prefix, &t->src_ipv4, t->src_port,
++               &t->dst_ipv4, t->dst_port);
++}
++
++static bool qede_flow_spec_ipv6_cmp(struct qede_arfs_tuple *a,
++                                  struct qede_arfs_tuple *b)
++{
++      if (a->eth_proto != htons(ETH_P_IPV6) ||
++          b->eth_proto != htons(ETH_P_IPV6))
++              return false;
++
++      if (memcmp(&a->src_ipv6, &b->src_ipv6, sizeof(struct in6_addr)))
++              return false;
++
++      if (memcmp(&a->dst_ipv6, &b->dst_ipv6, sizeof(struct in6_addr)))
++              return false;
++
++      return true;
++}
+-      if (fsp->flow_type == TCP_V4_FLOW) {
+-              *min_hlen += sizeof(struct iphdr) +
+-                              sizeof(struct tcphdr);
+-              eth_proto = htons(ETH_P_IP);
+-              ip_proto = IPPROTO_TCP;
+-      } else if (fsp->flow_type == UDP_V4_FLOW) {
+-              *min_hlen += sizeof(struct iphdr) +
+-                              sizeof(struct udphdr);
+-              eth_proto = htons(ETH_P_IP);
+-              ip_proto = IPPROTO_UDP;
+-      } else if (fsp->flow_type == TCP_V6_FLOW) {
+-              *min_hlen += sizeof(struct ipv6hdr) +
+-                              sizeof(struct tcphdr);
+-              eth_proto = htons(ETH_P_IPV6);
+-              ip_proto = IPPROTO_TCP;
+-      } else if (fsp->flow_type == UDP_V6_FLOW) {
+-              *min_hlen += sizeof(struct ipv6hdr) +
+-                              sizeof(struct udphdr);
+-              eth_proto = htons(ETH_P_IPV6);
+-              ip_proto = IPPROTO_UDP;
++static void qede_flow_build_ipv6_hdr(struct qede_arfs_tuple *t,
++                                   void *header)
++{
++      __be16 *ports = (__be16 *)(header + ETH_HLEN + sizeof(struct ipv6hdr));
++      struct ipv6hdr *ip6 = (struct ipv6hdr *)(header + ETH_HLEN);
++      struct ethhdr *eth = (struct ethhdr *)header;
++
++      eth->h_proto = t->eth_proto;
++      memcpy(&ip6->saddr, &t->src_ipv6, sizeof(struct in6_addr));
++      memcpy(&ip6->daddr, &t->dst_ipv6, sizeof(struct in6_addr));
++      ip6->version = 0x6;
++
++      if (t->ip_proto == IPPROTO_TCP) {
++              ip6->nexthdr = NEXTHDR_TCP;
++              ip6->payload_len = cpu_to_be16(sizeof(struct tcphdr));
+       } else {
+-              DP_NOTICE(edev, "Unsupported flow type = 0x%x\n",
+-                        fsp->flow_type);
+-              return -EPROTONOSUPPORT;
++              ip6->nexthdr = NEXTHDR_UDP;
++              ip6->payload_len = cpu_to_be16(sizeof(struct udphdr));
+       }
+-      if (eth_proto == htons(ETH_P_IP)) {
+-              src_port = fsp->h_u.tcp_ip4_spec.psrc;
+-              dst_port = fsp->h_u.tcp_ip4_spec.pdst;
++      /* ports is weakly typed to suit both TCP and UDP ports */
++      ports[0] = t->src_port;
++      ports[1] = t->dst_port;
++}
++
++static int qede_flow_spec_to_tuple_ipv4_common(struct qede_dev *edev,
++                                             struct qede_arfs_tuple *t,
++                                             struct ethtool_rx_flow_spec *fs)
++{
++      t->eth_proto = htons(ETH_P_IP);
++      t->src_ipv4 = fs->h_u.tcp_ip4_spec.ip4src;
++      t->dst_ipv4 = fs->h_u.tcp_ip4_spec.ip4dst;
++      t->src_port = fs->h_u.tcp_ip4_spec.psrc;
++      t->dst_port = fs->h_u.tcp_ip4_spec.pdst;
++
++      /* We must have a valid 4-tuple */
++      if (t->src_port && t->dst_port && t->src_ipv4 && t->dst_ipv4) {
++              t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
+       } else {
+-              src_port = fsp->h_u.tcp_ip6_spec.psrc;
+-              dst_port = fsp->h_u.tcp_ip6_spec.pdst;
++              DP_INFO(edev, "Invalid N-tuple\n");
++              return -EOPNOTSUPP;
+       }
+-      head = QEDE_ARFS_BUCKET_HEAD(edev, 0);
+-      hlist_for_each_entry_safe(fltr, temp, head, node) {
+-              if ((fltr->tuple.ip_proto == ip_proto &&
+-                   fltr->tuple.eth_proto == eth_proto &&
+-                   qede_compare_user_flow_ips(fltr, fsp, eth_proto) &&
+-                   fltr->tuple.src_port == src_port &&
+-                   fltr->tuple.dst_port == dst_port) ||
+-                  fltr->sw_id == fsp->location)
+-                      return -EEXIST;
++      t->ip_comp = qede_flow_spec_ipv4_cmp;
++      t->build_hdr = qede_flow_build_ipv4_hdr;
++      t->stringify = qede_flow_stringify_ipv4_hdr;
++
++      return 0;
++}
++
++static int qede_flow_spec_to_tuple_tcpv4(struct qede_dev *edev,
++                                       struct qede_arfs_tuple *t,
++                                       struct ethtool_rx_flow_spec *fs)
++{
++      t->ip_proto = IPPROTO_TCP;
++
++      if (qede_flow_spec_to_tuple_ipv4_common(edev, t, fs))
++              return -EINVAL;
++
++      return 0;
++}
++
++static int qede_flow_spec_to_tuple_udpv4(struct qede_dev *edev,
++                                       struct qede_arfs_tuple *t,
++                                       struct ethtool_rx_flow_spec *fs)
++{
++      t->ip_proto = IPPROTO_UDP;
++
++      if (qede_flow_spec_to_tuple_ipv4_common(edev, t, fs))
++              return -EINVAL;
++
++      return 0;
++}
++
++static int qede_flow_spec_to_tuple_ipv6_common(struct qede_dev *edev,
++                                             struct qede_arfs_tuple *t,
++                                             struct ethtool_rx_flow_spec *fs)
++{
++      struct in6_addr zero_addr;
++      void *p;
++
++      p = &zero_addr;
++      memset(p, 0, sizeof(zero_addr));
++
++      t->eth_proto = htons(ETH_P_IPV6);
++      memcpy(&t->src_ipv6, &fs->h_u.tcp_ip6_spec.ip6src,
++             sizeof(struct in6_addr));
++      memcpy(&t->dst_ipv6, &fs->h_u.tcp_ip6_spec.ip6dst,
++             sizeof(struct in6_addr));
++      t->src_port = fs->h_u.tcp_ip6_spec.psrc;
++      t->dst_port = fs->h_u.tcp_ip6_spec.pdst;
++
++      /* We must make sure we have a valid 4-tuple */
++      if (t->src_port && t->dst_port &&
++          memcmp(&t->src_ipv6, p, sizeof(struct in6_addr)) &&
++          memcmp(&t->dst_ipv6, p, sizeof(struct in6_addr))) {
++              t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
++      } else {
++              DP_INFO(edev, "Invalid N-tuple\n");
++              return -EOPNOTSUPP;
+       }
++      t->ip_comp = qede_flow_spec_ipv6_cmp;
++      t->build_hdr = qede_flow_build_ipv6_hdr;
++
+       return 0;
+ }
+-static int
+-qede_poll_arfs_filter_config(struct qede_dev *edev,
+-                           struct qede_arfs_fltr_node *fltr)
++static int qede_flow_spec_to_tuple_tcpv6(struct qede_dev *edev,
++                                       struct qede_arfs_tuple *t,
++                                       struct ethtool_rx_flow_spec *fs)
+ {
+-      int count = QEDE_ARFS_POLL_COUNT;
++      t->ip_proto = IPPROTO_TCP;
+-      while (fltr->used && count) {
+-              msleep(20);
+-              count--;
++      if (qede_flow_spec_to_tuple_ipv6_common(edev, t, fs))
++              return -EINVAL;
++
++      return 0;
++}
++
++static int qede_flow_spec_to_tuple_udpv6(struct qede_dev *edev,
++                                       struct qede_arfs_tuple *t,
++                                       struct ethtool_rx_flow_spec *fs)
++{
++      t->ip_proto = IPPROTO_UDP;
++
++      if (qede_flow_spec_to_tuple_ipv6_common(edev, t, fs))
++              return -EINVAL;
++
++      return 0;
++}
++
++static int qede_flow_spec_to_tuple(struct qede_dev *edev,
++                                 struct qede_arfs_tuple *t,
++                                 struct ethtool_rx_flow_spec *fs)
++{
++      memset(t, 0, sizeof(*t));
++
++      switch ((fs->flow_type & ~FLOW_EXT)) {
++      case TCP_V4_FLOW:
++              return qede_flow_spec_to_tuple_tcpv4(edev, t, fs);
++      case UDP_V4_FLOW:
++              return qede_flow_spec_to_tuple_udpv4(edev, t, fs);
++      case TCP_V6_FLOW:
++              return qede_flow_spec_to_tuple_tcpv6(edev, t, fs);
++      case UDP_V6_FLOW:
++              return qede_flow_spec_to_tuple_udpv6(edev, t, fs);
++      default:
++              DP_VERBOSE(edev, NETIF_MSG_IFUP,
++                         "Can't support flow of type %08x\n", fs->flow_type);
++              return -EOPNOTSUPP;
+       }
+-      if (count == 0 || fltr->fw_rc) {
+-              qede_dequeue_fltr_and_config_searcher(edev, fltr);
+-              return -EIO;
++      return 0;
++}
++
++static int qede_flow_spec_validate(struct qede_dev *edev,
++                                 struct ethtool_rx_flow_spec *fs,
++                                 struct qede_arfs_tuple *t)
++{
++      if (fs->location >= QEDE_RFS_MAX_FLTR) {
++              DP_INFO(edev, "Location out-of-bounds\n");
++              return -EINVAL;
+       }
+-      return fltr->fw_rc;
++      /* Check location isn't already in use */
++      if (test_bit(fs->location, edev->arfs->arfs_fltr_bmap)) {
++              DP_INFO(edev, "Location already in use\n");
++              return -EINVAL;
++      }
++
++      if (fs->ring_cookie >= QEDE_RSS_COUNT(edev)) {
++              DP_INFO(edev, "Queue out-of-bounds\n");
++              return -EINVAL;
++      }
++
++      return 0;
++}
++
++/* Must be called while qede lock is held */
++static struct qede_arfs_fltr_node *
++qede_flow_find_fltr(struct qede_dev *edev, struct qede_arfs_tuple *t)
++{
++      struct qede_arfs_fltr_node *fltr;
++      struct hlist_node *temp;
++      struct hlist_head *head;
++
++      head = QEDE_ARFS_BUCKET_HEAD(edev, 0);
++
++      hlist_for_each_entry_safe(fltr, temp, head, node) {
++              if (fltr->tuple.ip_proto == t->ip_proto &&
++                  fltr->tuple.src_port == t->src_port &&
++                  fltr->tuple.dst_port == t->dst_port &&
++                  t->ip_comp(&fltr->tuple, t))
++                      return fltr;
++      }
++
++      return NULL;
+ }
+ int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
+ {
+       struct ethtool_rx_flow_spec *fsp = &info->fs;
+       struct qede_arfs_fltr_node *n;
+-      int min_hlen = ETH_HLEN, rc;
+-      struct ethhdr *eth;
+-      struct iphdr *ip;
+-      __be16 *ports;
++      struct qede_arfs_tuple t;
++      int min_hlen, rc;
+       __qede_lock(edev);
+@@ -1559,16 +1749,28 @@ int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
+               goto unlock;
+       }
+-      rc = qede_validate_and_check_flow_exist(edev, fsp, &min_hlen);
++      /* Translate the flow specification into something fitting our DB */
++      rc = qede_flow_spec_to_tuple(edev, &t, fsp);
++      if (rc)
++              goto unlock;
++
++      /* Make sure location is valid and filter isn't already set */
++      rc = qede_flow_spec_validate(edev, fsp, &t);
+       if (rc)
+               goto unlock;
++      if (qede_flow_find_fltr(edev, &t)) {
++              rc = -EINVAL;
++              goto unlock;
++      }
++
+       n = kzalloc(sizeof(*n), GFP_KERNEL);
+       if (!n) {
+               rc = -ENOMEM;
+               goto unlock;
+       }
++      min_hlen = qede_flow_get_min_header_size(&t);
+       n->data = kzalloc(min_hlen, GFP_KERNEL);
+       if (!n->data) {
+               kfree(n);
+@@ -1581,66 +1783,11 @@ int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
+       n->buf_len = min_hlen;
+       n->rxq_id = fsp->ring_cookie;
+       n->next_rxq_id = n->rxq_id;
+-      eth = (struct ethhdr *)n->data;
+-      if (info->fs.flow_type == TCP_V4_FLOW ||
+-          info->fs.flow_type == UDP_V4_FLOW) {
+-              ports = (__be16 *)(n->data + ETH_HLEN +
+-                                      sizeof(struct iphdr));
+-              eth->h_proto = htons(ETH_P_IP);
+-              n->tuple.eth_proto = htons(ETH_P_IP);
+-              n->tuple.src_ipv4 = info->fs.h_u.tcp_ip4_spec.ip4src;
+-              n->tuple.dst_ipv4 = info->fs.h_u.tcp_ip4_spec.ip4dst;
+-              n->tuple.src_port = info->fs.h_u.tcp_ip4_spec.psrc;
+-              n->tuple.dst_port = info->fs.h_u.tcp_ip4_spec.pdst;
+-              ports[0] = n->tuple.src_port;
+-              ports[1] = n->tuple.dst_port;
+-              ip = (struct iphdr *)(n->data + ETH_HLEN);
+-              ip->saddr = info->fs.h_u.tcp_ip4_spec.ip4src;
+-              ip->daddr = info->fs.h_u.tcp_ip4_spec.ip4dst;
+-              ip->version = 0x4;
+-              ip->ihl = 0x5;
+-
+-              if (info->fs.flow_type == TCP_V4_FLOW) {
+-                      n->tuple.ip_proto = IPPROTO_TCP;
+-                      ip->protocol = IPPROTO_TCP;
+-              } else {
+-                      n->tuple.ip_proto = IPPROTO_UDP;
+-                      ip->protocol = IPPROTO_UDP;
+-              }
+-              ip->tot_len = cpu_to_be16(min_hlen - ETH_HLEN);
+-      } else {
+-              struct ipv6hdr *ip6;
+-
+-              ip6 = (struct ipv6hdr *)(n->data + ETH_HLEN);
+-              ports = (__be16 *)(n->data + ETH_HLEN +
+-                                      sizeof(struct ipv6hdr));
+-              eth->h_proto = htons(ETH_P_IPV6);
+-              n->tuple.eth_proto = htons(ETH_P_IPV6);
+-              memcpy(&n->tuple.src_ipv6, &info->fs.h_u.tcp_ip6_spec.ip6src,
+-                     sizeof(struct in6_addr));
+-              memcpy(&n->tuple.dst_ipv6, &info->fs.h_u.tcp_ip6_spec.ip6dst,
+-                     sizeof(struct in6_addr));
+-              n->tuple.src_port = info->fs.h_u.tcp_ip6_spec.psrc;
+-              n->tuple.dst_port = info->fs.h_u.tcp_ip6_spec.pdst;
+-              ports[0] = n->tuple.src_port;
+-              ports[1] = n->tuple.dst_port;
+-              memcpy(&ip6->saddr, &n->tuple.src_ipv6,
+-                     sizeof(struct in6_addr));
+-              memcpy(&ip6->daddr, &n->tuple.dst_ipv6,
+-                     sizeof(struct in6_addr));
+-              ip6->version = 0x6;
++      memcpy(&n->tuple, &t, sizeof(n->tuple));
+-              if (info->fs.flow_type == TCP_V6_FLOW) {
+-                      n->tuple.ip_proto = IPPROTO_TCP;
+-                      ip6->nexthdr = NEXTHDR_TCP;
+-                      ip6->payload_len = cpu_to_be16(sizeof(struct tcphdr));
+-              } else {
+-                      n->tuple.ip_proto = IPPROTO_UDP;
+-                      ip6->nexthdr = NEXTHDR_UDP;
+-                      ip6->payload_len = cpu_to_be16(sizeof(struct udphdr));
+-              }
+-      }
++      /* Build a minimal header according to the flow */
++      n->tuple.build_hdr(&n->tuple, n->data);
+       rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0);
+       if (rc)
+@@ -1650,6 +1797,7 @@ int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
+       rc = qede_poll_arfs_filter_config(edev, n);
+ unlock:
+       __qede_unlock(edev);
++
+       return rc;
+ }
+-- 
+2.9.5
+
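A note on the refactor in the patch above: the per-flow-type branching is replaced by a qede_arfs_tuple that carries callbacks (ip_comp, build_hdr, stringify), so supporting a new flow shape only means installing new callbacks. A minimal sketch of that pattern, using simplified, illustrative types that are not the driver's own:

#include <stdint.h>

struct tuple;
typedef int  (*cmp_fn)(const struct tuple *a, const struct tuple *b);
typedef void (*hdr_fn)(const struct tuple *t, void *header);

struct tuple {
        uint8_t ip_proto;
        cmp_fn  ip_comp;        /* protocol-specific address compare */
        hdr_fn  build_hdr;      /* protocol-specific header builder  */
};

/* Generic code no longer branches on the flow type; it dispatches
 * through whatever callbacks the conversion step installed: */
static int tuples_match(const struct tuple *a, const struct tuple *b)
{
        return a->ip_proto == b->ip_proto && a->ip_comp(a, b);
}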
diff --git a/linux-next-cherry-picks/0022-qede-Validate-unsupported-configurations.patch b/linux-next-cherry-picks/0022-qede-Validate-unsupported-configurations.patch
new file mode 100644 (file)
index 0000000..6fdd3fe
--- /dev/null
@@ -0,0 +1,122 @@
+From 89ffd14ee95dca812874fcd25ad3538ff3592a49 Mon Sep 17 00:00:00 2001
+From: Manish Chopra <manish.chopra@cavium.com>
+Date: Thu, 24 May 2018 09:54:50 -0700
+Subject: [PATCH 22/44] qede: Validate unsupported configurations
+
+Validate and reject configurations with inputs that are not
+supported by firmware [for example - mac ext, vlans,
+masks/prefix, tos/tclass] via ethtool -N/-U.
+
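All of the mask checks this patch adds follow one rule: the firmware can only do exact matches, so a field's mask must preserve every bit of the requested value. A hedged sketch of that rule (names here are illustrative, not the driver's):

#include <stdint.h>
#include <stdbool.h>

/* Accept only exact-match masks: (value & mask) must give back value. */
static bool field_is_exact_match(uint32_t value, uint32_t mask)
{
        return (value & mask) == value;
}

/* e.g. ip4src 192.168.0.1 (0xc0a80001) with a /24 mask (0xffffff00)
 * loses its low bits, so a prefix rule is rejected (-EOPNOTSUPP). */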
+Signed-off-by: Manish Chopra <manish.chopra@cavium.com>
+Signed-off-by: Shahed Shaikh <shahed.shaikh@cavium.com>
+Signed-off-by: Ariel Elior <ariel.elior@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/qlogic/qede/qede_filter.c | 73 ++++++++++++++++++++++++++
+ 1 file changed, 73 insertions(+)
+
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
+index bd5b4e4..43ed420 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
+@@ -1560,10 +1560,63 @@ static void qede_flow_build_ipv6_hdr(struct qede_arfs_tuple *t,
+       ports[1] = t->dst_port;
+ }
++/* Validate fields which are set and not accepted by the driver */
++static int qede_flow_spec_validate_unused(struct qede_dev *edev,
++                                        struct ethtool_rx_flow_spec *fs)
++{
++      if (fs->flow_type & FLOW_MAC_EXT) {
++              DP_INFO(edev, "Don't support MAC extensions\n");
++              return -EOPNOTSUPP;
++      }
++
++      if ((fs->flow_type & FLOW_EXT) &&
++          (fs->h_ext.vlan_etype || fs->h_ext.vlan_tci)) {
++              DP_INFO(edev, "Don't support vlan-based classification\n");
++              return -EOPNOTSUPP;
++      }
++
++      if ((fs->flow_type & FLOW_EXT) &&
++          (fs->h_ext.data[0] || fs->h_ext.data[1])) {
++              DP_INFO(edev, "Don't support user defined data\n");
++              return -EOPNOTSUPP;
++      }
++
++      return 0;
++}
++
+ static int qede_flow_spec_to_tuple_ipv4_common(struct qede_dev *edev,
+                                              struct qede_arfs_tuple *t,
+                                              struct ethtool_rx_flow_spec *fs)
+ {
++      if ((fs->h_u.tcp_ip4_spec.ip4src &
++           fs->m_u.tcp_ip4_spec.ip4src) != fs->h_u.tcp_ip4_spec.ip4src) {
++              DP_INFO(edev, "Don't support IP-masks\n");
++              return -EOPNOTSUPP;
++      }
++
++      if ((fs->h_u.tcp_ip4_spec.ip4dst &
++           fs->m_u.tcp_ip4_spec.ip4dst) != fs->h_u.tcp_ip4_spec.ip4dst) {
++              DP_INFO(edev, "Don't support IP-masks\n");
++              return -EOPNOTSUPP;
++      }
++
++      if ((fs->h_u.tcp_ip4_spec.psrc &
++           fs->m_u.tcp_ip4_spec.psrc) != fs->h_u.tcp_ip4_spec.psrc) {
++              DP_INFO(edev, "Don't support port-masks\n");
++              return -EOPNOTSUPP;
++      }
++
++      if ((fs->h_u.tcp_ip4_spec.pdst &
++           fs->m_u.tcp_ip4_spec.pdst) != fs->h_u.tcp_ip4_spec.pdst) {
++              DP_INFO(edev, "Don't support port-masks\n");
++              return -EOPNOTSUPP;
++      }
++
++      if (fs->h_u.tcp_ip4_spec.tos) {
++              DP_INFO(edev, "Don't support tos\n");
++              return -EOPNOTSUPP;
++      }
++
+       t->eth_proto = htons(ETH_P_IP);
+       t->src_ipv4 = fs->h_u.tcp_ip4_spec.ip4src;
+       t->dst_ipv4 = fs->h_u.tcp_ip4_spec.ip4dst;
+@@ -1619,6 +1672,23 @@ static int qede_flow_spec_to_tuple_ipv6_common(struct qede_dev *edev,
+       p = &zero_addr;
+       memset(p, 0, sizeof(zero_addr));
++      if ((fs->h_u.tcp_ip6_spec.psrc &
++           fs->m_u.tcp_ip6_spec.psrc) != fs->h_u.tcp_ip6_spec.psrc) {
++              DP_INFO(edev, "Don't support port-masks\n");
++              return -EOPNOTSUPP;
++      }
++
++      if ((fs->h_u.tcp_ip6_spec.pdst &
++           fs->m_u.tcp_ip6_spec.pdst) != fs->h_u.tcp_ip6_spec.pdst) {
++              DP_INFO(edev, "Don't support port-masks\n");
++              return -EOPNOTSUPP;
++      }
++
++      if (fs->h_u.tcp_ip6_spec.tclass) {
++              DP_INFO(edev, "Don't support tclass\n");
++              return -EOPNOTSUPP;
++      }
++
+       t->eth_proto = htons(ETH_P_IPV6);
+       memcpy(&t->src_ipv6, &fs->h_u.tcp_ip6_spec.ip6src,
+              sizeof(struct in6_addr));
+@@ -1673,6 +1743,9 @@ static int qede_flow_spec_to_tuple(struct qede_dev *edev,
+ {
+       memset(t, 0, sizeof(*t));
++      if (qede_flow_spec_validate_unused(edev, fs))
++              return -EOPNOTSUPP;
++
+       switch ((fs->flow_type & ~FLOW_EXT)) {
+       case TCP_V4_FLOW:
+               return qede_flow_spec_to_tuple_tcpv4(edev, t, fs);
+-- 
+2.9.5
+
diff --git a/linux-next-cherry-picks/0023-qed-Support-other-classification-modes.patch b/linux-next-cherry-picks/0023-qed-Support-other-classification-modes.patch
new file mode 100644 (file)
index 0000000..2f54b7e
--- /dev/null
@@ -0,0 +1,122 @@
+From 3893fc62b1769db3ef160f7f1e36d3db754497ee Mon Sep 17 00:00:00 2001
+From: Manish Chopra <manish.chopra@cavium.com>
+Date: Thu, 24 May 2018 09:54:51 -0700
+Subject: [PATCH 23/44] qed*: Support other classification modes.
+
+Currently, driver supports flow classification to PF
+receive queues based on TCP/UDP 4 tuples [src_ip, dst_ip,
+src_port, dst_port] only.
+
+This patch enables configuring different flow profiles
+[for example - only UDP dest port or src_ip based] on the
+adapter so that classification can be done according to
+just those fields as well. Note that only one type of flow
+configuration is supported at a time, due to the limited
+number of flow profiles available on the device.
+
+For example -
+
+ethtool -N enp7s0f0 flow-type udp4 dst-port 45762 action 2
+ethtool -N enp7s0f0 flow-type tcp4 src-ip 192.16.4.10 action 1
+ethtool -N enp7s0f0 flow-type udp6 dst-port 45762 action 3
+
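A condensed view of the mode selection the patch adds to the IPv4 path (a sketch for illustration, not a verbatim copy of the driver logic):

#include <stdint.h>

enum cfg_mode { MODE_5_TUPLE, MODE_L4_PORT, MODE_IP_SRC, MODE_INVALID };

static enum cfg_mode pick_mode(uint32_t sip, uint32_t dip,
                               uint16_t sport, uint16_t dport)
{
        if (sip && dip && sport && dport)
                return MODE_5_TUPLE;   /* full 4-tuple given */
        if (!sip && !dip && !sport && dport)
                return MODE_L4_PORT;   /* only destination port */
        if (sip && !dip && !sport && !dport)
                return MODE_IP_SRC;    /* only source IP */
        return MODE_INVALID;           /* any other mix is rejected */
}

Because the device exposes a single active profile, the first filter pins edev->arfs->mode and later filters of a different shape are refused, as the added qede_flow_spec_validate check shows.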
+Signed-off-by: Manish Chopra <manish.chopra@cavium.com>
+Signed-off-by: Shahed Shaikh <shahed.shaikh@cavium.com>
+Signed-off-by: Ariel Elior <ariel.elior@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/qlogic/qed/qed_l2.c       |  2 ++
+ drivers/net/ethernet/qlogic/qede/qede_filter.c | 31 ++++++++++++++++++++++++--
+ include/linux/qed/qed_eth_if.h                 |  1 +
+ 3 files changed, 32 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
+index 5e655c3..3cb8a80 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
+@@ -1973,6 +1973,8 @@ qed_arfs_mode_to_hsi(enum qed_filter_config_mode mode)
+               return GFT_PROFILE_TYPE_4_TUPLE;
+       if (mode == QED_FILTER_CONFIG_MODE_IP_DEST)
+               return GFT_PROFILE_TYPE_IP_DST_ADDR;
++      if (mode == QED_FILTER_CONFIG_MODE_IP_SRC)
++              return GFT_PROFILE_TYPE_IP_SRC_ADDR;
+       return GFT_PROFILE_TYPE_L4_DST_PORT;
+ }
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
+index 43ed420..9b84f0c 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
+@@ -1623,9 +1623,17 @@ static int qede_flow_spec_to_tuple_ipv4_common(struct qede_dev *edev,
+       t->src_port = fs->h_u.tcp_ip4_spec.psrc;
+       t->dst_port = fs->h_u.tcp_ip4_spec.pdst;
+-      /* We must have a valid 4-tuple */
++      /* We must either have a valid 4-tuple or only dst port
++       * or only src ip as an input
++       */
+       if (t->src_port && t->dst_port && t->src_ipv4 && t->dst_ipv4) {
+               t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
++      } else if (!t->src_port && t->dst_port &&
++                 !t->src_ipv4 && !t->dst_ipv4) {
++              t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
++      } else if (!t->src_port && !t->dst_port &&
++                  !t->dst_ipv4 && t->src_ipv4) {
++              t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
+       } else {
+               DP_INFO(edev, "Invalid N-tuple\n");
+               return -EOPNOTSUPP;
+@@ -1697,11 +1705,21 @@ static int qede_flow_spec_to_tuple_ipv6_common(struct qede_dev *edev,
+       t->src_port = fs->h_u.tcp_ip6_spec.psrc;
+       t->dst_port = fs->h_u.tcp_ip6_spec.pdst;
+-      /* We must make sure we have a valid 4-tuple */
++      /* We must make sure we have a valid 4-tuple or only dest port
++       * or only src ip as an input
++       */
+       if (t->src_port && t->dst_port &&
+           memcmp(&t->src_ipv6, p, sizeof(struct in6_addr)) &&
+           memcmp(&t->dst_ipv6, p, sizeof(struct in6_addr))) {
+               t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
++      } else if (!t->src_port && t->dst_port &&
++                 !memcmp(&t->src_ipv6, p, sizeof(struct in6_addr)) &&
++                 !memcmp(&t->dst_ipv6, p, sizeof(struct in6_addr))) {
++              t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
++      } else if (!t->src_port && !t->dst_port &&
++                 !memcmp(&t->dst_ipv6, p, sizeof(struct in6_addr)) &&
++                 memcmp(&t->src_ipv6, p, sizeof(struct in6_addr))) {
++              t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
+       } else {
+               DP_INFO(edev, "Invalid N-tuple\n");
+               return -EOPNOTSUPP;
+@@ -1779,6 +1797,15 @@ static int qede_flow_spec_validate(struct qede_dev *edev,
+               return -EINVAL;
+       }
++      /* Check if the filtering-mode could support the filter */
++      if (edev->arfs->filter_count &&
++          edev->arfs->mode != t->mode) {
++              DP_INFO(edev,
++                      "flow_spec would require filtering mode %08x, but %08x is configured\n",
++                      t->mode, edev->arfs->mode);
++              return -EINVAL;
++      }
++
+       if (fs->ring_cookie >= QEDE_RSS_COUNT(edev)) {
+               DP_INFO(edev, "Queue out-of-bounds\n");
+               return -EINVAL;
+diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
+index 7f9756f..557e86e1 100644
+--- a/include/linux/qed/qed_eth_if.h
++++ b/include/linux/qed/qed_eth_if.h
+@@ -66,6 +66,7 @@ enum qed_filter_config_mode {
+       QED_FILTER_CONFIG_MODE_5_TUPLE,
+       QED_FILTER_CONFIG_MODE_L4_PORT,
+       QED_FILTER_CONFIG_MODE_IP_DEST,
++      QED_FILTER_CONFIG_MODE_IP_SRC,
+ };
+ struct qed_ntuple_filter_params {
+-- 
+2.9.5
+
diff --git a/linux-next-cherry-picks/0024-qede-Support-flow-classification-to-the-VFs.patch b/linux-next-cherry-picks/0024-qede-Support-flow-classification-to-the-VFs.patch
new file mode 100644 (file)
index 0000000..c4384bf
--- /dev/null
@@ -0,0 +1,123 @@
+From 39385ab02c3e6ffe8f70a445433c7419fd2df753 Mon Sep 17 00:00:00 2001
+From: Manish Chopra <manish.chopra@cavium.com>
+Date: Thu, 24 May 2018 09:54:52 -0700
+Subject: [PATCH 24/44] qede: Support flow classification to the VFs.
+
+With the supported classification modes [4-tuple based,
+UDP port based, src-ip based], flows can be classified
+to the VFs as well. With this patch, flows can be redirected
+to the VF requested in the "action" field of the command.
+
+Please note that the driver doesn't really care about the queue
+bits in the "action" field for the VFs, since the queue will
+still be chosen by FW using the RSS hash. [I.e., the
+classification is done according to vport only.]
+
+For example -
+
+ethtool -N p5p1 flow-type udp4 dst-port 8000 action 0x100000000
+ethtool -N p5p1 flow-type tcp4 src-ip 192.16.6.10 action 0x200000000
+ethtool -U p5p1 flow-type tcp4 src-ip 192.168.40.100 dst-ip \
+       192.168.40.200 src-port 6660 dst-port 5550 \
+       action 0x100000000
+
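The large "action" values decode as follows: ethtool packs an optional VF into bits 39:32 of the 64-bit ring cookie, and the helpers ethtool_get_flow_spec_ring_vf()/ethtool_get_flow_spec_ring() from include/linux/ethtool.h extract the two halves. A sketch of the encoding, with illustrative macro names:

#include <stdint.h>

#define RING_VF_OFF  32                      /* bits 39:32 hold VF + 1 */
#define RING_VF_MASK 0x000000ff00000000ULL
#define RING_Q_MASK  0x00000000ffffffffULL   /* bits 31:0 hold the queue */

static uint8_t cookie_vf_field(uint64_t cookie)
{
        return (cookie & RING_VF_MASK) >> RING_VF_OFF;
}

So "action 0x100000000" yields field 1, i.e. VF index 0 after the driver's vfid - 1 adjustment (zero means the PF owns the queue); the queue bits are ignored for VFs, as noted above.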
+Signed-off-by: Manish Chopra <manish.chopra@cavium.com>
+Signed-off-by: Shahed Shaikh <shahed.shaikh@cavium.com>
+Signed-off-by: Ariel Elior <ariel.elior@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/qlogic/qede/qede_filter.c | 34 +++++++++++++++++++++++---
+ 1 file changed, 30 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
+index 9b84f0c..6c02c21 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
+@@ -86,6 +86,7 @@ struct qede_arfs_fltr_node {
+       u16 sw_id;
+       u16 rxq_id;
+       u16 next_rxq_id;
++      u8 vfid;
+       bool filter_op;
+       bool used;
+       u8 fw_rc;
+@@ -125,14 +126,19 @@ static void qede_configure_arfs_fltr(struct qede_dev *edev,
+       params.qid = rxq_id;
+       params.b_is_add = add_fltr;
++      if (n->vfid) {
++              params.b_is_vf = true;
++              params.vf_id = n->vfid - 1;
++      }
++
+       if (n->tuple.stringify) {
+               char tuple_buffer[QEDE_FILTER_PRINT_MAX_LEN];
+               n->tuple.stringify(&n->tuple, tuple_buffer);
+               DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
+-                         "%s sw_id[0x%x]: %s [queue %d]\n",
++                         "%s sw_id[0x%x]: %s [vf %u queue %d]\n",
+                          add_fltr ? "Adding" : "Deleting",
+-                         n->sw_id, tuple_buffer, rxq_id);
++                         n->sw_id, tuple_buffer, n->vfid, rxq_id);
+       }
+       n->used = true;
+@@ -1435,6 +1441,10 @@ int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd)
+       fsp->ring_cookie = fltr->rxq_id;
++      if (fltr->vfid) {
++              fsp->ring_cookie |= ((u64)fltr->vfid) <<
++                                      ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
++      }
+ unlock:
+       __qede_unlock(edev);
+       return rc;
+@@ -1806,6 +1816,9 @@ static int qede_flow_spec_validate(struct qede_dev *edev,
+               return -EINVAL;
+       }
++      if (ethtool_get_flow_spec_ring_vf(fs->ring_cookie))
++              return 0;
++
+       if (fs->ring_cookie >= QEDE_RSS_COUNT(edev)) {
+               DP_INFO(edev, "Queue out-of-bounds\n");
+               return -EINVAL;
+@@ -1835,6 +1848,19 @@ qede_flow_find_fltr(struct qede_dev *edev, struct qede_arfs_tuple *t)
+       return NULL;
+ }
++static void qede_flow_set_destination(struct qede_dev *edev,
++                                    struct qede_arfs_fltr_node *n,
++                                    struct ethtool_rx_flow_spec *fs)
++{
++      n->vfid = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
++      n->rxq_id = ethtool_get_flow_spec_ring(fs->ring_cookie);
++      n->next_rxq_id = n->rxq_id;
++
++      if (n->vfid)
++              DP_VERBOSE(edev, QED_MSG_SP,
++                         "Configuring N-tuple for VF 0x%02x\n", n->vfid - 1);
++}
++
+ int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
+ {
+       struct ethtool_rx_flow_spec *fsp = &info->fs;
+@@ -1881,11 +1907,11 @@ int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
+       n->sw_id = fsp->location;
+       set_bit(n->sw_id, edev->arfs->arfs_fltr_bmap);
+       n->buf_len = min_hlen;
+-      n->rxq_id = fsp->ring_cookie;
+-      n->next_rxq_id = n->rxq_id;
+       memcpy(&n->tuple, &t, sizeof(n->tuple));
++      qede_flow_set_destination(edev, n, fsp);
++
+       /* Build a minimal header according to the flow */
+       n->tuple.build_hdr(&n->tuple, n->data);
+-- 
+2.9.5
+
diff --git a/linux-next-cherry-picks/0025-qed-Support-drop-action-classification.patch b/linux-next-cherry-picks/0025-qed-Support-drop-action-classification.patch
new file mode 100644 (file)
index 0000000..9bf2354
--- /dev/null
@@ -0,0 +1,210 @@
+From 608e00d0a2eb53079c55dc9c14d8711bbb3a4390 Mon Sep 17 00:00:00 2001
+From: Manish Chopra <manish.chopra@cavium.com>
+Date: Thu, 24 May 2018 09:54:53 -0700
+Subject: [PATCH 25/44] qed*: Support drop action classification
+
+With this patch, the user can configure the supported
+flows to be dropped. A stat "gft_filter_drop" is added
+as well, populated in ethtool for the dropped flows.
+
+For example -
+
+ethtool -N p5p1 flow-type udp4 dst-port 8000 action -1
+ethtool -N p5p1 flow-type tcp4 src-ip 192.168.8.1 action -1
+
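For reference, ethtool's "action -1" arrives in the driver as ring_cookie == RX_CLS_FLOW_DISC, the all-ones marker from the ethtool uapi; the patch checks for it before any queue/VF validation and simply flags the filter as a drop. A small self-contained sketch of that decision (the constant mirrors include/uapi/linux/ethtool.h):

#include <stdint.h>
#include <stdbool.h>

#define RX_CLS_FLOW_DISC 0xffffffffffffffffULL   /* uapi drop marker */

/* Drop filters skip destination validation entirely: the flow is
 * steered to the firmware's trashcan vport, not to a queue or VF. */
static bool flow_is_drop(uint64_t ring_cookie)
{
        return ring_cookie == RX_CLS_FLOW_DISC;
}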
+Signed-off-by: Manish Chopra <manish.chopra@cavium.com>
+Signed-off-by: Shahed Shaikh <shahed.shaikh@cavium.com>
+Signed-off-by: Ariel Elior <ariel.elior@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/qlogic/qed/qed_l2.c        | 35 ++++++++++++++-----------
+ drivers/net/ethernet/qlogic/qede/qede.h         |  1 +
+ drivers/net/ethernet/qlogic/qede/qede_ethtool.c |  1 +
+ drivers/net/ethernet/qlogic/qede/qede_filter.c  | 14 ++++++++++
+ drivers/net/ethernet/qlogic/qede/qede_main.c    |  1 +
+ include/linux/qed/qed_eth_if.h                  |  3 +++
+ include/linux/qed/qed_if.h                      |  1 +
+ 7 files changed, 41 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
+index 3cb8a80..1c0d0c2 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
+@@ -1677,6 +1677,8 @@ static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
+           HILO_64_REGPAIR(tstats.mftag_filter_discard);
+       p_stats->common.mac_filter_discards +=
+           HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
++      p_stats->common.gft_filter_drop +=
++              HILO_64_REGPAIR(tstats.eth_gft_drop_pkt);
+ }
+ static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
+@@ -2015,16 +2017,6 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
+       u8 abs_vport_id = 0;
+       int rc = -EINVAL;
+-      rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
+-      if (rc)
+-              return rc;
+-
+-      if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
+-              rc = qed_fw_l2_queue(p_hwfn, p_params->qid, &abs_rx_q_id);
+-              if (rc)
+-                      return rc;
+-      }
+-
+       /* Get SPQ entry */
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.cid = qed_spq_get_cid(p_hwfn);
+@@ -2049,15 +2041,28 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
+       DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_params->addr);
+       p_ramrod->pkt_hdr_length = cpu_to_le16(p_params->length);
+-      if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
+-              p_ramrod->rx_qid_valid = 1;
+-              p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id);
++      if (p_params->b_is_drop) {
++              p_ramrod->vport_id = cpu_to_le16(ETH_GFT_TRASHCAN_VPORT);
++      } else {
++              rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
++              if (rc)
++                      return rc;
++
++              if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
++                      rc = qed_fw_l2_queue(p_hwfn, p_params->qid,
++                                           &abs_rx_q_id);
++                      if (rc)
++                              return rc;
++
++                      p_ramrod->rx_qid_valid = 1;
++                      p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id);
++              }
++
++              p_ramrod->vport_id = cpu_to_le16((u16)abs_vport_id);
+       }
+       p_ramrod->flow_id_valid = 0;
+       p_ramrod->flow_id = 0;
+-
+-      p_ramrod->vport_id = cpu_to_le16((u16)abs_vport_id);
+       p_ramrod->filter_action = p_params->b_is_add ? GFT_ADD_FILTER
+           : GFT_DELETE_FILTER;
+diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
+index 2d3f09e..81c5c8df 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede.h
++++ b/drivers/net/ethernet/qlogic/qede/qede.h
+@@ -75,6 +75,7 @@ struct qede_stats_common {
+       u64 rx_bcast_pkts;
+       u64 mftag_filter_discards;
+       u64 mac_filter_discards;
++      u64 gft_filter_drop;
+       u64 tx_ucast_bytes;
+       u64 tx_mcast_bytes;
+       u64 tx_bcast_bytes;
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+index 8c6fdad..6906e04 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+@@ -161,6 +161,7 @@ static const struct {
+       QEDE_STAT(no_buff_discards),
+       QEDE_PF_STAT(mftag_filter_discards),
+       QEDE_PF_STAT(mac_filter_discards),
++      QEDE_PF_STAT(gft_filter_drop),
+       QEDE_STAT(tx_err_drop_pkts),
+       QEDE_STAT(ttl0_discard),
+       QEDE_STAT(packet_too_big_discard),
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
+index 6c02c21..e9e088d 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
+@@ -90,6 +90,7 @@ struct qede_arfs_fltr_node {
+       bool filter_op;
+       bool used;
+       u8 fw_rc;
++      bool b_is_drop;
+       struct hlist_node node;
+ };
+@@ -125,6 +126,7 @@ static void qede_configure_arfs_fltr(struct qede_dev *edev,
+       params.length = n->buf_len;
+       params.qid = rxq_id;
+       params.b_is_add = add_fltr;
++      params.b_is_drop = n->b_is_drop;
+       if (n->vfid) {
+               params.b_is_vf = true;
+@@ -1445,6 +1447,9 @@ int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd)
+               fsp->ring_cookie |= ((u64)fltr->vfid) <<
+                                       ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+       }
++
++      if (fltr->b_is_drop)
++              fsp->ring_cookie = RX_CLS_FLOW_DISC;
+ unlock:
+       __qede_unlock(edev);
+       return rc;
+@@ -1816,6 +1821,10 @@ static int qede_flow_spec_validate(struct qede_dev *edev,
+               return -EINVAL;
+       }
++      /* If drop requested then no need to validate other data */
++      if (fs->ring_cookie == RX_CLS_FLOW_DISC)
++              return 0;
++
+       if (ethtool_get_flow_spec_ring_vf(fs->ring_cookie))
+               return 0;
+@@ -1852,6 +1861,11 @@ static void qede_flow_set_destination(struct qede_dev *edev,
+                                     struct qede_arfs_fltr_node *n,
+                                     struct ethtool_rx_flow_spec *fs)
+ {
++      if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
++              n->b_is_drop = true;
++              return;
++      }
++
+       n->vfid = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
+       n->rxq_id = ethtool_get_flow_spec_ring(fs->ring_cookie);
+       n->next_rxq_id = n->rxq_id;
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
+index 9e70f71..d118771 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
+@@ -347,6 +347,7 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
+       p_common->rx_bcast_pkts = stats.common.rx_bcast_pkts;
+       p_common->mftag_filter_discards = stats.common.mftag_filter_discards;
+       p_common->mac_filter_discards = stats.common.mac_filter_discards;
++      p_common->gft_filter_drop = stats.common.gft_filter_drop;
+       p_common->tx_ucast_bytes = stats.common.tx_ucast_bytes;
+       p_common->tx_mcast_bytes = stats.common.tx_mcast_bytes;
+diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
+index 557e86e1..2978fa4 100644
+--- a/include/linux/qed/qed_eth_if.h
++++ b/include/linux/qed/qed_eth_if.h
+@@ -89,6 +89,9 @@ struct qed_ntuple_filter_params {
+       /* true iff this filter is to be added. Else to be removed */
+       bool b_is_add;
++
++      /* If flow needs to be dropped */
++      bool b_is_drop;
+ };
+ struct qed_dev_eth_info {
+diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
+index 44af652..ac991a3 100644
+--- a/include/linux/qed/qed_if.h
++++ b/include/linux/qed/qed_if.h
+@@ -1129,6 +1129,7 @@ struct qed_eth_stats_common {
+       u64     rx_bcast_pkts;
+       u64     mftag_filter_discards;
+       u64     mac_filter_discards;
++      u64     gft_filter_drop;
+       u64     tx_ucast_bytes;
+       u64     tx_mcast_bytes;
+       u64     tx_bcast_bytes;
+-- 
+2.9.5
+
diff --git a/linux-next-cherry-picks/0026-drivers-net-Fix-various-unnecessary-characters-after.patch b/linux-next-cherry-picks/0026-drivers-net-Fix-various-unnecessary-characters-after.patch
new file mode 100644 (file)
index 0000000..237e8aa
--- /dev/null
@@ -0,0 +1,84 @@
+From d602de8e7e7fc25fb3a2112ce4285962f15aa549 Mon Sep 17 00:00:00 2001
+From: Joe Perches <joe@perches.com>
+Date: Mon, 28 May 2018 19:51:57 -0700
+Subject: [PATCH 26/44] drivers/net: Fix various unnecessary characters after
+ logging newlines
+
+Remove and coalesce formats when there is an unnecessary
+character after a logging newline.  These extra characters
+cause logging defects.
+
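The defect in miniature: any character after the '\n' starts the next log line with stray leading text. A userspace sketch of the same effect (illustrative only; in the kernel, where each printk is a record, the dangling fragment corrupts the log stream in the same way):

#include <stdio.h>

int main(void)
{
        /* The trailing space lands at the start of the next line: */
        printf("first message\n ");
        printf("second message\n");  /* shows as " second message" */
        return 0;
}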
+Miscellanea:
+
+o Coalesce formats
+
+Signed-off-by: Joe Perches <joe@perches.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c | 6 ++----
+ drivers/net/ethernet/qlogic/qed/qed_dev.c           | 2 +-
+ drivers/net/ethernet/qlogic/qlge/qlge_main.c        | 4 ++--
+ 3 files changed, 5 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
+index 6cec2a6..7503aa2 100644
+--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
++++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
+@@ -146,8 +146,7 @@ netxen_get_minidump_template(struct netxen_adapter *adapter)
+       if ((cmd.rsp.cmd == NX_RCODE_SUCCESS) && (size == cmd.rsp.arg2)) {
+               memcpy(adapter->mdump.md_template, addr, size);
+       } else {
+-              dev_err(&adapter->pdev->dev, "Failed to get minidump template, "
+-                      "err_code : %d, requested_size : %d, actual_size : %d\n ",
++              dev_err(&adapter->pdev->dev, "Failed to get minidump template, err_code : %d, requested_size : %d, actual_size : %d\n",
+                       cmd.rsp.cmd, size, cmd.rsp.arg2);
+       }
+       pci_free_consistent(adapter->pdev, size, addr, md_template_addr);
+@@ -180,8 +179,7 @@ netxen_setup_minidump(struct netxen_adapter *adapter)
+               if ((err == NX_RCODE_CMD_INVALID) ||
+                       (err == NX_RCODE_CMD_NOT_IMPL)) {
+                       dev_info(&adapter->pdev->dev,
+-                              "Flashed firmware version does not support minidump, "
+-                              "minimum version required is [ %u.%u.%u ].\n ",
++                              "Flashed firmware version does not support minidump, minimum version required is [ %u.%u.%u ]\n",
+                               NX_MD_SUPPORT_MAJOR, NX_MD_SUPPORT_MINOR,
+                               NX_MD_SUPPORT_SUBVERSION);
+               }
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
+index 5605289..fde20fd 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
+@@ -1098,7 +1098,7 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn,
+       }
+       DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+-                 "Sending final cleanup for PFVF[%d] [Command %08x\n]",
++                 "Sending final cleanup for PFVF[%d] [Command %08x]\n",
+                  id, command);
+       qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);
+diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+index 8293c202..70de062 100644
+--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
++++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+@@ -2211,7 +2211,7 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
+       while (prod != rx_ring->cnsmr_idx) {
+               netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+-                           "cq_id = %d, prod = %d, cnsmr = %d.\n.",
++                           "cq_id = %d, prod = %d, cnsmr = %d\n",
+                            rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
+               net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
+@@ -2258,7 +2258,7 @@ static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
+       while (prod != rx_ring->cnsmr_idx) {
+               netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+-                           "cq_id = %d, prod = %d, cnsmr = %d.\n.",
++                           "cq_id = %d, prod = %d, cnsmr = %d\n",
+                            rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
+               net_rsp = rx_ring->curr_entry;
+-- 
+2.9.5
+
diff --git a/linux-next-cherry-picks/0027-RDMA-qedr-fix-spelling-mistake-adrresses-addresses.patch b/linux-next-cherry-picks/0027-RDMA-qedr-fix-spelling-mistake-adrresses-addresses.patch
new file mode 100644 (file)
index 0000000..b0a9d52
--- /dev/null
@@ -0,0 +1,30 @@
+From 367d2f0787e8363f30cbac4d5270a772b69828c1 Mon Sep 17 00:00:00 2001
+From: Colin Ian King <colin.king@canonical.com>
+Date: Wed, 30 May 2018 10:40:29 +0100
+Subject: [PATCH 27/44] RDMA/qedr: fix spelling mistake: "adrresses" ->
+ "addresses"
+
+Trivial fix to spelling mistake in DP_ERR error message
+
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/qedr/verbs.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
+index 988aace..614a954 100644
+--- a/drivers/infiniband/hw/qedr/verbs.c
++++ b/drivers/infiniband/hw/qedr/verbs.c
+@@ -414,7 +414,7 @@ int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+       if ((vma->vm_start & (PAGE_SIZE - 1)) || (len & (PAGE_SIZE - 1))) {
+               DP_ERR(dev,
+-                     "failed mmap, adrresses must be page aligned: start=0x%pK, end=0x%pK\n",
++                     "failed mmap, addresses must be page aligned: start=0x%pK, end=0x%pK\n",
+                      (void *)vma->vm_start, (void *)vma->vm_end);
+               return -EINVAL;
+       }
+-- 
+2.9.5
+
diff --git a/linux-next-cherry-picks/0028-qed-Add-link-change-count-value-to-ethtool-statistic.patch b/linux-next-cherry-picks/0028-qed-Add-link-change-count-value-to-ethtool-statistic.patch
new file mode 100644 (file)
index 0000000..c10a250
--- /dev/null
@@ -0,0 +1,107 @@
+From 32d26a685c1802a0e485bd674e7dd038e88019f7 Mon Sep 17 00:00:00 2001
+From: Sudarsana Reddy Kalluru <sudarsana.kalluru@cavium.com>
+Date: Tue, 29 May 2018 02:31:24 -0700
+Subject: [PATCH 28/44] qed*: Add link change count value to ethtool statistics
+ display.
+
+This patch adds driver changes for capturing the link change count in
+ethtool statistics display.
+
+Please consider applying this to "net-next".
+
+Signed-off-by: Sudarsana Reddy Kalluru <Sudarsana.Kalluru@cavium.com>
+Signed-off-by: Ariel Elior <ariel.elior@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/qlogic/qed/qed_l2.c        | 12 ++++++++++--
+ drivers/net/ethernet/qlogic/qede/qede.h         |  1 +
+ drivers/net/ethernet/qlogic/qede/qede_ethtool.c |  2 ++
+ drivers/net/ethernet/qlogic/qede/qede_main.c    |  1 +
+ include/linux/qed/qed_if.h                      |  1 +
+ 5 files changed, 15 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
+index 1c0d0c2..eed4725 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
+@@ -1854,6 +1854,11 @@ static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
+               p_ah->tx_1519_to_max_byte_packets =
+                   port_stats.eth.u1.ah1.t1519_to_max;
+       }
++
++      p_common->link_change_count = qed_rd(p_hwfn, p_ptt,
++                                           p_hwfn->mcp_info->port_addr +
++                                           offsetof(struct public_port,
++                                                    link_change_count));
+ }
+ static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
+@@ -1961,11 +1966,14 @@ void qed_reset_vport_stats(struct qed_dev *cdev)
+       /* PORT statistics are not necessarily reset, so we need to
+        * read and create a baseline for future statistics.
++       * Link change stat is maintained by MFW, return its value as is.
+        */
+-      if (!cdev->reset_stats)
++      if (!cdev->reset_stats) {
+               DP_INFO(cdev, "Reset stats not allocated\n");
+-      else
++      } else {
+               _qed_get_vport_stats(cdev, cdev->reset_stats);
++              cdev->reset_stats->common.link_change_count = 0;
++      }
+ }
+ static enum gft_profile_type
+diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
+index 81c5c8df..d7ed0d3 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede.h
++++ b/drivers/net/ethernet/qlogic/qede/qede.h
+@@ -88,6 +88,7 @@ struct qede_stats_common {
+       u64 coalesced_aborts_num;
+       u64 non_coalesced_pkts;
+       u64 coalesced_bytes;
++      u64 link_change_count;
+       /* port */
+       u64 rx_64_byte_packets;
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+index 6906e04..f4a0f8f 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+@@ -171,6 +171,8 @@ static const struct {
+       QEDE_STAT(coalesced_aborts_num),
+       QEDE_STAT(non_coalesced_pkts),
+       QEDE_STAT(coalesced_bytes),
++
++      QEDE_STAT(link_change_count),
+ };
+ #define QEDE_NUM_STATS        ARRAY_SIZE(qede_stats_arr)
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
+index d118771..6a79604 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
+@@ -399,6 +399,7 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
+       p_common->brb_truncates = stats.common.brb_truncates;
+       p_common->brb_discards = stats.common.brb_discards;
+       p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames;
++      p_common->link_change_count = stats.common.link_change_count;
+       if (QEDE_IS_BB(edev)) {
+               struct qede_stats_bb *p_bb = &edev->stats.bb;
+diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
+index ac991a3..b404002 100644
+--- a/include/linux/qed/qed_if.h
++++ b/include/linux/qed/qed_if.h
+@@ -1180,6 +1180,7 @@ struct qed_eth_stats_common {
+       u64     tx_mac_mc_packets;
+       u64     tx_mac_bc_packets;
+       u64     tx_mac_ctrl_frames;
++      u64     link_change_count;
+ };
+ struct qed_eth_stats_bb {
+-- 
+2.9.5
+
diff --git a/linux-next-cherry-picks/0029-qed-Fix-shared-memory-inconsistency-between-driver-a.patch b/linux-next-cherry-picks/0029-qed-Fix-shared-memory-inconsistency-between-driver-a.patch
new file mode 100644 (file)
index 0000000..a93b83f
--- /dev/null
@@ -0,0 +1,33 @@
+From 5e9f20359a166de934b5bd02cb54c0be0e8c2890 Mon Sep 17 00:00:00 2001
+From: Sudarsana Reddy Kalluru <sudarsana.kalluru@cavium.com>
+Date: Thu, 31 May 2018 18:47:36 -0700
+Subject: [PATCH 29/44] qed: Fix shared memory inconsistency between driver and
+ the MFW.
+
+The structure shared between driver and management firmware (MFW)
+differs in size between the two definitions. The field added by the
+MFW is not relevant to the current driver; add a dummy field for it.
+
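Why one missing field matters here: the driver reads this structure straight out of shared memory, so if its definition is a dword short, every member after the gap is fetched from the wrong offset. A toy illustration with invented names:

#include <stdint.h>
#include <stddef.h>

struct layout_mfw    { uint32_t eee; uint32_t reserved1; uint32_t oem_cfg; };
struct layout_driver { uint32_t eee; /* reserved1 missing */ uint32_t oem_cfg; };

/* offsetof(struct layout_mfw, oem_cfg)    == 8
 * offsetof(struct layout_driver, oem_cfg) == 4
 * -> the driver would read oem_cfg from the dword the MFW reserved. */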
+Fixes: cac6f691 ("qed: Add support for Unified Fabric Port")
+Signed-off-by: Sudarsana Reddy Kalluru <Sudarsana.Kalluru@cavium.com>
+Signed-off-by: Ariel Elior <ariel.elior@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/qlogic/qed/qed_hsi.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+index 8e1e6e1..beba930 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+@@ -11996,6 +11996,7 @@ struct public_port {
+ #define EEE_REMOTE_TW_RX_MASK   0xffff0000
+ #define EEE_REMOTE_TW_RX_OFFSET 16
++      u32 reserved1;
+       u32 oem_cfg_port;
+ #define OEM_CFG_CHANNEL_TYPE_MASK                       0x00000003
+ #define OEM_CFG_CHANNEL_TYPE_OFFSET                     0
+-- 
+2.9.5
+
diff --git a/linux-next-cherry-picks/0030-qed-Fix-use-of-incorrect-shmem-address.patch b/linux-next-cherry-picks/0030-qed-Fix-use-of-incorrect-shmem-address.patch
new file mode 100644 (file)
index 0000000..97c9787
--- /dev/null
@@ -0,0 +1,37 @@
+From b5fabb080062e7685b898e9c0ec4d95f4d526ed2 Mon Sep 17 00:00:00 2001
+From: Sudarsana Reddy Kalluru <sudarsana.kalluru@cavium.com>
+Date: Thu, 31 May 2018 18:47:37 -0700
+Subject: [PATCH 30/44] qed: Fix use of incorrect shmem address.
+
+An incorrect shared memory address is used while deriving the values
+for tc and pri_type. Use the shmem address corresponding to
+'oem_cfg_func', where the management firmware saves tc/pri_type values.
+
+Fixes: cac6f691 ("qed: Add support for Unified Fabric Port")
+Signed-off-by: Sudarsana Reddy Kalluru <Sudarsana.Kalluru@cavium.com>
+Signed-off-by: Ariel Elior <ariel.elior@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/qlogic/qed/qed_mcp.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+index 2612e3e..6f9927d 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+@@ -1514,9 +1514,10 @@ void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+       }
+       qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
+-      val = (port_cfg & OEM_CFG_FUNC_TC_MASK) >> OEM_CFG_FUNC_TC_OFFSET;
++      val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_TC_MASK) >>
++              OEM_CFG_FUNC_TC_OFFSET;
+       p_hwfn->ufp_info.tc = (u8)val;
+-      val = (port_cfg & OEM_CFG_FUNC_HOST_PRI_CTRL_MASK) >>
++      val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_HOST_PRI_CTRL_MASK) >>
+               OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET;
+       if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC) {
+               p_hwfn->ufp_info.pri_type = QED_UFP_PRI_VNIC;
+-- 
+2.9.5
+
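The bug class fixed above: the OEM_CFG_FUNC_* masks describe the layout of shmem_info.oem_cfg_func, yet they were applied to port_cfg, the port-level word, so the extracted tc and pri_type were garbage. A toy sketch of the failure mode; the mask/offset values are made up, and get_field() merely stands in for the driver's open-coded mask-and-shift:

#include <stdint.h>
#include <stdio.h>

/* Illustrative values in the style of the qed HSI headers */
#define OEM_CFG_FUNC_TC_MASK   0x0000000F
#define OEM_CFG_FUNC_TC_OFFSET 0

static uint32_t get_field(uint32_t word, uint32_t mask, uint32_t offset)
{
	return (word & mask) >> offset;
}

int main(void)
{
	uint32_t oem_cfg_port = 0x0;  /* port word: TC is not kept here */
	uint32_t oem_cfg_func = 0x5;  /* func word: MFW stored TC = 5 */

	/* Applying func-word masks to the port word compiles cleanly
	 * but extracts a meaningless value. */
	printf("wrong source: tc=%u\n",
	       (unsigned)get_field(oem_cfg_port, OEM_CFG_FUNC_TC_MASK,
				   OEM_CFG_FUNC_TC_OFFSET));
	printf("right source: tc=%u\n",
	       (unsigned)get_field(oem_cfg_func, OEM_CFG_FUNC_TC_MASK,
				   OEM_CFG_FUNC_TC_OFFSET));
	return 0;
}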
diff --git a/linux-next-cherry-picks/0031-qed-Add-srq-core-support-for-RoCE-and-iWARP.patch b/linux-next-cherry-picks/0031-qed-Add-srq-core-support-for-RoCE-and-iWARP.patch
new file mode 100644 (file)
index 0000000..56d477e
--- /dev/null
@@ -0,0 +1,463 @@
+From 39dbc646fd2c67ee9b71450ce172cbd714d4e7fb Mon Sep 17 00:00:00 2001
+From: Yuval Bason <yuval.bason@cavium.com>
+Date: Sun, 3 Jun 2018 19:13:07 +0300
+Subject: [PATCH 31/44] qed: Add srq core support for RoCE and iWARP
+
+This patch adds support for configuring SRQs and provides the necessary
+APIs for the RDMA upper layer driver (qedr) to enable the SRQ feature.
+
+Signed-off-by: Michal Kalderon <michal.kalderon@cavium.com>
+Signed-off-by: Ariel Elior <ariel.elior@cavium.com>
+Signed-off-by: Yuval Bason <yuval.bason@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/qlogic/qed/qed_cxt.c   |   5 +-
+ drivers/net/ethernet/qlogic/qed/qed_cxt.h   |   1 +
+ drivers/net/ethernet/qlogic/qed/qed_hsi.h   |   2 +
+ drivers/net/ethernet/qlogic/qed/qed_iwarp.c |  23 ++++
+ drivers/net/ethernet/qlogic/qed/qed_main.c  |   2 +
+ drivers/net/ethernet/qlogic/qed/qed_rdma.c  | 178 +++++++++++++++++++++++++++-
+ drivers/net/ethernet/qlogic/qed/qed_rdma.h  |   2 +
+ drivers/net/ethernet/qlogic/qed/qed_roce.c  |  17 ++-
+ include/linux/qed/qed_rdma_if.h             |  12 +-
+ 9 files changed, 234 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+index 820b226..7ed6aa0 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+@@ -47,6 +47,7 @@
+ #include "qed_hsi.h"
+ #include "qed_hw.h"
+ #include "qed_init_ops.h"
++#include "qed_rdma.h"
+ #include "qed_reg_addr.h"
+ #include "qed_sriov.h"
+@@ -426,7 +427,7 @@ static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs)
+       p_mgr->srq_count = num_srqs;
+ }
+-static u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn)
++u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn)
+ {
+       struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
+@@ -2071,7 +2072,7 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
+       u32 num_cons, num_qps, num_srqs;
+       enum protocol_type proto;
+-      num_srqs = min_t(u32, 32 * 1024, p_params->num_srqs);
++      num_srqs = min_t(u32, QED_RDMA_MAX_SRQS, p_params->num_srqs);
+       if (p_hwfn->mcp_info->func_info.protocol == QED_PCI_ETH_RDMA) {
+               DP_NOTICE(p_hwfn,
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+index a4e9586..758a8b4 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+@@ -235,6 +235,7 @@ u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
+                               enum protocol_type type);
+ u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn,
+                               enum protocol_type type);
++u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn);
+ int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto);
+ #define QED_CTX_WORKING_MEM 0
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+index beba930..b9704be 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+@@ -9725,6 +9725,8 @@ enum iwarp_eqe_async_opcode {
+       IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED,
+       IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE,
+       IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW,
++      IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY,
++      IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT,
+       MAX_IWARP_EQE_ASYNC_OPCODE
+ };
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+index 2a2b101..474e6cf 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+@@ -271,6 +271,8 @@ int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn,
+       p_ramrod->sq_num_pages = qp->sq_num_pages;
+       p_ramrod->rq_num_pages = qp->rq_num_pages;
++      p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
++      p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);
+       p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
+       p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
+@@ -3004,8 +3006,11 @@ static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
+                                union event_ring_data *data,
+                                u8 fw_return_code)
+ {
++      struct qed_rdma_events events = p_hwfn->p_rdma_info->events;
+       struct regpair *fw_handle = &data->rdma_data.async_handle;
+       struct qed_iwarp_ep *ep = NULL;
++      u16 srq_offset;
++      u16 srq_id;
+       u16 cid;
+       ep = (struct qed_iwarp_ep *)(uintptr_t)HILO_64(fw_handle->hi,
+@@ -3067,6 +3072,24 @@ static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
+               qed_iwarp_cid_cleaned(p_hwfn, cid);
+               break;
++      case IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY:
++              DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY\n");
++              srq_offset = p_hwfn->p_rdma_info->srq_id_offset;
++              /* FW assigns value that is no greater than u16 */
++              srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset;
++              events.affiliated_event(events.context,
++                                      QED_IWARP_EVENT_SRQ_EMPTY,
++                                      &srq_id);
++              break;
++      case IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT:
++              DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT\n");
++              srq_offset = p_hwfn->p_rdma_info->srq_id_offset;
++              /* FW assigns value that is no greater than u16 */
++              srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset;
++              events.affiliated_event(events.context,
++                                      QED_IWARP_EVENT_SRQ_LIMIT,
++                                      &srq_id);
++              break;
+       case IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW:
+               DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW\n");
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
+index 68c4399..b04d57c 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
+@@ -64,6 +64,7 @@
+ #define QED_ROCE_QPS                  (8192)
+ #define QED_ROCE_DPIS                 (8)
++#define QED_RDMA_SRQS                   QED_ROCE_QPS
+ static char version[] =
+       "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
+@@ -922,6 +923,7 @@ static void qed_update_pf_params(struct qed_dev *cdev,
+       if (IS_ENABLED(CONFIG_QED_RDMA)) {
+               params->rdma_pf_params.num_qps = QED_ROCE_QPS;
+               params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
++              params->rdma_pf_params.num_srqs = QED_RDMA_SRQS;
+               /* divide by 3 the MRs to avoid MF ILT overflow */
+               params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
+       }
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+index a411f9c..b870510 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+@@ -259,15 +259,29 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
+               goto free_cid_map;
+       }
++      /* Allocate bitmap for srqs */
++      p_rdma_info->num_srqs = qed_cxt_get_srq_count(p_hwfn);
++      rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->srq_map,
++                               p_rdma_info->num_srqs, "SRQ");
++      if (rc) {
++              DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
++                         "Failed to allocate srq bitmap, rc = %d\n", rc);
++              goto free_real_cid_map;
++      }
++
+       if (QED_IS_IWARP_PERSONALITY(p_hwfn))
+               rc = qed_iwarp_alloc(p_hwfn);
+       if (rc)
+-              goto free_cid_map;
++              goto free_srq_map;
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
+       return 0;
++free_srq_map:
++      kfree(p_rdma_info->srq_map.bitmap);
++free_real_cid_map:
++      kfree(p_rdma_info->real_cid_map.bitmap);
+ free_cid_map:
+       kfree(p_rdma_info->cid_map.bitmap);
+ free_tid_map:
+@@ -351,6 +365,8 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
+       qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cq_map, 1);
+       qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->toggle_bits, 0);
+       qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1);
++      qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->srq_map, 1);
++      qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, 1);
+       kfree(p_rdma_info->port);
+       kfree(p_rdma_info->dev);
+@@ -431,6 +447,12 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
+       if (cdev->rdma_max_sge)
+               dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge);
++      dev->max_srq_sge = QED_RDMA_MAX_SGE_PER_SRQ_WQE;
++      if (p_hwfn->cdev->rdma_max_srq_sge) {
++              dev->max_srq_sge = min_t(u32,
++                                       p_hwfn->cdev->rdma_max_srq_sge,
++                                       dev->max_srq_sge);
++      }
+       dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE;
+       dev->max_inline = (cdev->rdma_max_inline) ?
+@@ -474,6 +496,8 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
+       dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE;
+       dev->max_pkey = QED_RDMA_MAX_P_KEY;
++      dev->max_srq = p_hwfn->p_rdma_info->num_srqs;
++      dev->max_srq_wr = QED_RDMA_MAX_SRQ_WQE_ELEM;
+       dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
+                                         (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2);
+       dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
+@@ -1628,6 +1652,155 @@ static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
+       return QED_LEADING_HWFN(cdev);
+ }
++static int qed_rdma_modify_srq(void *rdma_cxt,
++                             struct qed_rdma_modify_srq_in_params *in_params)
++{
++      struct rdma_srq_modify_ramrod_data *p_ramrod;
++      struct qed_sp_init_data init_data = {};
++      struct qed_hwfn *p_hwfn = rdma_cxt;
++      struct qed_spq_entry *p_ent;
++      u16 opaque_fid;
++      int rc;
++
++      init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
++      init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
++
++      rc = qed_sp_init_request(p_hwfn, &p_ent,
++                               RDMA_RAMROD_MODIFY_SRQ,
++                               p_hwfn->p_rdma_info->proto, &init_data);
++      if (rc)
++              return rc;
++
++      p_ramrod = &p_ent->ramrod.rdma_modify_srq;
++      p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id);
++      opaque_fid = p_hwfn->hw_info.opaque_fid;
++      p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid);
++      p_ramrod->wqe_limit = cpu_to_le32(in_params->wqe_limit);
++
++      rc = qed_spq_post(p_hwfn, p_ent, NULL);
++      if (rc)
++              return rc;
++
++      DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "modified SRQ id = %x",
++                 in_params->srq_id);
++
++      return rc;
++}
++
++static int
++qed_rdma_destroy_srq(void *rdma_cxt,
++                   struct qed_rdma_destroy_srq_in_params *in_params)
++{
++      struct rdma_srq_destroy_ramrod_data *p_ramrod;
++      struct qed_sp_init_data init_data = {};
++      struct qed_hwfn *p_hwfn = rdma_cxt;
++      struct qed_spq_entry *p_ent;
++      struct qed_bmap *bmap;
++      u16 opaque_fid;
++      int rc;
++
++      opaque_fid = p_hwfn->hw_info.opaque_fid;
++
++      init_data.opaque_fid = opaque_fid;
++      init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
++
++      rc = qed_sp_init_request(p_hwfn, &p_ent,
++                               RDMA_RAMROD_DESTROY_SRQ,
++                               p_hwfn->p_rdma_info->proto, &init_data);
++      if (rc)
++              return rc;
++
++      p_ramrod = &p_ent->ramrod.rdma_destroy_srq;
++      p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id);
++      p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid);
++
++      rc = qed_spq_post(p_hwfn, p_ent, NULL);
++      if (rc)
++              return rc;
++
++      bmap = &p_hwfn->p_rdma_info->srq_map;
++
++      spin_lock_bh(&p_hwfn->p_rdma_info->lock);
++      qed_bmap_release_id(p_hwfn, bmap, in_params->srq_id);
++      spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
++
++      DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "SRQ destroyed Id = %x",
++                 in_params->srq_id);
++
++      return rc;
++}
++
++static int
++qed_rdma_create_srq(void *rdma_cxt,
++                  struct qed_rdma_create_srq_in_params *in_params,
++                  struct qed_rdma_create_srq_out_params *out_params)
++{
++      struct rdma_srq_create_ramrod_data *p_ramrod;
++      struct qed_sp_init_data init_data = {};
++      struct qed_hwfn *p_hwfn = rdma_cxt;
++      enum qed_cxt_elem_type elem_type;
++      struct qed_spq_entry *p_ent;
++      u16 opaque_fid, srq_id;
++      struct qed_bmap *bmap;
++      u32 returned_id;
++      int rc;
++
++      bmap = &p_hwfn->p_rdma_info->srq_map;
++      spin_lock_bh(&p_hwfn->p_rdma_info->lock);
++      rc = qed_rdma_bmap_alloc_id(p_hwfn, bmap, &returned_id);
++      spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
++
++      if (rc) {
++              DP_NOTICE(p_hwfn, "failed to allocate srq id\n");
++              return rc;
++      }
++
++      elem_type = QED_ELEM_SRQ;
++      rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, elem_type, returned_id);
++      if (rc)
++              goto err;
++      /* returned id is no greater than u16 */
++      srq_id = (u16)returned_id;
++      opaque_fid = p_hwfn->hw_info.opaque_fid;
++
++      opaque_fid = p_hwfn->hw_info.opaque_fid;
++      init_data.opaque_fid = opaque_fid;
++      init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
++
++      rc = qed_sp_init_request(p_hwfn, &p_ent,
++                               RDMA_RAMROD_CREATE_SRQ,
++                               p_hwfn->p_rdma_info->proto, &init_data);
++      if (rc)
++              goto err;
++
++      p_ramrod = &p_ent->ramrod.rdma_create_srq;
++      DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, in_params->pbl_base_addr);
++      p_ramrod->pages_in_srq_pbl = cpu_to_le16(in_params->num_pages);
++      p_ramrod->pd_id = cpu_to_le16(in_params->pd_id);
++      p_ramrod->srq_id.srq_idx = cpu_to_le16(srq_id);
++      p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid);
++      p_ramrod->page_size = cpu_to_le16(in_params->page_size);
++      DMA_REGPAIR_LE(p_ramrod->producers_addr, in_params->prod_pair_addr);
++
++      rc = qed_spq_post(p_hwfn, p_ent, NULL);
++      if (rc)
++              goto err;
++
++      out_params->srq_id = srq_id;
++
++      DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
++                 "SRQ created Id = %x\n", out_params->srq_id);
++
++      return rc;
++
++err:
++      spin_lock_bh(&p_hwfn->p_rdma_info->lock);
++      qed_bmap_release_id(p_hwfn, bmap, returned_id);
++      spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
++
++      return rc;
++}
++
+ bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn)
+ {
+       bool result;
+@@ -1773,6 +1946,9 @@ static const struct qed_rdma_ops qed_rdma_ops_pass = {
+       .rdma_free_tid = &qed_rdma_free_tid,
+       .rdma_register_tid = &qed_rdma_register_tid,
+       .rdma_deregister_tid = &qed_rdma_deregister_tid,
++      .rdma_create_srq = &qed_rdma_create_srq,
++      .rdma_modify_srq = &qed_rdma_modify_srq,
++      .rdma_destroy_srq = &qed_rdma_destroy_srq,
+       .ll2_acquire_connection = &qed_ll2_acquire_connection,
+       .ll2_establish_connection = &qed_ll2_establish_connection,
+       .ll2_terminate_connection = &qed_ll2_terminate_connection,
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.h b/drivers/net/ethernet/qlogic/qed/qed_rdma.h
+index 18ec9cb..6f722ee 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.h
+@@ -96,6 +96,8 @@ struct qed_rdma_info {
+       u8 num_cnqs;
+       u32 num_qps;
+       u32 num_mrs;
++      u32 num_srqs;
++      u16 srq_id_offset;
+       u16 queue_zone_base;
+       u16 max_queue_zones;
+       enum protocol_type proto;
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
+index 6acfd43..ee57fcd 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
+@@ -65,6 +65,8 @@ qed_roce_async_event(struct qed_hwfn *p_hwfn,
+                    u8 fw_event_code,
+                    u16 echo, union event_ring_data *data, u8 fw_return_code)
+ {
++      struct qed_rdma_events events = p_hwfn->p_rdma_info->events;
++
+       if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) {
+               u16 icid =
+                   (u16)le32_to_cpu(data->rdma_data.rdma_destroy_qp_data.cid);
+@@ -75,11 +77,18 @@ qed_roce_async_event(struct qed_hwfn *p_hwfn,
+                */
+               qed_roce_free_real_icid(p_hwfn, icid);
+       } else {
+-              struct qed_rdma_events *events = &p_hwfn->p_rdma_info->events;
++              if (fw_event_code == ROCE_ASYNC_EVENT_SRQ_EMPTY ||
++                  fw_event_code == ROCE_ASYNC_EVENT_SRQ_LIMIT) {
++                      u16 srq_id = (u16)data->rdma_data.async_handle.lo;
++
++                      events.affiliated_event(events.context, fw_event_code,
++                                              &srq_id);
++              } else {
++                      union rdma_eqe_data rdata = data->rdma_data;
+-              events->affiliated_event(p_hwfn->p_rdma_info->events.context,
+-                                       fw_event_code,
+-                                   (void *)&data->rdma_data.async_handle);
++                      events.affiliated_event(events.context, fw_event_code,
++                                              (void *)&rdata.async_handle);
++              }
+       }
+       return 0;
+diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h
+index 4dd72ba..e05e320 100644
+--- a/include/linux/qed/qed_rdma_if.h
++++ b/include/linux/qed/qed_rdma_if.h
+@@ -485,7 +485,9 @@ enum qed_iwarp_event_type {
+       QED_IWARP_EVENT_ACTIVE_MPA_REPLY,
+       QED_IWARP_EVENT_LOCAL_ACCESS_ERROR,
+       QED_IWARP_EVENT_REMOTE_OPERATION_ERROR,
+-      QED_IWARP_EVENT_TERMINATE_RECEIVED
++      QED_IWARP_EVENT_TERMINATE_RECEIVED,
++      QED_IWARP_EVENT_SRQ_LIMIT,
++      QED_IWARP_EVENT_SRQ_EMPTY,
+ };
+ enum qed_tcp_ip_version {
+@@ -646,6 +648,14 @@ struct qed_rdma_ops {
+       int (*rdma_alloc_tid)(void *rdma_cxt, u32 *itid);
+       void (*rdma_free_tid)(void *rdma_cxt, u32 itid);
++      int (*rdma_create_srq)(void *rdma_cxt,
++                             struct qed_rdma_create_srq_in_params *iparams,
++                             struct qed_rdma_create_srq_out_params *oparams);
++      int (*rdma_destroy_srq)(void *rdma_cxt,
++                              struct qed_rdma_destroy_srq_in_params *iparams);
++      int (*rdma_modify_srq)(void *rdma_cxt,
++                             struct qed_rdma_modify_srq_in_params *iparams);
++
+       int (*ll2_acquire_connection)(void *rdma_cxt,
+                                     struct qed_ll2_acquire_data *data);
+-- 
+2.9.5
+
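A hedged sketch of how an upper-layer consumer such as qedr might drive the three callbacks registered in qed_rdma_ops_pass above. The parameter fields used (srq_id and wqe_limit on modify; srq_id on destroy; srq_id returned from create) are the ones the ramrods consume; the wrapper function itself, its name, and the wqe_limit value are illustrative only.

/* Not driver code: a sketch against the qed_rdma_ops SRQ callbacks. */
static int example_srq_lifecycle(const struct qed_rdma_ops *ops,
				 void *rdma_cxt,
				 struct qed_rdma_create_srq_in_params *in)
{
	struct qed_rdma_create_srq_out_params out = {};
	struct qed_rdma_modify_srq_in_params mod = {};
	struct qed_rdma_destroy_srq_in_params del = {};
	int rc;

	rc = ops->rdma_create_srq(rdma_cxt, in, &out);
	if (rc)
		return rc;

	/* Arm the SRQ limit: roughly, once the posted WQEs fall to this
	 * level the FW raises the SRQ_LIMIT async event handled in
	 * qed_iwarp_async_event() / qed_roce_async_event() above. */
	mod.srq_id = out.srq_id;
	mod.wqe_limit = 16;             /* illustrative threshold */
	rc = ops->rdma_modify_srq(rdma_cxt, &mod);
	if (rc)
		goto out_destroy;

	return 0;

out_destroy:
	del.srq_id = out.srq_id;
	ops->rdma_destroy_srq(rdma_cxt, &del);
	return rc;
}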
diff --git a/linux-next-cherry-picks/0032-qed-use-dma_zalloc_coherent-instead-of-allocator-mem.patch b/linux-next-cherry-picks/0032-qed-use-dma_zalloc_coherent-instead-of-allocator-mem.patch
new file mode 100644 (file)
index 0000000..d6d880f
--- /dev/null
@@ -0,0 +1,41 @@
+From ff2e351e1928b5b81a23d78e3e4effc24db007b9 Mon Sep 17 00:00:00 2001
+From: YueHaibing <yuehaibing@huawei.com>
+Date: Mon, 4 Jun 2018 21:10:31 +0800
+Subject: [PATCH 32/44] qed: use dma_zalloc_coherent instead of
+ allocator/memset
+
+Use dma_zalloc_coherent instead of dma_alloc_coherent
+followed by a memset to zero.
+
+Signed-off-by: YueHaibing <yuehaibing@huawei.com>
+Acked-by: Tomer Tayar <Tomer.Tayar@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/qlogic/qed/qed_cxt.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+index 7ed6aa0..b5b5ff7 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+@@ -937,14 +937,13 @@ static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
+               u32 size = min_t(u32, total_size, psz);
+               void **p_virt = &p_mngr->t2[i].p_virt;
+-              *p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+-                                           size,
+-                                           &p_mngr->t2[i].p_phys, GFP_KERNEL);
++              *p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev,
++                                            size, &p_mngr->t2[i].p_phys,
++                                            GFP_KERNEL);
+               if (!p_mngr->t2[i].p_virt) {
+                       rc = -ENOMEM;
+                       goto t2_fail;
+               }
+-              memset(*p_virt, 0, size);
+               p_mngr->t2[i].size = size;
+               total_size -= size;
+       }
+-- 
+2.9.5
+
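The transformation in miniature: dma_zalloc_coherent(), as it existed at the time of this patch, returns a buffer that is already zeroed, so the explicit memset after a successful dma_alloc_coherent() can be dropped. A reduced fragment mirroring the hunk above (variable names abbreviated):

/* before: allocate, check, then zero by hand */
*p_virt = dma_alloc_coherent(&pdev->dev, size, &phys, GFP_KERNEL);
if (!*p_virt)
	return -ENOMEM;
memset(*p_virt, 0, size);

/* after: one call, buffer arrives zeroed */
*p_virt = dma_zalloc_coherent(&pdev->dev, size, &phys, GFP_KERNEL);
if (!*p_virt)
	return -ENOMEM;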
diff --git a/linux-next-cherry-picks/0033-qed-Utilize-FW-8.37.2.0.patch b/linux-next-cherry-picks/0033-qed-Utilize-FW-8.37.2.0.patch
new file mode 100644 (file)
index 0000000..f73e26a
--- /dev/null
@@ -0,0 +1,2360 @@
+From d52c89f120de849575f6b2e5948038f2be12ce6f Mon Sep 17 00:00:00 2001
+From: Michal Kalderon <Michal.Kalderon@cavium.com>
+Date: Tue, 5 Jun 2018 13:11:16 +0300
+Subject: [PATCH 33/44] qed*: Utilize FW 8.37.2.0
+
+This FW contains several fixes and features.
+
+RDMA
+- Several modifications and fixes for Memory Windows
+- Drop VLAN and TCP timestamp from the MSS calculation in the driver
+  for this FW
+- Fix SQ completion flow when the local ack timeout is infinite
+- Modifications in T10-DIF support
+
+ETH
+- Fix aRFS for tunneled traffic without an inner IP header.
+- Fix chip configuration which may fail under heavy traffic conditions.
+- Support receiving any-VNI in VXLAN and GENEVE RX classification.
+
+iSCSI / FCoE
+- Fix iSCSI recovery flow
+- Drop VLAN and TCP timestamp from the MSS calculation for FW 8.37.2.0
+
+Misc
+- Fix reading of several registers (split registers) with ethtool -d
+
+Signed-off-by: Ariel Elior <Ariel.Elior@cavium.com>
+Signed-off-by: Manish Rangankar <manish.rangankar@cavium.com>
+Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/infiniband/hw/qedr/qedr_hsi_rdma.h         | 139 +++---
+ drivers/infiniband/hw/qedr/verbs.c                 |   4 +-
+ drivers/net/ethernet/qlogic/qed/qed_debug.c        | 492 ++++++++++++---------
+ drivers/net/ethernet/qlogic/qed/qed_dev.c          |   2 +-
+ drivers/net/ethernet/qlogic/qed/qed_hsi.h          | 462 ++++++++++++-------
+ drivers/net/ethernet/qlogic/qed/qed_hw.c           |  20 +
+ drivers/net/ethernet/qlogic/qed/qed_hw.h           |  12 +
+ .../net/ethernet/qlogic/qed/qed_init_fw_funcs.c    |  50 ++-
+ drivers/net/ethernet/qlogic/qed/qed_iwarp.c        |  13 +-
+ drivers/net/ethernet/qlogic/qed/qed_l2.c           |   3 +
+ drivers/net/ethernet/qlogic/qed/qed_l2.h           |   1 +
+ drivers/net/ethernet/qlogic/qed/qed_rdma.c         |   8 +-
+ drivers/net/ethernet/qlogic/qed/qed_reg_addr.h     |   3 +-
+ drivers/net/ethernet/qlogic/qed/qed_roce.c         |  31 +-
+ include/linux/qed/common_hsi.h                     |   4 +-
+ include/linux/qed/iscsi_common.h                   |   8 +-
+ include/linux/qed/qed_rdma_if.h                    |   4 +-
+ include/linux/qed/roce_common.h                    |   1 +
+ 18 files changed, 727 insertions(+), 530 deletions(-)
+
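One user-visible HSI change in this FW bump is the split of the RQ SGE lkey field into LO and HI parts (see the qedr_hsi_rdma.h and verbs.c hunks below). A standalone sketch of the mask/shift packing; SET_FIELD() here is a reduced stand-in for the kernel macro, and placing the upper lkey bits via lkey >> 26 is our reading of the new layout rather than code taken from the patch:

#include <stdint.h>
#include <stdio.h>

/* Mask/shift values copied from the qedr_hsi_rdma.h hunk below */
#define RDMA_RQ_SGE_L_KEY_LO_MASK   0x3FFFFFF
#define RDMA_RQ_SGE_L_KEY_LO_SHIFT  0
#define RDMA_RQ_SGE_NUM_SGES_MASK   0x7
#define RDMA_RQ_SGE_NUM_SGES_SHIFT  26
#define RDMA_RQ_SGE_L_KEY_HI_MASK   0x7
#define RDMA_RQ_SGE_L_KEY_HI_SHIFT  29

/* Reduced stand-in for the kernel's SET_FIELD() macro */
#define SET_FIELD(value, name, field) \
	((value) |= (((uint32_t)(field) & name##_MASK) << name##_SHIFT))

int main(void)
{
	uint32_t flags = 0, lkey = 0x12345678, num_sges = 2;

	SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO, lkey);        /* bits 25:0  */
	SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, num_sges);    /* bits 28:26 */
	SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_HI, lkey >> 26);  /* bits 31:29 */

	printf("flags=0x%08x\n", (unsigned)flags);
	return 0;
}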
+diff --git a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
+index b816c80..7e1f702 100644
+--- a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
++++ b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
+@@ -116,6 +116,7 @@ enum rdma_cqe_requester_status_enum {
+       RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR,
+       RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR,
+       RDMA_CQE_REQ_STS_XRC_VOILATION_ERR,
++      RDMA_CQE_REQ_STS_SIG_ERR,
+       MAX_RDMA_CQE_REQUESTER_STATUS_ENUM
+ };
+@@ -152,12 +153,12 @@ struct rdma_rq_sge {
+       struct regpair addr;
+       __le32 length;
+       __le32 flags;
+-#define RDMA_RQ_SGE_L_KEY_MASK      0x3FFFFFF
+-#define RDMA_RQ_SGE_L_KEY_SHIFT     0
++#define RDMA_RQ_SGE_L_KEY_LO_MASK   0x3FFFFFF
++#define RDMA_RQ_SGE_L_KEY_LO_SHIFT  0
+ #define RDMA_RQ_SGE_NUM_SGES_MASK   0x7
+ #define RDMA_RQ_SGE_NUM_SGES_SHIFT  26
+-#define RDMA_RQ_SGE_RESERVED0_MASK  0x7
+-#define RDMA_RQ_SGE_RESERVED0_SHIFT 29
++#define RDMA_RQ_SGE_L_KEY_HI_MASK   0x7
++#define RDMA_RQ_SGE_L_KEY_HI_SHIFT  29
+ };
+ struct rdma_srq_sge {
+@@ -241,18 +242,39 @@ enum rdma_dif_io_direction_flg {
+       MAX_RDMA_DIF_IO_DIRECTION_FLG
+ };
+-/* RDMA DIF Runt Result Structure */
+-struct rdma_dif_runt_result {
+-      __le16 guard_tag;
+-      __le16 reserved[3];
++struct rdma_dif_params {
++      __le32 base_ref_tag;
++      __le16 app_tag;
++      __le16 app_tag_mask;
++      __le16 runt_crc_value;
++      __le16 flags;
++#define RDMA_DIF_PARAMS_IO_DIRECTION_FLG_MASK    0x1
++#define RDMA_DIF_PARAMS_IO_DIRECTION_FLG_SHIFT   0
++#define RDMA_DIF_PARAMS_BLOCK_SIZE_MASK          0x1
++#define RDMA_DIF_PARAMS_BLOCK_SIZE_SHIFT         1
++#define RDMA_DIF_PARAMS_RUNT_VALID_FLG_MASK      0x1
++#define RDMA_DIF_PARAMS_RUNT_VALID_FLG_SHIFT     2
++#define RDMA_DIF_PARAMS_VALIDATE_CRC_GUARD_MASK  0x1
++#define RDMA_DIF_PARAMS_VALIDATE_CRC_GUARD_SHIFT 3
++#define RDMA_DIF_PARAMS_VALIDATE_REF_TAG_MASK    0x1
++#define RDMA_DIF_PARAMS_VALIDATE_REF_TAG_SHIFT   4
++#define RDMA_DIF_PARAMS_VALIDATE_APP_TAG_MASK    0x1
++#define RDMA_DIF_PARAMS_VALIDATE_APP_TAG_SHIFT   5
++#define RDMA_DIF_PARAMS_CRC_SEED_MASK            0x1
++#define RDMA_DIF_PARAMS_CRC_SEED_SHIFT           6
++#define RDMA_DIF_PARAMS_RX_REF_TAG_CONST_MASK    0x1
++#define RDMA_DIF_PARAMS_RX_REF_TAG_CONST_SHIFT   7
++#define RDMA_DIF_PARAMS_BLOCK_GUARD_TYPE_MASK    0x1
++#define RDMA_DIF_PARAMS_BLOCK_GUARD_TYPE_SHIFT   8
++#define RDMA_DIF_PARAMS_APP_ESCAPE_MASK          0x1
++#define RDMA_DIF_PARAMS_APP_ESCAPE_SHIFT         9
++#define RDMA_DIF_PARAMS_REF_ESCAPE_MASK          0x1
++#define RDMA_DIF_PARAMS_REF_ESCAPE_SHIFT         10
++#define RDMA_DIF_PARAMS_RESERVED4_MASK           0x1F
++#define RDMA_DIF_PARAMS_RESERVED4_SHIFT          11
++      __le32 reserved5;
+ };
+-/* Memory window type enumeration */
+-enum rdma_mw_type {
+-      RDMA_MW_TYPE_1,
+-      RDMA_MW_TYPE_2A,
+-      MAX_RDMA_MW_TYPE
+-};
+ struct rdma_sq_atomic_wqe {
+       __le32 reserved1;
+@@ -334,17 +356,17 @@ struct rdma_sq_bind_wqe {
+ #define RDMA_SQ_BIND_WQE_SE_FLG_SHIFT        3
+ #define RDMA_SQ_BIND_WQE_INLINE_FLG_MASK     0x1
+ #define RDMA_SQ_BIND_WQE_INLINE_FLG_SHIFT    4
+-#define RDMA_SQ_BIND_WQE_RESERVED0_MASK      0x7
+-#define RDMA_SQ_BIND_WQE_RESERVED0_SHIFT     5
++#define RDMA_SQ_BIND_WQE_DIF_ON_HOST_FLG_MASK  0x1
++#define RDMA_SQ_BIND_WQE_DIF_ON_HOST_FLG_SHIFT 5
++#define RDMA_SQ_BIND_WQE_RESERVED0_MASK      0x3
++#define RDMA_SQ_BIND_WQE_RESERVED0_SHIFT     6
+       u8 wqe_size;
+       u8 prev_wqe_size;
+       u8 bind_ctrl;
+ #define RDMA_SQ_BIND_WQE_ZERO_BASED_MASK     0x1
+ #define RDMA_SQ_BIND_WQE_ZERO_BASED_SHIFT    0
+-#define RDMA_SQ_BIND_WQE_MW_TYPE_MASK        0x1
+-#define RDMA_SQ_BIND_WQE_MW_TYPE_SHIFT       1
+-#define RDMA_SQ_BIND_WQE_RESERVED1_MASK      0x3F
+-#define RDMA_SQ_BIND_WQE_RESERVED1_SHIFT     2
++#define RDMA_SQ_BIND_WQE_RESERVED1_MASK        0x7F
++#define RDMA_SQ_BIND_WQE_RESERVED1_SHIFT       1
+       u8 access_ctrl;
+ #define RDMA_SQ_BIND_WQE_REMOTE_READ_MASK    0x1
+ #define RDMA_SQ_BIND_WQE_REMOTE_READ_SHIFT   0
+@@ -363,6 +385,7 @@ struct rdma_sq_bind_wqe {
+       __le32 length_lo;
+       __le32 parent_l_key;
+       __le32 reserved4;
++      struct rdma_dif_params dif_params;
+ };
+ /* First element (16 bytes) of bind wqe */
+@@ -392,10 +415,8 @@ struct rdma_sq_bind_wqe_2nd {
+       u8 bind_ctrl;
+ #define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_MASK     0x1
+ #define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_SHIFT    0
+-#define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_MASK        0x1
+-#define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_SHIFT       1
+-#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_MASK      0x3F
+-#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_SHIFT     2
++#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_MASK      0x7F
++#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_SHIFT     1
+       u8 access_ctrl;
+ #define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_MASK    0x1
+ #define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_SHIFT   0
+@@ -416,6 +437,11 @@ struct rdma_sq_bind_wqe_2nd {
+       __le32 reserved4;
+ };
++/* Third element (16 bytes) of bind wqe */
++struct rdma_sq_bind_wqe_3rd {
++      struct rdma_dif_params dif_params;
++};
++
+ /* Structure with only the SQ WQE common
+  * fields. Size is of one SQ element (16B)
+  */
+@@ -486,30 +512,6 @@ struct rdma_sq_fmr_wqe {
+       u8 length_hi;
+       __le32 length_lo;
+       struct regpair pbl_addr;
+-      __le32 dif_base_ref_tag;
+-      __le16 dif_app_tag;
+-      __le16 dif_app_tag_mask;
+-      __le16 dif_runt_crc_value;
+-      __le16 dif_flags;
+-#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_MASK     0x1
+-#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_SHIFT    0
+-#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_MASK           0x1
+-#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_SHIFT          1
+-#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_MASK               0x1
+-#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_SHIFT      2
+-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_MASK   0x1
+-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_SHIFT  3
+-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_MASK     0x1
+-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_SHIFT    4
+-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_MASK     0x1
+-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT    5
+-#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK             0x1
+-#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT            6
+-#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_MASK     0x1
+-#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_SHIFT    7
+-#define RDMA_SQ_FMR_WQE_RESERVED4_MASK                        0xFF
+-#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT                       8
+-      __le32 reserved5;
+ };
+ /* First element (16 bytes) of fmr wqe */
+@@ -566,33 +568,6 @@ struct rdma_sq_fmr_wqe_2nd {
+       struct regpair pbl_addr;
+ };
+-/* Third element (16 bytes) of fmr wqe */
+-struct rdma_sq_fmr_wqe_3rd {
+-      __le32 dif_base_ref_tag;
+-      __le16 dif_app_tag;
+-      __le16 dif_app_tag_mask;
+-      __le16 dif_runt_crc_value;
+-      __le16 dif_flags;
+-#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_MASK         0x1
+-#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_SHIFT                0
+-#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_MASK                       0x1
+-#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_SHIFT              1
+-#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_MASK           0x1
+-#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_SHIFT          2
+-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_MASK               0x1
+-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_SHIFT      3
+-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_MASK         0x1
+-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_SHIFT                4
+-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_MASK         0x1
+-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT                5
+-#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK                 0x1
+-#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT                        6
+-#define RDMA_SQ_FMR_WQE_3RD_DIF_RX_REF_TAG_CONST_MASK         0x1
+-#define RDMA_SQ_FMR_WQE_3RD_DIF_RX_REF_TAG_CONST_SHIFT                7
+-#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK                    0xFF
+-#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT                               8
+-      __le32 reserved5;
+-};
+ struct rdma_sq_local_inv_wqe {
+       struct regpair reserved;
+@@ -637,8 +612,8 @@ struct rdma_sq_rdma_wqe {
+ #define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT        5
+ #define RDMA_SQ_RDMA_WQE_READ_INV_FLG_MASK    0x1
+ #define RDMA_SQ_RDMA_WQE_READ_INV_FLG_SHIFT   6
+-#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK               0x1
+-#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT      7
++#define RDMA_SQ_RDMA_WQE_RESERVED1_MASK        0x1
++#define RDMA_SQ_RDMA_WQE_RESERVED1_SHIFT       7
+       u8 wqe_size;
+       u8 prev_wqe_size;
+       struct regpair remote_va;
+@@ -646,13 +621,9 @@ struct rdma_sq_rdma_wqe {
+       u8 dif_flags;
+ #define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_MASK            0x1
+ #define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_SHIFT           0
+-#define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_MASK  0x1
+-#define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_SHIFT 1
+-#define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_MASK   0x1
+-#define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_SHIFT  2
+-#define RDMA_SQ_RDMA_WQE_RESERVED1_MASK                 0x1F
+-#define RDMA_SQ_RDMA_WQE_RESERVED1_SHIFT                3
+-      u8 reserved2[3];
++#define RDMA_SQ_RDMA_WQE_RESERVED2_MASK        0x7F
++#define RDMA_SQ_RDMA_WQE_RESERVED2_SHIFT       1
++      u8 reserved3[3];
+ };
+ /* First element (16 bytes) of rdma wqe */
+diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
+index 3f9afc0..e2caabb 100644
+--- a/drivers/infiniband/hw/qedr/verbs.c
++++ b/drivers/infiniband/hw/qedr/verbs.c
+@@ -3276,7 +3276,7 @@ int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+                               SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
+                                         wr->num_sge);
+-                      SET_FIELD(flags, RDMA_RQ_SGE_L_KEY,
++                      SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO,
+                                 wr->sg_list[i].lkey);
+                       RQ_SGE_SET(rqe, wr->sg_list[i].addr,
+@@ -3295,7 +3295,7 @@ int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+                       /* First one must include the number
+                        * of SGE in the list
+                        */
+-                      SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, 0);
++                      SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO, 0);
+                       SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
+                       RQ_SGE_SET(rqe, 0, 0, flags);
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
+index 39124b5..b9ec460 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
+@@ -183,16 +183,9 @@ enum platform_ids {
+       MAX_PLATFORM_IDS
+ };
+-struct chip_platform_defs {
+-      u8 num_ports;
+-      u8 num_pfs;
+-      u8 num_vfs;
+-};
+-
+ /* Chip constant definitions */
+ struct chip_defs {
+       const char *name;
+-      struct chip_platform_defs per_platform[MAX_PLATFORM_IDS];
+ };
+ /* Platform constant definitions */
+@@ -317,6 +310,11 @@ struct phy_defs {
+       u32 tbus_data_hi_addr;
+ };
++/* Split type definitions */
++struct split_type_defs {
++      const char *name;
++};
++
+ /******************************** Constants **********************************/
+ #define MAX_LCIDS                     320
+@@ -469,21 +467,9 @@ static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
+ /* Chip constant definitions array */
+ static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
+-      { "bb",
+-        {{MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB},
+-         {0, 0, 0},
+-         {0, 0, 0},
+-         {0, 0, 0} } },
+-      { "ah",
+-        {{MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2},
+-         {0, 0, 0},
+-         {0, 0, 0},
+-         {0, 0, 0} } },
+-      { "reserved",
+-         {{0, 0, 0},
+-         {0, 0, 0},
+-         {0, 0, 0},
+-         {0, 0, 0} } }
++      {"bb"},
++      {"ah"},
++      {"reserved"},
+ };
+ /* Storm constant definitions array */
+@@ -1588,7 +1574,7 @@ static struct grc_param_defs s_grc_param_defs[] = {
+       {{0, 0, 0}, 0, 1, false, false, 0, 1},
+       /* DBG_GRC_PARAM_DUMP_BMB */
+-      {{0, 0, 0}, 0, 1, false, false, 0, 1},
++      {{0, 0, 0}, 0, 1, false, false, 0, 0},
+       /* DBG_GRC_PARAM_DUMP_NIG */
+       {{1, 1, 1}, 0, 1, false, false, 0, 1},
+@@ -1745,6 +1731,23 @@ static struct phy_defs s_phy_defs[] = {
+        PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
+ };
++static struct split_type_defs s_split_type_defs[] = {
++      /* SPLIT_TYPE_NONE */
++      {"eng"},
++
++      /* SPLIT_TYPE_PORT */
++      {"port"},
++
++      /* SPLIT_TYPE_PF */
++      {"pf"},
++
++      /* SPLIT_TYPE_PORT_PF */
++      {"port"},
++
++      /* SPLIT_TYPE_VF */
++      {"vf"}
++};
++
+ /**************************** Private Functions ******************************/
+ /* Reads and returns a single dword from the specified unaligned buffer */
+@@ -1781,28 +1784,68 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
+                                       struct qed_ptt *p_ptt)
+ {
+       struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
++      u8 num_pfs = 0, max_pfs_per_port = 0;
+       if (dev_data->initialized)
+               return DBG_STATUS_OK;
++      /* Set chip */
+       if (QED_IS_K2(p_hwfn->cdev)) {
+               dev_data->chip_id = CHIP_K2;
+               dev_data->mode_enable[MODE_K2] = 1;
++              dev_data->num_vfs = MAX_NUM_VFS_K2;
++              num_pfs = MAX_NUM_PFS_K2;
++              max_pfs_per_port = MAX_NUM_PFS_K2 / 2;
+       } else if (QED_IS_BB_B0(p_hwfn->cdev)) {
+               dev_data->chip_id = CHIP_BB;
+               dev_data->mode_enable[MODE_BB] = 1;
++              dev_data->num_vfs = MAX_NUM_VFS_BB;
++              num_pfs = MAX_NUM_PFS_BB;
++              max_pfs_per_port = MAX_NUM_PFS_BB;
+       } else {
+               return DBG_STATUS_UNKNOWN_CHIP;
+       }
+      /* Set platform */
+       dev_data->platform_id = PLATFORM_ASIC;
+       dev_data->mode_enable[MODE_ASIC] = 1;
++      /* Set port mode */
++      switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
++      case 0:
++              dev_data->mode_enable[MODE_PORTS_PER_ENG_1] = 1;
++              break;
++      case 1:
++              dev_data->mode_enable[MODE_PORTS_PER_ENG_2] = 1;
++              break;
++      case 2:
++              dev_data->mode_enable[MODE_PORTS_PER_ENG_4] = 1;
++              break;
++      }
++
++      /* Set 100G mode */
++      if (dev_data->chip_id == CHIP_BB &&
++          qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB) == 2)
++              dev_data->mode_enable[MODE_100G] = 1;
++
++      /* Set number of ports */
++      if (dev_data->mode_enable[MODE_PORTS_PER_ENG_1] ||
++          dev_data->mode_enable[MODE_100G])
++              dev_data->num_ports = 1;
++      else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_2])
++              dev_data->num_ports = 2;
++      else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_4])
++              dev_data->num_ports = 4;
++
++      /* Set number of PFs per port */
++      dev_data->num_pfs_per_port = min_t(u32,
++                                         num_pfs / dev_data->num_ports,
++                                         max_pfs_per_port);
++
+       /* Initializes the GRC parameters */
+       qed_dbg_grc_init_params(p_hwfn);
+       dev_data->use_dmae = true;
+-      dev_data->num_regs_read = 0;
+       dev_data->initialized = 1;
+       return DBG_STATUS_OK;
+@@ -1821,9 +1864,9 @@ static struct dbg_bus_block *get_dbg_bus_block_desc(struct qed_hwfn *p_hwfn,
+ /* Reads the FW info structure for the specified Storm from the chip,
+  * and writes it to the specified fw_info pointer.
+  */
+-static void qed_read_fw_info(struct qed_hwfn *p_hwfn,
+-                           struct qed_ptt *p_ptt,
+-                           u8 storm_id, struct fw_info *fw_info)
++static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn,
++                                 struct qed_ptt *p_ptt,
++                                 u8 storm_id, struct fw_info *fw_info)
+ {
+       struct storm_defs *storm = &s_storm_defs[storm_id];
+       struct fw_info_location fw_info_location;
+@@ -1945,45 +1988,29 @@ static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
+                                struct qed_ptt *p_ptt,
+                                u32 *dump_buf, bool dump)
+ {
+-      struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+       char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
+       char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
+       struct fw_info fw_info = { {0}, {0} };
+       u32 offset = 0;
+       if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
+-              /* Read FW image/version from PRAM in a non-reset SEMI */
+-              bool found = false;
+-              u8 storm_id;
+-
+-              for (storm_id = 0; storm_id < MAX_DBG_STORMS && !found;
+-                   storm_id++) {
+-                      struct storm_defs *storm = &s_storm_defs[storm_id];
+-
+-                      /* Read FW version/image */
+-                      if (dev_data->block_in_reset[storm->block_id])
+-                              continue;
+-
+-                      /* Read FW info for the current Storm */
+-                      qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
+-
+-                      /* Create FW version/image strings */
+-                      if (snprintf(fw_ver_str, sizeof(fw_ver_str),
+-                                   "%d_%d_%d_%d", fw_info.ver.num.major,
+-                                   fw_info.ver.num.minor, fw_info.ver.num.rev,
+-                                   fw_info.ver.num.eng) < 0)
+-                              DP_NOTICE(p_hwfn,
+-                                        "Unexpected debug error: invalid FW version string\n");
+-                      switch (fw_info.ver.image_id) {
+-                      case FW_IMG_MAIN:
+-                              strcpy(fw_img_str, "main");
+-                              break;
+-                      default:
+-                              strcpy(fw_img_str, "unknown");
+-                              break;
+-                      }
+-
+-                      found = true;
++              /* Read FW info from chip */
++              qed_read_fw_info(p_hwfn, p_ptt, &fw_info);
++
++              /* Create FW version/image strings */
++              if (snprintf(fw_ver_str, sizeof(fw_ver_str),
++                           "%d_%d_%d_%d", fw_info.ver.num.major,
++                           fw_info.ver.num.minor, fw_info.ver.num.rev,
++                           fw_info.ver.num.eng) < 0)
++                      DP_NOTICE(p_hwfn,
++                                "Unexpected debug error: invalid FW version string\n");
++              switch (fw_info.ver.image_id) {
++              case FW_IMG_MAIN:
++                      strcpy(fw_img_str, "main");
++                      break;
++              default:
++                      strcpy(fw_img_str, "unknown");
++                      break;
+               }
+       }
+@@ -2412,20 +2439,21 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
+ /* Dumps GRC registers section header. Returns the dumped size in dwords.
+  * The following parameters are dumped:
+- * - count:    no. of dumped entries
+- * - split:    split type
+- * - id:       split ID (dumped only if split_id >= 0)
++ * - count: no. of dumped entries
++ * - split_type: split type
++ * - split_id: split ID (dumped only if split_id != SPLIT_TYPE_NONE)
+  * - param_name: user parameter value (dumped only if param_name != NULL
+  *             and param_val != NULL).
+  */
+ static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
+                                bool dump,
+                                u32 num_reg_entries,
+-                               const char *split_type,
+-                               int split_id,
++                               enum init_split_types split_type,
++                               u8 split_id,
+                                const char *param_name, const char *param_val)
+ {
+-      u8 num_params = 2 + (split_id >= 0 ? 1 : 0) + (param_name ? 1 : 0);
++      u8 num_params = 2 +
++          (split_type != SPLIT_TYPE_NONE ? 1 : 0) + (param_name ? 1 : 0);
+       u32 offset = 0;
+       offset += qed_dump_section_hdr(dump_buf + offset,
+@@ -2433,8 +2461,9 @@ static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
+       offset += qed_dump_num_param(dump_buf + offset,
+                                    dump, "count", num_reg_entries);
+       offset += qed_dump_str_param(dump_buf + offset,
+-                                   dump, "split", split_type);
+-      if (split_id >= 0)
++                                   dump, "split",
++                                   s_split_type_defs[split_type].name);
++      if (split_type != SPLIT_TYPE_NONE)
+               offset += qed_dump_num_param(dump_buf + offset,
+                                            dump, "id", split_id);
+       if (param_name && param_val)
+@@ -2463,9 +2492,12 @@ void qed_read_regs(struct qed_hwfn *p_hwfn,
+ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
+                                  struct qed_ptt *p_ptt,
+                                  u32 *dump_buf,
+-                                 bool dump, u32 addr, u32 len, bool wide_bus)
++                                 bool dump, u32 addr, u32 len, bool wide_bus,
++                                 enum init_split_types split_type,
++                                 u8 split_id)
+ {
+       struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
++      u8 port_id = 0, pf_id = 0, vf_id = 0, fid = 0;
+       if (!dump)
+               return len;
+@@ -2481,8 +2513,27 @@ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
+               dev_data->num_regs_read = 0;
+       }
++      switch (split_type) {
++      case SPLIT_TYPE_PORT:
++              port_id = split_id;
++              break;
++      case SPLIT_TYPE_PF:
++              pf_id = split_id;
++              break;
++      case SPLIT_TYPE_PORT_PF:
++              port_id = split_id / dev_data->num_pfs_per_port;
++              pf_id = port_id + dev_data->num_ports *
++                  (split_id % dev_data->num_pfs_per_port);
++              break;
++      case SPLIT_TYPE_VF:
++              vf_id = split_id;
++              break;
++      default:
++              break;
++      }
++
+       /* Try reading using DMAE */
+-      if (dev_data->use_dmae &&
++      if (dev_data->use_dmae && split_type == SPLIT_TYPE_NONE &&
+           (len >= s_platform_defs[dev_data->platform_id].dmae_thresh ||
+            wide_bus)) {
+               if (!qed_dmae_grc2host(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr),
+@@ -2494,7 +2545,37 @@ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
+                          "Failed reading from chip using DMAE, using GRC instead\n");
+       }
+-      /* Read registers */
++      /* If not read using DMAE, read using GRC */
++
++      /* Set pretend */
++      if (split_type != dev_data->pretend.split_type || split_id !=
++          dev_data->pretend.split_id) {
++              switch (split_type) {
++              case SPLIT_TYPE_PORT:
++                      qed_port_pretend(p_hwfn, p_ptt, port_id);
++                      break;
++              case SPLIT_TYPE_PF:
++                      fid = pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
++                      qed_fid_pretend(p_hwfn, p_ptt, fid);
++                      break;
++              case SPLIT_TYPE_PORT_PF:
++                      fid = pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
++                      qed_port_fid_pretend(p_hwfn, p_ptt, port_id, fid);
++                      break;
++              case SPLIT_TYPE_VF:
++                      fid = BIT(PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT) |
++                            (vf_id << PXP_PRETEND_CONCRETE_FID_VFID_SHIFT);
++                      qed_fid_pretend(p_hwfn, p_ptt, fid);
++                      break;
++              default:
++                      break;
++              }
++
++              dev_data->pretend.split_type = (u8)split_type;
++              dev_data->pretend.split_id = split_id;
++      }
++
++      /* Read registers using GRC */
+       qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len);
+       return len;
+@@ -2518,7 +2599,8 @@ static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf,
+ static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
+                                 struct qed_ptt *p_ptt,
+                                 u32 *dump_buf,
+-                                bool dump, u32 addr, u32 len, bool wide_bus)
++                                bool dump, u32 addr, u32 len, bool wide_bus,
++                                enum init_split_types split_type, u8 split_id)
+ {
+       u32 offset = 0;
+@@ -2526,7 +2608,8 @@ static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
+       offset += qed_grc_dump_addr_range(p_hwfn,
+                                         p_ptt,
+                                         dump_buf + offset,
+-                                        dump, addr, len, wide_bus);
++                                        dump, addr, len, wide_bus,
++                                        split_type, split_id);
+       return offset;
+ }
+@@ -2559,7 +2642,8 @@ static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
+               offset += qed_grc_dump_addr_range(p_hwfn,
+                                                 p_ptt,
+                                                 dump_buf + offset,
+-                                                dump, addr, curr_len, false);
++                                                dump,  addr, curr_len, false,
++                                                SPLIT_TYPE_NONE, 0);
+               reg_offset += curr_len;
+               addr += curr_len;
+@@ -2581,6 +2665,8 @@ static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
+                                    struct dbg_array input_regs_arr,
+                                    u32 *dump_buf,
+                                    bool dump,
++                                   enum init_split_types split_type,
++                                   u8 split_id,
+                                    bool block_enable[MAX_BLOCK_ID],
+                                    u32 *num_dumped_reg_entries)
+ {
+@@ -2628,7 +2714,8 @@ static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
+                                                        dump,
+                                                        addr,
+                                                        len,
+-                                                       wide_bus);
++                                                       wide_bus,
++                                                       split_type, split_id);
+                       (*num_dumped_reg_entries)++;
+               }
+       }
+@@ -2643,19 +2730,28 @@ static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
+                                  u32 *dump_buf,
+                                  bool dump,
+                                  bool block_enable[MAX_BLOCK_ID],
+-                                 const char *split_type_name,
+-                                 u32 split_id,
++                                 enum init_split_types split_type,
++                                 u8 split_id,
+                                  const char *param_name,
+                                  const char *param_val)
+ {
++      struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
++      enum init_split_types hdr_split_type = split_type;
+       u32 num_dumped_reg_entries, offset;
++      u8 hdr_split_id = split_id;
++
++      /* In PORT_PF split type, print a port split header */
++      if (split_type == SPLIT_TYPE_PORT_PF) {
++              hdr_split_type = SPLIT_TYPE_PORT;
++              hdr_split_id = split_id / dev_data->num_pfs_per_port;
++      }
+       /* Calculate register dump header size (and skip it for now) */
+       offset = qed_grc_dump_regs_hdr(dump_buf,
+                                      false,
+                                      0,
+-                                     split_type_name,
+-                                     split_id, param_name, param_val);
++                                     hdr_split_type,
++                                     hdr_split_id, param_name, param_val);
+       /* Dump registers */
+       offset += qed_grc_dump_regs_entries(p_hwfn,
+@@ -2663,6 +2759,8 @@ static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
+                                           input_regs_arr,
+                                           dump_buf + offset,
+                                           dump,
++                                          split_type,
++                                          split_id,
+                                           block_enable,
+                                           &num_dumped_reg_entries);
+@@ -2671,8 +2769,8 @@ static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
+               qed_grc_dump_regs_hdr(dump_buf,
+                                     dump,
+                                     num_dumped_reg_entries,
+-                                    split_type_name,
+-                                    split_id, param_name, param_val);
++                                    hdr_split_type,
++                                    hdr_split_id, param_name, param_val);
+       return num_dumped_reg_entries > 0 ? offset : 0;
+ }
+@@ -2688,26 +2786,21 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
+                                 const char *param_name, const char *param_val)
+ {
+       struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+-      struct chip_platform_defs *chip_platform;
+       u32 offset = 0, input_offset = 0;
+-      struct chip_defs *chip;
+-      u8 port_id, pf_id, vf_id;
+       u16 fid;
+-
+-      chip = &s_chip_defs[dev_data->chip_id];
+-      chip_platform = &chip->per_platform[dev_data->platform_id];
+-
+       while (input_offset <
+              s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
+               const struct dbg_dump_split_hdr *split_hdr;
+               struct dbg_array curr_input_regs_arr;
++              enum init_split_types split_type;
++              u16 split_count = 0;
+               u32 split_data_size;
+-              u8 split_type_id;
++              u8 split_id;
+               split_hdr =
+                       (const struct dbg_dump_split_hdr *)
+                       &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++];
+-              split_type_id =
++              split_type =
+                       GET_FIELD(split_hdr->hdr,
+                                 DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
+               split_data_size =
+@@ -2717,99 +2810,44 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
+                       &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset];
+               curr_input_regs_arr.size_in_dwords = split_data_size;
+-              switch (split_type_id) {
++              switch (split_type) {
+               case SPLIT_TYPE_NONE:
+-                      offset += qed_grc_dump_split_data(p_hwfn,
+-                                                        p_ptt,
+-                                                        curr_input_regs_arr,
+-                                                        dump_buf + offset,
+-                                                        dump,
+-                                                        block_enable,
+-                                                        "eng",
+-                                                        (u32)(-1),
+-                                                        param_name,
+-                                                        param_val);
++                      split_count = 1;
+                       break;
+-
+               case SPLIT_TYPE_PORT:
+-                      for (port_id = 0; port_id < chip_platform->num_ports;
+-                           port_id++) {
+-                              if (dump)
+-                                      qed_port_pretend(p_hwfn, p_ptt,
+-                                                       port_id);
+-                              offset +=
+-                                  qed_grc_dump_split_data(p_hwfn, p_ptt,
+-                                                          curr_input_regs_arr,
+-                                                          dump_buf + offset,
+-                                                          dump, block_enable,
+-                                                          "port", port_id,
+-                                                          param_name,
+-                                                          param_val);
+-                      }
++                      split_count = dev_data->num_ports;
+                       break;
+-
+               case SPLIT_TYPE_PF:
+               case SPLIT_TYPE_PORT_PF:
+-                      for (pf_id = 0; pf_id < chip_platform->num_pfs;
+-                           pf_id++) {
+-                              u8 pfid_shift =
+-                                      PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
+-
+-                              if (dump) {
+-                                      fid = pf_id << pfid_shift;
+-                                      qed_fid_pretend(p_hwfn, p_ptt, fid);
+-                              }
+-
+-                              offset +=
+-                                  qed_grc_dump_split_data(p_hwfn,
+-                                                          p_ptt,
+-                                                          curr_input_regs_arr,
+-                                                          dump_buf + offset,
+-                                                          dump,
+-                                                          block_enable,
+-                                                          "pf",
+-                                                          pf_id,
+-                                                          param_name,
+-                                                          param_val);
+-                      }
++                      split_count = dev_data->num_ports *
++                          dev_data->num_pfs_per_port;
+                       break;
+-
+               case SPLIT_TYPE_VF:
+-                      for (vf_id = 0; vf_id < chip_platform->num_vfs;
+-                           vf_id++) {
+-                              u8 vfvalid_shift =
+-                                      PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT;
+-                              u8 vfid_shift =
+-                                      PXP_PRETEND_CONCRETE_FID_VFID_SHIFT;
+-
+-                              if (dump) {
+-                                      fid = BIT(vfvalid_shift) |
+-                                            (vf_id << vfid_shift);
+-                                      qed_fid_pretend(p_hwfn, p_ptt, fid);
+-                              }
+-
+-                              offset +=
+-                                  qed_grc_dump_split_data(p_hwfn, p_ptt,
+-                                                          curr_input_regs_arr,
+-                                                          dump_buf + offset,
+-                                                          dump, block_enable,
+-                                                          "vf", vf_id,
+-                                                          param_name,
+-                                                          param_val);
+-                      }
++                      split_count = dev_data->num_vfs;
+                       break;
+-
+               default:
+-                      break;
++                      return 0;
+               }
++              for (split_id = 0; split_id < split_count; split_id++)
++                      offset += qed_grc_dump_split_data(p_hwfn, p_ptt,
++                                                        curr_input_regs_arr,
++                                                        dump_buf + offset,
++                                                        dump, block_enable,
++                                                        split_type,
++                                                        split_id,
++                                                        param_name,
++                                                        param_val);
++
+               input_offset += split_data_size;
+       }
+-      /* Pretend to original PF */
++      /* Cancel pretends (pretend to original PF) */
+       if (dump) {
+               fid = p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
+               qed_fid_pretend(p_hwfn, p_ptt, fid);
++              dev_data->pretend.split_type = SPLIT_TYPE_NONE;
++              dev_data->pretend.split_id = 0;
+       }
+       return offset;
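
The rewritten loop above replaces one open-coded loop per split type with a single iteration count. A condensed sketch of that selection, assuming the port/PF/VF counts are already cached in dbg_tools_data:

static u16 split_iterations(enum init_split_types type, u8 num_ports,
                            u8 num_pfs_per_port, u8 num_vfs)
{
        switch (type) {
        case SPLIT_TYPE_NONE:
                return 1;               /* engine-wide, no split */
        case SPLIT_TYPE_PORT:
                return num_ports;
        case SPLIT_TYPE_PF:
        case SPLIT_TYPE_PORT_PF:
                return num_ports * num_pfs_per_port;
        case SPLIT_TYPE_VF:
                return num_vfs;
        default:
                return 0;               /* unknown type: dump nothing */
        }
}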
+@@ -2825,7 +2863,8 @@ static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
+       /* Calculate header size */
+       offset += qed_grc_dump_regs_hdr(dump_buf,
+-                                      false, 0, "eng", -1, NULL, NULL);
++                                      false, 0,
++                                      SPLIT_TYPE_NONE, 0, NULL, NULL);
+       /* Write reset registers */
+       for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
+@@ -2838,14 +2877,15 @@ static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
+                                                dump,
+                                                BYTES_TO_DWORDS
+                                                (s_reset_regs_defs[i].addr), 1,
+-                                               false);
++                                               false, SPLIT_TYPE_NONE, 0);
+               num_regs++;
+       }
+       /* Write header */
+       if (dump)
+               qed_grc_dump_regs_hdr(dump_buf,
+-                                    true, num_regs, "eng", -1, NULL, NULL);
++                                    true, num_regs, SPLIT_TYPE_NONE,
++                                    0, NULL, NULL);
+       return offset;
+ }
+@@ -2864,7 +2904,8 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
+       /* Calculate header size */
+       offset += qed_grc_dump_regs_hdr(dump_buf,
+-                                      false, 0, "eng", -1, NULL, NULL);
++                                      false, 0, SPLIT_TYPE_NONE,
++                                      0, NULL, NULL);
+       /* Write parity registers */
+       for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
+@@ -2899,7 +2940,8 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
+                                                        dump_buf + offset,
+                                                        dump,
+                                                        addr,
+-                                                       1, false);
++                                                       1, false,
++                                                       SPLIT_TYPE_NONE, 0);
+                       addr = GET_FIELD(reg_data->data,
+                                        DBG_ATTN_REG_STS_ADDRESS);
+                       offset += qed_grc_dump_reg_entry(p_hwfn,
+@@ -2907,7 +2949,8 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
+                                                        dump_buf + offset,
+                                                        dump,
+                                                        addr,
+-                                                       1, false);
++                                                       1, false,
++                                                       SPLIT_TYPE_NONE, 0);
+                       num_reg_entries += 2;
+               }
+       }
+@@ -2929,7 +2972,7 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
+                                                dump,
+                                                addr,
+                                                1,
+-                                               false);
++                                               false, SPLIT_TYPE_NONE, 0);
+               num_reg_entries++;
+       }
+@@ -2937,7 +2980,8 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
+       if (dump)
+               qed_grc_dump_regs_hdr(dump_buf,
+                                     true,
+-                                    num_reg_entries, "eng", -1, NULL, NULL);
++                                    num_reg_entries, SPLIT_TYPE_NONE,
++                                    0, NULL, NULL);
+       return offset;
+ }
+@@ -2950,7 +2994,8 @@ static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
+       u32 offset = 0, addr;
+       offset += qed_grc_dump_regs_hdr(dump_buf,
+-                                      dump, 2, "eng", -1, NULL, NULL);
++                                      dump, 2, SPLIT_TYPE_NONE, 0,
++                                      NULL, NULL);
+       /* Dump R/TDIF_REG_DEBUG_ERROR_INFO_SIZE (every 8th register should be
+        * skipped).
+@@ -3096,7 +3141,8 @@ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
+       offset += qed_grc_dump_addr_range(p_hwfn,
+                                         p_ptt,
+                                         dump_buf + offset,
+-                                        dump, addr, len, wide_bus);
++                                        dump, addr, len, wide_bus,
++                                        SPLIT_TYPE_NONE, 0);
+       return offset;
+ }
+@@ -3235,12 +3281,12 @@ static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
+              s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) {
+               const struct dbg_dump_split_hdr *split_hdr;
+               struct dbg_array curr_input_mems_arr;
++              enum init_split_types split_type;
+               u32 split_data_size;
+-              u8 split_type_id;
+               split_hdr = (const struct dbg_dump_split_hdr *)
+                       &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++];
+-              split_type_id =
++              split_type =
+                       GET_FIELD(split_hdr->hdr,
+                                 DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
+               split_data_size =
+@@ -3250,20 +3296,15 @@ static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
+                       &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset];
+               curr_input_mems_arr.size_in_dwords = split_data_size;
+-              switch (split_type_id) {
+-              case SPLIT_TYPE_NONE:
++              if (split_type == SPLIT_TYPE_NONE)
+                       offset += qed_grc_dump_mem_entries(p_hwfn,
+                                                          p_ptt,
+                                                          curr_input_mems_arr,
+                                                          dump_buf + offset,
+                                                          dump);
+-                      break;
+-
+-              default:
++              else
+                       DP_NOTICE(p_hwfn,
+                                 "Dumping split memories is currently not supported\n");
+-                      break;
+-              }
+               input_offset += split_data_size;
+       }
+@@ -3623,7 +3664,8 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
+                                                         dump,
+                                                         addr,
+                                                         num_dwords_to_read,
+-                                                        false);
++                                                        false,
++                                                        SPLIT_TYPE_NONE, 0);
+                       total_dwords -= num_dwords_to_read;
+                       rss_addr++;
+               }
+@@ -3682,7 +3724,7 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
+                                                 dump,
+                                                 addr,
+                                                 len,
+-                                                false);
++                                                false, SPLIT_TYPE_NONE, 0);
+       }
+       return offset;
+@@ -3731,7 +3773,8 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
+       /* Dump required non-MCP registers */
+       offset += qed_grc_dump_regs_hdr(dump_buf + offset,
+-                                      dump, 1, "eng", -1, "block", "MCP");
++                                      dump, 1, SPLIT_TYPE_NONE, 0,
++                                      "block", "MCP");
+       addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR);
+       offset += qed_grc_dump_reg_entry(p_hwfn,
+                                        p_ptt,
+@@ -3739,7 +3782,7 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
+                                        dump,
+                                        addr,
+                                        1,
+-                                       false);
++                                       false, SPLIT_TYPE_NONE, 0);
+       /* Release MCP */
+       if (halted && qed_mcp_resume(p_hwfn, p_ptt))
+@@ -3923,7 +3966,8 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
+                                                         dump,
+                                                         addr,
+                                                         len,
+-                                                        true);
++                                                        true, SPLIT_TYPE_NONE,
++                                                        0);
+               }
+               /* Disable block's client and debug output */
+@@ -3949,28 +3993,15 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
+ {
+       struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+       bool parities_masked = false;
+-      u8 i, port_mode = 0;
+       u32 offset = 0;
++      u8 i;
+       *num_dumped_dwords = 0;
++      dev_data->num_regs_read = 0;
+-      if (dump) {
+-              /* Find port mode */
+-              switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
+-              case 0:
+-                      port_mode = 1;
+-                      break;
+-              case 1:
+-                      port_mode = 2;
+-                      break;
+-              case 2:
+-                      port_mode = 4;
+-                      break;
+-              }
+-
+-              /* Update reset state */
++      /* Update reset state */
++      if (dump)
+               qed_update_blocks_reset_state(p_hwfn, p_ptt);
+-      }
+       /* Dump global params */
+       offset += qed_dump_common_global_params(p_hwfn,
+@@ -3989,7 +4020,7 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
+                                    qed_grc_get_param(p_hwfn,
+                                               DBG_GRC_PARAM_NUM_LTIDS));
+       offset += qed_dump_num_param(dump_buf + offset,
+-                                   dump, "num-ports", port_mode);
++                                   dump, "num-ports", dev_data->num_ports);
+       /* Dump reset registers (dumped before taking blocks out of reset) */
+       if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
+@@ -4093,10 +4124,10 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
+               offset += qed_grc_dump_phy(p_hwfn,
+                                          p_ptt, dump_buf + offset, dump);
+-      /* Dump static debug data  */
++      /* Dump static debug data (only if not during debug bus recording) */
+       if (qed_grc_is_included(p_hwfn,
+                               DBG_GRC_PARAM_DUMP_STATIC) &&
+-          dev_data->bus.state == DBG_BUS_STATE_IDLE)
++          (!dump || dev_data->bus.state == DBG_BUS_STATE_IDLE))
+               offset += qed_grc_dump_static_debug(p_hwfn,
+                                                   p_ptt,
+                                                   dump_buf + offset, dump);
+@@ -4250,7 +4281,8 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
+                                                         dump_buf + offset,
+                                                         dump,
+                                                         addr,
+-                                                        reg->size, wide_bus);
++                                                        reg->size, wide_bus,
++                                                        SPLIT_TYPE_NONE, 0);
+               }
+       }
+@@ -4373,7 +4405,8 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+                                                           next_reg_offset,
+                                                           dump, addr,
+                                                           reg->entry_size,
+-                                                          wide_bus);
++                                                          wide_bus,
++                                                          SPLIT_TYPE_NONE, 0);
+                       }
+                       /* Call rule condition function.
+@@ -4723,7 +4756,8 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
+                                         dump_buf + offset,
+                                         dump,
+                                         BYTES_TO_DWORDS(trace_data_grc_addr),
+-                                        trace_data_size_dwords, false);
++                                        trace_data_size_dwords, false,
++                                        SPLIT_TYPE_NONE, 0);
+       /* Resume MCP (only if halt succeeded) */
+       if (halted && qed_mcp_resume(p_hwfn, p_ptt))
+@@ -4829,7 +4863,8 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
+                                                 true,
+                                                 addr,
+                                                 len,
+-                                                true);
++                                                true, SPLIT_TYPE_NONE,
++                                                0);
+               fifo_has_data = qed_rd(p_hwfn, p_ptt,
+                                      GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
+       }
+@@ -4898,7 +4933,8 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
+                                                 true,
+                                                 addr,
+                                                 len,
+-                                                true);
++                                                true, SPLIT_TYPE_NONE,
++                                                0);
+               fifo_has_data = qed_rd(p_hwfn, p_ptt,
+                                      IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
+       }
+@@ -4956,7 +4992,7 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
+                                         true,
+                                         addr,
+                                         override_window_dwords,
+-                                        true);
++                                        true, SPLIT_TYPE_NONE, 0);
+       qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
+                          override_window_dwords);
+ out:
+@@ -4998,7 +5034,7 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
+                       continue;
+               /* Read FW info for the current Storm */
+-              qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
++              qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
+               asserts = &fw_info.fw_asserts_section;
+@@ -5036,7 +5072,7 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
+                                           dump_buf + offset,
+                                           dump, addr,
+                                           asserts->list_element_dword_size,
+-                                          false);
++                                          false, SPLIT_TYPE_NONE, 0);
+       }
+       /* Dump last section */
+@@ -5063,6 +5099,28 @@ enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
+       return DBG_STATUS_OK;
+ }
++bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
++                    struct qed_ptt *p_ptt, struct fw_info *fw_info)
++{
++      struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
++      u8 storm_id;
++
++      for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
++              struct storm_defs *storm = &s_storm_defs[storm_id];
++
++              /* Skip Storm if it's in reset */
++              if (dev_data->block_in_reset[storm->block_id])
++                      continue;
++
++              /* Read FW info for the current Storm */
++              qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, fw_info);
++
++              return true;
++      }
++
++      return false;
++}
++
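
A hedged usage sketch of the new helper (the DP_NOTICE message here is illustrative only); callers need a live ptt window and must handle the case where every Storm is in reset:

static void example_print_fw_info(struct qed_hwfn *p_hwfn,
                                  struct qed_ptt *p_ptt)
{
        struct fw_info fw_info;

        if (!qed_read_fw_info(p_hwfn, p_ptt, &fw_info)) {
                DP_NOTICE(p_hwfn, "FW info unavailable, all Storms in reset\n");
                return;
        }

        /* fw_info now holds the FW version, image id, timestamp, etc. */
}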
+ /* Assign default GRC param values */
+ void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
+ {
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
+index fde20fd..b285edc 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
+@@ -2792,7 +2792,7 @@ static void qed_hw_info_port_num_bb(struct qed_hwfn *p_hwfn,
+ {
+       u32 port_mode;
+-      port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB_B0);
++      port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB);
+       if (port_mode < 3) {
+               p_hwfn->cdev->num_ports_in_engine = 1;
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+index b9704be..bee10c1 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+@@ -1095,14 +1095,16 @@ enum personality_type {
+ struct pf_start_tunnel_config {
+       u8 set_vxlan_udp_port_flg;
+       u8 set_geneve_udp_port_flg;
++      u8 set_no_inner_l2_vxlan_udp_port_flg;
+       u8 tunnel_clss_vxlan;
+       u8 tunnel_clss_l2geneve;
+       u8 tunnel_clss_ipgeneve;
+       u8 tunnel_clss_l2gre;
+       u8 tunnel_clss_ipgre;
+-      u8 reserved;
+       __le16 vxlan_udp_port;
+       __le16 geneve_udp_port;
++      __le16 no_inner_l2_vxlan_udp_port;
++      __le16 reserved[3];
+ };
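
The new fields follow the same flag-plus-port pattern as the existing VXLAN/GENEVE pairs: set the flag and supply the port in little-endian form. A minimal sketch (port 4790 is purely illustrative):

static void example_no_inner_l2_vxlan(struct pf_start_tunnel_config *cfg)
{
        /* Request a UDP port for VXLAN without an inner L2 header */
        cfg->set_no_inner_l2_vxlan_udp_port_flg = 1;
        cfg->no_inner_l2_vxlan_udp_port = cpu_to_le16(4790);
}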
+ /* Ramrod data for PF start ramrod */
+@@ -1145,14 +1147,17 @@ struct pf_update_tunnel_config {
+       u8 update_rx_def_non_ucast_clss;
+       u8 set_vxlan_udp_port_flg;
+       u8 set_geneve_udp_port_flg;
++      u8 set_no_inner_l2_vxlan_udp_port_flg;
+       u8 tunnel_clss_vxlan;
+       u8 tunnel_clss_l2geneve;
+       u8 tunnel_clss_ipgeneve;
+       u8 tunnel_clss_l2gre;
+       u8 tunnel_clss_ipgre;
++      u8 reserved;
+       __le16 vxlan_udp_port;
+       __le16 geneve_udp_port;
+-      __le16 reserved;
++      __le16 no_inner_l2_vxlan_udp_port;
++      __le16 reserved1[3];
+ };
+ /* Data for port update ramrod */
+@@ -2535,7 +2540,14 @@ struct idle_chk_data {
+       u16 reserved2;
+ };
+-/* Debug Tools data (per HW function) */
++struct pretend_params {
++      u8 split_type;
++      u8 reserved;
++      u16 split_id;
++};
++
++/* Debug Tools data (per HW function) */
+ struct dbg_tools_data {
+       struct dbg_grc_data grc;
+       struct dbg_bus_data bus;
+@@ -2544,8 +2556,13 @@ struct dbg_tools_data {
+       u8 block_in_reset[88];
+       u8 chip_id;
+       u8 platform_id;
++      u8 num_ports;
++      u8 num_pfs_per_port;
++      u8 num_vfs;
+       u8 initialized;
+       u8 use_dmae;
++      u8 reserved;
++      struct pretend_params pretend;
+       u32 num_regs_read;
+ };
+@@ -2975,6 +2992,24 @@ void qed_read_regs(struct qed_hwfn *p_hwfn,
+                  struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len);
+ /**
++ * @brief qed_read_fw_info - Reads FW info from the chip.
++ *
++ * The FW info contains FW-related information, such as the FW version,
++ * FW image (main/L2B/kuku), FW timestamp, etc.
++ * The FW info is read from the internal RAM of the first Storm that is not in
++ * reset.
++ *
++ * @param p_hwfn -        HW device data
++ * @param p_ptt -         Ptt window used for writing the registers.
++ * @param fw_info -   Out: a pointer to write the FW info into.
++ *
++ * @return true if the FW info was read successfully from one of the Storms,
++ * or false if all Storms are in reset.
++ */
++bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
++                    struct qed_ptt *p_ptt, struct fw_info *fw_info);
++
++/**
+  * @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their
+  *    default value.
+  *
+@@ -4110,6 +4145,21 @@ void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
+  */
+ void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
++#define NUM_STORMS 6
++
++/**
++ * @brief qed_set_rdma_error_level - Sets the RDMA assert level.
++ *                                   If the severity of an error exceeds
++ *                                   this level, the FW asserts.
++ * @param p_hwfn - HW device data
++ * @param p_ptt - ptt window used for writing the registers
++ * @param assert_level - An array of assert levels, one per storm.
++ */
++void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn,
++                            struct qed_ptt *p_ptt,
++                            u8 assert_level[NUM_STORMS]);
++
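
A hedged usage sketch; the level values themselves are FW-defined, so a uniform array is shown here purely for illustration:

static void example_set_assert_levels(struct qed_hwfn *p_hwfn,
                                      struct qed_ptt *p_ptt, u8 level)
{
        u8 levels[NUM_STORMS];

        /* Apply the same assert level to all six storms */
        memset(levels, level, sizeof(levels));
        qed_set_rdma_error_level(p_hwfn, p_ptt, levels);
}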
+ /* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
+ #define YSTORM_FLOW_CONTROL_MODE_OFFSET                       (IRO[0].base)
+ #define YSTORM_FLOW_CONTROL_MODE_SIZE                 (IRO[0].size)
+@@ -4340,27 +4390,67 @@ void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
+       (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1))
+ #define TSTORM_RDMA_QUEUE_STAT_SIZE                   (IRO[46].size)
++/* Xstorm error level for assert */
++#define XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
++      (IRO[47].base + ((pf_id) * IRO[47].m1))
++#define XSTORM_RDMA_ASSERT_LEVEL_SIZE                 (IRO[47].size)
++
++/* Ystorm error level for assert */
++#define YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
++      (IRO[48].base + ((pf_id) * IRO[48].m1))
++#define YSTORM_RDMA_ASSERT_LEVEL_SIZE                 (IRO[48].size)
++
++/* Pstorm error level for assert */
++#define PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
++      (IRO[49].base + ((pf_id) * IRO[49].m1))
++#define PSTORM_RDMA_ASSERT_LEVEL_SIZE                 (IRO[49].size)
++
++/* Tstorm error level for assert */
++#define TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
++      (IRO[50].base + ((pf_id) * IRO[50].m1))
++#define TSTORM_RDMA_ASSERT_LEVEL_SIZE                 (IRO[50].size)
++
++/* Mstorm error level for assert */
++#define MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
++      (IRO[51].base + ((pf_id) * IRO[51].m1))
++#define MSTORM_RDMA_ASSERT_LEVEL_SIZE                 (IRO[51].size)
++
++/* Ustorm error level for assert */
++#define USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
++      (IRO[52].base + ((pf_id) * IRO[52].m1))
++#define USTORM_RDMA_ASSERT_LEVEL_SIZE                 (IRO[52].size)
++
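
All six *_RDMA_ASSERT_LEVEL_OFFSET macros follow the standard IRO addressing pattern: a fixed base plus a per-PF stride (m1). A worked sketch with hypothetical numbers:

/* For an IRO entry with base B and stride S, PF n lives at B + n * S.
 * E.g. base 0x1000 and stride 0x8 (hypothetical values):
 * PF 3 -> 0x1000 + 3 * 0x8 = 0x1018.
 */
static u32 iro_pf_offset(u32 base, u16 m1, u8 pf_id)
{
        return base + pf_id * m1;
}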
+ /* Xstorm iWARP rxmit stats */
+ #define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \
+-      (IRO[47].base + ((pf_id) * IRO[47].m1))
+-#define XSTORM_IWARP_RXMIT_STATS_SIZE                 (IRO[47].size)
++      (IRO[53].base + ((pf_id) * IRO[53].m1))
++#define XSTORM_IWARP_RXMIT_STATS_SIZE                 (IRO[53].size)
+ /* Tstorm RoCE Event Statistics */
+ #define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \
+-      (IRO[48].base + ((roce_pf_id) * IRO[48].m1))
+-#define TSTORM_ROCE_EVENTS_STAT_SIZE                  (IRO[48].size)
++      (IRO[54].base + ((roce_pf_id) * IRO[54].m1))
++#define TSTORM_ROCE_EVENTS_STAT_SIZE                  (IRO[54].size)
+ /* DCQCN Received Statistics */
+ #define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) \
+-      (IRO[49].base + ((roce_pf_id) * IRO[49].m1))
+-#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE         (IRO[49].size)
++      (IRO[55].base + ((roce_pf_id) * IRO[55].m1))
++#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE         (IRO[55].size)
++
++/* RoCE Error Statistics */
++#define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) \
++      (IRO[56].base + ((roce_pf_id) * IRO[56].m1))
++#define YSTORM_ROCE_ERROR_STATS_SIZE                  (IRO[56].size)
+ /* DCQCN Sent Statistics */
+ #define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \
+-      (IRO[50].base + ((roce_pf_id) * IRO[50].m1))
+-#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE             (IRO[50].size)
++      (IRO[57].base + ((roce_pf_id) * IRO[57].m1))
++#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE             (IRO[57].size)
+-static const struct iro iro_arr[51] = {
++/* RoCE CQEs Statistics */
++#define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) \
++      (IRO[58].base + ((roce_pf_id) * IRO[58].m1))
++#define USTORM_ROCE_CQE_STATS_SIZE                    (IRO[58].size)
++
++static const struct iro iro_arr[59] = {
+       {0x0, 0x0, 0x0, 0x0, 0x8},
+       {0x4cb8, 0x88, 0x0, 0x0, 0x88},
+       {0x6530, 0x20, 0x0, 0x0, 0x20},
+@@ -4408,10 +4498,18 @@ static const struct iro iro_arr[51] = {
+       {0x10768, 0x20, 0x0, 0x0, 0x20},
+       {0x2d48, 0x80, 0x0, 0x0, 0x10},
+       {0x5048, 0x10, 0x0, 0x0, 0x10},
++      {0xc748, 0x8, 0x0, 0x0, 0x1},
++      {0xa128, 0x8, 0x0, 0x0, 0x1},
++      {0x10f00, 0x8, 0x0, 0x0, 0x1},
++      {0xf030, 0x8, 0x0, 0x0, 0x1},
++      {0x13028, 0x8, 0x0, 0x0, 0x1},
++      {0x12c58, 0x8, 0x0, 0x0, 0x1},
+       {0xc9b8, 0x30, 0x0, 0x0, 0x10},
+-      {0xed90, 0x10, 0x0, 0x0, 0x10},
+-      {0xa3a0, 0x10, 0x0, 0x0, 0x10},
++      {0xed90, 0x28, 0x0, 0x0, 0x28},
++      {0xa520, 0x18, 0x0, 0x0, 0x18},
++      {0xa6a0, 0x8, 0x0, 0x0, 0x8},
+       {0x13108, 0x8, 0x0, 0x0, 0x8},
++      {0x13c50, 0x18, 0x0, 0x0, 0x18},
+ };
+ /* Runtime array offsets */
+@@ -4797,147 +4895,147 @@ static const struct iro iro_arr[51] = {
+ #define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET             39769
+ #define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE                       16
+ #define NIG_REG_TX_EDPM_CTRL_RT_OFFSET                                39785
+-#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET              39786
+-#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET                   39787
+-#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE                     8
+-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET                39795
+-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_SIZE          1024
+-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET           40819
+-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_SIZE             512
+-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET         41331
+-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_SIZE           512
+-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET        41843
+-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE  512
+-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET      42355
+-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_SIZE                512
+-#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET              42867
+-#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_SIZE                        32
+-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET                     42899
+-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET                     42900
+-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET                     42901
+-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET                 42902
+-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET                 42903
+-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET                 42904
+-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET                 42905
+-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET              42906
+-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET              42907
+-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET              42908
+-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET              42909
+-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET                  42910
+-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET                       42911
+-#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET                     42912
+-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET                        42913
+-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET              42914
+-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET                 42915
+-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET          42916
+-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET              42917
+-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET                 42918
+-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET          42919
+-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET              42920
+-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET                 42921
+-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET          42922
+-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET              42923
+-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET                 42924
+-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET          42925
+-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET              42926
+-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET                 42927
+-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET          42928
+-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET              42929
+-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET                 42930
+-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET          42931
+-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET              42932
+-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET                 42933
+-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET          42934
+-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET              42935
+-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET                 42936
+-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET          42937
+-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET              42938
+-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET                 42939
+-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET          42940
+-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET              42941
+-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET                 42942
+-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET          42943
+-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET             42944
+-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET                        42945
+-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET         42946
+-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET             42947
+-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET                        42948
+-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET         42949
+-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET             42950
+-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET                        42951
+-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET         42952
+-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET             42953
+-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET                        42954
+-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET         42955
+-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET             42956
+-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET                        42957
+-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET         42958
+-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET             42959
+-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET                        42960
+-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET         42961
+-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET             42962
+-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET                        42963
+-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET         42964
+-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET             42965
+-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET                        42966
+-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET         42967
+-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET             42968
+-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET                        42969
+-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET         42970
+-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET             42971
+-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET                        42972
+-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET         42973
+-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET             42974
+-#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET                        42975
+-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET         42976
+-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET             42977
+-#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET                        42978
+-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET         42979
+-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET             42980
+-#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET                        42981
+-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET         42982
+-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET             42983
+-#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET                        42984
+-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET         42985
+-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET             42986
+-#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET                        42987
+-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET         42988
+-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET             42989
+-#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET                        42990
+-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET         42991
+-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET             42992
+-#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET                        42993
+-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET         42994
+-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET             42995
+-#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET                        42996
+-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET         42997
+-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET             42998
+-#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET                        42999
+-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET         43000
+-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET             43001
+-#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET                        43002
+-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET         43003
+-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET             43004
+-#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET                        43005
+-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET         43006
+-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET             43007
+-#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET                        43008
+-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET         43009
+-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET             43010
+-#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET                        43011
+-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET         43012
+-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET             43013
+-#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET                        43014
+-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET         43015
+-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET             43016
+-#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET                        43017
+-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET         43018
+-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET             43019
+-#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET                        43020
+-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET         43021
+-#define XCM_REG_CON_PHY_Q3_RT_OFFSET                          43022
+-
+-#define RUNTIME_ARRAY_SIZE    43023
++#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET                             39786
++#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE                               8
++#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET                  39794
++#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_SIZE                    1024
++#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET                     40818
++#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_SIZE                       512
++#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET                   41330
++#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_SIZE                     512
++#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET          41842
++#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE            512
++#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET                42354
++#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_SIZE                  512
++#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET                        42866
++#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_SIZE                          32
++#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET                               42898
++#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET                               42899
++#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET                               42900
++#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET                           42901
++#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET                           42902
++#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET                           42903
++#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET                           42904
++#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET                        42905
++#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET                        42906
++#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET                        42907
++#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET                        42908
++#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET                            42909
++#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET                         42910
++#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET                               42911
++#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET                          42912
++#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET                        42913
++#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET                           42914
++#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET                    42915
++#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET                        42916
++#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET                           42917
++#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET                    42918
++#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET                        42919
++#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET                           42920
++#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET                    42921
++#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET                        42922
++#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET                           42923
++#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET                    42924
++#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET                        42925
++#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET                           42926
++#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET                    42927
++#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET                        42928
++#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET                           42929
++#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET                    42930
++#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET                        42931
++#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET                           42932
++#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET                    42933
++#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET                        42934
++#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET                           42935
++#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET                    42936
++#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET                        42937
++#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET                           42938
++#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET                    42939
++#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET                        42940
++#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET                           42941
++#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET                    42942
++#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET                       42943
++#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET                          42944
++#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET                   42945
++#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET                       42946
++#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET                          42947
++#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET                   42948
++#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET                       42949
++#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET                          42950
++#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET                   42951
++#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET                       42952
++#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET                          42953
++#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET                   42954
++#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET                       42955
++#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET                          42956
++#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET                   42957
++#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET                       42958
++#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET                          42959
++#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET                   42960
++#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET                       42961
++#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET                          42962
++#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET                   42963
++#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET                       42964
++#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET                          42965
++#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET                   42966
++#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET                       42967
++#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET                          42968
++#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET                   42969
++#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET                       42970
++#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET                          42971
++#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET                   42972
++#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET                       42973
++#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET                          42974
++#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET                   42975
++#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET                       42976
++#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET                          42977
++#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET                   42978
++#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET                       42979
++#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET                          42980
++#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET                   42981
++#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET                       42982
++#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET                          42983
++#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET                   42984
++#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET                       42985
++#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET                          42986
++#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET                   42987
++#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET                       42988
++#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET                          42989
++#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET                   42990
++#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET                       42991
++#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET                          42992
++#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET                   42993
++#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET                       42994
++#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET                          42995
++#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET                   42996
++#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET                       42997
++#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET                          42998
++#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET                   42999
++#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET                       43000
++#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET                          43001
++#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET                   43002
++#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET                       43003
++#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET                          43004
++#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET                   43005
++#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET                       43006
++#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET                          43007
++#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET                   43008
++#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET                       43009
++#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET                          43010
++#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET                   43011
++#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET                       43012
++#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET                          43013
++#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET                   43014
++#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET                       43015
++#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET                          43016
++#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET                   43017
++#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET                       43018
++#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET                          43019
++#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET                   43020
++#define XCM_REG_CON_PHY_Q3_RT_OFFSET                                    43021
++
++#define RUNTIME_ARRAY_SIZE 43022
++
+ /* Init Callbacks */
+ #define DMAE_READY_CB 0
+@@ -5694,8 +5792,10 @@ struct eth_vport_rx_mode {
+ #define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT      4
+ #define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_MASK               0x1
+ #define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT      5
+-#define ETH_VPORT_RX_MODE_RESERVED1_MASK              0x3FF
+-#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT             6
++#define ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI_MASK         0x1
++#define ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI_SHIFT                6
++#define ETH_VPORT_RX_MODE_RESERVED1_MASK              0x1FF
++#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT             7
+ };
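
The new ACCEPT_ANY_VNI bit is driven through the same mask/shift accessors as the other rx-mode flags. A minimal sketch, assuming the bitfield is staged in a host-order u16 before conversion:

static __le16 example_accept_any_vni(void)
{
        u16 state = 0;

        SET_FIELD(state, ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI, 1);
        return cpu_to_le16(state);
}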
+ /* Command for setting tpa parameters */
+@@ -6756,7 +6856,7 @@ struct e4_ystorm_rdma_task_ag_ctx {
+ #define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK               0x1
+ #define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT      7
+       u8 key;
+-      __le32 mw_cnt;
++      __le32 mw_cnt_or_qp_id;
+       u8 ref_cnt_seq;
+       u8 ctx_upd_seq;
+       __le16 dif_flags;
+@@ -6812,7 +6912,7 @@ struct e4_mstorm_rdma_task_ag_ctx {
+ #define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK               0x1
+ #define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT      7
+       u8 key;
+-      __le32 mw_cnt;
++      __le32 mw_cnt_or_qp_id;
+       u8 ref_cnt_seq;
+       u8 ctx_upd_seq;
+       __le16 dif_flags;
+@@ -7075,8 +7175,7 @@ struct rdma_register_tid_ramrod_data {
+       struct regpair va;
+       struct regpair pbl_base;
+       struct regpair dif_error_addr;
+-      struct regpair dif_runt_addr;
+-      __le32 reserved4[2];
++      __le32 reserved4[4];
+ };
+ /* rdma resize cq output params */
+@@ -7144,8 +7243,7 @@ struct rdma_srq_modify_ramrod_data {
+ enum rdma_tid_type {
+       RDMA_TID_REGISTERED_MR,
+       RDMA_TID_FMR,
+-      RDMA_TID_MW_TYPE1,
+-      RDMA_TID_MW_TYPE2A,
++      RDMA_TID_MW,
+       MAX_RDMA_TID_TYPE
+ };
+@@ -7681,6 +7779,16 @@ struct e4_roce_conn_context {
+       struct ustorm_roce_conn_st_ctx ustorm_st_context;
+ };
++/* roce cqes statistics */
++struct roce_cqe_stats {
++      __le32 req_cqe_error;
++      __le32 req_remote_access_errors;
++      __le32 req_remote_invalid_request;
++      __le32 resp_cqe_error;
++      __le32 resp_local_length_error;
++      __le32 reserved;
++};
++
+ /* roce create qp requester ramrod data */
+ struct roce_create_qp_req_ramrod_data {
+       __le16 flags;
+@@ -7798,8 +7906,8 @@ struct roce_dcqcn_sent_stats {
+ /* RoCE destroy qp requester output params */
+ struct roce_destroy_qp_req_output_params {
+-      __le32 num_bound_mw;
+       __le32 cq_prod;
++      __le32 reserved;
+ };
+ /* RoCE destroy qp requester ramrod data */
+@@ -7809,8 +7917,8 @@ struct roce_destroy_qp_req_ramrod_data {
+ /* RoCE destroy qp responder output params */
+ struct roce_destroy_qp_resp_output_params {
+-      __le32 num_invalidated_mw;
+       __le32 cq_prod;
++      __le32 reserved;
+ };
+ /* RoCE destroy qp responder ramrod data */
+@@ -7818,16 +7926,27 @@ struct roce_destroy_qp_resp_ramrod_data {
+       struct regpair output_params_addr;
+ };
++/* roce error statistics */
++struct roce_error_stats {
++      __le32 resp_remote_access_errors;
++      __le32 reserved;
++};
++
+ /* roce special events statistics */
+ struct roce_events_stats {
+-      __le16 silent_drops;
+-      __le16 rnr_naks_sent;
++      __le32 silent_drops;
++      __le32 rnr_naks_sent;
+       __le32 retransmit_count;
+       __le32 icrc_error_count;
+-      __le32 reserved;
++      __le32 implied_nak_seq_err;
++      __le32 duplicate_request;
++      __le32 local_ack_timeout_err;
++      __le32 out_of_sequence;
++      __le32 packet_seq_err;
++      __le32 rnr_nak_retry_err;
+ };
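
Since the first two counters were widened from __le16 to __le32, every consumer must now use le32_to_cpu() uniformly. A hedged sketch of reading the expanded statistics:

static void example_read_roce_events(const struct roce_events_stats *s)
{
        u32 drops = le32_to_cpu(s->silent_drops);
        u32 rnr_naks = le32_to_cpu(s->rnr_naks_sent);
        u32 oos = le32_to_cpu(s->out_of_sequence);

        pr_debug("drops %u, RNR NAKs %u, out-of-sequence %u\n",
                 drops, rnr_naks, oos);
}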
+-/* ROCE slow path EQ cmd IDs */
++/* roce slow path EQ cmd IDs */
+ enum roce_event_opcode {
+       ROCE_EVENT_CREATE_QP = 11,
+       ROCE_EVENT_MODIFY_QP,
+@@ -7845,6 +7964,9 @@ struct roce_init_func_params {
+       u8 cnp_dscp;
+       u8 reserved;
+       __le32 cnp_send_timeout;
++      __le16 rl_offset;
++      u8 rl_count_log;
++      u8 reserved1[5];
+ };
+ /* roce func init ramrod data */
+@@ -8532,7 +8654,7 @@ struct e4_tstorm_roce_resp_conn_ag_ctx {
+       __le16 rq_prod;
+       __le16 conn_dpi;
+       __le16 irq_cons;
+-      __le32 num_invlidated_mw;
++      __le32 reg9;
+       __le32 reg10;
+ };
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
+index fca2dbd..70504dc 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
+@@ -360,6 +360,26 @@ void qed_port_unpretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+              *(u32 *)&p_ptt->pxp.pretend);
+ }
++void qed_port_fid_pretend(struct qed_hwfn *p_hwfn,
++                        struct qed_ptt *p_ptt, u8 port_id, u16 fid)
++{
++      u16 control = 0;
++
++      SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
++      SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
++      SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
++      SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
++      SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);
++      if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
++              fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);
++      p_ptt->pxp.pretend.control = cpu_to_le16(control);
++      p_ptt->pxp.pretend.fid.concrete_fid.fid = cpu_to_le16(fid);
++      REG_WR(p_hwfn,
++             qed_ptt_config_addr(p_ptt) +
++             offsetof(struct pxp_ptt_entry, pretend),
++             *(u32 *)&p_ptt->pxp.pretend);
++}
++
+ u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid)
+ {
+       u32 concrete_fid = 0;
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h
+index 8db2839..505e94d 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_hw.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h
+@@ -245,6 +245,18 @@ void qed_port_unpretend(struct qed_hwfn *p_hwfn,
+                       struct qed_ptt *p_ptt);
+ /**
++ * @brief qed_port_fid_pretend - pretend to another port and another function
++ *        when accessing the ptt window
++ *
++ * @param p_hwfn
++ * @param p_ptt
++ * @param port_id - the port to pretend to
++ * @param fid - fid field of pxp_pretend structure. Can contain either pf / vf.
++ */
++void qed_port_fid_pretend(struct qed_hwfn *p_hwfn,
++                        struct qed_ptt *p_ptt, u8 port_id, u16 fid);
++
++/**
+  * @brief qed_vfid_to_concrete - build a concrete FID for a
+  *        given VF ID
+  *
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+index 1365da7..d845bad 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+@@ -1245,7 +1245,7 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
+                   bool udp,
+                   bool ipv4, bool ipv6, enum gft_profile_type profile_type)
+ {
+-      u32 reg_val, cam_line, ram_line_lo, ram_line_hi;
++      u32 reg_val, cam_line, ram_line_lo, ram_line_hi, search_non_ip_as_gft;
+       if (!ipv6 && !ipv4)
+               DP_NOTICE(p_hwfn,
+@@ -1314,6 +1314,9 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
+       ram_line_lo = 0;
+       ram_line_hi = 0;
++      /* Search no IP as GFT */
++      search_non_ip_as_gft = 0;
++
+       /* Tunnel type */
+       SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
+       SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);
+@@ -1337,9 +1340,14 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
+               SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+       } else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) {
+               SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);
++
++              /* Allow tunneled traffic without inner IP */
++              search_non_ip_as_gft = 1;
+       }
+       qed_wr(p_hwfn,
++             p_ptt, PRS_REG_SEARCH_NON_IP_AS_GFT, search_non_ip_as_gft);
++      qed_wr(p_hwfn,
+              p_ptt,
+              PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
+              ram_line_lo);
+@@ -1509,3 +1517,43 @@ void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
+       ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
+       qed_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
+ }
++
++static u32 qed_get_rdma_assert_ram_addr(struct qed_hwfn *p_hwfn, u8 storm_id)
++{
++      switch (storm_id) {
++      case 0:
++              return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
++                  TSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
++      case 1:
++              return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
++                  MSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
++      case 2:
++              return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
++                  USTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
++      case 3:
++              return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
++                  XSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
++      case 4:
++              return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
++                  YSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
++      case 5:
++              return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
++                  PSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
++
++      default:
++              return 0;
++      }
++}
++
++void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn,
++                            struct qed_ptt *p_ptt,
++                            u8 assert_level[NUM_STORMS])
++{
++      u8 storm_id;
++
++      for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
++              u32 ram_addr = qed_get_rdma_assert_ram_addr(p_hwfn, storm_id);
++
++              qed_wr(p_hwfn, p_ptt, ram_addr, assert_level[storm_id]);
++      }
++}
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+index 474e6cf..90a2b53 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+@@ -1159,7 +1159,6 @@ int qed_iwarp_connect(void *rdma_cxt,
+       struct qed_iwarp_info *iwarp_info;
+       struct qed_iwarp_ep *ep;
+       u8 mpa_data_size = 0;
+-      u8 ts_hdr_size = 0;
+       u32 cid;
+       int rc;
+@@ -1218,10 +1217,7 @@ int qed_iwarp_connect(void *rdma_cxt,
+              iparams->cm_info.private_data,
+              iparams->cm_info.private_data_len);
+-      if (p_hwfn->p_rdma_info->iwarp.tcp_flags & QED_IWARP_TS_EN)
+-              ts_hdr_size = TIMESTAMP_HEADER_SIZE;
+-
+-      ep->mss = iparams->mss - ts_hdr_size;
++      ep->mss = iparams->mss;
+       ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);
+       ep->event_cb = iparams->event_cb;
+@@ -2337,7 +2333,6 @@ qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
+       u8 local_mac_addr[ETH_ALEN];
+       struct qed_iwarp_ep *ep;
+       int tcp_start_offset;
+-      u8 ts_hdr_size = 0;
+       u8 ll2_syn_handle;
+       int payload_len;
+       u32 hdr_size;
+@@ -2415,11 +2410,7 @@ qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
+       memcpy(&ep->cm_info, &cm_info, sizeof(ep->cm_info));
+-      if (p_hwfn->p_rdma_info->iwarp.tcp_flags & QED_IWARP_TS_EN)
+-              ts_hdr_size = TIMESTAMP_HEADER_SIZE;
+-
+-      hdr_size = ((cm_info.ip_version == QED_TCP_IPV4) ? 40 : 60) +
+-                 ts_hdr_size;
++      hdr_size = ((cm_info.ip_version == QED_TCP_IPV4) ? 40 : 60);
+       ep->mss = p_hwfn->p_rdma_info->iwarp.max_mtu - hdr_size;
+       ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
+index eed4725..1f6ac848 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
+@@ -586,6 +586,9 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
+               SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
+                         !!(accept_filter & QED_ACCEPT_BCAST));
++              SET_FIELD(state, ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI,
++                        !!(accept_filter & QED_ACCEPT_ANY_VNI));
++
+               p_ramrod->rx_mode.state = cpu_to_le16(state);
+               DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                          "p_ramrod->rx_mode.state = 0x%x\n", state);
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
+index c4030e9..806a8da 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
+@@ -183,6 +183,7 @@ struct qed_filter_accept_flags {
+ #define QED_ACCEPT_MCAST_MATCHED        0x08
+ #define QED_ACCEPT_MCAST_UNMATCHED      0x10
+ #define QED_ACCEPT_BCAST                0x20
++#define QED_ACCEPT_ANY_VNI              0x40
+ };
+ struct qed_arfs_config_params {
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+index b870510..101d677 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+@@ -1508,11 +1508,8 @@ qed_rdma_register_tid(void *rdma_cxt,
+       case QED_RDMA_TID_FMR:
+               tid_type = RDMA_TID_FMR;
+               break;
+-      case QED_RDMA_TID_MW_TYPE1:
+-              tid_type = RDMA_TID_MW_TYPE1;
+-              break;
+-      case QED_RDMA_TID_MW_TYPE2A:
+-              tid_type = RDMA_TID_MW_TYPE2A;
++      case QED_RDMA_TID_MW:
++              tid_type = RDMA_TID_MW;
+               break;
+       default:
+               rc = -EINVAL;
+@@ -1544,7 +1541,6 @@ qed_rdma_register_tid(void *rdma_cxt,
+                         RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1);
+               DMA_REGPAIR_LE(p_ramrod->dif_error_addr,
+                              params->dif_error_addr);
+-              DMA_REGPAIR_LE(p_ramrod->dif_runt_addr, params->dif_runt_addr);
+       }
+       rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+index f712205..d8ad2dc 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+@@ -178,7 +178,7 @@
+       0x008c80UL
+ #define  MCP_REG_SCRATCH      \
+       0xe20000UL
+-#define  CNIG_REG_NW_PORT_MODE_BB_B0 \
++#define  CNIG_REG_NW_PORT_MODE_BB \
+       0x218200UL
+ #define  MISCS_REG_CHIP_NUM \
+       0x00976cUL
+@@ -1621,6 +1621,7 @@
+ #define NIG_REG_TX_EDPM_CTRL_TX_EDPM_TC_EN_SHIFT 1
+ #define PRS_REG_SEARCH_GFT 0x1f11bcUL
++#define PRS_REG_SEARCH_NON_IP_AS_GFT 0x1f11c0UL
+ #define PRS_REG_CM_HDR_GFT 0x1f11c8UL
+ #define PRS_REG_GFT_CAM 0x1f1100UL
+ #define PRS_REG_GFT_PROFILE_MASK_RAM 0x1f1000UL
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
+index ee57fcd..b5ce158 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
+@@ -681,7 +681,6 @@ static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
+ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
+                                           struct qed_rdma_qp *qp,
+-                                          u32 *num_invalidated_mw,
+                                           u32 *cq_prod)
+ {
+       struct roce_destroy_qp_resp_output_params *p_ramrod_res;
+@@ -692,8 +691,6 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
+       int rc;
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+-
+-      *num_invalidated_mw = 0;
+       *cq_prod = qp->cq_prod;
+       if (!qp->resp_offloaded) {
+@@ -742,7 +739,6 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
+       if (rc)
+               goto err;
+-      *num_invalidated_mw = le32_to_cpu(p_ramrod_res->num_invalidated_mw);
+       *cq_prod = le32_to_cpu(p_ramrod_res->cq_prod);
+       qp->cq_prod = *cq_prod;
+@@ -764,8 +760,7 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
+ }
+ static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
+-                                          struct qed_rdma_qp *qp,
+-                                          u32 *num_bound_mw)
++                                          struct qed_rdma_qp *qp)
+ {
+       struct roce_destroy_qp_req_output_params *p_ramrod_res;
+       struct roce_destroy_qp_req_ramrod_data *p_ramrod;
+@@ -807,7 +802,6 @@ static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
+       if (rc)
+               goto err;
+-      *num_bound_mw = le32_to_cpu(p_ramrod_res->num_bound_mw);
+       /* Free ORQ - only if ramrod succeeded, in case FW is still using it */
+       dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+@@ -968,8 +962,6 @@ int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
+ int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
+ {
+-      u32 num_invalidated_mw = 0;
+-      u32 num_bound_mw = 0;
+       u32 cq_prod;
+       int rc;
+@@ -984,22 +976,14 @@ int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
+       if (qp->cur_state != QED_ROCE_QP_STATE_RESET) {
+               rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
+-                                                    &num_invalidated_mw,
+                                                     &cq_prod);
+               if (rc)
+                       return rc;
+               /* Send destroy requester ramrod */
+-              rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
+-                                                    &num_bound_mw);
++              rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp);
+               if (rc)
+                       return rc;
+-
+-              if (num_invalidated_mw != num_bound_mw) {
+-                      DP_NOTICE(p_hwfn,
+-                                "number of invalidate memory windows is different from bounded ones\n");
+-                      return -EINVAL;
+-              }
+       }
+       return 0;
+@@ -1010,7 +994,6 @@ int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
+                      enum qed_roce_qp_state prev_state,
+                      struct qed_rdma_modify_qp_in_params *params)
+ {
+-      u32 num_invalidated_mw = 0, num_bound_mw = 0;
+       int rc = 0;
+       /* Perform additional operations according to the current state and the
+@@ -1090,7 +1073,6 @@ int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
+               /* Send destroy responder ramrod */
+               rc = qed_roce_sp_destroy_qp_responder(p_hwfn,
+                                                     qp,
+-                                                    &num_invalidated_mw,
+                                                     &cq_prod);
+               if (rc)
+@@ -1098,14 +1080,7 @@ int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
+               qp->cq_prod = cq_prod;
+-              rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
+-                                                    &num_bound_mw);
+-
+-              if (num_invalidated_mw != num_bound_mw) {
+-                      DP_NOTICE(p_hwfn,
+-                                "number of invalidate memory windows is different from bounded ones\n");
+-                      return -EINVAL;
+-              }
++              rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp);
+       } else {
+               DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n");
+       }
+diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
+index 13c8ab1..0081fa6 100644
+--- a/include/linux/qed/common_hsi.h
++++ b/include/linux/qed/common_hsi.h
+@@ -109,8 +109,8 @@
+ #define MAX_NUM_LL2_TX_STATS_COUNTERS 48
+ #define FW_MAJOR_VERSION      8
+-#define FW_MINOR_VERSION      33
+-#define FW_REVISION_VERSION     11
++#define FW_MINOR_VERSION        37
++#define FW_REVISION_VERSION     2
+ #define FW_ENGINEERING_VERSION        0
+ /***********************/
+diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h
+index 938df61..b34c573 100644
+--- a/include/linux/qed/iscsi_common.h
++++ b/include/linux/qed/iscsi_common.h
+@@ -799,8 +799,8 @@ struct e4_mstorm_iscsi_task_ag_ctx {
+ #define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT     0
+ #define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK         0x1
+ #define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT                4
+-#define E4_MSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK                 0x1
+-#define E4_MSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT                        5
++#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK   0x1
++#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT  5
+ #define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK                        0x1
+ #define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT                       6
+ #define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK    0x1
+@@ -849,8 +849,8 @@ struct e4_ustorm_iscsi_task_ag_ctx {
+ #define E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT     0
+ #define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK         0x1
+ #define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT                4
+-#define E4_USTORM_ISCSI_TASK_AG_CTX_BIT1_MASK                 0x1
+-#define E4_USTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT                        5
++#define E4_USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK     0x1
++#define E4_USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT    5
+ #define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK                0x3
+ #define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT               6
+       u8 flags1;
+diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h
+index e05e320..df4d13f 100644
+--- a/include/linux/qed/qed_rdma_if.h
++++ b/include/linux/qed/qed_rdma_if.h
+@@ -65,8 +65,7 @@ enum qed_roce_qp_state {
+ enum qed_rdma_tid_type {
+       QED_RDMA_TID_REGISTERED_MR,
+       QED_RDMA_TID_FMR,
+-      QED_RDMA_TID_MW_TYPE1,
+-      QED_RDMA_TID_MW_TYPE2A
++      QED_RDMA_TID_MW
+ };
+ struct qed_rdma_events {
+@@ -280,7 +279,6 @@ struct qed_rdma_register_tid_in_params {
+       bool dif_enabled;
+       u64 dif_error_addr;
+-      u64 dif_runt_addr;
+ };
+ struct qed_rdma_create_cq_in_params {
+diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h
+index 193bcef3..473fba7 100644
+--- a/include/linux/qed/roce_common.h
++++ b/include/linux/qed/roce_common.h
+@@ -43,6 +43,7 @@
+ #define ROCE_MAX_QPS                  (32 * 1024)
+ #define ROCE_DCQCN_NP_MAX_QPS         (64)
+ #define ROCE_DCQCN_RP_MAX_QPS         (64)
++#define ROCE_LKEY_MW_DIF_EN_BIT               (28)
+ /* Affiliated asynchronous events / errors enumeration */
+ enum roce_async_events_type {
+-- 
+2.9.5
+
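A standalone illustration of the MASK/SHIFT bitfield pattern used throughout
the hunks above (e.g. in qed_port_fid_pretend). The macros and field values
below are simplified stand-ins for the driver's SET_FIELD/GET_FIELD helpers,
not the actual qed definitions:

#include <stdint.h>
#include <stdio.h>

/* made-up field layout, for illustration only */
#define PRETEND_CMD_PORT_MASK      0xF
#define PRETEND_CMD_PORT_SHIFT     0
#define PRETEND_CMD_USE_PORT_MASK  0x1
#define PRETEND_CMD_USE_PORT_SHIFT 4

#define SET_FIELD(val, name, f) \
        ((val) = ((val) & ~((name##_MASK) << (name##_SHIFT))) | \
                 (((f) & (name##_MASK)) << (name##_SHIFT)))
#define GET_FIELD(val, name) \
        (((val) >> (name##_SHIFT)) & (name##_MASK))

int main(void)
{
        uint16_t control = 0;

        SET_FIELD(control, PRETEND_CMD_PORT, 2);     /* pretend to port 2 */
        SET_FIELD(control, PRETEND_CMD_USE_PORT, 1); /* enable port pretend */
        printf("control=0x%04x port=%u\n", (unsigned)control,
               (unsigned)GET_FIELD(control, PRETEND_CMD_PORT));
        return 0;
}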
diff --git a/linux-next-cherry-picks/0034-RDMA-qedr-Fix-NULL-pointer-dereference-when-running-.patch b/linux-next-cherry-picks/0034-RDMA-qedr-Fix-NULL-pointer-dereference-when-running-.patch
new file mode 100644 (file)
index 0000000..9e31282
--- /dev/null
@@ -0,0 +1,41 @@
+From 425cf5c1350a98b81f3ddda160b99c3be613a213 Mon Sep 17 00:00:00 2001
+From: "Kalderon, Michal" <Michal.Kalderon@cavium.com>
+Date: Mon, 11 Jun 2018 10:20:20 +0300
+Subject: [PATCH 34/44] RDMA/qedr: Fix NULL pointer dereference when running
+ over iWARP without RDMA-CM
+
+Some RoCE-specific code in qedr_modify_qp was run over an iWARP device
+when running perftest benchmarks without the -R option.
+
+The commit 3e44e0ee0893 ("IB/providers: Avoid null netdev check for RoCE")
+exposed this. Dropping the check for NULL pointer on ndev in
+qedr_modify_qp led to a NULL pointer dereference when running over
+iWARP. Previously, the code would identify ndev as NULL and return an
+error.
+
+Fixes: 3e44e0ee0893 ("IB/providers: Avoid null netdev check for RoCE")
+Signed-off-by: Ariel Elior <Ariel.Elior@cavium.com>
+Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
+Reviewed-by: Parav Pandit <parav@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+---
+ drivers/infiniband/hw/qedr/verbs.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
+index 614a954..f9b1984 100644
+--- a/drivers/infiniband/hw/qedr/verbs.c
++++ b/drivers/infiniband/hw/qedr/verbs.c
+@@ -1957,6 +1957,9 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+       }
+       if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
++              if (rdma_protocol_iwarp(&dev->ibdev, 1))
++                      return -EINVAL;
++
+               if (attr_mask & IB_QP_PATH_MTU) {
+                       if (attr->path_mtu < IB_MTU_256 ||
+                           attr->path_mtu > IB_MTU_4096) {
+-- 
+2.9.5
+
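The one-line guard above is the whole fix: transport-specific verbs must check
the device's protocol before touching state the other transport doesn't own
(the real check uses rdma_protocol_iwarp() from the RDMA core). A standalone
sketch of the same idea, with hypothetical names and flag values:

#include <errno.h>

enum transport { TRANSPORT_ROCE, TRANSPORT_IWARP };     /* hypothetical */

#define QP_ATTR_AV       0x1    /* address vector (RoCE-managed) */
#define QP_ATTR_PATH_MTU 0x2

/* Reject RoCE-only attribute changes on an iWARP device; iWARP's
 * connection manager owns these fields, so there is no ndev/AV to read. */
static int modify_qp(enum transport t, unsigned int attr_mask)
{
        if ((attr_mask & (QP_ATTR_AV | QP_ATTR_PATH_MTU)) &&
            t == TRANSPORT_IWARP)
                return -EINVAL;
        /* ... apply the attributes ... */
        return 0;
}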
diff --git a/linux-next-cherry-picks/0038-qed-Fix-possible-memory-leak-in-Rx-error-path-handli.patch b/linux-next-cherry-picks/0038-qed-Fix-possible-memory-leak-in-Rx-error-path-handli.patch
new file mode 100644 (file)
index 0000000..543324b
--- /dev/null
@@ -0,0 +1,57 @@
+From 4f9de4df901fb84709fe3a864dfa4eaf35700f68 Mon Sep 17 00:00:00 2001
+From: Sudarsana Reddy Kalluru <sudarsana.kalluru@cavium.com>
+Date: Mon, 18 Jun 2018 21:58:00 -0700
+Subject: [PATCH 38/44] qed: Fix possible memory leak in Rx error path
+ handling.
+
+Memory for packet buffers needs to be freed in the error paths, as there is
+no consumer (e.g., upper layer) for such packets and that memory would never
+get freed otherwise.
+The issue was uncovered when a port was flooded with ISATAP packets; these
+are multicast packets and hence were directed at all the PFs. For the FCoE
+PF, this meant they were routed to the LL2 module, which in turn drops such
+packets.
+
+Fixes: 0a7fb11c ("qed: Add Light L2 support")
+Signed-off-by: Sudarsana Reddy Kalluru <Sudarsana.Kalluru@cavium.com>
+Signed-off-by: Ariel Elior <ariel.elior@cavium.com>
+Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/qlogic/qed/qed_ll2.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+index c97ebd6..012973d 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+@@ -201,8 +201,9 @@ void qed_ll2b_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data)
+       skb = build_skb(buffer->data, 0);
+       if (!skb) {
+-              rc = -ENOMEM;
+-              goto out_post;
++              DP_INFO(cdev, "Failed to build SKB\n");
++              kfree(buffer->data);
++              goto out_post1;
+       }
+       data->u.placement_offset += NET_SKB_PAD;
+@@ -224,8 +225,14 @@ void qed_ll2b_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data)
+               cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
+                                     data->opaque_data_0,
+                                     data->opaque_data_1);
++      } else {
++              DP_VERBOSE(p_hwfn, (NETIF_MSG_RX_STATUS | NETIF_MSG_PKTDATA |
++                                  QED_MSG_LL2 | QED_MSG_STORAGE),
++                         "Dropping the packet\n");
++              kfree(buffer->data);
+       }
++out_post1:
+       /* Update Buffer information and update FW producer */
+       buffer->data = new_data;
+       buffer->phys_addr = new_phys_addr;
+-- 
+2.9.5
+
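The fix amounts to an ownership rule: once the LL2 Rx path has taken a buffer
off the ring, every exit that does not hand it to a consumer must free it
before the slot is reposted. A minimal sketch of that rule, using simplified,
hypothetical types rather than the driver's own:

#include <stdlib.h>

struct rx_buffer { void *data; };

typedef void (*rx_cb_t)(void *pkt);

static void complete_rx(struct rx_buffer *buf, rx_cb_t consumer,
                        void *new_data)
{
        if (consumer)
                consumer(buf->data);    /* consumer now owns the old data */
        else
                free(buf->data);        /* no consumer: freeing avoids the leak */

        /* repost the ring slot with a fresh buffer either way */
        buf->data = new_data;
}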
diff --git a/linux-next-cherry-picks/0039-qed-Add-sanity-check-for-SIMD-fastpath-handler.patch b/linux-next-cherry-picks/0039-qed-Add-sanity-check-for-SIMD-fastpath-handler.patch
new file mode 100644 (file)
index 0000000..3b58ed5
--- /dev/null
@@ -0,0 +1,44 @@
+From 3935a70968820c3994db4de7e6e1c7e814bff875 Mon Sep 17 00:00:00 2001
+From: Sudarsana Reddy Kalluru <sudarsana.kalluru@cavium.com>
+Date: Mon, 18 Jun 2018 21:58:01 -0700
+Subject: [PATCH 39/44] qed: Add sanity check for SIMD fastpath handler.
+
+Avoid calling a SIMD fastpath handler if it is NULL. The check is needed
+to handle an unlikely scenario where an unsolicited interrupt is destined
+for a PF in INTa mode.
+
+Fixes: fe56b9e6a ("qed: Add module with basic common support")
+Signed-off-by: Sudarsana Reddy Kalluru <Sudarsana.Kalluru@cavium.com>
+Signed-off-by: Ariel Elior <ariel.elior@cavium.com>
+Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/qlogic/qed/qed_main.c | 12 ++++++++++--
+ 1 file changed, 10 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
+index b04d57c..5c10fd7 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
+@@ -567,8 +567,16 @@ static irqreturn_t qed_single_int(int irq, void *dev_instance)
+               /* Fastpath interrupts */
+               for (j = 0; j < 64; j++) {
+                       if ((0x2ULL << j) & status) {
+-                              hwfn->simd_proto_handler[j].func(
+-                                      hwfn->simd_proto_handler[j].token);
++                              struct qed_simd_fp_handler *p_handler =
++                                      &hwfn->simd_proto_handler[j];
++
++                              if (p_handler->func)
++                                      p_handler->func(p_handler->token);
++                              else
++                                      DP_NOTICE(hwfn,
++                                                "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",
++                                                j, status);
++
+                               status &= ~(0x2ULL << j);
+                               rc = IRQ_HANDLED;
+                       }
+-- 
+2.9.5
+
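A compact sketch of the guarded dispatch the patch introduces: scan a 64-bit
status word and only invoke handlers that were actually registered. The
handler type is a simplified stand-in for the driver's struct:

#include <stdint.h>
#include <stdio.h>

struct simd_fp_handler {                /* simplified stand-in */
        void (*func)(void *token);
        void *token;
};

static void dispatch(struct simd_fp_handler *h, uint64_t status)
{
        for (int j = 0; j < 64; j++) {
                if (!((0x2ULL << j) & status))
                        continue;
                if (h[j].func)
                        h[j].func(h[j].token);
                else    /* unsolicited interrupt: no handler registered */
                        fprintf(stderr,
                                "handler #%d is NULL, status 0x%llx\n",
                                j, (unsigned long long)status);
                status &= ~(0x2ULL << j);
        }
}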
diff --git a/linux-next-cherry-picks/0040-qed-Do-not-advertise-DCBX_LLD_MANAGED-capability.patch b/linux-next-cherry-picks/0040-qed-Do-not-advertise-DCBX_LLD_MANAGED-capability.patch
new file mode 100644 (file)
index 0000000..b45aeac
--- /dev/null
@@ -0,0 +1,59 @@
+From ff54d5cd9ec15546abc870452dd0b66eef4b4606 Mon Sep 17 00:00:00 2001
+From: Sudarsana Reddy Kalluru <sudarsana.kalluru@cavium.com>
+Date: Mon, 18 Jun 2018 21:58:02 -0700
+Subject: [PATCH 40/44] qed: Do not advertise DCBX_LLD_MANAGED capability.
+
+Do not advertise the DCBX_LLD_MANAGED capability, i.e., do not allow an
+external agent to manage the DCBX/LLDP negotiation. The MFW acts as the
+LLDP agent for qed* devices, and no other LLDP agent is allowed to coexist
+with the MFW.
+
+Also updated a debug print to not display the redundant info.
+
+Fixes: a1d8d8a51 ("qed: Add dcbnl support.")
+Signed-off-by: Sudarsana Reddy Kalluru <Sudarsana.Kalluru@cavium.com>
+Signed-off-by: Ariel Elior <ariel.elior@cavium.com>
+Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 11 ++++-------
+ 1 file changed, 4 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+index 8f31406..f0b0138 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+@@ -255,9 +255,8 @@ qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn,
+               *type = DCBX_PROTOCOL_ROCE_V2;
+       } else {
+               *type = DCBX_MAX_PROTOCOL_TYPE;
+-              DP_ERR(p_hwfn,
+-                     "No action required, App TLV id = 0x%x app_prio_bitmap = 0x%x\n",
+-                     id, app_prio_bitmap);
++              DP_ERR(p_hwfn, "No action required, App TLV entry = 0x%x\n",
++                     app_prio_bitmap);
+               return false;
+       }
+@@ -1479,8 +1478,8 @@ static u8 qed_dcbnl_getcap(struct qed_dev *cdev, int capid, u8 *cap)
+               *cap = 0x80;
+               break;
+       case DCB_CAP_ATTR_DCBX:
+-              *cap = (DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_CEE |
+-                      DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_STATIC);
++              *cap = (DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_VER_IEEE |
++                      DCB_CAP_DCBX_STATIC);
+               break;
+       default:
+               *cap = false;
+@@ -1548,8 +1547,6 @@ static u8 qed_dcbnl_getdcbx(struct qed_dev *cdev)
+       if (!dcbx_info)
+               return 0;
+-      if (dcbx_info->operational.enabled)
+-              mode |= DCB_CAP_DCBX_LLD_MANAGED;
+       if (dcbx_info->operational.ieee)
+               mode |= DCB_CAP_DCBX_VER_IEEE;
+       if (dcbx_info->operational.cee)
+-- 
+2.9.5
+
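A quick check of the capability mask before and after the change, using the
DCB_CAP_DCBX_* values as defined in include/uapi/linux/dcbnl.h:

#include <stdio.h>

#define DCB_CAP_DCBX_LLD_MANAGED 0x02
#define DCB_CAP_DCBX_VER_CEE     0x04
#define DCB_CAP_DCBX_VER_IEEE    0x08
#define DCB_CAP_DCBX_STATIC      0x10

int main(void)
{
        unsigned int before = DCB_CAP_DCBX_LLD_MANAGED |
                              DCB_CAP_DCBX_VER_CEE |
                              DCB_CAP_DCBX_VER_IEEE |
                              DCB_CAP_DCBX_STATIC;
        unsigned int after = before & ~DCB_CAP_DCBX_LLD_MANAGED;

        /* prints before=0x1e after=0x1c */
        printf("before=0x%02x after=0x%02x\n", before, after);
        return 0;
}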
diff --git a/linux-next-cherry-picks/0041-qed-Limit-msix-vectors-in-kdump-kernel-to-the-minimu.patch b/linux-next-cherry-picks/0041-qed-Limit-msix-vectors-in-kdump-kernel-to-the-minimu.patch
new file mode 100644 (file)
index 0000000..52e1306
--- /dev/null
@@ -0,0 +1,42 @@
+From bb7858ba1102f82470a917e041fd23e6385c31be Mon Sep 17 00:00:00 2001
+From: Sudarsana Reddy Kalluru <sudarsana.kalluru@cavium.com>
+Date: Sun, 1 Jul 2018 20:03:05 -0700
+Subject: [PATCH 41/44] qed: Limit msix vectors in kdump kernel to the minimum
+ required count.
+
+Memory size is limited in the kdump kernel environment. Allocating more
+MSI-X vectors (or queues) consumes a few tens of MBs of memory, which might
+lead to kdump kernel failure.
+This patch limits the number of MSI-X vectors in the kdump kernel to the
+minimum required value (i.e., 2 per engine).
+
+Fixes: fe56b9e6a ("qed: Add module with basic common support")
+Signed-off-by: Sudarsana Reddy Kalluru <Sudarsana.Kalluru@cavium.com>
+Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/qlogic/qed/qed_main.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
+index 5c10fd7..0cbc74d 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
+@@ -789,6 +789,14 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
+       /* We want a minimum of one slowpath and one fastpath vector per hwfn */
+       cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;
++      if (is_kdump_kernel()) {
++              DP_INFO(cdev,
++                      "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
++                      cdev->int_params.in.min_msix_cnt);
++              cdev->int_params.in.num_vectors =
++                      cdev->int_params.in.min_msix_cnt;
++      }
++
+       rc = qed_set_int_mode(cdev, false);
+       if (rc)  {
+               DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
+-- 
+2.9.5
+
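The change is a simple clamp: when is_kdump_kernel() reports a crash-capture
environment, the requested vector count is forced down to the already-computed
minimum of two per hwfn (one slowpath, one fastpath). A simplified sketch with
hypothetical types:

struct int_params {                     /* simplified stand-in */
        int min_msix_cnt;
        int num_vectors;
};

static void setup_int(struct int_params *p, int num_hwfns, int in_kdump)
{
        /* one slowpath + one fastpath vector per hwfn is the floor */
        p->min_msix_cnt = num_hwfns * 2;
        if (in_kdump)                   /* memory is scarce: take the floor */
                p->num_vectors = p->min_msix_cnt;
}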
diff --git a/linux-next-cherry-picks/0042-qed-Fix-setting-of-incorrect-eswitch-mode.patch b/linux-next-cherry-picks/0042-qed-Fix-setting-of-incorrect-eswitch-mode.patch
new file mode 100644 (file)
index 0000000..fa1b370
--- /dev/null
@@ -0,0 +1,79 @@
+From 538f8d00ba8bb417c4d9e76c61dee59d812d8287 Mon Sep 17 00:00:00 2001
+From: Sudarsana Reddy Kalluru <sudarsana.kalluru@cavium.com>
+Date: Sun, 1 Jul 2018 20:03:06 -0700
+Subject: [PATCH 42/44] qed: Fix setting of incorrect eswitch mode.
+
+By default, the driver incorrectly sets the eswitch mode to VEB (virtual
+Ethernet bridging).
+The VEB eswitch mode should be set only when SR-IOV is enabled, and the
+mode should default to NONE. The patch incorporates this change.
+
+Fixes: 0fefbfbaa ("qed*: Management firmware - notifications and defaults")
+Signed-off-by: Sudarsana Reddy Kalluru <Sudarsana.Kalluru@cavium.com>
+Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/qlogic/qed/qed_dev.c   |  2 +-
+ drivers/net/ethernet/qlogic/qed/qed_sriov.c | 19 +++++++++++++++++--
+ 2 files changed, 18 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
+index 329781c..e5249b4 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
+@@ -1804,7 +1804,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
+                       DP_INFO(p_hwfn, "Failed to update driver state\n");
+               rc = qed_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt,
+-                                             QED_OV_ESWITCH_VEB);
++                                             QED_OV_ESWITCH_NONE);
+               if (rc)
+                       DP_INFO(p_hwfn, "Failed to update eswitch mode\n");
+       }
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+index f01bf52..fd59cf4 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+@@ -4513,6 +4513,8 @@ static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn,
+ static int qed_sriov_enable(struct qed_dev *cdev, int num)
+ {
+       struct qed_iov_vf_init_params params;
++      struct qed_hwfn *hwfn;
++      struct qed_ptt *ptt;
+       int i, j, rc;
+       if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
+@@ -4525,8 +4527,8 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num)
+       /* Initialize HW for VF access */
+       for_each_hwfn(cdev, j) {
+-              struct qed_hwfn *hwfn = &cdev->hwfns[j];
+-              struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
++              hwfn = &cdev->hwfns[j];
++              ptt = qed_ptt_acquire(hwfn);
+               /* Make sure not to use more than 16 queues per VF */
+               params.num_queues = min_t(int,
+@@ -4562,6 +4564,19 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num)
+               goto err;
+       }
++      hwfn = QED_LEADING_HWFN(cdev);
++      ptt = qed_ptt_acquire(hwfn);
++      if (!ptt) {
++              DP_ERR(hwfn, "Failed to acquire ptt\n");
++              rc = -EBUSY;
++              goto err;
++      }
++
++      rc = qed_mcp_ov_update_eswitch(hwfn, ptt, QED_OV_ESWITCH_VEB);
++      if (rc)
++              DP_INFO(cdev, "Failed to update eswitch mode\n");
++      qed_ptt_release(hwfn, ptt);
++
+       return num;
+ err:
+-- 
+2.9.5
+
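The ordering is the point of the patch: qed_hw_init() now reports NONE, and
VEB is configured only from the SR-IOV enable path once VFs actually exist.
Reduced to its decision, with simplified names in place of the driver's
QED_OV_ESWITCH_* enum:

enum ov_eswitch { ESWITCH_NONE, ESWITCH_VEB };  /* simplified */

/* VEB only makes sense once there are VFs to bridge between. */
static enum ov_eswitch eswitch_mode_for(int num_vfs)
{
        return num_vfs ? ESWITCH_VEB : ESWITCH_NONE;
}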
diff --git a/linux-next-cherry-picks/0043-qed-Fix-use-of-incorrect-size-in-memcpy-call.patch b/linux-next-cherry-picks/0043-qed-Fix-use-of-incorrect-size-in-memcpy-call.patch
new file mode 100644 (file)
index 0000000..0b31375
--- /dev/null
@@ -0,0 +1,46 @@
+From cc9b27cdf7bd3c86df73439758ac1564bc8f5bbe Mon Sep 17 00:00:00 2001
+From: Sudarsana Reddy Kalluru <sudarsana.kalluru@cavium.com>
+Date: Sun, 1 Jul 2018 20:03:07 -0700
+Subject: [PATCH 43/44] qed: Fix use of incorrect size in memcpy call.
+
+Use the correct size value while copying chassis/port id values.
+
+Fixes: 6ad8c632e ("qed: Add support for query/config dcbx.")
+Signed-off-by: Sudarsana Reddy Kalluru <Sudarsana.Kalluru@cavium.com>
+Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+index f0b0138..e0680ce9 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+@@ -709,9 +709,9 @@ qed_dcbx_get_local_lldp_params(struct qed_hwfn *p_hwfn,
+       p_local = &p_hwfn->p_dcbx_info->lldp_local[LLDP_NEAREST_BRIDGE];
+       memcpy(params->lldp_local.local_chassis_id, p_local->local_chassis_id,
+-             ARRAY_SIZE(p_local->local_chassis_id));
++             sizeof(p_local->local_chassis_id));
+       memcpy(params->lldp_local.local_port_id, p_local->local_port_id,
+-             ARRAY_SIZE(p_local->local_port_id));
++             sizeof(p_local->local_port_id));
+ }
+ static void
+@@ -723,9 +723,9 @@ qed_dcbx_get_remote_lldp_params(struct qed_hwfn *p_hwfn,
+       p_remote = &p_hwfn->p_dcbx_info->lldp_remote[LLDP_NEAREST_BRIDGE];
+       memcpy(params->lldp_remote.peer_chassis_id, p_remote->peer_chassis_id,
+-             ARRAY_SIZE(p_remote->peer_chassis_id));
++             sizeof(p_remote->peer_chassis_id));
+       memcpy(params->lldp_remote.peer_port_id, p_remote->peer_port_id,
+-             ARRAY_SIZE(p_remote->peer_port_id));
++             sizeof(p_remote->peer_port_id));
+ }
+ static int
+-- 
+2.9.5
+
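Why the swap matters: ARRAY_SIZE() yields the element count while sizeof
yields the byte count, and memcpy() takes bytes. The two only coincide for
byte arrays; for wider elements (the id buffers here are u32 arrays in the
qed headers), ARRAY_SIZE undercounts the copy length. A runnable
demonstration with a stand-in buffer:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
        uint32_t src[4] = { 1, 2, 3, 4 };       /* stand-in for an id buffer */
        uint32_t dst[4] = { 0 };

        memcpy(dst, src, ARRAY_SIZE(src));      /* copies 4 bytes: dst[0] only */
        printf("dst[1] after ARRAY_SIZE copy: %u\n", dst[1]);  /* 0 */

        memcpy(dst, src, sizeof(src));          /* copies all 16 bytes */
        printf("dst[1] after sizeof copy:     %u\n", dst[1]);  /* 2 */
        return 0;
}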
diff --git a/linux-next-cherry-picks/0044-qede-Adverstise-software-timestamp-caps-when-PHC-is-.patch b/linux-next-cherry-picks/0044-qede-Adverstise-software-timestamp-caps-when-PHC-is-.patch
new file mode 100644 (file)
index 0000000..38d2066
--- /dev/null
@@ -0,0 +1,42 @@
+From 82a4e71b1565dea8387f54503e806cf374e779ec Mon Sep 17 00:00:00 2001
+From: Sudarsana Reddy Kalluru <sudarsana.kalluru@cavium.com>
+Date: Sun, 1 Jul 2018 20:03:08 -0700
+Subject: [PATCH 44/44] qede: Adverstise software timestamp caps when PHC is
+ not available.
+
+When the PTP clock is not available for a PF (e.g., higher PFs in NPAR
+mode), the get_ts_info() callback should return the software timestamping
+capabilities instead of returning an error.
+
+Fixes: 4c55215c ("qede: Add driver support for PTP")
+Signed-off-by: Sudarsana Reddy Kalluru <Sudarsana.Kalluru@cavium.com>
+Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/qlogic/qede/qede_ptp.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+index 02adb513..013ff56 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+@@ -337,8 +337,14 @@ int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info)
+ {
+       struct qede_ptp *ptp = edev->ptp;
+-      if (!ptp)
+-              return -EIO;
++      if (!ptp) {
++              info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
++                                      SOF_TIMESTAMPING_RX_SOFTWARE |
++                                      SOF_TIMESTAMPING_SOFTWARE;
++              info->phc_index = -1;
++
++              return 0;
++      }
+       info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+                               SOF_TIMESTAMPING_RX_SOFTWARE |
+-- 
+2.9.5
+
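A sketch of the fallback the patch adds: without a PHC, report software
timestamping and phc_index = -1 rather than failing. The struct is a
simplified stand-in for ethtool_ts_info; the flag values are as defined in
include/uapi/linux/net_tstamp.h:

#define SOF_TIMESTAMPING_TX_SOFTWARE (1 << 1)
#define SOF_TIMESTAMPING_RX_SOFTWARE (1 << 3)
#define SOF_TIMESTAMPING_SOFTWARE    (1 << 4)

struct ts_info {                        /* simplified ethtool_ts_info */
        unsigned int so_timestamping;
        int phc_index;
};

static int get_ts_info(const void *ptp, struct ts_info *info)
{
        /* software timestamping is always available */
        info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
                                SOF_TIMESTAMPING_RX_SOFTWARE |
                                SOF_TIMESTAMPING_SOFTWARE;
        if (!ptp) {
                info->phc_index = -1;   /* no PTP hardware clock */
                return 0;               /* succeed instead of -EIO */
        }
        /* ... fill in hardware capabilities and the real phc_index ... */
        return 0;
}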