git.openfabrics.org - compat-rdma/linux-4.8.git/commitdiff
Add qedr
author    Ram Amrani <Ram.Amrani@cavium.com>
          Wed, 2 Aug 2017 08:50:39 +0000 (11:50 +0300)
committer Vladimir Sokolovsky <vlad@mellanox.com>
          Mon, 7 Aug 2017 15:08:13 +0000 (18:08 +0300)
Add all of the qedr patches from linux-next v4.8 until:
e4917d46a653 qede: Add aRFS support

All qed and qede patches along the way were brought in as well,
since qedr must be kept synchronized with them. Still, these
patches were dropped:
054c67d1c82a qed*: Add support for ethtool link_ksettings callbacks.
16d5946a7c96 qede: Fix forcing high speeds
d7455f6e4450 qede: Decouple ethtool caps from qe

92 files changed:
drivers/infiniband/Kconfig
drivers/infiniband/hw/Makefile
drivers/infiniband/hw/qedr/Kconfig [new file with mode: 0644]
drivers/infiniband/hw/qedr/Makefile [new file with mode: 0644]
drivers/infiniband/hw/qedr/main.c [new file with mode: 0644]
drivers/infiniband/hw/qedr/qedr.h [new file with mode: 0644]
drivers/infiniband/hw/qedr/qedr_cm.c [new file with mode: 0644]
drivers/infiniband/hw/qedr/qedr_cm.h [new file with mode: 0644]
drivers/infiniband/hw/qedr/qedr_hsi_rdma.h [new file with mode: 0644]
drivers/infiniband/hw/qedr/verbs.c [new file with mode: 0644]
drivers/infiniband/hw/qedr/verbs.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/Kconfig
drivers/net/ethernet/qlogic/qed/Makefile
drivers/net/ethernet/qlogic/qed/qed.h
drivers/net/ethernet/qlogic/qed/qed_cxt.c
drivers/net/ethernet/qlogic/qed/qed_cxt.h
drivers/net/ethernet/qlogic/qed/qed_dcbx.c
drivers/net/ethernet/qlogic/qed/qed_dcbx.h
drivers/net/ethernet/qlogic/qed/qed_debug.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_debug.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_dev.c
drivers/net/ethernet/qlogic/qed/qed_dev_api.h
drivers/net/ethernet/qlogic/qed/qed_fcoe.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_fcoe.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_hsi.h
drivers/net/ethernet/qlogic/qed/qed_hw.c
drivers/net/ethernet/qlogic/qed/qed_hw.h
drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
drivers/net/ethernet/qlogic/qed/qed_init_ops.c
drivers/net/ethernet/qlogic/qed/qed_init_ops.h
drivers/net/ethernet/qlogic/qed/qed_int.c
drivers/net/ethernet/qlogic/qed/qed_int.h
drivers/net/ethernet/qlogic/qed/qed_iscsi.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_iscsi.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_l2.c
drivers/net/ethernet/qlogic/qed/qed_l2.h
drivers/net/ethernet/qlogic/qed/qed_ll2.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_ll2.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_main.c
drivers/net/ethernet/qlogic/qed/qed_mcp.c
drivers/net/ethernet/qlogic/qed/qed_mcp.h
drivers/net/ethernet/qlogic/qed/qed_ooo.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_ooo.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_ptp.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_ptp.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
drivers/net/ethernet/qlogic/qed/qed_roce.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_roce.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_selftest.c
drivers/net/ethernet/qlogic/qed/qed_selftest.h
drivers/net/ethernet/qlogic/qed/qed_sp.h
drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
drivers/net/ethernet/qlogic/qed/qed_spq.c
drivers/net/ethernet/qlogic/qed/qed_sriov.c
drivers/net/ethernet/qlogic/qed/qed_sriov.h
drivers/net/ethernet/qlogic/qed/qed_vf.c
drivers/net/ethernet/qlogic/qed/qed_vf.h
drivers/net/ethernet/qlogic/qede/Makefile
drivers/net/ethernet/qlogic/qede/qede.h
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
drivers/net/ethernet/qlogic/qede/qede_filter.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qede/qede_fp.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qede/qede_main.c
drivers/net/ethernet/qlogic/qede/qede_ptp.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qede/qede_ptp.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qede/qede_roce.c [new file with mode: 0644]
drivers/scsi/qedf/drv_fcoe_fw_funcs.c [new file with mode: 0644]
drivers/scsi/qedf/drv_fcoe_fw_funcs.h [new file with mode: 0644]
drivers/scsi/qedf/drv_scsi_fw_funcs.c [new file with mode: 0644]
drivers/scsi/qedf/drv_scsi_fw_funcs.h [new file with mode: 0644]
drivers/scsi/qedi/qedi_fw_api.c [new file with mode: 0644]
drivers/scsi/qedi/qedi_fw_iscsi.h [new file with mode: 0644]
drivers/scsi/qedi/qedi_fw_scsi.h [new file with mode: 0644]
include/linux/qed/common_hsi.h
include/linux/qed/eth_common.h
include/linux/qed/fcoe_common.h [new file with mode: 0644]
include/linux/qed/iscsi_common.h
include/linux/qed/qed_chain.h
include/linux/qed/qed_eth_if.h
include/linux/qed/qed_fcoe_if.h [new file with mode: 0644]
include/linux/qed/qed_if.h
include/linux/qed/qed_iov_if.h
include/linux/qed/qed_iscsi_if.h [new file with mode: 0644]
include/linux/qed/qed_ll2_if.h [new file with mode: 0644]
include/linux/qed/qed_roce_if.h [new file with mode: 0644]
include/linux/qed/qede_roce.h [new file with mode: 0644]
include/linux/qed/rdma_common.h
include/linux/qed/roce_common.h
include/linux/qed/storage_common.h
include/linux/qed/tcp_common.h
include/uapi/rdma/Kbuild
include/uapi/rdma/qedr-abi.h [new file with mode: 0644]

index e9b7dc037ff8774d83be33b9410ffde653304fd5..77ab0f306e8d3c4ddad871b0b56a303597218bd6 100644 (file)
@@ -88,4 +88,6 @@ source "drivers/infiniband/sw/rxe/Kconfig"
 
 source "drivers/infiniband/hw/hfi1/Kconfig"
 
+source "drivers/infiniband/hw/qedr/Kconfig"
+
 endif # INFINIBAND
index c0c7cf8af3f4cc22e524fbe37de0dbd7d1cc20da..c51e4643d06683500b38b47d4afd515308b0b9d4 100644 (file)
@@ -9,3 +9,4 @@ obj-$(CONFIG_INFINIBAND_NES)            += nes/
 obj-$(CONFIG_INFINIBAND_OCRDMA)                += ocrdma/
 obj-$(CONFIG_INFINIBAND_USNIC)         += usnic/
 obj-$(CONFIG_INFINIBAND_HFI1)          += hfi1/
+obj-$(CONFIG_INFINIBAND_QEDR)          += qedr/
diff --git a/drivers/infiniband/hw/qedr/Kconfig b/drivers/infiniband/hw/qedr/Kconfig
new file mode 100644 (file)
index 0000000..7c06d85
--- /dev/null
@@ -0,0 +1,7 @@
+config INFINIBAND_QEDR
+       tristate "QLogic RoCE driver"
+       depends on 64BIT && QEDE
+       select QED_LL2
+       ---help---
+         This driver provides low-level InfiniBand over Ethernet
+         support for QLogic QED host channel adapters (HCAs).
diff --git a/drivers/infiniband/hw/qedr/Makefile b/drivers/infiniband/hw/qedr/Makefile
new file mode 100644 (file)
index 0000000..ba7067c
--- /dev/null
@@ -0,0 +1,3 @@
+obj-$(CONFIG_INFINIBAND_QEDR) := qedr.o
+
+qedr-y := main.o verbs.o qedr_cm.o
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
new file mode 100644 (file)
index 0000000..ced0461
--- /dev/null
@@ -0,0 +1,920 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/module.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_user_verbs.h>
+#include <linux/netdevice.h>
+#include <linux/iommu.h>
+#include <net/addrconf.h>
+#include <linux/qed/qede_roce.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_if.h>
+#include "qedr.h"
+#include "verbs.h"
+#include <rdma/qedr-abi.h>
+
+MODULE_DESCRIPTION("QLogic 40G/100G ROCE Driver");
+MODULE_AUTHOR("QLogic Corporation");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(QEDR_MODULE_VERSION);
+
+#define QEDR_WQ_MULTIPLIER_DFT (3)
+
+void qedr_ib_dispatch_event(struct qedr_dev *dev, u8 port_num,
+                           enum ib_event_type type)
+{
+       struct ib_event ibev;
+
+       ibev.device = &dev->ibdev;
+       ibev.element.port_num = port_num;
+       ibev.event = type;
+
+       ib_dispatch_event(&ibev);
+}
+
+static enum rdma_link_layer qedr_link_layer(struct ib_device *device,
+                                           u8 port_num)
+{
+       return IB_LINK_LAYER_ETHERNET;
+}
+
+static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str,
+                               size_t str_len)
+{
+       struct qedr_dev *qedr = get_qedr_dev(ibdev);
+       u32 fw_ver = (u32)qedr->attr.fw_ver;
+
+       snprintf(str, str_len, "%d.%d.%d.%d",
+                (fw_ver >> 24) & 0xFF, (fw_ver >> 16) & 0xFF,
+                (fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
+}
+
+static struct net_device *qedr_get_netdev(struct ib_device *dev, u8 port_num)
+{
+       struct qedr_dev *qdev;
+
+       qdev = get_qedr_dev(dev);
+       dev_hold(qdev->ndev);
+
+       /* The HW vendor's device driver must guarantee
+        * that this function returns NULL before the net device reaches
+        * NETDEV_UNREGISTER_FINAL state.
+        */
+       return qdev->ndev;
+}
+
+static int qedr_register_device(struct qedr_dev *dev)
+{
+       strlcpy(dev->ibdev.name, "qedr%d", IB_DEVICE_NAME_MAX);
+
+       dev->ibdev.node_guid = dev->attr.node_guid;
+       memcpy(dev->ibdev.node_desc, QEDR_NODE_DESC, sizeof(QEDR_NODE_DESC));
+       dev->ibdev.owner = THIS_MODULE;
+       dev->ibdev.uverbs_abi_ver = QEDR_ABI_VERSION;
+
+       dev->ibdev.uverbs_cmd_mask = QEDR_UVERBS(GET_CONTEXT) |
+                                    QEDR_UVERBS(QUERY_DEVICE) |
+                                    QEDR_UVERBS(QUERY_PORT) |
+                                    QEDR_UVERBS(ALLOC_PD) |
+                                    QEDR_UVERBS(DEALLOC_PD) |
+                                    QEDR_UVERBS(CREATE_COMP_CHANNEL) |
+                                    QEDR_UVERBS(CREATE_CQ) |
+                                    QEDR_UVERBS(RESIZE_CQ) |
+                                    QEDR_UVERBS(DESTROY_CQ) |
+                                    QEDR_UVERBS(REQ_NOTIFY_CQ) |
+                                    QEDR_UVERBS(CREATE_QP) |
+                                    QEDR_UVERBS(MODIFY_QP) |
+                                    QEDR_UVERBS(QUERY_QP) |
+                                    QEDR_UVERBS(DESTROY_QP) |
+                                    QEDR_UVERBS(REG_MR) |
+                                    QEDR_UVERBS(DEREG_MR) |
+                                    QEDR_UVERBS(POLL_CQ) |
+                                    QEDR_UVERBS(POST_SEND) |
+                                    QEDR_UVERBS(POST_RECV);
+
+       dev->ibdev.phys_port_cnt = 1;
+       dev->ibdev.num_comp_vectors = dev->num_cnq;
+       dev->ibdev.node_type = RDMA_NODE_IB_CA;
+
+       dev->ibdev.query_device = qedr_query_device;
+       dev->ibdev.query_port = qedr_query_port;
+       dev->ibdev.modify_port = qedr_modify_port;
+
+       dev->ibdev.query_gid = qedr_query_gid;
+       dev->ibdev.add_gid = qedr_add_gid;
+       dev->ibdev.del_gid = qedr_del_gid;
+
+       dev->ibdev.alloc_ucontext = qedr_alloc_ucontext;
+       dev->ibdev.dealloc_ucontext = qedr_dealloc_ucontext;
+       dev->ibdev.mmap = qedr_mmap;
+
+       dev->ibdev.alloc_pd = qedr_alloc_pd;
+       dev->ibdev.dealloc_pd = qedr_dealloc_pd;
+
+       dev->ibdev.create_cq = qedr_create_cq;
+       dev->ibdev.destroy_cq = qedr_destroy_cq;
+       dev->ibdev.resize_cq = qedr_resize_cq;
+       dev->ibdev.req_notify_cq = qedr_arm_cq;
+
+       dev->ibdev.create_qp = qedr_create_qp;
+       dev->ibdev.modify_qp = qedr_modify_qp;
+       dev->ibdev.query_qp = qedr_query_qp;
+       dev->ibdev.destroy_qp = qedr_destroy_qp;
+
+       dev->ibdev.query_pkey = qedr_query_pkey;
+
+       dev->ibdev.create_ah = qedr_create_ah;
+       dev->ibdev.destroy_ah = qedr_destroy_ah;
+
+       dev->ibdev.get_dma_mr = qedr_get_dma_mr;
+       dev->ibdev.dereg_mr = qedr_dereg_mr;
+       dev->ibdev.reg_user_mr = qedr_reg_user_mr;
+       dev->ibdev.alloc_mr = qedr_alloc_mr;
+       dev->ibdev.map_mr_sg = qedr_map_mr_sg;
+
+       dev->ibdev.poll_cq = qedr_poll_cq;
+       dev->ibdev.post_send = qedr_post_send;
+       dev->ibdev.post_recv = qedr_post_recv;
+
+       dev->ibdev.process_mad = qedr_process_mad;
+       dev->ibdev.get_port_immutable = qedr_port_immutable;
+       dev->ibdev.get_netdev = qedr_get_netdev;
+
+       dev->ibdev.dev.parent = &dev->pdev->dev;
+
+       dev->ibdev.get_link_layer = qedr_link_layer;
+       dev->ibdev.get_dev_fw_str = qedr_get_dev_fw_str;
+
+       return ib_register_device(&dev->ibdev, NULL);
+}
+
+/* This function allocates fast-path status block memory */
+static int qedr_alloc_mem_sb(struct qedr_dev *dev,
+                            struct qed_sb_info *sb_info, u16 sb_id)
+{
+       struct status_block *sb_virt;
+       dma_addr_t sb_phys;
+       int rc;
+
+       sb_virt = dma_alloc_coherent(&dev->pdev->dev,
+                                    sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
+       if (!sb_virt)
+               return -ENOMEM;
+
+       rc = dev->ops->common->sb_init(dev->cdev, sb_info,
+                                      sb_virt, sb_phys, sb_id,
+                                      QED_SB_TYPE_CNQ);
+       if (rc) {
+               pr_err("Status block initialization failed\n");
+               dma_free_coherent(&dev->pdev->dev, sizeof(*sb_virt),
+                                 sb_virt, sb_phys);
+               return rc;
+       }
+
+       return 0;
+}
+
+static void qedr_free_mem_sb(struct qedr_dev *dev,
+                            struct qed_sb_info *sb_info, int sb_id)
+{
+       if (sb_info->sb_virt) {
+               dev->ops->common->sb_release(dev->cdev, sb_info, sb_id);
+               dma_free_coherent(&dev->pdev->dev, sizeof(*sb_info->sb_virt),
+                                 (void *)sb_info->sb_virt, sb_info->sb_phys);
+       }
+}
+
+static void qedr_free_resources(struct qedr_dev *dev)
+{
+       int i;
+
+       for (i = 0; i < dev->num_cnq; i++) {
+               qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
+               dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
+       }
+
+       kfree(dev->cnq_array);
+       kfree(dev->sb_array);
+       kfree(dev->sgid_tbl);
+}
+
+static int qedr_alloc_resources(struct qedr_dev *dev)
+{
+       struct qedr_cnq *cnq;
+       __le16 *cons_pi;
+       u16 n_entries;
+       int i, rc;
+
+       dev->sgid_tbl = kzalloc(sizeof(union ib_gid) *
+                               QEDR_MAX_SGID, GFP_KERNEL);
+       if (!dev->sgid_tbl)
+               return -ENOMEM;
+
+       spin_lock_init(&dev->sgid_lock);
+
+       /* Allocate Status blocks for CNQ */
+       dev->sb_array = kcalloc(dev->num_cnq, sizeof(*dev->sb_array),
+                               GFP_KERNEL);
+       if (!dev->sb_array) {
+               rc = -ENOMEM;
+               goto err1;
+       }
+
+       dev->cnq_array = kcalloc(dev->num_cnq,
+                                sizeof(*dev->cnq_array), GFP_KERNEL);
+       if (!dev->cnq_array) {
+               rc = -ENOMEM;
+               goto err2;
+       }
+
+       dev->sb_start = dev->ops->rdma_get_start_sb(dev->cdev);
+
+       /* Allocate CNQ PBLs */
+       n_entries = min_t(u32, QED_RDMA_MAX_CNQ_SIZE, QEDR_ROCE_MAX_CNQ_SIZE);
+       for (i = 0; i < dev->num_cnq; i++) {
+               cnq = &dev->cnq_array[i];
+
+               rc = qedr_alloc_mem_sb(dev, &dev->sb_array[i],
+                                      dev->sb_start + i);
+               if (rc)
+                       goto err3;
+
+               rc = dev->ops->common->chain_alloc(dev->cdev,
+                                                  QED_CHAIN_USE_TO_CONSUME,
+                                                  QED_CHAIN_MODE_PBL,
+                                                  QED_CHAIN_CNT_TYPE_U16,
+                                                  n_entries,
+                                                  sizeof(struct regpair *),
+                                                  &cnq->pbl);
+               if (rc)
+                       goto err4;
+
+               cnq->dev = dev;
+               cnq->sb = &dev->sb_array[i];
+               cons_pi = dev->sb_array[i].sb_virt->pi_array;
+               cnq->hw_cons_ptr = &cons_pi[QED_ROCE_PROTOCOL_INDEX];
+               cnq->index = i;
+               sprintf(cnq->name, "qedr%d@pci:%s", i, pci_name(dev->pdev));
+
+               DP_DEBUG(dev, QEDR_MSG_INIT, "cnq[%d].cons=%d\n",
+                        i, qed_chain_get_cons_idx(&cnq->pbl));
+       }
+
+       return 0;
+err4:
+       qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
+err3:
+       for (--i; i >= 0; i--) {
+               dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
+               qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
+       }
+       kfree(dev->cnq_array);
+err2:
+       kfree(dev->sb_array);
+err1:
+       kfree(dev->sgid_tbl);
+       return rc;
+}
+
+/* QEDR sysfs interface */
+static ssize_t show_rev(struct device *device, struct device_attribute *attr,
+                       char *buf)
+{
+       struct qedr_dev *dev = dev_get_drvdata(device);
+
+       return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->pdev->vendor);
+}
+
+static ssize_t show_hca_type(struct device *device,
+                            struct device_attribute *attr, char *buf)
+{
+       return scnprintf(buf, PAGE_SIZE, "%s\n", "HCA_TYPE_TO_SET");
+}
+
+static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
+static DEVICE_ATTR(hca_type, S_IRUGO, show_hca_type, NULL);
+
+static struct device_attribute *qedr_attributes[] = {
+       &dev_attr_hw_rev,
+       &dev_attr_hca_type
+};
+
+static void qedr_remove_sysfiles(struct qedr_dev *dev)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(qedr_attributes); i++)
+               device_remove_file(&dev->ibdev.dev, qedr_attributes[i]);
+}
+
+static void qedr_pci_set_atomic(struct qedr_dev *dev, struct pci_dev *pdev)
+{
+       struct pci_dev *bridge;
+       u32 val;
+
+       dev->atomic_cap = IB_ATOMIC_NONE;
+
+       bridge = pdev->bus->self;
+       if (!bridge)
+               return;
+
+       /* Check whether we are connected directly or via a switch */
+       while (bridge && bridge->bus->parent) {
+               DP_DEBUG(dev, QEDR_MSG_INIT,
+                        "Device is not connected directly to root. bridge->bus->number=%d primary=%d\n",
+                        bridge->bus->number, bridge->bus->primary);
+               /* Need to check Atomic Op Routing Supported all the way to
+                * root complex.
+                */
+               pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &val);
+               if (!(val & PCI_EXP_DEVCAP2_ATOMIC_ROUTE)) {
+                       pcie_capability_clear_word(pdev,
+                                                  PCI_EXP_DEVCTL2,
+                                                  PCI_EXP_DEVCTL2_ATOMIC_REQ);
+                       return;
+               }
+               bridge = bridge->bus->parent->self;
+       }
+       bridge = pdev->bus->self;
+
+       /* according to bridge capability */
+       pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &val);
+       if (val & PCI_EXP_DEVCAP2_ATOMIC_COMP64) {
+               pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
+                                        PCI_EXP_DEVCTL2_ATOMIC_REQ);
+               dev->atomic_cap = IB_ATOMIC_GLOB;
+       } else {
+               pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL2,
+                                          PCI_EXP_DEVCTL2_ATOMIC_REQ);
+       }
+}
+
+static const struct qed_rdma_ops *qed_ops;
+
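+/* Rebuild a 64-bit value from the hi/lo 32-bit halves of a firmware regpair. */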
+#define HILO_U64(hi, lo)               ((((u64)(hi)) << 32) + (lo))
+
+static irqreturn_t qedr_irq_handler(int irq, void *handle)
+{
+       u16 hw_comp_cons, sw_comp_cons;
+       struct qedr_cnq *cnq = handle;
+       struct regpair *cq_handle;
+       struct qedr_cq *cq;
+
+       qed_sb_ack(cnq->sb, IGU_INT_DISABLE, 0);
+
+       qed_sb_update_sb_idx(cnq->sb);
+
+       hw_comp_cons = le16_to_cpu(*cnq->hw_cons_ptr);
+       sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);
+
+       /* Align protocol-index and chain reads */
+       rmb();
+
+       while (sw_comp_cons != hw_comp_cons) {
+               cq_handle = (struct regpair *)qed_chain_consume(&cnq->pbl);
+               cq = (struct qedr_cq *)(uintptr_t)HILO_U64(cq_handle->hi,
+                               cq_handle->lo);
+
+               if (cq == NULL) {
+                       DP_ERR(cnq->dev,
+                              "Received NULL CQ cq_handle->hi=%d cq_handle->lo=%d sw_comp_cons=%d hw_comp_cons=%d\n",
+                              cq_handle->hi, cq_handle->lo, sw_comp_cons,
+                              hw_comp_cons);
+
+                       break;
+               }
+
+               if (cq->sig != QEDR_CQ_MAGIC_NUMBER) {
+                       DP_ERR(cnq->dev,
+                              "Problem with cq signature, cq_handle->hi=%d ch_handle->lo=%d cq=%p\n",
+                              cq_handle->hi, cq_handle->lo, cq);
+                       break;
+               }
+
+               cq->arm_flags = 0;
+
+               if (cq->ibcq.comp_handler)
+                       (*cq->ibcq.comp_handler)
+                               (&cq->ibcq, cq->ibcq.cq_context);
+
+               sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);
+
+               cnq->n_comp++;
+
+       }
+
+       qed_ops->rdma_cnq_prod_update(cnq->dev->rdma_ctx, cnq->index,
+                                     sw_comp_cons);
+
+       qed_sb_ack(cnq->sb, IGU_INT_ENABLE, 1);
+
+       return IRQ_HANDLED;
+}
+
+static void qedr_sync_free_irqs(struct qedr_dev *dev)
+{
+       u32 vector;
+       int i;
+
+       for (i = 0; i < dev->int_info.used_cnt; i++) {
+               if (dev->int_info.msix_cnt) {
+                       vector = dev->int_info.msix[i * dev->num_hwfns].vector;
+                       synchronize_irq(vector);
+                       free_irq(vector, &dev->cnq_array[i]);
+               }
+       }
+
+       dev->int_info.used_cnt = 0;
+}
+
+static int qedr_req_msix_irqs(struct qedr_dev *dev)
+{
+       int i, rc = 0;
+
+       if (dev->num_cnq > dev->int_info.msix_cnt) {
+               DP_ERR(dev,
+                      "Interrupt mismatch: %d CNQ queues > %d MSI-x vectors\n",
+                      dev->num_cnq, dev->int_info.msix_cnt);
+               return -EINVAL;
+       }
+
+       for (i = 0; i < dev->num_cnq; i++) {
+               rc = request_irq(dev->int_info.msix[i * dev->num_hwfns].vector,
+                                qedr_irq_handler, 0, dev->cnq_array[i].name,
+                                &dev->cnq_array[i]);
+               if (rc) {
+                       DP_ERR(dev, "Request cnq %d irq failed\n", i);
+                       qedr_sync_free_irqs(dev);
+               } else {
+                       DP_DEBUG(dev, QEDR_MSG_INIT,
+                                "Requested cnq irq for %s [entry %d]. Cookie is at %p\n",
+                                dev->cnq_array[i].name, i,
+                                &dev->cnq_array[i]);
+                       dev->int_info.used_cnt++;
+               }
+       }
+
+       return rc;
+}
+
+static int qedr_setup_irqs(struct qedr_dev *dev)
+{
+       int rc;
+
+       DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs\n");
+
+       /* Learn Interrupt configuration */
+       rc = dev->ops->rdma_set_rdma_int(dev->cdev, dev->num_cnq);
+       if (rc < 0)
+               return rc;
+
+       rc = dev->ops->rdma_get_rdma_int(dev->cdev, &dev->int_info);
+       if (rc) {
+               DP_DEBUG(dev, QEDR_MSG_INIT, "get_rdma_int failed\n");
+               return rc;
+       }
+
+       if (dev->int_info.msix_cnt) {
+               DP_DEBUG(dev, QEDR_MSG_INIT, "rdma msix_cnt = %d\n",
+                        dev->int_info.msix_cnt);
+               rc = qedr_req_msix_irqs(dev);
+               if (rc)
+                       return rc;
+       }
+
+       DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs succeeded\n");
+
+       return 0;
+}
+
+static int qedr_set_device_attr(struct qedr_dev *dev)
+{
+       struct qed_rdma_device *qed_attr;
+       struct qedr_device_attr *attr;
+       u32 page_size;
+
+       /* Part 1 - query core capabilities */
+       qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx);
+
+       /* Part 2 - check capabilities */
+       page_size = ~dev->attr.page_size_caps + 1;
+       if (page_size > PAGE_SIZE) {
+               DP_ERR(dev,
+                      "Kernel PAGE_SIZE is %ld which is smaller than minimum page size (%d) required by qedr\n",
+                      PAGE_SIZE, page_size);
+               return -ENODEV;
+       }
+
+       /* Part 3 - copy and update capabilities */
+       attr = &dev->attr;
+       attr->vendor_id = qed_attr->vendor_id;
+       attr->vendor_part_id = qed_attr->vendor_part_id;
+       attr->hw_ver = qed_attr->hw_ver;
+       attr->fw_ver = qed_attr->fw_ver;
+       attr->node_guid = qed_attr->node_guid;
+       attr->sys_image_guid = qed_attr->sys_image_guid;
+       attr->max_cnq = qed_attr->max_cnq;
+       attr->max_sge = qed_attr->max_sge;
+       attr->max_inline = qed_attr->max_inline;
+       attr->max_sqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_SQE);
+       attr->max_rqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_RQE);
+       attr->max_qp_resp_rd_atomic_resc = qed_attr->max_qp_resp_rd_atomic_resc;
+       attr->max_qp_req_rd_atomic_resc = qed_attr->max_qp_req_rd_atomic_resc;
+       attr->max_dev_resp_rd_atomic_resc =
+           qed_attr->max_dev_resp_rd_atomic_resc;
+       attr->max_cq = qed_attr->max_cq;
+       attr->max_qp = qed_attr->max_qp;
+       attr->max_mr = qed_attr->max_mr;
+       attr->max_mr_size = qed_attr->max_mr_size;
+       attr->max_cqe = min_t(u64, qed_attr->max_cqe, QEDR_MAX_CQES);
+       attr->max_mw = qed_attr->max_mw;
+       attr->max_fmr = qed_attr->max_fmr;
+       attr->max_mr_mw_fmr_pbl = qed_attr->max_mr_mw_fmr_pbl;
+       attr->max_mr_mw_fmr_size = qed_attr->max_mr_mw_fmr_size;
+       attr->max_pd = qed_attr->max_pd;
+       attr->max_ah = qed_attr->max_ah;
+       attr->max_pkey = qed_attr->max_pkey;
+       attr->max_srq = qed_attr->max_srq;
+       attr->max_srq_wr = qed_attr->max_srq_wr;
+       attr->dev_caps = qed_attr->dev_caps;
+       attr->page_size_caps = qed_attr->page_size_caps;
+       attr->dev_ack_delay = qed_attr->dev_ack_delay;
+       attr->reserved_lkey = qed_attr->reserved_lkey;
+       attr->bad_pkey_counter = qed_attr->bad_pkey_counter;
+       attr->max_stats_queues = qed_attr->max_stats_queues;
+
+       return 0;
+}
+
+void qedr_unaffiliated_event(void *context, u8 event_code)
+{
+       pr_err("unaffiliated event not implemented yet\n");
+}
+
+void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
+{
+#define EVENT_TYPE_NOT_DEFINED 0
+#define EVENT_TYPE_CQ          1
+#define EVENT_TYPE_QP          2
+       struct qedr_dev *dev = (struct qedr_dev *)context;
+       struct regpair *async_handle = (struct regpair *)fw_handle;
+       u64 roce_handle64 = ((u64) async_handle->hi << 32) + async_handle->lo;
+       u8 event_type = EVENT_TYPE_NOT_DEFINED;
+       struct ib_event event;
+       struct ib_cq *ibcq;
+       struct ib_qp *ibqp;
+       struct qedr_cq *cq;
+       struct qedr_qp *qp;
+
+       switch (e_code) {
+       case ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR:
+               event.event = IB_EVENT_CQ_ERR;
+               event_type = EVENT_TYPE_CQ;
+               break;
+       case ROCE_ASYNC_EVENT_SQ_DRAINED:
+               event.event = IB_EVENT_SQ_DRAINED;
+               event_type = EVENT_TYPE_QP;
+               break;
+       case ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR:
+               event.event = IB_EVENT_QP_FATAL;
+               event_type = EVENT_TYPE_QP;
+               break;
+       case ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR:
+               event.event = IB_EVENT_QP_REQ_ERR;
+               event_type = EVENT_TYPE_QP;
+               break;
+       case ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR:
+               event.event = IB_EVENT_QP_ACCESS_ERR;
+               event_type = EVENT_TYPE_QP;
+               break;
+       default:
+               DP_ERR(dev, "unsupported event %d on handle=%llx\n", e_code,
+                      roce_handle64);
+       }
+
+       switch (event_type) {
+       case EVENT_TYPE_CQ:
+               cq = (struct qedr_cq *)(uintptr_t)roce_handle64;
+               if (cq) {
+                       ibcq = &cq->ibcq;
+                       if (ibcq->event_handler) {
+                               event.device = ibcq->device;
+                               event.element.cq = ibcq;
+                               ibcq->event_handler(&event, ibcq->cq_context);
+                       }
+               } else {
+                       WARN(1,
+                            "Error: CQ event with NULL pointer ibcq. Handle=%llx\n",
+                            roce_handle64);
+               }
+               DP_ERR(dev, "CQ event %d on hanlde %p\n", e_code, cq);
+               break;
+       case EVENT_TYPE_QP:
+               qp = (struct qedr_qp *)(uintptr_t)roce_handle64;
+               if (qp) {
+                       ibqp = &qp->ibqp;
+                       if (ibqp->event_handler) {
+                               event.device = ibqp->device;
+                               event.element.qp = ibqp;
+                               ibqp->event_handler(&event, ibqp->qp_context);
+                       }
+               } else {
+                       WARN(1,
+                            "Error: QP event with NULL pointer ibqp. Handle=%llx\n",
+                            roce_handle64);
+               }
+               DP_ERR(dev, "QP event %d on hanlde %p\n", e_code, qp);
+               break;
+       default:
+               break;
+       }
+}
+
+static int qedr_init_hw(struct qedr_dev *dev)
+{
+       struct qed_rdma_add_user_out_params out_params;
+       struct qed_rdma_start_in_params *in_params;
+       struct qed_rdma_cnq_params *cur_pbl;
+       struct qed_rdma_events events;
+       dma_addr_t p_phys_table;
+       u32 page_cnt;
+       int rc = 0;
+       int i;
+
+       in_params = kzalloc(sizeof(*in_params), GFP_KERNEL);
+       if (!in_params) {
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       in_params->desired_cnq = dev->num_cnq;
+       for (i = 0; i < dev->num_cnq; i++) {
+               cur_pbl = &in_params->cnq_pbl_list[i];
+
+               page_cnt = qed_chain_get_page_cnt(&dev->cnq_array[i].pbl);
+               cur_pbl->num_pbl_pages = page_cnt;
+
+               p_phys_table = qed_chain_get_pbl_phys(&dev->cnq_array[i].pbl);
+               cur_pbl->pbl_ptr = (u64)p_phys_table;
+       }
+
+       events.affiliated_event = qedr_affiliated_event;
+       events.unaffiliated_event = qedr_unaffiliated_event;
+       events.context = dev;
+
+       in_params->events = &events;
+       in_params->cq_mode = QED_RDMA_CQ_MODE_32_BITS;
+       in_params->max_mtu = dev->ndev->mtu;
+       ether_addr_copy(&in_params->mac_addr[0], dev->ndev->dev_addr);
+
+       rc = dev->ops->rdma_init(dev->cdev, in_params);
+       if (rc)
+               goto out;
+
+       rc = dev->ops->rdma_add_user(dev->rdma_ctx, &out_params);
+       if (rc)
+               goto out;
+
+       dev->db_addr = (void *)(uintptr_t)out_params.dpi_addr;
+       dev->db_phys_addr = out_params.dpi_phys_addr;
+       dev->db_size = out_params.dpi_size;
+       dev->dpi = out_params.dpi;
+
+       rc = qedr_set_device_attr(dev);
+out:
+       kfree(in_params);
+       if (rc)
+               DP_ERR(dev, "Init HW Failed rc = %d\n", rc);
+
+       return rc;
+}
+
+void qedr_stop_hw(struct qedr_dev *dev)
+{
+       dev->ops->rdma_remove_user(dev->rdma_ctx, dev->dpi);
+       dev->ops->rdma_stop(dev->rdma_ctx);
+}
+
+static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
+                                struct net_device *ndev)
+{
+       struct qed_dev_rdma_info dev_info;
+       struct qedr_dev *dev;
+       int rc = 0, i;
+
+       dev = (struct qedr_dev *)ib_alloc_device(sizeof(*dev));
+       if (!dev) {
+               pr_err("Unable to allocate ib device\n");
+               return NULL;
+       }
+
+       DP_DEBUG(dev, QEDR_MSG_INIT, "qedr add device called\n");
+
+       dev->pdev = pdev;
+       dev->ndev = ndev;
+       dev->cdev = cdev;
+
+       qed_ops = qed_get_rdma_ops();
+       if (!qed_ops) {
+               DP_ERR(dev, "Failed to get qed roce operations\n");
+               goto init_err;
+       }
+
+       dev->ops = qed_ops;
+       rc = qed_ops->fill_dev_info(cdev, &dev_info);
+       if (rc)
+               goto init_err;
+
+       dev->num_hwfns = dev_info.common.num_hwfns;
+       dev->rdma_ctx = dev->ops->rdma_get_rdma_ctx(cdev);
+
+       dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev);
+       if (!dev->num_cnq) {
+               DP_ERR(dev, "not enough CNQ resources.\n");
+               goto init_err;
+       }
+
+       dev->wq_multiplier = QEDR_WQ_MULTIPLIER_DFT;
+
+       qedr_pci_set_atomic(dev, pdev);
+
+       rc = qedr_alloc_resources(dev);
+       if (rc)
+               goto init_err;
+
+       rc = qedr_init_hw(dev);
+       if (rc)
+               goto alloc_err;
+
+       rc = qedr_setup_irqs(dev);
+       if (rc)
+               goto irq_err;
+
+       rc = qedr_register_device(dev);
+       if (rc) {
+               DP_ERR(dev, "Unable to allocate register device\n");
+               goto reg_err;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(qedr_attributes); i++)
+               if (device_create_file(&dev->ibdev.dev, qedr_attributes[i]))
+                       goto sysfs_err;
+
+       if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+               qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
+
+       DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
+       return dev;
+
+sysfs_err:
+       ib_unregister_device(&dev->ibdev);
+reg_err:
+       qedr_sync_free_irqs(dev);
+irq_err:
+       qedr_stop_hw(dev);
+alloc_err:
+       qedr_free_resources(dev);
+init_err:
+       /* Log before ib_dealloc_device() frees the device and its name. */
+       DP_ERR(dev, "qedr driver load failed rc=%d\n", rc);
+       ib_dealloc_device(&dev->ibdev);
+
+       return NULL;
+}
+
+static void qedr_remove(struct qedr_dev *dev)
+{
+       /* First unregister with stack to stop all the active traffic
+        * of the registered clients.
+        */
+       qedr_remove_sysfiles(dev);
+       ib_unregister_device(&dev->ibdev);
+
+       qedr_stop_hw(dev);
+       qedr_sync_free_irqs(dev);
+       qedr_free_resources(dev);
+       ib_dealloc_device(&dev->ibdev);
+}
+
+static void qedr_close(struct qedr_dev *dev)
+{
+       if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+               qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR);
+}
+
+static void qedr_shutdown(struct qedr_dev *dev)
+{
+       qedr_close(dev);
+       qedr_remove(dev);
+}
+
+static void qedr_open(struct qedr_dev *dev)
+{
+       if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+               qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
+}
+
+static void qedr_mac_address_change(struct qedr_dev *dev)
+{
+       union ib_gid *sgid = &dev->sgid_tbl[0];
+       u8 guid[8], mac_addr[6];
+       int rc;
+
+       /* Update SGID */
+       ether_addr_copy(&mac_addr[0], dev->ndev->dev_addr);
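+       /* Build a modified EUI-64 interface ID from the MAC (RFC 4291):
+        * flip the universal/local bit and insert 0xff, 0xfe between
+        * the OUI and the device-specific bytes.
+        */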
+       guid[0] = mac_addr[0] ^ 2;
+       guid[1] = mac_addr[1];
+       guid[2] = mac_addr[2];
+       guid[3] = 0xff;
+       guid[4] = 0xfe;
+       guid[5] = mac_addr[3];
+       guid[6] = mac_addr[4];
+       guid[7] = mac_addr[5];
+       sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
+       memcpy(&sgid->raw[8], guid, sizeof(guid));
+
+       /* Update LL2 */
+       rc = dev->ops->roce_ll2_set_mac_filter(dev->cdev,
+                                              dev->gsi_ll2_mac_address,
+                                              dev->ndev->dev_addr);
+
+       ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);
+
+       qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_GID_CHANGE);
+
+       if (rc)
+               DP_ERR(dev, "Error updating mac filter\n");
+}
+
+/* Event handling via the NIC driver ensures that all NIC-specific
+ * initialization is done before the RoCE driver notifies the event
+ * to the stack.
+ */
+static void qedr_notify(struct qedr_dev *dev, enum qede_roce_event event)
+{
+       switch (event) {
+       case QEDE_UP:
+               qedr_open(dev);
+               break;
+       case QEDE_DOWN:
+               qedr_close(dev);
+               break;
+       case QEDE_CLOSE:
+               qedr_shutdown(dev);
+               break;
+       case QEDE_CHANGE_ADDR:
+               qedr_mac_address_change(dev);
+               break;
+       default:
+               pr_err("Event not supported\n");
+       }
+}
+
+static struct qedr_driver qedr_drv = {
+       .name = "qedr_driver",
+       .add = qedr_add,
+       .remove = qedr_remove,
+       .notify = qedr_notify,
+};
+
+static int __init qedr_init_module(void)
+{
+       return qede_roce_register_driver(&qedr_drv);
+}
+
+static void __exit qedr_exit_module(void)
+{
+       qede_roce_unregister_driver(&qedr_drv);
+}
+
+module_init(qedr_init_module);
+module_exit(qedr_exit_module);
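The CNQ interrupt handler above recovers each CQ pointer from a firmware
regpair, two 32-bit halves that HILO_U64 stitches back into a 64-bit value.
Below is a minimal userspace sketch of that round trip, assuming the lo-first
regpair layout from common_hsi.h; the demo main() is illustrative only, not
driver code:

#include <stdint.h>
#include <stdio.h>

/* A 64-bit quantity split into two 32-bit halves, lo first. */
struct regpair {
	uint32_t lo;
	uint32_t hi;
};

#define HILO_U64(hi, lo) ((((uint64_t)(hi)) << 32) + (lo))

int main(void)
{
	int cq_storage;			/* stands in for a struct qedr_cq */
	void *cq = &cq_storage;
	struct regpair handle;

	/* Split the pointer, as the driver does when it creates the CQ. */
	handle.lo = (uint32_t)(uintptr_t)cq;
	handle.hi = (uint32_t)(((uint64_t)(uintptr_t)cq) >> 32);

	/* Reassemble it, as qedr_irq_handler() does on completion. */
	void *back = (void *)(uintptr_t)HILO_U64(handle.hi, handle.lo);

	printf("round trip %s\n", back == cq ? "ok" : "broken");
	return 0;
}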
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
new file mode 100644 (file)
index 0000000..5cb9195
--- /dev/null
@@ -0,0 +1,498 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __QEDR_H__
+#define __QEDR_H__
+
+#include <linux/pci.h>
+#include <rdma/ib_addr.h>
+#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_roce_if.h>
+#include <linux/qed/qede_roce.h>
+#include <linux/qed/roce_common.h>
+#include "qedr_hsi_rdma.h"
+
+#define QEDR_MODULE_VERSION    "8.10.10.0"
+#define QEDR_NODE_DESC "QLogic 579xx RoCE HCA"
+#define DP_NAME(dev) ((dev)->ibdev.name)
+
+#define DP_DEBUG(dev, module, fmt, ...)                                        \
+       pr_debug("(%s) " module ": " fmt,                               \
+                DP_NAME(dev) ? DP_NAME(dev) : "", ## __VA_ARGS__)
+
+#define QEDR_MSG_INIT "INIT"
+#define QEDR_MSG_MISC "MISC"
+#define QEDR_MSG_CQ   "  CQ"
+#define QEDR_MSG_MR   "  MR"
+#define QEDR_MSG_RQ   "  RQ"
+#define QEDR_MSG_SQ   "  SQ"
+#define QEDR_MSG_QP   "  QP"
+#define QEDR_MSG_GSI  " GSI"
+
+#define QEDR_CQ_MAGIC_NUMBER   (0x11223344)
+
+struct qedr_dev;
+
+struct qedr_cnq {
+       struct qedr_dev         *dev;
+       struct qed_chain        pbl;
+       struct qed_sb_info      *sb;
+       char                    name[32];
+       u64                     n_comp;
+       __le16                  *hw_cons_ptr;
+       u8                      index;
+};
+
+#define QEDR_MAX_SGID 128
+
+struct qedr_device_attr {
+       u32     vendor_id;
+       u32     vendor_part_id;
+       u32     hw_ver;
+       u64     fw_ver;
+       u64     node_guid;
+       u64     sys_image_guid;
+       u8      max_cnq;
+       u8      max_sge;
+       u16     max_inline;
+       u32     max_sqe;
+       u32     max_rqe;
+       u8      max_qp_resp_rd_atomic_resc;
+       u8      max_qp_req_rd_atomic_resc;
+       u64     max_dev_resp_rd_atomic_resc;
+       u32     max_cq;
+       u32     max_qp;
+       u32     max_mr;
+       u64     max_mr_size;
+       u32     max_cqe;
+       u32     max_mw;
+       u32     max_fmr;
+       u32     max_mr_mw_fmr_pbl;
+       u64     max_mr_mw_fmr_size;
+       u32     max_pd;
+       u32     max_ah;
+       u8      max_pkey;
+       u32     max_srq;
+       u32     max_srq_wr;
+       u8      max_srq_sge;
+       u8      max_stats_queues;
+       u32     dev_caps;
+
+       u64     page_size_caps;
+       u8      dev_ack_delay;
+       u32     reserved_lkey;
+       u32     bad_pkey_counter;
+       struct qed_rdma_events events;
+};
+
+#define QEDR_ENET_STATE_BIT    (0)
+
+struct qedr_dev {
+       struct ib_device        ibdev;
+       struct qed_dev          *cdev;
+       struct pci_dev          *pdev;
+       struct net_device       *ndev;
+
+       enum ib_atomic_cap      atomic_cap;
+
+       void *rdma_ctx;
+       struct qedr_device_attr attr;
+
+       const struct qed_rdma_ops *ops;
+       struct qed_int_info     int_info;
+
+       struct qed_sb_info      *sb_array;
+       struct qedr_cnq         *cnq_array;
+       int                     num_cnq;
+       int                     sb_start;
+
+       void __iomem            *db_addr;
+       u64                     db_phys_addr;
+       u32                     db_size;
+       u16                     dpi;
+
+       union ib_gid *sgid_tbl;
+
+       /* Lock for sgid table */
+       spinlock_t sgid_lock;
+
+       u64                     guid;
+
+       u32                     dp_module;
+       u8                      dp_level;
+       u8                      num_hwfns;
+       uint                    wq_multiplier;
+       u8                      gsi_ll2_mac_address[ETH_ALEN];
+       int                     gsi_qp_created;
+       struct qedr_cq          *gsi_sqcq;
+       struct qedr_cq          *gsi_rqcq;
+       struct qedr_qp          *gsi_qp;
+
+       unsigned long enet_state;
+};
+
+#define QEDR_MAX_SQ_PBL                        (0x8000)
+#define QEDR_MAX_SQ_PBL_ENTRIES                (0x10000 / sizeof(void *))
+#define QEDR_SQE_ELEMENT_SIZE          (sizeof(struct rdma_sq_sge))
+#define QEDR_MAX_SQE_ELEMENTS_PER_SQE  (ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE / \
+                                        QEDR_SQE_ELEMENT_SIZE)
+#define QEDR_MAX_SQE_ELEMENTS_PER_PAGE ((RDMA_RING_PAGE_SIZE) / \
+                                        QEDR_SQE_ELEMENT_SIZE)
+#define QEDR_MAX_SQE                   ((QEDR_MAX_SQ_PBL_ENTRIES) *\
+                                        (RDMA_RING_PAGE_SIZE) / \
+                                        (QEDR_SQE_ELEMENT_SIZE) /\
+                                        (QEDR_MAX_SQE_ELEMENTS_PER_SQE))
+/* RQ */
+#define QEDR_MAX_RQ_PBL                        (0x2000)
+#define QEDR_MAX_RQ_PBL_ENTRIES                (0x10000 / sizeof(void *))
+#define QEDR_RQE_ELEMENT_SIZE          (sizeof(struct rdma_rq_sge))
+#define QEDR_MAX_RQE_ELEMENTS_PER_RQE  (RDMA_MAX_SGE_PER_RQ_WQE)
+#define QEDR_MAX_RQE_ELEMENTS_PER_PAGE ((RDMA_RING_PAGE_SIZE) / \
+                                        QEDR_RQE_ELEMENT_SIZE)
+#define QEDR_MAX_RQE                   ((QEDR_MAX_RQ_PBL_ENTRIES) *\
+                                        (RDMA_RING_PAGE_SIZE) / \
+                                        (QEDR_RQE_ELEMENT_SIZE) /\
+                                        (QEDR_MAX_RQE_ELEMENTS_PER_RQE))
+
+#define QEDR_CQE_SIZE  (sizeof(union rdma_cqe))
+#define QEDR_MAX_CQE_PBL_SIZE (512 * 1024)
+#define QEDR_MAX_CQE_PBL_ENTRIES (((QEDR_MAX_CQE_PBL_SIZE) / \
+                                 sizeof(u64)) - 1)
+#define QEDR_MAX_CQES ((u32)((QEDR_MAX_CQE_PBL_ENTRIES) * \
+                            (QED_CHAIN_PAGE_SIZE) / QEDR_CQE_SIZE))
+
+#define QEDR_ROCE_MAX_CNQ_SIZE         (0x4000)
+
+#define QEDR_MAX_PORT                  (1)
+#define QEDR_PORT                      (1)
+
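+/* Map an IB_USER_VERBS_CMD_* name to its bit in ibdev.uverbs_cmd_mask. */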
+#define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
+
+#define QEDR_ROCE_PKEY_MAX 1
+#define QEDR_ROCE_PKEY_TABLE_LEN 1
+#define QEDR_ROCE_PKEY_DEFAULT 0xffff
+
+struct qedr_pbl {
+       struct list_head list_entry;
+       void *va;
+       dma_addr_t pa;
+};
+
+struct qedr_ucontext {
+       struct ib_ucontext ibucontext;
+       struct qedr_dev *dev;
+       struct qedr_pd *pd;
+       u64 dpi_addr;
+       u64 dpi_phys_addr;
+       u32 dpi_size;
+       u16 dpi;
+
+       struct list_head mm_head;
+
+       /* Lock to protect mm list */
+       struct mutex mm_list_lock;
+};
+
+union db_prod64 {
+       struct rdma_pwm_val32_data data;
+       u64 raw;
+};
+
+enum qedr_cq_type {
+       QEDR_CQ_TYPE_GSI,
+       QEDR_CQ_TYPE_KERNEL,
+       QEDR_CQ_TYPE_USER,
+};
+
+struct qedr_pbl_info {
+       u32 num_pbls;
+       u32 num_pbes;
+       u32 pbl_size;
+       u32 pbe_size;
+       bool two_layered;
+};
+
+struct qedr_userq {
+       struct ib_umem *umem;
+       struct qedr_pbl_info pbl_info;
+       struct qedr_pbl *pbl_tbl;
+       u64 buf_addr;
+       size_t buf_len;
+};
+
+struct qedr_cq {
+       struct ib_cq ibcq;
+
+       enum qedr_cq_type cq_type;
+       u32 sig;
+
+       u16 icid;
+
+       /* Lock to protect multiple CQs */
+       spinlock_t cq_lock;
+       u8 arm_flags;
+       struct qed_chain pbl;
+
+       void __iomem *db_addr;
+       union db_prod64 db;
+
+       u8 pbl_toggle;
+       union rdma_cqe *latest_cqe;
+       union rdma_cqe *toggle_cqe;
+
+       u32 cq_cons;
+
+       struct qedr_userq q;
+};
+
+struct qedr_pd {
+       struct ib_pd ibpd;
+       u32 pd_id;
+       struct qedr_ucontext *uctx;
+};
+
+struct qedr_mm {
+       struct {
+               u64 phy_addr;
+               unsigned long len;
+       } key;
+       struct list_head entry;
+};
+
+union db_prod32 {
+       struct rdma_pwm_val16_data data;
+       u32 raw;
+};
+
+struct qedr_qp_hwq_info {
+       /* WQE Elements */
+       struct qed_chain pbl;
+       u64 p_phys_addr_tbl;
+       u32 max_sges;
+
+       /* WQE */
+       u16 prod;
+       u16 cons;
+       u16 wqe_cons;
+       u16 gsi_cons;
+       u16 max_wr;
+
+       /* DB */
+       void __iomem *db;
+       union db_prod32 db_data;
+};
+
+#define QEDR_INC_SW_IDX(p_info, index)                                 \
+       do {                                                            \
+               p_info->index = (p_info->index + 1) &                   \
+                               qed_chain_get_capacity(p_info->pbl);    \
+       } while (0)
+
+enum qedr_qp_err_bitmap {
+       QEDR_QP_ERR_SQ_FULL = 1,
+       QEDR_QP_ERR_RQ_FULL = 2,
+       QEDR_QP_ERR_BAD_SR = 4,
+       QEDR_QP_ERR_BAD_RR = 8,
+       QEDR_QP_ERR_SQ_PBL_FULL = 16,
+       QEDR_QP_ERR_RQ_PBL_FULL = 32,
+};
+
+struct qedr_qp {
+       struct ib_qp ibqp;      /* must be first */
+       struct qedr_dev *dev;
+
+       struct qedr_qp_hwq_info sq;
+       struct qedr_qp_hwq_info rq;
+
+       u32 max_inline_data;
+
+       /* Lock for QPs */
+       spinlock_t q_lock;
+       struct qedr_cq *sq_cq;
+       struct qedr_cq *rq_cq;
+       struct qedr_srq *srq;
+       enum qed_roce_qp_state state;
+       u32 id;
+       struct qedr_pd *pd;
+       enum ib_qp_type qp_type;
+       struct qed_rdma_qp *qed_qp;
+       u32 qp_id;
+       u16 icid;
+       u16 mtu;
+       int sgid_idx;
+       u32 rq_psn;
+       u32 sq_psn;
+       u32 qkey;
+       u32 dest_qp_num;
+
+       /* Relevant to qps created from kernel space only (ULPs) */
+       u8 prev_wqe_size;
+       u16 wqe_cons;
+       u32 err_bitmap;
+       bool signaled;
+
+       /* SQ shadow */
+       struct {
+               u64 wr_id;
+               enum ib_wc_opcode opcode;
+               u32 bytes_len;
+               u8 wqe_size;
+               bool signaled;
+               dma_addr_t icrc_mapping;
+               u32 *icrc;
+               struct qedr_mr *mr;
+       } *wqe_wr_id;
+
+       /* RQ shadow */
+       struct {
+               u64 wr_id;
+               struct ib_sge sg_list[RDMA_MAX_SGE_PER_RQ_WQE];
+               u8 wqe_size;
+
+               u8 smac[ETH_ALEN];
+               u16 vlan_id;
+               int rc;
+       } *rqe_wr_id;
+
+       /* Relevant to qps created from user space only (applications) */
+       struct qedr_userq usq;
+       struct qedr_userq urq;
+};
+
+struct qedr_ah {
+       struct ib_ah ibah;
+       struct ib_ah_attr attr;
+};
+
+enum qedr_mr_type {
+       QEDR_MR_USER,
+       QEDR_MR_KERNEL,
+       QEDR_MR_DMA,
+       QEDR_MR_FRMR,
+};
+
+struct mr_info {
+       struct qedr_pbl *pbl_table;
+       struct qedr_pbl_info pbl_info;
+       struct list_head free_pbl_list;
+       struct list_head inuse_pbl_list;
+       u32 completed;
+       u32 completed_handled;
+};
+
+struct qedr_mr {
+       struct ib_mr ibmr;
+       struct ib_umem *umem;
+
+       struct qed_rdma_register_tid_in_params hw_mr;
+       enum qedr_mr_type type;
+
+       struct qedr_dev *dev;
+       struct mr_info info;
+
+       u64 *pages;
+       u32 npages;
+};
+
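+/* OR 'flag' into 'value' at the bit offset given by name##_SHIFT. */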
+#define SET_FIELD2(value, name, flag) ((value) |= ((flag) << (name ## _SHIFT)))
+
+#define QEDR_RESP_IMM  (RDMA_CQE_RESPONDER_IMM_FLG_MASK << \
+                        RDMA_CQE_RESPONDER_IMM_FLG_SHIFT)
+#define QEDR_RESP_RDMA (RDMA_CQE_RESPONDER_RDMA_FLG_MASK << \
+                        RDMA_CQE_RESPONDER_RDMA_FLG_SHIFT)
+#define QEDR_RESP_RDMA_IMM (QEDR_RESP_IMM | QEDR_RESP_RDMA)
+
+static inline void qedr_inc_sw_cons(struct qedr_qp_hwq_info *info)
+{
+       info->cons = (info->cons + 1) % info->max_wr;
+       info->wqe_cons++;
+}
+
+static inline void qedr_inc_sw_prod(struct qedr_qp_hwq_info *info)
+{
+       info->prod = (info->prod + 1) % info->max_wr;
+}
+
+static inline int qedr_get_dmac(struct qedr_dev *dev,
+                               struct ib_ah_attr *ah_attr, u8 *mac_addr)
+{
+       union ib_gid zero_sgid = { { 0 } };
+       struct in6_addr in6;
+
+       if (!memcmp(&ah_attr->grh.dgid, &zero_sgid, sizeof(union ib_gid))) {
+               DP_ERR(dev, "Local port GID not supported\n");
+               eth_zero_addr(mac_addr);
+               return -EINVAL;
+       }
+
+       memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
+       ether_addr_copy(mac_addr, ah_attr->dmac);
+
+       return 0;
+}
+
+static inline
+struct qedr_ucontext *get_qedr_ucontext(struct ib_ucontext *ibucontext)
+{
+       return container_of(ibucontext, struct qedr_ucontext, ibucontext);
+}
+
+static inline struct qedr_dev *get_qedr_dev(struct ib_device *ibdev)
+{
+       return container_of(ibdev, struct qedr_dev, ibdev);
+}
+
+static inline struct qedr_pd *get_qedr_pd(struct ib_pd *ibpd)
+{
+       return container_of(ibpd, struct qedr_pd, ibpd);
+}
+
+static inline struct qedr_cq *get_qedr_cq(struct ib_cq *ibcq)
+{
+       return container_of(ibcq, struct qedr_cq, ibcq);
+}
+
+static inline struct qedr_qp *get_qedr_qp(struct ib_qp *ibqp)
+{
+       return container_of(ibqp, struct qedr_qp, ibqp);
+}
+
+static inline struct qedr_ah *get_qedr_ah(struct ib_ah *ibah)
+{
+       return container_of(ibah, struct qedr_ah, ibah);
+}
+
+static inline struct qedr_mr *get_qedr_mr(struct ib_mr *ibmr)
+{
+       return container_of(ibmr, struct qedr_mr, ibmr);
+}
+#endif
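The get_qedr_*() accessors above all follow the same embedding pattern: the
IB core holds a pointer to the member object, and container_of() recovers the
enclosing driver structure. Below is a self-contained sketch of the idea, with
simplified stand-in types rather than the real kernel structures:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ib_cq_demo { int cqe; };			/* the part the core sees */

struct qedr_cq_demo {
	struct ib_cq_demo ibcq;			/* embedded core object */
	unsigned int sig;			/* driver-private state */
};

static struct qedr_cq_demo *get_qedr_cq_demo(struct ib_cq_demo *ibcq)
{
	return container_of(ibcq, struct qedr_cq_demo, ibcq);
}

int main(void)
{
	struct qedr_cq_demo cq = { .ibcq = { .cqe = 64 }, .sig = 0x11223344 };

	/* The core hands back &cq.ibcq; the driver recovers cq from it. */
	printf("sig=0x%x\n", get_qedr_cq_demo(&cq.ibcq)->sig);
	return 0;
}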
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c
new file mode 100644 (file)
index 0000000..a2180c0
--- /dev/null
@@ -0,0 +1,613 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/dma-mapping.h>
+#include <linux/crc32.h>
+#include <linux/iommu.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/udp.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/iw_cm.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_cache.h>
+
+#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_roce_if.h>
+#include "qedr.h"
+#include "verbs.h"
+#include <rdma/qedr-abi.h>
+#include "qedr_cm.h"
+
+void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info *info)
+{
+       info->gsi_cons = (info->gsi_cons + 1) % info->max_wr;
+}
+
+void qedr_store_gsi_qp_cq(struct qedr_dev *dev, struct qedr_qp *qp,
+                         struct ib_qp_init_attr *attrs)
+{
+       dev->gsi_qp_created = 1;
+       dev->gsi_sqcq = get_qedr_cq(attrs->send_cq);
+       dev->gsi_rqcq = get_qedr_cq(attrs->recv_cq);
+       dev->gsi_qp = qp;
+}
+
+void qedr_ll2_tx_cb(void *_qdev, struct qed_roce_ll2_packet *pkt)
+{
+       struct qedr_dev *dev = (struct qedr_dev *)_qdev;
+       struct qedr_cq *cq = dev->gsi_sqcq;
+       struct qedr_qp *qp = dev->gsi_qp;
+       unsigned long flags;
+
+       DP_DEBUG(dev, QEDR_MSG_GSI,
+                "LL2 TX CB: gsi_sqcq=%p, gsi_rqcq=%p, gsi_cons=%d, ibcq_comp=%s\n",
+                dev->gsi_sqcq, dev->gsi_rqcq, qp->sq.gsi_cons,
+                cq->ibcq.comp_handler ? "Yes" : "No");
+
+       dma_free_coherent(&dev->pdev->dev, pkt->header.len, pkt->header.vaddr,
+                         pkt->header.baddr);
+       kfree(pkt);
+
+       spin_lock_irqsave(&qp->q_lock, flags);
+       qedr_inc_sw_gsi_cons(&qp->sq);
+       spin_unlock_irqrestore(&qp->q_lock, flags);
+
+       if (cq->ibcq.comp_handler)
+               (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
+}
+
+void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,
+                   struct qed_roce_ll2_rx_params *params)
+{
+       struct qedr_dev *dev = (struct qedr_dev *)_dev;
+       struct qedr_cq *cq = dev->gsi_rqcq;
+       struct qedr_qp *qp = dev->gsi_qp;
+       unsigned long flags;
+
+       spin_lock_irqsave(&qp->q_lock, flags);
+
+       qp->rqe_wr_id[qp->rq.gsi_cons].rc = params->rc;
+       qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = params->vlan_id;
+       qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length = pkt->payload[0].len;
+       ether_addr_copy(qp->rqe_wr_id[qp->rq.gsi_cons].smac, params->smac);
+
+       qedr_inc_sw_gsi_cons(&qp->rq);
+
+       spin_unlock_irqrestore(&qp->q_lock, flags);
+
+       if (cq->ibcq.comp_handler)
+               (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
+}
+
+static void qedr_destroy_gsi_cq(struct qedr_dev *dev,
+                               struct ib_qp_init_attr *attrs)
+{
+       struct qed_rdma_destroy_cq_in_params iparams;
+       struct qed_rdma_destroy_cq_out_params oparams;
+       struct qedr_cq *cq;
+
+       cq = get_qedr_cq(attrs->send_cq);
+       iparams.icid = cq->icid;
+       dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
+       dev->ops->common->chain_free(dev->cdev, &cq->pbl);
+
+       cq = get_qedr_cq(attrs->recv_cq);
+       /* if a dedicated recv_cq was used, delete it too */
+       if (iparams.icid != cq->icid) {
+               iparams.icid = cq->icid;
+               dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
+               dev->ops->common->chain_free(dev->cdev, &cq->pbl);
+       }
+}
+
+static inline int qedr_check_gsi_qp_attrs(struct qedr_dev *dev,
+                                         struct ib_qp_init_attr *attrs)
+{
+       if (attrs->cap.max_recv_sge > QEDR_GSI_MAX_RECV_SGE) {
+               DP_ERR(dev,
+                      "create gsi qp: failed. max_recv_sge is larger than max %d>%d\n",
+                      attrs->cap.max_recv_sge, QEDR_GSI_MAX_RECV_SGE);
+               return -EINVAL;
+       }
+
+       if (attrs->cap.max_recv_wr > QEDR_GSI_MAX_RECV_WR) {
+               DP_ERR(dev,
+                      "create gsi qp: failed. max_recv_wr is too large %d>%d\n",
+                      attrs->cap.max_recv_wr, QEDR_GSI_MAX_RECV_WR);
+               return -EINVAL;
+       }
+
+       if (attrs->cap.max_send_wr > QEDR_GSI_MAX_SEND_WR) {
+               DP_ERR(dev,
+                      "create gsi qp: failed. max_send_wr is too large %d>%d\n",
+                      attrs->cap.max_send_wr, QEDR_GSI_MAX_SEND_WR);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
+                                struct ib_qp_init_attr *attrs,
+                                struct qedr_qp *qp)
+{
+       struct qed_roce_ll2_params ll2_params;
+       int rc;
+
+       rc = qedr_check_gsi_qp_attrs(dev, attrs);
+       if (rc)
+               return ERR_PTR(rc);
+
+       /* configure and start LL2 */
+       memset(&ll2_params, 0, sizeof(ll2_params));
+       ll2_params.max_tx_buffers = attrs->cap.max_send_wr;
+       ll2_params.max_rx_buffers = attrs->cap.max_recv_wr;
+       ll2_params.cbs.tx_cb = qedr_ll2_tx_cb;
+       ll2_params.cbs.rx_cb = qedr_ll2_rx_cb;
+       ll2_params.cb_cookie = (void *)dev;
+       ll2_params.mtu = dev->ndev->mtu;
+       ether_addr_copy(ll2_params.mac_address, dev->ndev->dev_addr);
+       rc = dev->ops->roce_ll2_start(dev->cdev, &ll2_params);
+       if (rc) {
+               DP_ERR(dev, "create gsi qp: failed on ll2 start. rc=%d\n", rc);
+               return ERR_PTR(rc);
+       }
+
+       /* create QP */
+       qp->ibqp.qp_num = 1;
+       qp->rq.max_wr = attrs->cap.max_recv_wr;
+       qp->sq.max_wr = attrs->cap.max_send_wr;
+
+       qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
+                               GFP_KERNEL);
+       if (!qp->rqe_wr_id)
+               goto err;
+       qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
+                               GFP_KERNEL);
+       if (!qp->wqe_wr_id)
+               goto err;
+
+       qedr_store_gsi_qp_cq(dev, qp, attrs);
+       ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);
+
+       /* the GSI CQ is handled by the driver so remove it from the FW */
+       qedr_destroy_gsi_cq(dev, attrs);
+       dev->gsi_sqcq->cq_type = QEDR_CQ_TYPE_GSI;
+       dev->gsi_rqcq->cq_type = QEDR_CQ_TYPE_GSI;
+
+       DP_DEBUG(dev, QEDR_MSG_GSI, "created GSI QP %p\n", qp);
+
+       return &qp->ibqp;
+
+err:
+       kfree(qp->rqe_wr_id);
+
+       rc = dev->ops->roce_ll2_stop(dev->cdev);
+       if (rc)
+               DP_ERR(dev, "create gsi qp: failed to stop ll2 in error flow\n");
+
+       return ERR_PTR(-ENOMEM);
+}
+
+int qedr_destroy_gsi_qp(struct qedr_dev *dev)
+{
+       int rc;
+
+       rc = dev->ops->roce_ll2_stop(dev->cdev);
+       if (rc)
+               DP_ERR(dev, "destroy gsi qp: failed (rc=%d)\n", rc);
+       else
+               DP_DEBUG(dev, QEDR_MSG_GSI, "destroy gsi qp: success\n");
+
+       return rc;
+}
+
+#define QEDR_MAX_UD_HEADER_SIZE        (100)
+#define QEDR_GSI_QPN           (1)
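+/* Build the full UD packet header (L2/VLAN, GRH or IPv4, BTH, DETH and,
+ * for RoCE v2, UDP) for a GSI send WR.
+ */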
+static inline int qedr_gsi_build_header(struct qedr_dev *dev,
+                                       struct qedr_qp *qp,
+                                       struct ib_send_wr *swr,
+                                       struct ib_ud_header *udh,
+                                       int *roce_mode)
+{
+       bool has_vlan = false, has_grh_ipv6 = true;
+       struct ib_ah_attr *ah_attr = &get_qedr_ah(ud_wr(swr)->ah)->attr;
+       struct ib_global_route *grh = &ah_attr->grh;
+       union ib_gid sgid;
+       int send_size = 0;
+       u16 vlan_id = 0;
+       u16 ether_type;
+       struct ib_gid_attr sgid_attr;
+       int rc;
+       int ip_ver = 0;
+
+       bool has_udp = false;
+       int i;
+
+       send_size = 0;
+       for (i = 0; i < swr->num_sge; ++i)
+               send_size += swr->sg_list[i].length;
+
+       rc = ib_get_cached_gid(qp->ibqp.device, ah_attr->port_num,
+                              grh->sgid_index, &sgid, &sgid_attr);
+       if (rc) {
+               DP_ERR(dev,
+                      "gsi post send: failed to get cached GID (port=%d, ix=%d)\n",
+                      ah_attr->port_num, grh->sgid_index);
+               return rc;
+       }
+
+       if (sgid_attr.ndev) {
+               vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev);
+               if (vlan_id < VLAN_CFI_MASK)
+                       has_vlan = true;
+               dev_put(sgid_attr.ndev);
+       }
+
+       if (!memcmp(&sgid, &zgid, sizeof(sgid))) {
+               DP_ERR(dev, "gsi post send: GID not found GID index %d\n",
+                      ah_attr->grh.sgid_index);
+               return -ENOENT;
+       }
+
+       has_udp = (sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP);
+       if (!has_udp) {
+               /* RoCE v1 */
+               ether_type = ETH_P_ROCE;
+               *roce_mode = ROCE_V1;
+       } else if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) {
+               /* RoCE v2 IPv4 */
+               ip_ver = 4;
+               ether_type = ETH_P_IP;
+               has_grh_ipv6 = false;
+               *roce_mode = ROCE_V2_IPV4;
+       } else {
+               /* RoCE v2 IPv6 */
+               ip_ver = 6;
+               ether_type = ETH_P_IPV6;
+               *roce_mode = ROCE_V2_IPV6;
+       }
+
+       rc = ib_ud_header_init(send_size, false, true, has_vlan,
+                              has_grh_ipv6, ip_ver, has_udp, 0, udh);
+       if (rc) {
+               DP_ERR(dev, "gsi post send: failed to init header\n");
+               return rc;
+       }
+
+       /* ENET + VLAN headers */
+       ether_addr_copy(udh->eth.dmac_h, ah_attr->dmac);
+       ether_addr_copy(udh->eth.smac_h, dev->ndev->dev_addr);
+       if (has_vlan) {
+               udh->eth.type = htons(ETH_P_8021Q);
+               udh->vlan.tag = htons(vlan_id);
+               udh->vlan.type = htons(ether_type);
+       } else {
+               udh->eth.type = htons(ether_type);
+       }
+
+       /* BTH */
+       udh->bth.solicited_event = !!(swr->send_flags & IB_SEND_SOLICITED);
+       udh->bth.pkey = QEDR_ROCE_PKEY_DEFAULT;
+       udh->bth.destination_qpn = htonl(ud_wr(swr)->remote_qpn);
+       udh->bth.psn = htonl((qp->sq_psn++) & ((1 << 24) - 1));
+       udh->bth.opcode = IB_OPCODE_UD_SEND_ONLY;
+
+       /* DETH */
+       /* GSI traffic uses the well-known Q_Key (IB_QP1_QKEY) */
+       udh->deth.qkey = htonl(0x80010000);
+       udh->deth.source_qpn = htonl(QEDR_GSI_QPN);
+
+       if (has_grh_ipv6) {
+               /* GRH / IPv6 header */
+               udh->grh.traffic_class = grh->traffic_class;
+               udh->grh.flow_label = grh->flow_label;
+               udh->grh.hop_limit = grh->hop_limit;
+               udh->grh.destination_gid = grh->dgid;
+               memcpy(&udh->grh.source_gid.raw, &sgid.raw,
+                      sizeof(udh->grh.source_gid.raw));
+       } else {
+               /* IPv4 header */
+               u32 ipv4_addr;
+
+               udh->ip4.protocol = IPPROTO_UDP;
+               udh->ip4.tos = htonl(ah_attr->grh.flow_label);
+               udh->ip4.frag_off = htons(IP_DF);
+               udh->ip4.ttl = ah_attr->grh.hop_limit;
+
+               ipv4_addr = qedr_get_ipv4_from_gid(sgid.raw);
+               udh->ip4.saddr = ipv4_addr;
+               ipv4_addr = qedr_get_ipv4_from_gid(ah_attr->grh.dgid.raw);
+               udh->ip4.daddr = ipv4_addr;
+               /* note: checksum is calculated by the device */
+       }
+
+       /* UDP */
+       if (has_udp) {
+               udh->udp.sport = htons(QEDR_ROCE_V2_UDP_SPORT);
+               udh->udp.dport = htons(ROCE_V2_UDP_DPORT);
+               udh->udp.csum = 0;
+               /* the UDP length field is left untouched, hence zero */
+       }
+       return 0;
+}
+
+static inline int qedr_gsi_build_packet(struct qedr_dev *dev,
+                                       struct qedr_qp *qp,
+                                       struct ib_send_wr *swr,
+                                       struct qed_roce_ll2_packet **p_packet)
+{
+       u8 ud_header_buffer[QEDR_MAX_UD_HEADER_SIZE];
+       struct qed_roce_ll2_packet *packet;
+       struct pci_dev *pdev = dev->pdev;
+       int roce_mode, header_size;
+       struct ib_ud_header udh;
+       int i, rc;
+
+       *p_packet = NULL;
+
+       rc = qedr_gsi_build_header(dev, qp, swr, &udh, &roce_mode);
+       if (rc)
+               return rc;
+
+       header_size = ib_ud_header_pack(&udh, &ud_header_buffer);
+
+       packet = kzalloc(sizeof(*packet), GFP_ATOMIC);
+       if (!packet)
+               return -ENOMEM;
+
+       packet->header.vaddr = dma_alloc_coherent(&pdev->dev, header_size,
+                                                 &packet->header.baddr,
+                                                 GFP_ATOMIC);
+       if (!packet->header.vaddr) {
+               kfree(packet);
+               return -ENOMEM;
+       }
+
+       if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h))
+               packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB;
+       else
+               packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
+
+       packet->roce_mode = roce_mode;
+       memcpy(packet->header.vaddr, ud_header_buffer, header_size);
+       packet->header.len = header_size;
+       packet->n_seg = swr->num_sge;
+       for (i = 0; i < packet->n_seg; i++) {
+               packet->payload[i].baddr = swr->sg_list[i].addr;
+               packet->payload[i].len = swr->sg_list[i].length;
+       }
+
+       *p_packet = packet;
+
+       return 0;
+}
+
+int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+                      struct ib_send_wr **bad_wr)
+{
+       struct qed_roce_ll2_packet *pkt = NULL;
+       struct qedr_qp *qp = get_qedr_qp(ibqp);
+       struct qed_roce_ll2_tx_params params;
+       struct qedr_dev *dev = qp->dev;
+       unsigned long flags;
+       int rc;
+
+       if (qp->state != QED_ROCE_QP_STATE_RTS) {
+               *bad_wr = wr;
+               DP_ERR(dev,
+                      "gsi post send: failed to post. QP state is %d and not QED_ROCE_QP_STATE_RTS\n",
+                      qp->state);
+               return -EINVAL;
+       }
+
+       if (wr->num_sge > RDMA_MAX_SGE_PER_SQ_WQE) {
+               DP_ERR(dev, "gsi post send: num_sge is too large (%d>%d)\n",
+                      wr->num_sge, RDMA_MAX_SGE_PER_SQ_WQE);
+               rc = -EINVAL;
+               goto err;
+       }
+
+       if (wr->opcode != IB_WR_SEND) {
+               DP_ERR(dev,
+                      "gsi post send: failed due to unsupported opcode %d\n",
+                      wr->opcode);
+               rc = -EINVAL;
+               goto err;
+       }
+
+       memset(&params, 0, sizeof(params));
+
+       spin_lock_irqsave(&qp->q_lock, flags);
+
+       rc = qedr_gsi_build_packet(dev, qp, wr, &pkt);
+       if (rc) {
+               spin_unlock_irqrestore(&qp->q_lock, flags);
+               goto err;
+       }
+
+       rc = dev->ops->roce_ll2_tx(dev->cdev, pkt, &params);
+       if (!rc) {
+               qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
+               qedr_inc_sw_prod(&qp->sq);
+               DP_DEBUG(qp->dev, QEDR_MSG_GSI,
+                        "gsi post send: opcode=%d, in_irq=%ld, irqs_disabled=%d, wr_id=%llx\n",
+                        wr->opcode, in_irq(), irqs_disabled(), wr->wr_id);
+       } else {
+               if (rc == QED_ROCE_TX_HEAD_FAILURE) {
+                       /* TX failed while posting header - release resources */
+                       dma_free_coherent(&dev->pdev->dev, pkt->header.len,
+                                         pkt->header.vaddr, pkt->header.baddr);
+                       kfree(pkt);
+               } else if (rc == QED_ROCE_TX_FRAG_FAILURE) {
+                       /* Nothing to do here: TX failed while posting a
+                        * fragment, so the resources are released in the TX
+                        * completion callback.
+                        */
+               }
+
+               DP_ERR(dev, "gsi post send: failed to transmit (rc=%d)\n", rc);
+               rc = -EAGAIN;
+               *bad_wr = wr;
+       }
+
+       spin_unlock_irqrestore(&qp->q_lock, flags);
+
+       if (wr->next) {
+               DP_ERR(dev,
+                      "gsi post send: failed second WR. Only one WR may be passed at a time\n");
+               *bad_wr = wr->next;
+               rc = -EINVAL;
+       }
+
+       return rc;
+
+err:
+       *bad_wr = wr;
+       return rc;
+}
+
+int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+                      struct ib_recv_wr **bad_wr)
+{
+       struct qedr_dev *dev = get_qedr_dev(ibqp->device);
+       struct qedr_qp *qp = get_qedr_qp(ibqp);
+       struct qed_roce_ll2_buffer buf;
+       unsigned long flags;
+       int status = 0;
+       int rc;
+
+       if ((qp->state != QED_ROCE_QP_STATE_RTR) &&
+           (qp->state != QED_ROCE_QP_STATE_RTS)) {
+               *bad_wr = wr;
+               DP_ERR(dev,
+                      "gsi post recv: failed to post rx buffer. state is %d and not QED_ROCE_QP_STATE_RTR/S\n",
+                      qp->state);
+               return -EINVAL;
+       }
+
+       memset(&buf, 0, sizeof(buf));
+
+       spin_lock_irqsave(&qp->q_lock, flags);
+
+       while (wr) {
+               if (wr->num_sge > QEDR_GSI_MAX_RECV_SGE) {
+                       DP_ERR(dev,
+                              "gsi post recv: failed to post rx buffer. too many sges %d>%d\n",
+                              wr->num_sge, QEDR_GSI_MAX_RECV_SGE);
+                       rc = -EINVAL;
+                       goto err;
+               }
+
+               buf.baddr = wr->sg_list[0].addr;
+               buf.len = wr->sg_list[0].length;
+
+               rc = dev->ops->roce_ll2_post_rx_buffer(dev->cdev, &buf, 0, 1);
+               if (rc) {
+                       DP_ERR(dev,
+                              "gsi post recv: failed to post rx buffer (rc=%d)\n",
+                              rc);
+                       goto err;
+               }
+
+               memset(&qp->rqe_wr_id[qp->rq.prod], 0,
+                      sizeof(qp->rqe_wr_id[qp->rq.prod]));
+               qp->rqe_wr_id[qp->rq.prod].sg_list[0] = wr->sg_list[0];
+               qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
+
+               qedr_inc_sw_prod(&qp->rq);
+
+               wr = wr->next;
+       }
+
+       spin_unlock_irqrestore(&qp->q_lock, flags);
+
+       return status;
+err:
+       spin_unlock_irqrestore(&qp->q_lock, flags);
+       *bad_wr = wr;
+       return rc;
+}
+
+int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+{
+       struct qedr_dev *dev = get_qedr_dev(ibcq->device);
+       struct qedr_cq *cq = get_qedr_cq(ibcq);
+       struct qedr_qp *qp = dev->gsi_qp;
+       unsigned long flags;
+       int i = 0;
+
+       spin_lock_irqsave(&cq->cq_lock, flags);
+
+       while (i < num_entries && qp->rq.cons != qp->rq.gsi_cons) {
+               memset(&wc[i], 0, sizeof(*wc));
+
+               wc[i].qp = &qp->ibqp;
+               wc[i].wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
+               wc[i].opcode = IB_WC_RECV;
+               wc[i].pkey_index = 0;
+               wc[i].status = (qp->rqe_wr_id[qp->rq.cons].rc) ?
+                   IB_WC_GENERAL_ERR : IB_WC_SUCCESS;
+               /* index 0: only a single recv SGE is currently supported */
+               wc[i].byte_len = qp->rqe_wr_id[qp->rq.cons].sg_list[0].length;
+               wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK;
+               ether_addr_copy(wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac);
+               wc[i].wc_flags |= IB_WC_WITH_SMAC;
+               if (qp->rqe_wr_id[qp->rq.cons].vlan_id) {
+                       wc[i].wc_flags |= IB_WC_WITH_VLAN;
+                       wc[i].vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan_id;
+               }
+
+               qedr_inc_sw_cons(&qp->rq);
+               i++;
+       }
+
+       while (i < num_entries && qp->sq.cons != qp->sq.gsi_cons) {
+               memset(&wc[i], 0, sizeof(*wc));
+
+               wc[i].qp = &qp->ibqp;
+               wc[i].wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
+               wc[i].opcode = IB_WC_SEND;
+               wc[i].status = IB_WC_SUCCESS;
+
+               qedr_inc_sw_cons(&qp->sq);
+               i++;
+       }
+
+       spin_unlock_irqrestore(&cq->cq_lock, flags);
+
+       DP_DEBUG(dev, QEDR_MSG_GSI,
+                "gsi poll_cq: requested entries=%d, actual=%d, qp->rq.cons=%d, qp->rq.gsi_cons=%d, qp->sq.cons=%d, qp->sq.gsi_cons=%d, qp_num=%d\n",
+                num_entries, i, qp->rq.cons, qp->rq.gsi_cons, qp->sq.cons,
+                qp->sq.gsi_cons, qp->ibqp.qp_num);
+
+       return i;
+}
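The GSI send and receive queues above are plain software rings: a producer index advances on post, a consumer index advances on completion, and both wrap at max_wr. Below is a minimal, self-contained sketch of that index arithmetic (an editor's illustration with hypothetical names; it mirrors qedr_inc_sw_gsi_cons() but is not driver code):

#include <stdio.h>

/* Hypothetical stand-in for struct qedr_qp_hwq_info: only the ring fields */
struct ring {
	unsigned int prod;	/* next free slot (post side) */
	unsigned int cons;	/* next slot to complete (poll side) */
	unsigned int max_wr;	/* ring size */
};

/* Same wrap rule as qedr_inc_sw_gsi_cons(): advance modulo the ring size */
static void ring_inc(unsigned int *idx, unsigned int max_wr)
{
	*idx = (*idx + 1) % max_wr;
}

int main(void)
{
	struct ring rq = { .prod = 0, .cons = 0, .max_wr = 4 };
	int i;

	for (i = 0; i < 6; i++) {
		ring_inc(&rq.prod, rq.max_wr);
		printf("post %d -> prod=%u\n", i, rq.prod);
	}
	/* prod wraps: 1 2 3 0 1 2 */
	return 0;
}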
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.h b/drivers/infiniband/hw/qedr/qedr_cm.h
new file mode 100644 (file)
index 0000000..9ba6e15
--- /dev/null
@@ -0,0 +1,61 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef LINUX_QEDR_CM_H_
+#define LINUX_QEDR_CM_H_
+
+#define QEDR_GSI_MAX_RECV_WR   (4096)
+#define QEDR_GSI_MAX_SEND_WR   (4096)
+
+#define QEDR_GSI_MAX_RECV_SGE  (1)     /* LL2 FW limitation */
+
+#define ETH_P_ROCE             (0x8915)
+#define QEDR_ROCE_V2_UDP_SPORT (0000)
+
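+/* RoCE v2 IPv4 GIDs are IPv4-mapped IPv6 addresses (::ffff:a.b.c.d),
+ * so the IPv4 address occupies bytes 12..15 of the 16-byte GID.
+ */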
+static inline u32 qedr_get_ipv4_from_gid(u8 *gid)
+{
+       return *(u32 *)(void *)&gid[12];
+}
+
+/* RDMA CM */
+int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+                      struct ib_recv_wr **bad_wr);
+int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+                      struct ib_send_wr **bad_wr);
+struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
+                                struct ib_qp_init_attr *attrs,
+                                struct qedr_qp *qp);
+void qedr_store_gsi_qp_cq(struct qedr_dev *dev,
+                         struct qedr_qp *qp, struct ib_qp_init_attr *attrs);
+int qedr_destroy_gsi_qp(struct qedr_dev *dev);
+void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info *info);
+#endif
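qedr_get_ipv4_from_gid() above relies on the IPv4-mapped GID layout noted in the header. A userspace sketch of the same extraction (assumed layout; the kernel helper does a type-punned load where this uses memcpy):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

/* Userspace copy of the helper: the IPv4 address sits in GID bytes 12..15 */
static uint32_t get_ipv4_from_gid(const uint8_t *gid)
{
	uint32_t addr;

	memcpy(&addr, &gid[12], sizeof(addr));	/* already network byte order */
	return addr;
}

int main(void)
{
	/* ::ffff:192.0.2.1 laid out as a raw 16-byte GID */
	uint8_t gid[16] = { 0, 0, 0, 0, 0, 0, 0, 0,
			    0, 0, 0xff, 0xff, 192, 0, 2, 1 };
	struct in_addr in = { .s_addr = get_ipv4_from_gid(gid) };

	printf("%s\n", inet_ntoa(in));	/* prints 192.0.2.1 */
	return 0;
}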
diff --git a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
new file mode 100644 (file)
index 0000000..5c98d20
--- /dev/null
@@ -0,0 +1,748 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __QED_HSI_RDMA__
+#define __QED_HSI_RDMA__
+
+#include <linux/qed/rdma_common.h>
+
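+/* Sub-fields inside the flags bytes below are described by _MASK/_SHIFT
+ * pairs and accessed with the qed GET_FIELD()/SET_FIELD() helpers.
+ */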
+/* rdma completion notification queue element */
+struct rdma_cnqe {
+       struct regpair  cq_handle;
+};
+
+struct rdma_cqe_responder {
+       struct regpair srq_wr_id;
+       struct regpair qp_handle;
+       __le32 imm_data_or_inv_r_Key;
+       __le32 length;
+       __le32 imm_data_hi;
+       __le16 rq_cons;
+       u8 flags;
+#define RDMA_CQE_RESPONDER_TOGGLE_BIT_MASK  0x1
+#define RDMA_CQE_RESPONDER_TOGGLE_BIT_SHIFT 0
+#define RDMA_CQE_RESPONDER_TYPE_MASK        0x3
+#define RDMA_CQE_RESPONDER_TYPE_SHIFT       1
+#define RDMA_CQE_RESPONDER_INV_FLG_MASK     0x1
+#define RDMA_CQE_RESPONDER_INV_FLG_SHIFT    3
+#define RDMA_CQE_RESPONDER_IMM_FLG_MASK     0x1
+#define RDMA_CQE_RESPONDER_IMM_FLG_SHIFT    4
+#define RDMA_CQE_RESPONDER_RDMA_FLG_MASK    0x1
+#define RDMA_CQE_RESPONDER_RDMA_FLG_SHIFT   5
+#define RDMA_CQE_RESPONDER_RESERVED2_MASK   0x3
+#define RDMA_CQE_RESPONDER_RESERVED2_SHIFT  6
+       u8 status;
+};
+
+struct rdma_cqe_requester {
+       __le16 sq_cons;
+       __le16 reserved0;
+       __le32 reserved1;
+       struct regpair qp_handle;
+       struct regpair reserved2;
+       __le32 reserved3;
+       __le16 reserved4;
+       u8 flags;
+#define RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK  0x1
+#define RDMA_CQE_REQUESTER_TOGGLE_BIT_SHIFT 0
+#define RDMA_CQE_REQUESTER_TYPE_MASK        0x3
+#define RDMA_CQE_REQUESTER_TYPE_SHIFT       1
+#define RDMA_CQE_REQUESTER_RESERVED5_MASK   0x1F
+#define RDMA_CQE_REQUESTER_RESERVED5_SHIFT  3
+       u8 status;
+};
+
+struct rdma_cqe_common {
+       struct regpair reserved0;
+       struct regpair qp_handle;
+       __le16 reserved1[7];
+       u8 flags;
+#define RDMA_CQE_COMMON_TOGGLE_BIT_MASK  0x1
+#define RDMA_CQE_COMMON_TOGGLE_BIT_SHIFT 0
+#define RDMA_CQE_COMMON_TYPE_MASK        0x3
+#define RDMA_CQE_COMMON_TYPE_SHIFT       1
+#define RDMA_CQE_COMMON_RESERVED2_MASK   0x1F
+#define RDMA_CQE_COMMON_RESERVED2_SHIFT  3
+       u8 status;
+};
+
+/* rdma completion queue element */
+union rdma_cqe {
+       struct rdma_cqe_responder resp;
+       struct rdma_cqe_requester req;
+       struct rdma_cqe_common cmn;
+};
+
+/* CQE requester status enumeration */
+enum rdma_cqe_requester_status_enum {
+       RDMA_CQE_REQ_STS_OK,
+       RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR,
+       RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR,
+       RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR,
+       RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR,
+       RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR,
+       RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR,
+       RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR,
+       RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR,
+       RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR,
+       RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR,
+       RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR,
+       MAX_RDMA_CQE_REQUESTER_STATUS_ENUM
+};
+
+/* CQE responder status enumeration */
+enum rdma_cqe_responder_status_enum {
+       RDMA_CQE_RESP_STS_OK,
+       RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR,
+       RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR,
+       RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR,
+       RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR,
+       RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR,
+       RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR,
+       RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR,
+       MAX_RDMA_CQE_RESPONDER_STATUS_ENUM
+};
+
+/* CQE type enumeration */
+enum rdma_cqe_type {
+       RDMA_CQE_TYPE_REQUESTER,
+       RDMA_CQE_TYPE_RESPONDER_RQ,
+       RDMA_CQE_TYPE_RESPONDER_SRQ,
+       RDMA_CQE_TYPE_INVALID,
+       MAX_RDMA_CQE_TYPE
+};
+
+struct rdma_sq_sge {
+       __le32 length;
+       struct regpair  addr;
+       __le32 l_key;
+};
+
+struct rdma_rq_sge {
+       struct regpair addr;
+       __le32 length;
+       __le32 flags;
+#define RDMA_RQ_SGE_L_KEY_MASK      0x3FFFFFF
+#define RDMA_RQ_SGE_L_KEY_SHIFT     0
+#define RDMA_RQ_SGE_NUM_SGES_MASK   0x7
+#define RDMA_RQ_SGE_NUM_SGES_SHIFT  26
+#define RDMA_RQ_SGE_RESERVED0_MASK  0x7
+#define RDMA_RQ_SGE_RESERVED0_SHIFT 29
+};
+
+struct rdma_srq_sge {
+       struct regpair addr;
+       __le32 length;
+       __le32 l_key;
+};
+
+/* Rdma doorbell data for SQ and RQ */
+struct rdma_pwm_val16_data {
+       __le16 icid;
+       __le16 value;
+};
+
+union rdma_pwm_val16_data_union {
+       struct rdma_pwm_val16_data as_struct;
+       __le32 as_dword;
+};
+
+/* Rdma doorbell data for CQ */
+struct rdma_pwm_val32_data {
+       __le16 icid;
+       u8 agg_flags;
+       u8 params;
+#define RDMA_PWM_VAL32_DATA_AGG_CMD_MASK    0x3
+#define RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT   0
+#define RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK  0x1
+#define RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT 2
+#define RDMA_PWM_VAL32_DATA_RESERVED_MASK   0x1F
+#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT  3
+       __le32 value;
+};
+
+/* DIF Block size options */
+enum rdma_dif_block_size {
+       RDMA_DIF_BLOCK_512 = 0,
+       RDMA_DIF_BLOCK_4096 = 1,
+       MAX_RDMA_DIF_BLOCK_SIZE
+};
+
+/* DIF CRC initial value */
+enum rdma_dif_crc_seed {
+       RDMA_DIF_CRC_SEED_0000 = 0,
+       RDMA_DIF_CRC_SEED_FFFF = 1,
+       MAX_RDMA_DIF_CRC_SEED
+};
+
+/* RDMA DIF Error Result Structure */
+struct rdma_dif_error_result {
+       __le32 error_intervals;
+       __le32 dif_error_1st_interval;
+       u8 flags;
+#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_CRC_MASK      0x1
+#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_CRC_SHIFT     0
+#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_APP_TAG_MASK  0x1
+#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_APP_TAG_SHIFT 1
+#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_REF_TAG_MASK  0x1
+#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_REF_TAG_SHIFT 2
+#define RDMA_DIF_ERROR_RESULT_RESERVED0_MASK               0xF
+#define RDMA_DIF_ERROR_RESULT_RESERVED0_SHIFT              3
+#define RDMA_DIF_ERROR_RESULT_TOGGLE_BIT_MASK              0x1
+#define RDMA_DIF_ERROR_RESULT_TOGGLE_BIT_SHIFT             7
+       u8 reserved1[55];
+};
+
+/* DIF IO direction */
+enum rdma_dif_io_direction_flg {
+       RDMA_DIF_DIR_RX = 0,
+       RDMA_DIF_DIR_TX = 1,
+       MAX_RDMA_DIF_IO_DIRECTION_FLG
+};
+
+/* RDMA DIF Runt Result Structure */
+struct rdma_dif_runt_result {
+       __le16 guard_tag;
+       __le16 reserved[3];
+};
+
+/* Memory window type enumeration */
+enum rdma_mw_type {
+       RDMA_MW_TYPE_1,
+       RDMA_MW_TYPE_2A,
+       MAX_RDMA_MW_TYPE
+};
+
+struct rdma_sq_atomic_wqe {
+       __le32 reserved1;
+       __le32 length;
+       __le32 xrc_srq;
+       u8 req_type;
+       u8 flags;
+#define RDMA_SQ_ATOMIC_WQE_COMP_FLG_MASK         0x1
+#define RDMA_SQ_ATOMIC_WQE_COMP_FLG_SHIFT        0
+#define RDMA_SQ_ATOMIC_WQE_RD_FENCE_FLG_MASK     0x1
+#define RDMA_SQ_ATOMIC_WQE_RD_FENCE_FLG_SHIFT    1
+#define RDMA_SQ_ATOMIC_WQE_INV_FENCE_FLG_MASK    0x1
+#define RDMA_SQ_ATOMIC_WQE_INV_FENCE_FLG_SHIFT   2
+#define RDMA_SQ_ATOMIC_WQE_SE_FLG_MASK           0x1
+#define RDMA_SQ_ATOMIC_WQE_SE_FLG_SHIFT          3
+#define RDMA_SQ_ATOMIC_WQE_INLINE_FLG_MASK       0x1
+#define RDMA_SQ_ATOMIC_WQE_INLINE_FLG_SHIFT      4
+#define RDMA_SQ_ATOMIC_WQE_DIF_ON_HOST_FLG_MASK  0x1
+#define RDMA_SQ_ATOMIC_WQE_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_ATOMIC_WQE_RESERVED0_MASK        0x3
+#define RDMA_SQ_ATOMIC_WQE_RESERVED0_SHIFT       6
+       u8 wqe_size;
+       u8 prev_wqe_size;
+       struct regpair remote_va;
+       __le32 r_key;
+       __le32 reserved2;
+       struct regpair cmp_data;
+       struct regpair swap_data;
+};
+
+/* First element (16 bytes) of atomic wqe */
+struct rdma_sq_atomic_wqe_1st {
+       __le32 reserved1;
+       __le32 length;
+       __le32 xrc_srq;
+       u8 req_type;
+       u8 flags;
+#define RDMA_SQ_ATOMIC_WQE_1ST_COMP_FLG_MASK       0x1
+#define RDMA_SQ_ATOMIC_WQE_1ST_COMP_FLG_SHIFT      0
+#define RDMA_SQ_ATOMIC_WQE_1ST_RD_FENCE_FLG_MASK   0x1
+#define RDMA_SQ_ATOMIC_WQE_1ST_RD_FENCE_FLG_SHIFT  1
+#define RDMA_SQ_ATOMIC_WQE_1ST_INV_FENCE_FLG_MASK  0x1
+#define RDMA_SQ_ATOMIC_WQE_1ST_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_ATOMIC_WQE_1ST_SE_FLG_MASK         0x1
+#define RDMA_SQ_ATOMIC_WQE_1ST_SE_FLG_SHIFT        3
+#define RDMA_SQ_ATOMIC_WQE_1ST_INLINE_FLG_MASK     0x1
+#define RDMA_SQ_ATOMIC_WQE_1ST_INLINE_FLG_SHIFT    4
+#define RDMA_SQ_ATOMIC_WQE_1ST_RESERVED0_MASK      0x7
+#define RDMA_SQ_ATOMIC_WQE_1ST_RESERVED0_SHIFT     5
+       u8 wqe_size;
+       u8 prev_wqe_size;
+};
+
+/* Second element (16 bytes) of atomic wqe */
+struct rdma_sq_atomic_wqe_2nd {
+       struct regpair remote_va;
+       __le32 r_key;
+       __le32 reserved2;
+};
+
+/* Third element (16 bytes) of atomic wqe */
+struct rdma_sq_atomic_wqe_3rd {
+       struct regpair cmp_data;
+       struct regpair swap_data;
+};
+
+struct rdma_sq_bind_wqe {
+       struct regpair addr;
+       __le32 l_key;
+       u8 req_type;
+       u8 flags;
+#define RDMA_SQ_BIND_WQE_COMP_FLG_MASK       0x1
+#define RDMA_SQ_BIND_WQE_COMP_FLG_SHIFT      0
+#define RDMA_SQ_BIND_WQE_RD_FENCE_FLG_MASK   0x1
+#define RDMA_SQ_BIND_WQE_RD_FENCE_FLG_SHIFT  1
+#define RDMA_SQ_BIND_WQE_INV_FENCE_FLG_MASK  0x1
+#define RDMA_SQ_BIND_WQE_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_BIND_WQE_SE_FLG_MASK         0x1
+#define RDMA_SQ_BIND_WQE_SE_FLG_SHIFT        3
+#define RDMA_SQ_BIND_WQE_INLINE_FLG_MASK     0x1
+#define RDMA_SQ_BIND_WQE_INLINE_FLG_SHIFT    4
+#define RDMA_SQ_BIND_WQE_RESERVED0_MASK      0x7
+#define RDMA_SQ_BIND_WQE_RESERVED0_SHIFT     5
+       u8 wqe_size;
+       u8 prev_wqe_size;
+       u8 bind_ctrl;
+#define RDMA_SQ_BIND_WQE_ZERO_BASED_MASK     0x1
+#define RDMA_SQ_BIND_WQE_ZERO_BASED_SHIFT    0
+#define RDMA_SQ_BIND_WQE_MW_TYPE_MASK        0x1
+#define RDMA_SQ_BIND_WQE_MW_TYPE_SHIFT       1
+#define RDMA_SQ_BIND_WQE_RESERVED1_MASK      0x3F
+#define RDMA_SQ_BIND_WQE_RESERVED1_SHIFT     2
+       u8 access_ctrl;
+#define RDMA_SQ_BIND_WQE_REMOTE_READ_MASK    0x1
+#define RDMA_SQ_BIND_WQE_REMOTE_READ_SHIFT   0
+#define RDMA_SQ_BIND_WQE_REMOTE_WRITE_MASK   0x1
+#define RDMA_SQ_BIND_WQE_REMOTE_WRITE_SHIFT  1
+#define RDMA_SQ_BIND_WQE_ENABLE_ATOMIC_MASK  0x1
+#define RDMA_SQ_BIND_WQE_ENABLE_ATOMIC_SHIFT 2
+#define RDMA_SQ_BIND_WQE_LOCAL_READ_MASK     0x1
+#define RDMA_SQ_BIND_WQE_LOCAL_READ_SHIFT    3
+#define RDMA_SQ_BIND_WQE_LOCAL_WRITE_MASK    0x1
+#define RDMA_SQ_BIND_WQE_LOCAL_WRITE_SHIFT   4
+#define RDMA_SQ_BIND_WQE_RESERVED2_MASK      0x7
+#define RDMA_SQ_BIND_WQE_RESERVED2_SHIFT     5
+       u8 reserved3;
+       u8 length_hi;
+       __le32 length_lo;
+       __le32 parent_l_key;
+       __le32 reserved4;
+};
+
+/* First element (16 bytes) of bind wqe */
+struct rdma_sq_bind_wqe_1st {
+       struct regpair addr;
+       __le32 l_key;
+       u8 req_type;
+       u8 flags;
+#define RDMA_SQ_BIND_WQE_1ST_COMP_FLG_MASK       0x1
+#define RDMA_SQ_BIND_WQE_1ST_COMP_FLG_SHIFT      0
+#define RDMA_SQ_BIND_WQE_1ST_RD_FENCE_FLG_MASK   0x1
+#define RDMA_SQ_BIND_WQE_1ST_RD_FENCE_FLG_SHIFT  1
+#define RDMA_SQ_BIND_WQE_1ST_INV_FENCE_FLG_MASK  0x1
+#define RDMA_SQ_BIND_WQE_1ST_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_BIND_WQE_1ST_SE_FLG_MASK         0x1
+#define RDMA_SQ_BIND_WQE_1ST_SE_FLG_SHIFT        3
+#define RDMA_SQ_BIND_WQE_1ST_INLINE_FLG_MASK     0x1
+#define RDMA_SQ_BIND_WQE_1ST_INLINE_FLG_SHIFT    4
+#define RDMA_SQ_BIND_WQE_1ST_RESERVED0_MASK      0x7
+#define RDMA_SQ_BIND_WQE_1ST_RESERVED0_SHIFT     5
+       u8 wqe_size;
+       u8 prev_wqe_size;
+};
+
+/* Second element (16 bytes) of bind wqe */
+struct rdma_sq_bind_wqe_2nd {
+       u8 bind_ctrl;
+#define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_MASK     0x1
+#define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_SHIFT    0
+#define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_MASK        0x1
+#define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_SHIFT       1
+#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_MASK      0x3F
+#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_SHIFT     2
+       u8 access_ctrl;
+#define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_MASK    0x1
+#define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_SHIFT   0
+#define RDMA_SQ_BIND_WQE_2ND_REMOTE_WRITE_MASK   0x1
+#define RDMA_SQ_BIND_WQE_2ND_REMOTE_WRITE_SHIFT  1
+#define RDMA_SQ_BIND_WQE_2ND_ENABLE_ATOMIC_MASK  0x1
+#define RDMA_SQ_BIND_WQE_2ND_ENABLE_ATOMIC_SHIFT 2
+#define RDMA_SQ_BIND_WQE_2ND_LOCAL_READ_MASK     0x1
+#define RDMA_SQ_BIND_WQE_2ND_LOCAL_READ_SHIFT    3
+#define RDMA_SQ_BIND_WQE_2ND_LOCAL_WRITE_MASK    0x1
+#define RDMA_SQ_BIND_WQE_2ND_LOCAL_WRITE_SHIFT   4
+#define RDMA_SQ_BIND_WQE_2ND_RESERVED2_MASK      0x7
+#define RDMA_SQ_BIND_WQE_2ND_RESERVED2_SHIFT     5
+       u8 reserved3;
+       u8 length_hi;
+       __le32 length_lo;
+       __le32 parent_l_key;
+       __le32 reserved4;
+};
+
+/* Structure with only the common SQ WQE
+ * fields. Its size is one SQ element (16B).
+ */
+struct rdma_sq_common_wqe {
+       __le32 reserved1[3];
+       u8 req_type;
+       u8 flags;
+#define RDMA_SQ_COMMON_WQE_COMP_FLG_MASK       0x1
+#define RDMA_SQ_COMMON_WQE_COMP_FLG_SHIFT      0
+#define RDMA_SQ_COMMON_WQE_RD_FENCE_FLG_MASK   0x1
+#define RDMA_SQ_COMMON_WQE_RD_FENCE_FLG_SHIFT  1
+#define RDMA_SQ_COMMON_WQE_INV_FENCE_FLG_MASK  0x1
+#define RDMA_SQ_COMMON_WQE_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_COMMON_WQE_SE_FLG_MASK         0x1
+#define RDMA_SQ_COMMON_WQE_SE_FLG_SHIFT        3
+#define RDMA_SQ_COMMON_WQE_INLINE_FLG_MASK     0x1
+#define RDMA_SQ_COMMON_WQE_INLINE_FLG_SHIFT    4
+#define RDMA_SQ_COMMON_WQE_RESERVED0_MASK      0x7
+#define RDMA_SQ_COMMON_WQE_RESERVED0_SHIFT     5
+       u8 wqe_size;
+       u8 prev_wqe_size;
+};
+
+struct rdma_sq_fmr_wqe {
+       struct regpair addr;
+       __le32 l_key;
+       u8 req_type;
+       u8 flags;
+#define RDMA_SQ_FMR_WQE_COMP_FLG_MASK                0x1
+#define RDMA_SQ_FMR_WQE_COMP_FLG_SHIFT               0
+#define RDMA_SQ_FMR_WQE_RD_FENCE_FLG_MASK            0x1
+#define RDMA_SQ_FMR_WQE_RD_FENCE_FLG_SHIFT           1
+#define RDMA_SQ_FMR_WQE_INV_FENCE_FLG_MASK           0x1
+#define RDMA_SQ_FMR_WQE_INV_FENCE_FLG_SHIFT          2
+#define RDMA_SQ_FMR_WQE_SE_FLG_MASK                  0x1
+#define RDMA_SQ_FMR_WQE_SE_FLG_SHIFT                 3
+#define RDMA_SQ_FMR_WQE_INLINE_FLG_MASK              0x1
+#define RDMA_SQ_FMR_WQE_INLINE_FLG_SHIFT             4
+#define RDMA_SQ_FMR_WQE_DIF_ON_HOST_FLG_MASK         0x1
+#define RDMA_SQ_FMR_WQE_DIF_ON_HOST_FLG_SHIFT        5
+#define RDMA_SQ_FMR_WQE_RESERVED0_MASK               0x3
+#define RDMA_SQ_FMR_WQE_RESERVED0_SHIFT              6
+       u8 wqe_size;
+       u8 prev_wqe_size;
+       u8 fmr_ctrl;
+#define RDMA_SQ_FMR_WQE_PAGE_SIZE_LOG_MASK           0x1F
+#define RDMA_SQ_FMR_WQE_PAGE_SIZE_LOG_SHIFT          0
+#define RDMA_SQ_FMR_WQE_ZERO_BASED_MASK              0x1
+#define RDMA_SQ_FMR_WQE_ZERO_BASED_SHIFT             5
+#define RDMA_SQ_FMR_WQE_BIND_EN_MASK                 0x1
+#define RDMA_SQ_FMR_WQE_BIND_EN_SHIFT                6
+#define RDMA_SQ_FMR_WQE_RESERVED1_MASK               0x1
+#define RDMA_SQ_FMR_WQE_RESERVED1_SHIFT              7
+       u8 access_ctrl;
+#define RDMA_SQ_FMR_WQE_REMOTE_READ_MASK             0x1
+#define RDMA_SQ_FMR_WQE_REMOTE_READ_SHIFT            0
+#define RDMA_SQ_FMR_WQE_REMOTE_WRITE_MASK            0x1
+#define RDMA_SQ_FMR_WQE_REMOTE_WRITE_SHIFT           1
+#define RDMA_SQ_FMR_WQE_ENABLE_ATOMIC_MASK           0x1
+#define RDMA_SQ_FMR_WQE_ENABLE_ATOMIC_SHIFT          2
+#define RDMA_SQ_FMR_WQE_LOCAL_READ_MASK              0x1
+#define RDMA_SQ_FMR_WQE_LOCAL_READ_SHIFT             3
+#define RDMA_SQ_FMR_WQE_LOCAL_WRITE_MASK             0x1
+#define RDMA_SQ_FMR_WQE_LOCAL_WRITE_SHIFT            4
+#define RDMA_SQ_FMR_WQE_RESERVED2_MASK               0x7
+#define RDMA_SQ_FMR_WQE_RESERVED2_SHIFT              5
+       u8 reserved3;
+       u8 length_hi;
+       __le32 length_lo;
+       struct regpair pbl_addr;
+       __le32 dif_base_ref_tag;
+       __le16 dif_app_tag;
+       __le16 dif_app_tag_mask;
+       __le16 dif_runt_crc_value;
+       __le16 dif_flags;
+#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_MASK    0x1
+#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_SHIFT   0
+#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_MASK          0x1
+#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_SHIFT         1
+#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_MASK      0x1
+#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_SHIFT     2
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_MASK  0x1
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_SHIFT 3
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_MASK    0x1
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_SHIFT   4
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_MASK    0x1
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT   5
+#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK            0x1
+#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT           6
+#define RDMA_SQ_FMR_WQE_RESERVED4_MASK               0x1FF
+#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT              7
+       __le32 Reserved5;
+};
+
+/* First element (16 bytes) of fmr wqe */
+struct rdma_sq_fmr_wqe_1st {
+       struct regpair addr;
+       __le32 l_key;
+       u8 req_type;
+       u8 flags;
+#define RDMA_SQ_FMR_WQE_1ST_COMP_FLG_MASK         0x1
+#define RDMA_SQ_FMR_WQE_1ST_COMP_FLG_SHIFT        0
+#define RDMA_SQ_FMR_WQE_1ST_RD_FENCE_FLG_MASK     0x1
+#define RDMA_SQ_FMR_WQE_1ST_RD_FENCE_FLG_SHIFT    1
+#define RDMA_SQ_FMR_WQE_1ST_INV_FENCE_FLG_MASK    0x1
+#define RDMA_SQ_FMR_WQE_1ST_INV_FENCE_FLG_SHIFT   2
+#define RDMA_SQ_FMR_WQE_1ST_SE_FLG_MASK           0x1
+#define RDMA_SQ_FMR_WQE_1ST_SE_FLG_SHIFT          3
+#define RDMA_SQ_FMR_WQE_1ST_INLINE_FLG_MASK       0x1
+#define RDMA_SQ_FMR_WQE_1ST_INLINE_FLG_SHIFT      4
+#define RDMA_SQ_FMR_WQE_1ST_DIF_ON_HOST_FLG_MASK  0x1
+#define RDMA_SQ_FMR_WQE_1ST_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_FMR_WQE_1ST_RESERVED0_MASK        0x3
+#define RDMA_SQ_FMR_WQE_1ST_RESERVED0_SHIFT       6
+       u8 wqe_size;
+       u8 prev_wqe_size;
+};
+
+/* Second element (16 bytes) of fmr wqe */
+struct rdma_sq_fmr_wqe_2nd {
+       u8 fmr_ctrl;
+#define RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG_MASK  0x1F
+#define RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG_SHIFT 0
+#define RDMA_SQ_FMR_WQE_2ND_ZERO_BASED_MASK     0x1
+#define RDMA_SQ_FMR_WQE_2ND_ZERO_BASED_SHIFT    5
+#define RDMA_SQ_FMR_WQE_2ND_BIND_EN_MASK        0x1
+#define RDMA_SQ_FMR_WQE_2ND_BIND_EN_SHIFT       6
+#define RDMA_SQ_FMR_WQE_2ND_RESERVED1_MASK      0x1
+#define RDMA_SQ_FMR_WQE_2ND_RESERVED1_SHIFT     7
+       u8 access_ctrl;
+#define RDMA_SQ_FMR_WQE_2ND_REMOTE_READ_MASK    0x1
+#define RDMA_SQ_FMR_WQE_2ND_REMOTE_READ_SHIFT   0
+#define RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE_MASK   0x1
+#define RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE_SHIFT  1
+#define RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC_MASK  0x1
+#define RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC_SHIFT 2
+#define RDMA_SQ_FMR_WQE_2ND_LOCAL_READ_MASK     0x1
+#define RDMA_SQ_FMR_WQE_2ND_LOCAL_READ_SHIFT    3
+#define RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE_MASK    0x1
+#define RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE_SHIFT   4
+#define RDMA_SQ_FMR_WQE_2ND_RESERVED2_MASK      0x7
+#define RDMA_SQ_FMR_WQE_2ND_RESERVED2_SHIFT     5
+       u8 reserved3;
+       u8 length_hi;
+       __le32 length_lo;
+       struct regpair pbl_addr;
+};
+
+/* Third element (16 bytes) of fmr wqe */
+struct rdma_sq_fmr_wqe_3rd {
+       __le32 dif_base_ref_tag;
+       __le16 dif_app_tag;
+       __le16 dif_app_tag_mask;
+       __le16 dif_runt_crc_value;
+       __le16 dif_flags;
+#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_MASK    0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_SHIFT   0
+#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_MASK          0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_SHIFT         1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_MASK      0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_SHIFT     2
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_MASK  0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_SHIFT 3
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_MASK    0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_SHIFT   4
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_MASK    0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT   5
+#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK            0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT           6
+#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK               0x1FF
+#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_SHIFT              7
+       __le32 Reserved5;
+};
+
+struct rdma_sq_local_inv_wqe {
+       struct regpair reserved;
+       __le32 inv_l_key;
+       u8 req_type;
+       u8 flags;
+#define RDMA_SQ_LOCAL_INV_WQE_COMP_FLG_MASK         0x1
+#define RDMA_SQ_LOCAL_INV_WQE_COMP_FLG_SHIFT        0
+#define RDMA_SQ_LOCAL_INV_WQE_RD_FENCE_FLG_MASK     0x1
+#define RDMA_SQ_LOCAL_INV_WQE_RD_FENCE_FLG_SHIFT    1
+#define RDMA_SQ_LOCAL_INV_WQE_INV_FENCE_FLG_MASK    0x1
+#define RDMA_SQ_LOCAL_INV_WQE_INV_FENCE_FLG_SHIFT   2
+#define RDMA_SQ_LOCAL_INV_WQE_SE_FLG_MASK           0x1
+#define RDMA_SQ_LOCAL_INV_WQE_SE_FLG_SHIFT          3
+#define RDMA_SQ_LOCAL_INV_WQE_INLINE_FLG_MASK       0x1
+#define RDMA_SQ_LOCAL_INV_WQE_INLINE_FLG_SHIFT      4
+#define RDMA_SQ_LOCAL_INV_WQE_DIF_ON_HOST_FLG_MASK  0x1
+#define RDMA_SQ_LOCAL_INV_WQE_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_LOCAL_INV_WQE_RESERVED0_MASK        0x3
+#define RDMA_SQ_LOCAL_INV_WQE_RESERVED0_SHIFT       6
+       u8 wqe_size;
+       u8 prev_wqe_size;
+};
+
+struct rdma_sq_rdma_wqe {
+       __le32 imm_data;
+       __le32 length;
+       __le32 xrc_srq;
+       u8 req_type;
+       u8 flags;
+#define RDMA_SQ_RDMA_WQE_COMP_FLG_MASK                  0x1
+#define RDMA_SQ_RDMA_WQE_COMP_FLG_SHIFT                 0
+#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_MASK              0x1
+#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_SHIFT             1
+#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_MASK             0x1
+#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_SHIFT            2
+#define RDMA_SQ_RDMA_WQE_SE_FLG_MASK                    0x1
+#define RDMA_SQ_RDMA_WQE_SE_FLG_SHIFT                   3
+#define RDMA_SQ_RDMA_WQE_INLINE_FLG_MASK                0x1
+#define RDMA_SQ_RDMA_WQE_INLINE_FLG_SHIFT               4
+#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_MASK           0x1
+#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT          5
+#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK                 0x3
+#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT                6
+       u8 wqe_size;
+       u8 prev_wqe_size;
+       struct regpair remote_va;
+       __le32 r_key;
+       u8 dif_flags;
+#define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_MASK            0x1
+#define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_SHIFT           0
+#define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_MASK  0x1
+#define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_SHIFT 1
+#define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_MASK   0x1
+#define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_SHIFT  2
+#define RDMA_SQ_RDMA_WQE_RESERVED1_MASK                 0x1F
+#define RDMA_SQ_RDMA_WQE_RESERVED1_SHIFT                3
+       u8 reserved2[3];
+};
+
+/* First element (16 bytes) of rdma wqe */
+struct rdma_sq_rdma_wqe_1st {
+       __le32 imm_data;
+       __le32 length;
+       __le32 xrc_srq;
+       u8 req_type;
+       u8 flags;
+#define RDMA_SQ_RDMA_WQE_1ST_COMP_FLG_MASK         0x1
+#define RDMA_SQ_RDMA_WQE_1ST_COMP_FLG_SHIFT        0
+#define RDMA_SQ_RDMA_WQE_1ST_RD_FENCE_FLG_MASK     0x1
+#define RDMA_SQ_RDMA_WQE_1ST_RD_FENCE_FLG_SHIFT    1
+#define RDMA_SQ_RDMA_WQE_1ST_INV_FENCE_FLG_MASK    0x1
+#define RDMA_SQ_RDMA_WQE_1ST_INV_FENCE_FLG_SHIFT   2
+#define RDMA_SQ_RDMA_WQE_1ST_SE_FLG_MASK           0x1
+#define RDMA_SQ_RDMA_WQE_1ST_SE_FLG_SHIFT          3
+#define RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG_MASK       0x1
+#define RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG_SHIFT      4
+#define RDMA_SQ_RDMA_WQE_1ST_DIF_ON_HOST_FLG_MASK  0x1
+#define RDMA_SQ_RDMA_WQE_1ST_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_MASK        0x3
+#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_SHIFT       6
+       u8 wqe_size;
+       u8 prev_wqe_size;
+};
+
+/* Second element (16 bytes) of rdma wqe */
+struct rdma_sq_rdma_wqe_2nd {
+       struct regpair remote_va;
+       __le32 r_key;
+       u8 dif_flags;
+#define RDMA_SQ_RDMA_WQE_2ND_DIF_BLOCK_SIZE_MASK         0x1
+#define RDMA_SQ_RDMA_WQE_2ND_DIF_BLOCK_SIZE_SHIFT        0
+#define RDMA_SQ_RDMA_WQE_2ND_DIF_FIRST_SEGMENT_FLG_MASK  0x1
+#define RDMA_SQ_RDMA_WQE_2ND_DIF_FIRST_SEGMENT_FLG_SHIFT 1
+#define RDMA_SQ_RDMA_WQE_2ND_DIF_LAST_SEGMENT_FLG_MASK   0x1
+#define RDMA_SQ_RDMA_WQE_2ND_DIF_LAST_SEGMENT_FLG_SHIFT  2
+#define RDMA_SQ_RDMA_WQE_2ND_RESERVED1_MASK              0x1F
+#define RDMA_SQ_RDMA_WQE_2ND_RESERVED1_SHIFT             3
+       u8 reserved2[3];
+};
+
+/* SQ WQE req type enumeration */
+enum rdma_sq_req_type {
+       RDMA_SQ_REQ_TYPE_SEND,
+       RDMA_SQ_REQ_TYPE_SEND_WITH_IMM,
+       RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE,
+       RDMA_SQ_REQ_TYPE_RDMA_WR,
+       RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM,
+       RDMA_SQ_REQ_TYPE_RDMA_RD,
+       RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP,
+       RDMA_SQ_REQ_TYPE_ATOMIC_ADD,
+       RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE,
+       RDMA_SQ_REQ_TYPE_FAST_MR,
+       RDMA_SQ_REQ_TYPE_BIND,
+       RDMA_SQ_REQ_TYPE_INVALID,
+       MAX_RDMA_SQ_REQ_TYPE
+};
+
+struct rdma_sq_send_wqe {
+       __le32 inv_key_or_imm_data;
+       __le32 length;
+       __le32 xrc_srq;
+       u8 req_type;
+       u8 flags;
+#define RDMA_SQ_SEND_WQE_COMP_FLG_MASK         0x1
+#define RDMA_SQ_SEND_WQE_COMP_FLG_SHIFT        0
+#define RDMA_SQ_SEND_WQE_RD_FENCE_FLG_MASK     0x1
+#define RDMA_SQ_SEND_WQE_RD_FENCE_FLG_SHIFT    1
+#define RDMA_SQ_SEND_WQE_INV_FENCE_FLG_MASK    0x1
+#define RDMA_SQ_SEND_WQE_INV_FENCE_FLG_SHIFT   2
+#define RDMA_SQ_SEND_WQE_SE_FLG_MASK           0x1
+#define RDMA_SQ_SEND_WQE_SE_FLG_SHIFT          3
+#define RDMA_SQ_SEND_WQE_INLINE_FLG_MASK       0x1
+#define RDMA_SQ_SEND_WQE_INLINE_FLG_SHIFT      4
+#define RDMA_SQ_SEND_WQE_DIF_ON_HOST_FLG_MASK  0x1
+#define RDMA_SQ_SEND_WQE_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_SEND_WQE_RESERVED0_MASK        0x3
+#define RDMA_SQ_SEND_WQE_RESERVED0_SHIFT       6
+       u8 wqe_size;
+       u8 prev_wqe_size;
+       __le32 reserved1[4];
+};
+
+struct rdma_sq_send_wqe_1st {
+       __le32 inv_key_or_imm_data;
+       __le32 length;
+       __le32 xrc_srq;
+       u8 req_type;
+       u8 flags;
+#define RDMA_SQ_SEND_WQE_1ST_COMP_FLG_MASK       0x1
+#define RDMA_SQ_SEND_WQE_1ST_COMP_FLG_SHIFT      0
+#define RDMA_SQ_SEND_WQE_1ST_RD_FENCE_FLG_MASK   0x1
+#define RDMA_SQ_SEND_WQE_1ST_RD_FENCE_FLG_SHIFT  1
+#define RDMA_SQ_SEND_WQE_1ST_INV_FENCE_FLG_MASK  0x1
+#define RDMA_SQ_SEND_WQE_1ST_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_SEND_WQE_1ST_SE_FLG_MASK         0x1
+#define RDMA_SQ_SEND_WQE_1ST_SE_FLG_SHIFT        3
+#define RDMA_SQ_SEND_WQE_1ST_INLINE_FLG_MASK     0x1
+#define RDMA_SQ_SEND_WQE_1ST_INLINE_FLG_SHIFT    4
+#define RDMA_SQ_SEND_WQE_1ST_RESERVED0_MASK      0x7
+#define RDMA_SQ_SEND_WQE_1ST_RESERVED0_SHIFT     5
+       u8 wqe_size;
+       u8 prev_wqe_size;
+};
+
+/* Second element (16 bytes) of send wqe */
+struct rdma_sq_send_wqe_2st {
+       __le32 reserved1[4];
+};
+
+#endif /* __QED_HSI_RDMA__ */
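All of the flags bytes in this header describe their sub-fields with _MASK/_SHIFT pairs instead of C bitfields, which keeps the layout compiler- and endian-independent. qed accesses such fields with GET_FIELD()/SET_FIELD() helpers from common_hsi.h; the standalone sketch below shows the pattern with the responder CQE flags (macro bodies assumed to match the qed style):

#include <stdint.h>
#include <stdio.h>

#define RDMA_CQE_RESPONDER_TOGGLE_BIT_MASK  0x1
#define RDMA_CQE_RESPONDER_TOGGLE_BIT_SHIFT 0
#define RDMA_CQE_RESPONDER_TYPE_MASK        0x3
#define RDMA_CQE_RESPONDER_TYPE_SHIFT       1

/* Same pattern as the qed GET_FIELD()/SET_FIELD() macros */
#define GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)
#define SET_FIELD(value, name, field) \
	((value) = ((value) & ~(name##_MASK << name##_SHIFT)) | \
		   (((field) & name##_MASK) << name##_SHIFT))

int main(void)
{
	uint8_t flags = 0;

	SET_FIELD(flags, RDMA_CQE_RESPONDER_TYPE, 2);	     /* type = 2 */
	SET_FIELD(flags, RDMA_CQE_RESPONDER_TOGGLE_BIT, 1);  /* toggle = 1 */
	printf("flags=0x%02x type=%u toggle=%u\n", flags,
	       (unsigned)GET_FIELD(flags, RDMA_CQE_RESPONDER_TYPE),
	       (unsigned)GET_FIELD(flags, RDMA_CQE_RESPONDER_TOGGLE_BIT));
	/* prints: flags=0x05 type=2 toggle=1 */
	return 0;
}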
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
new file mode 100644 (file)
index 0000000..9d89d0b
--- /dev/null
@@ -0,0 +1,3508 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/dma-mapping.h>
+#include <linux/crc32.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/udp.h>
+#include <linux/iommu.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/iw_cm.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_cache.h>
+
+#include <linux/qed/common_hsi.h>
+#include "qedr_hsi_rdma.h"
+#include <linux/qed/qed_if.h>
+#include "qedr.h"
+#include "verbs.h"
+#include <rdma/qedr-abi.h>
+#include "qedr_cm.h"
+
+#define DB_ADDR_SHIFT(addr)            ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
+
+int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
+{
+       if (index >= QEDR_ROCE_PKEY_TABLE_LEN)
+               return -EINVAL;
+
+       *pkey = QEDR_ROCE_PKEY_DEFAULT;
+       return 0;
+}
+
+int qedr_query_gid(struct ib_device *ibdev, u8 port, int index,
+                  union ib_gid *sgid)
+{
+       struct qedr_dev *dev = get_qedr_dev(ibdev);
+       int rc = 0;
+
+       if (!rdma_cap_roce_gid_table(ibdev, port))
+               return -ENODEV;
+
+       rc = ib_get_cached_gid(ibdev, port, index, sgid, NULL);
+       if (rc == -EAGAIN) {
+               memcpy(sgid, &zgid, sizeof(*sgid));
+               return 0;
+       }
+
+       DP_DEBUG(dev, QEDR_MSG_INIT, "query gid: index=%d %llx:%llx\n", index,
+                sgid->global.interface_id, sgid->global.subnet_prefix);
+
+       return rc;
+}
+
+int qedr_add_gid(struct ib_device *device, u8 port_num,
+                unsigned int index, const union ib_gid *gid,
+                const struct ib_gid_attr *attr, void **context)
+{
+       if (!rdma_cap_roce_gid_table(device, port_num))
+               return -EINVAL;
+
+       if (port_num > QEDR_MAX_PORT)
+               return -EINVAL;
+
+       if (!context)
+               return -EINVAL;
+
+       return 0;
+}
+
+int qedr_del_gid(struct ib_device *device, u8 port_num,
+                unsigned int index, void **context)
+{
+       if (!rdma_cap_roce_gid_table(device, port_num))
+               return -EINVAL;
+
+       if (port_num > QEDR_MAX_PORT)
+               return -EINVAL;
+
+       if (!context)
+               return -EINVAL;
+
+       return 0;
+}
+
+int qedr_query_device(struct ib_device *ibdev,
+                     struct ib_device_attr *attr, struct ib_udata *udata)
+{
+       struct qedr_dev *dev = get_qedr_dev(ibdev);
+       struct qedr_device_attr *qattr = &dev->attr;
+
+       if (!dev->rdma_ctx) {
+               DP_ERR(dev,
+                      "qedr_query_device called with invalid params rdma_ctx=%p\n",
+                      dev->rdma_ctx);
+               return -EINVAL;
+       }
+
+       memset(attr, 0, sizeof(*attr));
+
+       attr->fw_ver = qattr->fw_ver;
+       attr->sys_image_guid = qattr->sys_image_guid;
+       attr->max_mr_size = qattr->max_mr_size;
+       attr->page_size_cap = qattr->page_size_caps;
+       attr->vendor_id = qattr->vendor_id;
+       attr->vendor_part_id = qattr->vendor_part_id;
+       attr->hw_ver = qattr->hw_ver;
+       attr->max_qp = qattr->max_qp;
+       attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
+       attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
+           IB_DEVICE_RC_RNR_NAK_GEN |
+           IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;
+
+       attr->max_sge = qattr->max_sge;
+       attr->max_sge_rd = qattr->max_sge;
+       attr->max_cq = qattr->max_cq;
+       attr->max_cqe = qattr->max_cqe;
+       attr->max_mr = qattr->max_mr;
+       attr->max_mw = qattr->max_mw;
+       attr->max_pd = qattr->max_pd;
+       attr->atomic_cap = dev->atomic_cap;
+       attr->max_fmr = qattr->max_fmr;
+       attr->max_map_per_fmr = 16;
+       attr->max_qp_init_rd_atom =
+           1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
+       attr->max_qp_rd_atom =
+           min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
+               attr->max_qp_init_rd_atom);
+
+       attr->max_srq = qattr->max_srq;
+       attr->max_srq_sge = qattr->max_srq_sge;
+       attr->max_srq_wr = qattr->max_srq_wr;
+
+       attr->local_ca_ack_delay = qattr->dev_ack_delay;
+       attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
+       attr->max_pkeys = QEDR_ROCE_PKEY_MAX;
+       attr->max_ah = qattr->max_ah;
+
+       return 0;
+}
+
+#define QEDR_SPEED_SDR         (1)
+#define QEDR_SPEED_DDR         (2)
+#define QEDR_SPEED_QDR         (4)
+#define QEDR_SPEED_FDR10       (8)
+#define QEDR_SPEED_FDR         (16)
+#define QEDR_SPEED_EDR         (32)
+
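+/* Map the Ethernet link speed reported by qed (in Mbps) to the closest
+ * IB speed/width pair.
+ */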
+static inline void get_link_speed_and_width(int speed, u8 *ib_speed,
+                                           u8 *ib_width)
+{
+       switch (speed) {
+       case 1000:
+               *ib_speed = QEDR_SPEED_SDR;
+               *ib_width = IB_WIDTH_1X;
+               break;
+       case 10000:
+               *ib_speed = QEDR_SPEED_QDR;
+               *ib_width = IB_WIDTH_1X;
+               break;
+
+       case 20000:
+               *ib_speed = QEDR_SPEED_DDR;
+               *ib_width = IB_WIDTH_4X;
+               break;
+
+       case 25000:
+               *ib_speed = QEDR_SPEED_EDR;
+               *ib_width = IB_WIDTH_1X;
+               break;
+
+       case 40000:
+               *ib_speed = QEDR_SPEED_QDR;
+               *ib_width = IB_WIDTH_4X;
+               break;
+
+       case 50000:
+               *ib_speed = QEDR_SPEED_QDR;
+               *ib_width = IB_WIDTH_4X;
+               break;
+
+       case 100000:
+               *ib_speed = QEDR_SPEED_EDR;
+               *ib_width = IB_WIDTH_4X;
+               break;
+
+       default:
+               /* Unsupported */
+               *ib_speed = QEDR_SPEED_SDR;
+               *ib_width = IB_WIDTH_1X;
+       }
+}
+
+int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
+{
+       struct qedr_dev *dev;
+       struct qed_rdma_port *rdma_port;
+
+       dev = get_qedr_dev(ibdev);
+       if (port > 1) {
+               DP_ERR(dev, "invalid_port=0x%x\n", port);
+               return -EINVAL;
+       }
+
+       if (!dev->rdma_ctx) {
+               DP_ERR(dev, "rdma_ctx is NULL\n");
+               return -EINVAL;
+       }
+
+       rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);
+       memset(attr, 0, sizeof(*attr));
+
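+       /* IB PortPhysicalState: 5 = LinkUp, 3 = Disabled */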
+       if (rdma_port->port_state == QED_RDMA_PORT_UP) {
+               attr->state = IB_PORT_ACTIVE;
+               attr->phys_state = 5;
+       } else {
+               attr->state = IB_PORT_DOWN;
+               attr->phys_state = 3;
+       }
+       attr->max_mtu = IB_MTU_4096;
+       attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
+       attr->lid = 0;
+       attr->lmc = 0;
+       attr->sm_lid = 0;
+       attr->sm_sl = 0;
+       attr->port_cap_flags = IB_PORT_IP_BASED_GIDS;
+       attr->gid_tbl_len = QEDR_MAX_SGID;
+       attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
+       attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
+       attr->qkey_viol_cntr = 0;
+       get_link_speed_and_width(rdma_port->link_speed,
+                                &attr->active_speed, &attr->active_width);
+       attr->max_msg_sz = rdma_port->max_msg_size;
+       attr->max_vl_num = 4;
+
+       return 0;
+}
+
+int qedr_modify_port(struct ib_device *ibdev, u8 port, int mask,
+                    struct ib_port_modify *props)
+{
+       struct qedr_dev *dev;
+
+       dev = get_qedr_dev(ibdev);
+       if (port > 1) {
+               DP_ERR(dev, "invalid_port=0x%x\n", port);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int qedr_add_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
+                        unsigned long len)
+{
+       struct qedr_mm *mm;
+
+       mm = kzalloc(sizeof(*mm), GFP_KERNEL);
+       if (!mm)
+               return -ENOMEM;
+
+       mm->key.phy_addr = phy_addr;
+       /* This function might be called with a length which is not a multiple
+        * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel
+        * forces this granularity by increasing the requested size if needed.
+        * When qedr_mmap is called, it will search the list with the updated
+        * length as a key. To prevent search failures, the length is rounded up
+        * in advance to PAGE_SIZE.
+        */
+       mm->key.len = roundup(len, PAGE_SIZE);
+       INIT_LIST_HEAD(&mm->entry);
+
+       mutex_lock(&uctx->mm_list_lock);
+       list_add(&mm->entry, &uctx->mm_head);
+       mutex_unlock(&uctx->mm_list_lock);
+
+       DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
+                "added (addr=0x%llx,len=0x%lx) for ctx=%p\n",
+                (unsigned long long)mm->key.phy_addr,
+                (unsigned long)mm->key.len, uctx);
+
+       return 0;
+}
+
+static bool qedr_search_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
+                            unsigned long len)
+{
+       bool found = false;
+       struct qedr_mm *mm;
+
+       mutex_lock(&uctx->mm_list_lock);
+       list_for_each_entry(mm, &uctx->mm_head, entry) {
+               if (len != mm->key.len || phy_addr != mm->key.phy_addr)
+                       continue;
+
+               found = true;
+               break;
+       }
+       mutex_unlock(&uctx->mm_list_lock);
+       DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
+                "searched for (addr=0x%llx,len=0x%lx) for ctx=%p, result=%d\n",
+                phy_addr, len, uctx, found);
+
+       return found;
+}
+
+struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
+                                       struct ib_udata *udata)
+{
+       int rc;
+       struct qedr_ucontext *ctx;
+       struct qedr_alloc_ucontext_resp uresp;
+       struct qedr_dev *dev = get_qedr_dev(ibdev);
+       struct qed_rdma_add_user_out_params oparams;
+
+       if (!udata)
+               return ERR_PTR(-EFAULT);
+
+       ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+       if (!ctx)
+               return ERR_PTR(-ENOMEM);
+
+       rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
+       if (rc) {
+               DP_ERR(dev,
+                      "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this consider to increase the number of DPIs, increase the doorbell BAR size or just close unnecessary RoCE applications. In order to increase the number of DPIs consult the qedr readme\n",
+                      rc);
+               goto err;
+       }
+
+       ctx->dpi = oparams.dpi;
+       ctx->dpi_addr = oparams.dpi_addr;
+       ctx->dpi_phys_addr = oparams.dpi_phys_addr;
+       ctx->dpi_size = oparams.dpi_size;
+       INIT_LIST_HEAD(&ctx->mm_head);
+       mutex_init(&ctx->mm_list_lock);
+
+       memset(&uresp, 0, sizeof(uresp));
+
+       uresp.db_pa = ctx->dpi_phys_addr;
+       uresp.db_size = ctx->dpi_size;
+       uresp.max_send_wr = dev->attr.max_sqe;
+       uresp.max_recv_wr = dev->attr.max_rqe;
+       uresp.max_srq_wr = dev->attr.max_srq_wr;
+       uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
+       uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
+       uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
+       uresp.max_cqes = QEDR_MAX_CQES;
+
+       rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+       if (rc)
+               goto err1;
+
+       ctx->dev = dev;
+
+       rc = qedr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
+       if (rc)
+               goto err1;
+
+       DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
+                &ctx->ibucontext);
+       return &ctx->ibucontext;
+
+err1:
+       /* Release the DPI taken by rdma_add_user() above */
+       dev->ops->rdma_remove_user(dev->rdma_ctx, ctx->dpi);
+err:
+       kfree(ctx);
+       return ERR_PTR(rc);
+}
+
+int qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
+{
+       struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
+       struct qedr_mm *mm, *tmp;
+       int status = 0;
+
+       DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
+                uctx);
+       uctx->dev->ops->rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi);
+
+       list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
+               DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
+                        "deleted (addr=0x%llx,len=0x%lx) for ctx=%p\n",
+                        mm->key.phy_addr, mm->key.len, uctx);
+               list_del(&mm->entry);
+               kfree(mm);
+       }
+
+       kfree(uctx);
+       return status;
+}
+
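+/* Userspace may only mmap offsets that were previously registered through
+ * qedr_add_mmap(). Doorbell BAR pages are mapped write-combined and must not
+ * be mapped for read; anything else is treated as regular chain memory.
+ */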
+int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+{
+       struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
+       struct qedr_dev *dev = get_qedr_dev(context->device);
+       unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
+       u64 unmapped_db = dev->db_phys_addr;
+       unsigned long len = (vma->vm_end - vma->vm_start);
+       int rc = 0;
+       bool found;
+
+       DP_DEBUG(dev, QEDR_MSG_INIT,
+                "qedr_mmap called vm_page=0x%lx vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n",
+                vm_page, vma->vm_pgoff, unmapped_db, dev->db_size, len);
+       if (vma->vm_start & (PAGE_SIZE - 1)) {
+               DP_ERR(dev, "Vma_start not page aligned = %ld\n",
+                      vma->vm_start);
+               return -EINVAL;
+       }
+
+       found = qedr_search_mmap(ucontext, vm_page, len);
+       if (!found) {
+               DP_ERR(dev, "Vma_pgoff not found in mapped array = %ld\n",
+                      vma->vm_pgoff);
+               return -EINVAL;
+       }
+
+       DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
+
+       if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
+                                                    dev->db_size))) {
+               DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
+               if (vma->vm_flags & VM_READ) {
+                       DP_ERR(dev, "Trying to map doorbell bar for read\n");
+                       return -EPERM;
+               }
+
+               vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+               rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+                                       PAGE_SIZE, vma->vm_page_prot);
+       } else {
+               DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping chains\n");
+               rc = remap_pfn_range(vma, vma->vm_start,
+                                    vma->vm_pgoff, len, vma->vm_page_prot);
+       }
+       DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_mmap return code: %d\n", rc);
+       return rc;
+}
+
+struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
+                           struct ib_ucontext *context, struct ib_udata *udata)
+{
+       struct qedr_dev *dev = get_qedr_dev(ibdev);
+       struct qedr_pd *pd;
+       u16 pd_id;
+       int rc;
+
+       DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
+                (udata && context) ? "User Lib" : "Kernel");
+
+       if (!dev->rdma_ctx) {
+               DP_ERR(dev, "invlaid RDMA context\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+       if (!pd)
+               return ERR_PTR(-ENOMEM);
+
+       rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
+       if (rc)
+               goto err;
+
+       pd->pd_id = pd_id;
+
+       if (udata && context) {
+               struct qedr_alloc_pd_uresp uresp;
+
+               uresp.pd_id = pd_id;
+
+               rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+               if (rc) {
+                       DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
+                       dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
+                       goto err;
+               }
+
+               pd->uctx = get_qedr_ucontext(context);
+               pd->uctx->pd = pd;
+       }
+
+       return &pd->ibpd;
+
+err:
+       kfree(pd);
+       return ERR_PTR(rc);
+}
+
+int qedr_dealloc_pd(struct ib_pd *ibpd)
+{
+       struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+       struct qedr_pd *pd = get_qedr_pd(ibpd);
+
+       if (!pd) {
+               pr_err("Invalid PD received in dealloc_pd\n");
+               return -EINVAL;
+       }
+
+       DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
+       dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
+
+       kfree(pd);
+
+       return 0;
+}
+
+static void qedr_free_pbl(struct qedr_dev *dev,
+                         struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
+{
+       struct pci_dev *pdev = dev->pdev;
+       int i;
+
+       for (i = 0; i < pbl_info->num_pbls; i++) {
+               if (!pbl[i].va)
+                       continue;
+               dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
+                                 pbl[i].va, pbl[i].pa);
+       }
+
+       kfree(pbl);
+}
+
+#define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
+#define MAX_FW_PBL_PAGE_SIZE (64 * 1024)
+
+#define NUM_PBES_ON_PAGE(_page_size) ((_page_size) / sizeof(u64))
+#define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
+#define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)
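+/* Worked example: a 64K PBL page holds 64K / sizeof(u64) = 8192 PBEs, so a
+ * two-layer PBL can address up to 8192 * 8192 = 67,108,864 pages, i.e.
+ * 256 GiB of memory with 4K pages.
+ */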
+
+static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
+                                          struct qedr_pbl_info *pbl_info,
+                                          gfp_t flags)
+{
+       struct pci_dev *pdev = dev->pdev;
+       struct qedr_pbl *pbl_table;
+       dma_addr_t *pbl_main_tbl;
+       dma_addr_t pa;
+       void *va;
+       int i;
+
+       pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
+       if (!pbl_table)
+               return ERR_PTR(-ENOMEM);
+
+       for (i = 0; i < pbl_info->num_pbls; i++) {
+               va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size,
+                                       &pa, flags);
+               if (!va)
+                       goto err;
+
+               memset(va, 0, pbl_info->pbl_size);
+               pbl_table[i].va = va;
+               pbl_table[i].pa = pa;
+       }
+
+       /* Two-layer PBLs: if we have more than one PBL, initialize the first
+        * (layer-0) page with the physical addresses of all the rest.
+        */
+       pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
+       for (i = 0; i < pbl_info->num_pbls - 1; i++)
+               pbl_main_tbl[i] = pbl_table[i + 1].pa;
+
+       return pbl_table;
+
+err:
+       /* Entries that were never allocated still have a NULL va (the table
+        * came from kcalloc()), so qedr_free_pbl() alone releases exactly the
+        * pages allocated so far; freeing them here as well would be a double
+        * free.
+        */
+       qedr_free_pbl(dev, pbl_info, pbl_table);
+
+       return ERR_PTR(-ENOMEM);
+}
+
+static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
+                               struct qedr_pbl_info *pbl_info,
+                               u32 num_pbes, int two_layer_capable)
+{
+       u32 pbl_capacity;
+       u32 pbl_size;
+       u32 num_pbls;
+
+       if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
+               if (num_pbes > MAX_PBES_TWO_LAYER) {
+                       DP_ERR(dev, "prepare pbl table: too many pages %d\n",
+                              num_pbes);
+                       return -EINVAL;
+               }
+
+               /* calculate required pbl page size */
+               pbl_size = MIN_FW_PBL_PAGE_SIZE;
+               pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
+                              NUM_PBES_ON_PAGE(pbl_size);
+
+               while (pbl_capacity < num_pbes) {
+                       pbl_size *= 2;
+                       pbl_capacity = pbl_size / sizeof(u64);
+                       pbl_capacity = pbl_capacity * pbl_capacity;
+               }
+
+               num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
+               num_pbls++;     /* One extra for layer 0, which points to the PBLs */
+               pbl_info->two_layered = true;
+       } else {
+               /* One layered PBL */
+               num_pbls = 1;
+               pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
+                                roundup_pow_of_two((num_pbes * sizeof(u64))));
+               pbl_info->two_layered = false;
+       }
+
+       pbl_info->num_pbls = num_pbls;
+       pbl_info->pbl_size = pbl_size;
+       pbl_info->num_pbes = num_pbes;
+
+       DP_DEBUG(dev, QEDR_MSG_MR,
+                "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
+                pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);
+
+       return 0;
+}
+
+static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
+                              struct qedr_pbl *pbl,
+                              struct qedr_pbl_info *pbl_info)
+{
+       int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
+       struct qedr_pbl *pbl_tbl;
+       struct scatterlist *sg;
+       struct regpair *pbe;
+       int entry;
+       u32 addr;
+
+       if (!pbl_info->num_pbes)
+               return;
+
+       /* If we have a two-layer PBL, the first PBL points to the rest of the
+        * PBLs and the first data entry lies in the second PBL of the table.
+        */
+       if (pbl_info->two_layered)
+               pbl_tbl = &pbl[1];
+       else
+               pbl_tbl = pbl;
+
+       pbe = (struct regpair *)pbl_tbl->va;
+       if (!pbe) {
+               DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
+               return;
+       }
+
+       pbe_cnt = 0;
+
+       shift = ilog2(umem->page_size);
+
+       for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+               pages = sg_dma_len(sg) >> shift;
+               for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
+                       /* store the page address in pbe */
+                       pbe->lo = cpu_to_le32(sg_dma_address(sg) +
+                                             umem->page_size * pg_cnt);
+                       addr = upper_32_bits(sg_dma_address(sg) +
+                                            umem->page_size * pg_cnt);
+                       pbe->hi = cpu_to_le32(addr);
+                       pbe_cnt++;
+                       total_num_pbes++;
+                       pbe++;
+
+                       if (total_num_pbes == pbl_info->num_pbes)
+                               return;
+
+                       /* If the given pbl is full storing the pbes,
+                        * move to next pbl.
+                        */
+                       if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
+                               pbl_tbl++;
+                               pbe = (struct regpair *)pbl_tbl->va;
+                               pbe_cnt = 0;
+                       }
+               }
+       }
+}
+
+static int qedr_copy_cq_uresp(struct qedr_dev *dev,
+                             struct qedr_cq *cq, struct ib_udata *udata)
+{
+       struct qedr_create_cq_uresp uresp;
+       int rc;
+
+       memset(&uresp, 0, sizeof(uresp));
+
+       uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
+       uresp.icid = cq->icid;
+
+       rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+       if (rc)
+               DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);
+
+       return rc;
+}
+
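+/* Advance the SW consumer over the CQ ring. When the consumer passes the last
+ * CQE in the ring (toggle_cqe), flip the expected toggle bit so that valid
+ * CQEs from the next lap can be told apart from stale ones.
+ */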
+static void consume_cqe(struct qedr_cq *cq)
+{
+       if (cq->latest_cqe == cq->toggle_cqe)
+               cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
+
+       cq->latest_cqe = qed_chain_consume(&cq->pbl);
+}
+
+static inline int qedr_align_cq_entries(int entries)
+{
+       u64 size, aligned_size;
+
+       /* We allocate an extra entry that we don't report to the FW. */
+       size = (entries + 1) * QEDR_CQE_SIZE;
+       aligned_size = ALIGN(size, PAGE_SIZE);
+
+       return aligned_size / QEDR_CQE_SIZE;
+}
+
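+/* Pin the user buffer (ib_umem_get), size a single-layer PBL for it and
+ * write the DMA address of every page into the PBL so that the HW can walk
+ * the queue memory.
+ */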
+static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
+                                      struct qedr_dev *dev,
+                                      struct qedr_userq *q,
+                                      u64 buf_addr, size_t buf_len,
+                                      int access, int dmasync)
+{
+       int page_cnt;
+       int rc;
+
+       q->buf_addr = buf_addr;
+       q->buf_len = buf_len;
+       q->umem = ib_umem_get(ib_ctx, q->buf_addr, q->buf_len, access, dmasync);
+       if (IS_ERR(q->umem)) {
+               rc = PTR_ERR(q->umem);
+               DP_ERR(dev, "create user queue: failed ib_umem_get, got %d\n",
+                      rc);
+               /* Clear the ERR_PTR so later cleanup paths don't release a
+                * bogus umem pointer.
+                */
+               q->umem = NULL;
+               return rc;
+       }
+
+       page_cnt = ib_umem_page_count(q->umem);
+       rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt, 0);
+       if (rc)
+               goto err0;
+
+       q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
+       if (IS_ERR(q->pbl_tbl)) {
+               rc = PTR_ERR(q->pbl_tbl);
+               goto err0;
+       }
+
+       qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info);
+
+       return 0;
+
+err0:
+       ib_umem_release(q->umem);
+       q->umem = NULL;
+
+       return rc;
+}
+
+static inline void qedr_init_cq_params(struct qedr_cq *cq,
+                                      struct qedr_ucontext *ctx,
+                                      struct qedr_dev *dev, int vector,
+                                      int chain_entries, int page_cnt,
+                                      u64 pbl_ptr,
+                                      struct qed_rdma_create_cq_in_params
+                                      *params)
+{
+       memset(params, 0, sizeof(*params));
+       params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
+       params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
+       params->cnq_id = vector;
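+       /* Report one entry less than allocated: qedr_align_cq_entries() adds
+        * an extra CQE that is hidden from the FW.
+        */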
+       params->cq_size = chain_entries - 1;
+       params->dpi = (ctx) ? ctx->dpi : dev->dpi;
+       params->pbl_num_pages = page_cnt;
+       params->pbl_ptr = pbl_ptr;
+       params->pbl_two_level = 0;
+}
+
+static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
+{
+       /* Flush data before signalling doorbell */
+       wmb();
+       cq->db.data.agg_flags = flags;
+       cq->db.data.value = cpu_to_le32(cons);
+       writeq(cq->db.raw, cq->db_addr);
+
+       /* Make sure write would stick */
+       mmiowb();
+}
+
+int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
+{
+       struct qedr_cq *cq = get_qedr_cq(ibcq);
+       unsigned long sflags;
+
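+       /* GSI CQEs are generated by the driver rather than the HW, so there
+        * is nothing to arm (see also qedr_destroy_cq()).
+        */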
+       if (cq->cq_type == QEDR_CQ_TYPE_GSI)
+               return 0;
+
+       spin_lock_irqsave(&cq->cq_lock, sflags);
+
+       cq->arm_flags = 0;
+
+       if (flags & IB_CQ_SOLICITED)
+               cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;
+
+       if (flags & IB_CQ_NEXT_COMP)
+               cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;
+
+       doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
+
+       spin_unlock_irqrestore(&cq->cq_lock, sflags);
+
+       return 0;
+}
+
+struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
+                            const struct ib_cq_init_attr *attr,
+                            struct ib_ucontext *ib_ctx, struct ib_udata *udata)
+{
+       struct qedr_ucontext *ctx = get_qedr_ucontext(ib_ctx);
+       struct qed_rdma_destroy_cq_out_params destroy_oparams;
+       struct qed_rdma_destroy_cq_in_params destroy_iparams;
+       struct qedr_dev *dev = get_qedr_dev(ibdev);
+       struct qed_rdma_create_cq_in_params params;
+       struct qedr_create_cq_ureq ureq;
+       int vector = attr->comp_vector;
+       int entries = attr->cqe;
+       struct qedr_cq *cq;
+       int chain_entries;
+       int page_cnt;
+       u64 pbl_ptr;
+       u16 icid;
+       int rc;
+
+       DP_DEBUG(dev, QEDR_MSG_INIT,
+                "create_cq: called from %s. entries=%d, vector=%d\n",
+                udata ? "User Lib" : "Kernel", entries, vector);
+
+       if (entries > QEDR_MAX_CQES) {
+               DP_ERR(dev,
+                      "create cq: the number of entries %d is too high. Must be equal or below %d.\n",
+                      entries, QEDR_MAX_CQES);
+               return ERR_PTR(-EINVAL);
+       }
+
+       chain_entries = qedr_align_cq_entries(entries);
+       chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
+
+       cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+       if (!cq)
+               return ERR_PTR(-ENOMEM);
+
+       if (udata) {
+               memset(&ureq, 0, sizeof(ureq));
+               if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
+                       DP_ERR(dev,
+                              "create cq: problem copying data from user space\n");
+                       goto err0;
+               }
+
+               if (!ureq.len) {
+                       DP_ERR(dev,
+                              "create cq: cannot create a cq with 0 entries\n");
+                       goto err0;
+               }
+
+               cq->cq_type = QEDR_CQ_TYPE_USER;
+
+               rc = qedr_init_user_queue(ib_ctx, dev, &cq->q, ureq.addr,
+                                         ureq.len, IB_ACCESS_LOCAL_WRITE, 1);
+               if (rc)
+                       goto err0;
+
+               pbl_ptr = cq->q.pbl_tbl->pa;
+               page_cnt = cq->q.pbl_info.num_pbes;
+
+               cq->ibcq.cqe = chain_entries;
+       } else {
+               cq->cq_type = QEDR_CQ_TYPE_KERNEL;
+
+               rc = dev->ops->common->chain_alloc(dev->cdev,
+                                                  QED_CHAIN_USE_TO_CONSUME,
+                                                  QED_CHAIN_MODE_PBL,
+                                                  QED_CHAIN_CNT_TYPE_U32,
+                                                  chain_entries,
+                                                  sizeof(union rdma_cqe),
+                                                  &cq->pbl);
+               if (rc)
+                       goto err1;
+
+               page_cnt = qed_chain_get_page_cnt(&cq->pbl);
+               pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
+               cq->ibcq.cqe = cq->pbl.capacity;
+       }
+
+       qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
+                           pbl_ptr, &params);
+
+       rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
+       if (rc)
+               goto err2;
+
+       cq->icid = icid;
+       cq->sig = QEDR_CQ_MAGIC_NUMBER;
+       spin_lock_init(&cq->cq_lock);
+
+       if (ib_ctx) {
+               rc = qedr_copy_cq_uresp(dev, cq, udata);
+               if (rc)
+                       goto err3;
+       } else {
+               /* Generate doorbell address. */
+               cq->db_addr = dev->db_addr +
+                   DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
+               cq->db.data.icid = cq->icid;
+               cq->db.data.params = DB_AGG_CMD_SET <<
+                   RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
+
+               /* point to the very last element, passing it we will toggle */
+               cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
+               cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
+               cq->latest_cqe = NULL;
+               consume_cqe(cq);
+               cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
+       }
+
+       DP_DEBUG(dev, QEDR_MSG_CQ,
+                "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
+                cq->icid, cq, params.cq_size);
+
+       return &cq->ibcq;
+
+err3:
+       destroy_iparams.icid = cq->icid;
+       dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
+                                 &destroy_oparams);
+err2:
+       if (udata)
+               qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
+       else
+               dev->ops->common->chain_free(dev->cdev, &cq->pbl);
+err1:
+       if (udata)
+               ib_umem_release(cq->q.umem);
+err0:
+       kfree(cq);
+       return ERR_PTR(-EINVAL);
+}
+
+int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
+{
+       struct qedr_dev *dev = get_qedr_dev(ibcq->device);
+       struct qedr_cq *cq = get_qedr_cq(ibcq);
+
+       DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq);
+
+       return 0;
+}
+
+int qedr_destroy_cq(struct ib_cq *ibcq)
+{
+       struct qedr_dev *dev = get_qedr_dev(ibcq->device);
+       struct qed_rdma_destroy_cq_out_params oparams;
+       struct qed_rdma_destroy_cq_in_params iparams;
+       struct qedr_cq *cq = get_qedr_cq(ibcq);
+
+       DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq: cq_id %d", cq->icid);
+
+       /* GSI CQs are handled by the driver, so they don't exist in the FW */
+       if (cq->cq_type != QEDR_CQ_TYPE_GSI) {
+               int rc;
+
+               iparams.icid = cq->icid;
+               rc = dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams,
+                                              &oparams);
+               if (rc)
+                       return rc;
+               dev->ops->common->chain_free(dev->cdev, &cq->pbl);
+       }
+
+       if (ibcq->uobject && ibcq->uobject->context) {
+               qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
+               ib_umem_release(cq->q.umem);
+       }
+
+       kfree(cq);
+
+       return 0;
+}
+
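+/* The GID type cached by the IB core determines the RoCE flavor used on the
+ * wire: RDMA_NETWORK_IB selects RoCE v1 (Ethertype 0x8915), while the
+ * IPv4/IPv6 types select RoCE v2 (UDP encapsulated, destination port 4791).
+ */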
+static inline int get_gid_info_from_table(struct ib_qp *ibqp,
+                                         struct ib_qp_attr *attr,
+                                         int attr_mask,
+                                         struct qed_rdma_modify_qp_in_params
+                                         *qp_params)
+{
+       enum rdma_network_type nw_type;
+       struct ib_gid_attr gid_attr;
+       union ib_gid gid;
+       u32 ipv4_addr;
+       int rc = 0;
+       int i;
+
+       rc = ib_get_cached_gid(ibqp->device, attr->ah_attr.port_num,
+                              attr->ah_attr.grh.sgid_index, &gid, &gid_attr);
+       if (rc)
+               return rc;
+
+       if (!memcmp(&gid, &zgid, sizeof(gid)))
+               return -ENOENT;
+
+       if (gid_attr.ndev) {
+               qp_params->vlan_id = rdma_vlan_dev_vlan_id(gid_attr.ndev);
+
+               dev_put(gid_attr.ndev);
+               nw_type = ib_gid_to_network_type(gid_attr.gid_type, &gid);
+               switch (nw_type) {
+               case RDMA_NETWORK_IPV6:
+                       memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
+                              sizeof(qp_params->sgid));
+                       memcpy(&qp_params->dgid.bytes[0],
+                              &attr->ah_attr.grh.dgid,
+                              sizeof(qp_params->dgid));
+                       qp_params->roce_mode = ROCE_V2_IPV6;
+                       SET_FIELD(qp_params->modify_flags,
+                                 QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
+                       break;
+               case RDMA_NETWORK_IB:
+                       memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
+                              sizeof(qp_params->sgid));
+                       memcpy(&qp_params->dgid.bytes[0],
+                              &attr->ah_attr.grh.dgid,
+                              sizeof(qp_params->dgid));
+                       qp_params->roce_mode = ROCE_V1;
+                       break;
+               case RDMA_NETWORK_IPV4:
+                       memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
+                       memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
+                       ipv4_addr = qedr_get_ipv4_from_gid(gid.raw);
+                       qp_params->sgid.ipv4_addr = ipv4_addr;
+                       ipv4_addr =
+                           qedr_get_ipv4_from_gid(attr->ah_attr.grh.dgid.raw);
+                       qp_params->dgid.ipv4_addr = ipv4_addr;
+                       SET_FIELD(qp_params->modify_flags,
+                                 QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
+                       qp_params->roce_mode = ROCE_V2_IPV4;
+                       break;
+               }
+       }
+
+       for (i = 0; i < 4; i++) {
+               qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
+               qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
+       }
+
+       if (qp_params->vlan_id >= VLAN_CFI_MASK)
+               qp_params->vlan_id = 0;
+
+       return 0;
+}
+
+static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
+                              struct ib_qp_init_attr *attrs)
+{
+       struct qedr_device_attr *qattr = &dev->attr;
+
+       /* Only RC QPs and the GSI QP (attrs->qp_type == IB_QPT_GSI) are supported */
+       if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) {
+               DP_DEBUG(dev, QEDR_MSG_QP,
+                        "create qp: unsupported qp type=0x%x requested\n",
+                        attrs->qp_type);
+               return -EINVAL;
+       }
+
+       if (attrs->cap.max_send_wr > qattr->max_sqe) {
+               DP_ERR(dev,
+                      "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
+                      attrs->cap.max_send_wr, qattr->max_sqe);
+               return -EINVAL;
+       }
+
+       if (attrs->cap.max_inline_data > qattr->max_inline) {
+               DP_ERR(dev,
+                      "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
+                      attrs->cap.max_inline_data, qattr->max_inline);
+               return -EINVAL;
+       }
+
+       if (attrs->cap.max_send_sge > qattr->max_sge) {
+               DP_ERR(dev,
+                      "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
+                      attrs->cap.max_send_sge, qattr->max_sge);
+               return -EINVAL;
+       }
+
+       if (attrs->cap.max_recv_sge > qattr->max_sge) {
+               DP_ERR(dev,
+                      "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
+                      attrs->cap.max_recv_sge, qattr->max_sge);
+               return -EINVAL;
+       }
+
+       /* Unprivileged user space cannot create special QP */
+       if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
+               DP_ERR(dev,
+                      "create qp: userspace can't create special QPs of type=0x%x\n",
+                      attrs->qp_type);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static void qedr_copy_rq_uresp(struct qedr_create_qp_uresp *uresp,
+                              struct qedr_qp *qp)
+{
+       uresp->rq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
+       uresp->rq_icid = qp->icid;
+}
+
+static void qedr_copy_sq_uresp(struct qedr_create_qp_uresp *uresp,
+                              struct qedr_qp *qp)
+{
+       uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
+       uresp->sq_icid = qp->icid + 1;
+}
+
+static int qedr_copy_qp_uresp(struct qedr_dev *dev,
+                             struct qedr_qp *qp, struct ib_udata *udata)
+{
+       struct qedr_create_qp_uresp uresp;
+       int rc;
+
+       memset(&uresp, 0, sizeof(uresp));
+       qedr_copy_sq_uresp(&uresp, qp);
+       qedr_copy_rq_uresp(&uresp, qp);
+
+       uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
+       uresp.qp_id = qp->qp_id;
+
+       rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+       if (rc)
+               DP_ERR(dev,
+                      "create qp: failed a copy to user space with qp icid=0x%x.\n",
+                      qp->icid);
+
+       return rc;
+}
+
+static void qedr_set_common_qp_params(struct qedr_dev *dev,
+                                     struct qedr_qp *qp,
+                                     struct qedr_pd *pd,
+                                     struct ib_qp_init_attr *attrs)
+{
+       spin_lock_init(&qp->q_lock);
+       qp->pd = pd;
+       qp->qp_type = attrs->qp_type;
+       qp->max_inline_data = attrs->cap.max_inline_data;
+       qp->sq.max_sges = attrs->cap.max_send_sge;
+       qp->state = QED_ROCE_QP_STATE_RESET;
+       qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
+       qp->sq_cq = get_qedr_cq(attrs->send_cq);
+       qp->rq_cq = get_qedr_cq(attrs->recv_cq);
+       qp->dev = dev;
+       qp->rq.max_sges = attrs->cap.max_recv_sge;
+
+       DP_DEBUG(dev, QEDR_MSG_QP,
+                "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
+                qp->rq.max_sges, qp->rq_cq->icid);
+       DP_DEBUG(dev, QEDR_MSG_QP,
+                "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
+                pd->pd_id, qp->qp_type, qp->max_inline_data,
+                qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
+       DP_DEBUG(dev, QEDR_MSG_QP,
+                "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
+                qp->sq.max_sges, qp->sq_cq->icid);
+}
+
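+/* Doorbell addressing: the RQ uses the QP's icid and the SQ uses icid + 1.
+ * This must stay in sync with what qedr_copy_sq_uresp() and
+ * qedr_copy_rq_uresp() report to userspace.
+ */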
+static void qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+       qp->sq.db = dev->db_addr +
+                   DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
+       qp->sq.db_data.data.icid = qp->icid + 1;
+       qp->rq.db = dev->db_addr +
+                   DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
+       qp->rq.db_data.data.icid = qp->icid;
+}
+
+static inline void
+qedr_init_common_qp_in_params(struct qedr_dev *dev,
+                             struct qedr_pd *pd,
+                             struct qedr_qp *qp,
+                             struct ib_qp_init_attr *attrs,
+                             bool fmr_and_reserved_lkey,
+                             struct qed_rdma_create_qp_in_params *params)
+{
+       /* QP handle to be written in an async event */
+       params->qp_handle_async_lo = lower_32_bits((uintptr_t) qp);
+       params->qp_handle_async_hi = upper_32_bits((uintptr_t) qp);
+
+       params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
+       params->fmr_and_reserved_lkey = fmr_and_reserved_lkey;
+       params->pd = pd->pd_id;
+       params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
+       params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
+       params->stats_queue = 0;
+       params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
+       params->srq_id = 0;
+       params->use_srq = false;
+}
+
+static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+       DP_DEBUG(dev, QEDR_MSG_QP, "create qp: successfully created user QP. "
+                "qp=%p. "
+                "sq_addr=0x%llx, "
+                "sq_len=%zd, "
+                "rq_addr=0x%llx, "
+                "rq_len=%zd"
+                "\n",
+                qp,
+                qp->usq.buf_addr,
+                qp->usq.buf_len, qp->urq.buf_addr, qp->urq.buf_len);
+}
+
+static void qedr_cleanup_user(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+       if (qp->usq.umem)
+               ib_umem_release(qp->usq.umem);
+       qp->usq.umem = NULL;
+
+       if (qp->urq.umem)
+               ib_umem_release(qp->urq.umem);
+       qp->urq.umem = NULL;
+}
+
+static int qedr_create_user_qp(struct qedr_dev *dev,
+                              struct qedr_qp *qp,
+                              struct ib_pd *ibpd,
+                              struct ib_udata *udata,
+                              struct ib_qp_init_attr *attrs)
+{
+       struct qed_rdma_create_qp_in_params in_params;
+       struct qed_rdma_create_qp_out_params out_params;
+       struct qedr_pd *pd = get_qedr_pd(ibpd);
+       struct ib_ucontext *ib_ctx = NULL;
+       struct qedr_ucontext *ctx = NULL;
+       struct qedr_create_qp_ureq ureq;
+       int rc = -EINVAL;
+
+       ib_ctx = ibpd->uobject->context;
+       ctx = get_qedr_ucontext(ib_ctx);
+
+       memset(&ureq, 0, sizeof(ureq));
+       rc = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
+       if (rc) {
+               DP_ERR(dev, "Problem copying data from user space\n");
+               return rc;
+       }
+
+       /* SQ - read access only (0), dma sync not required (0) */
+       rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq.sq_addr,
+                                 ureq.sq_len, 0, 0);
+       if (rc)
+               return rc;
+
+       /* RQ - read access only (0), dma sync not required (0) */
+       rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq.rq_addr,
+                                 ureq.rq_len, 0, 0);
+
+       if (rc)
+               goto err1;
+
+       memset(&in_params, 0, sizeof(in_params));
+       qedr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
+       in_params.qp_handle_lo = ureq.qp_handle_lo;
+       in_params.qp_handle_hi = ureq.qp_handle_hi;
+       in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
+       in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
+       in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
+       in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
+
+       qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
+                                             &in_params, &out_params);
+
+       if (!qp->qed_qp) {
+               rc = -ENOMEM;
+               goto err1;
+       }
+
+       qp->qp_id = out_params.qp_id;
+       qp->icid = out_params.icid;
+
+       rc = qedr_copy_qp_uresp(dev, qp, udata);
+       if (rc)
+               goto err;
+
+       qedr_qp_user_print(dev, qp);
+
+       return 0;
+err:
+       /* Don't clobber rc here: report a destroy failure, but return the
+        * original error to the caller.
+        */
+       if (dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp))
+               DP_ERR(dev, "create qp: fatal fault destroying new qp\n");
+
+err1:
+       qedr_cleanup_user(dev, qp);
+       return rc;
+}
+
+static int
+qedr_roce_create_kernel_qp(struct qedr_dev *dev,
+                          struct qedr_qp *qp,
+                          struct qed_rdma_create_qp_in_params *in_params,
+                          u32 n_sq_elems, u32 n_rq_elems)
+{
+       struct qed_rdma_create_qp_out_params out_params;
+       int rc;
+
+       rc = dev->ops->common->chain_alloc(dev->cdev,
+                                          QED_CHAIN_USE_TO_PRODUCE,
+                                          QED_CHAIN_MODE_PBL,
+                                          QED_CHAIN_CNT_TYPE_U32,
+                                          n_sq_elems,
+                                          QEDR_SQE_ELEMENT_SIZE,
+                                          &qp->sq.pbl);
+
+       if (rc)
+               return rc;
+
+       in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
+       in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
+
+       rc = dev->ops->common->chain_alloc(dev->cdev,
+                                          QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+                                          QED_CHAIN_MODE_PBL,
+                                          QED_CHAIN_CNT_TYPE_U32,
+                                          n_rq_elems,
+                                          QEDR_RQE_ELEMENT_SIZE,
+                                          &qp->rq.pbl);
+       if (rc)
+               return rc;
+
+       in_params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
+       in_params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
+
+       qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
+                                             in_params, &out_params);
+
+       if (!qp->qed_qp)
+               return -EINVAL;
+
+       qp->qp_id = out_params.qp_id;
+       qp->icid = out_params.icid;
+
+       qedr_set_roce_db_info(dev, qp);
+
+       return 0;
+}
+
+static void qedr_cleanup_kernel(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+       dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
+       kfree(qp->wqe_wr_id);
+
+       dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
+       kfree(qp->rqe_wr_id);
+}
+
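+/* Kernel QPs keep shadow arrays (wqe_wr_id/rqe_wr_id) alongside the HW rings
+ * so that completions can be matched back to the wr_id of the posted work
+ * request.
+ */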
+static int qedr_create_kernel_qp(struct qedr_dev *dev,
+                                struct qedr_qp *qp,
+                                struct ib_pd *ibpd,
+                                struct ib_qp_init_attr *attrs)
+{
+       struct qed_rdma_create_qp_in_params in_params;
+       struct qedr_pd *pd = get_qedr_pd(ibpd);
+       int rc = -EINVAL;
+       u32 n_rq_elems;
+       u32 n_sq_elems;
+       u32 n_sq_entries;
+
+       memset(&in_params, 0, sizeof(in_params));
+
+       /* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
+        * the ring. The ring should allow at least a single WR, even if the
+        * user requested none, due to allocation issues.
+        * We should add an extra WR since the prod and cons indices of
+        * wqe_wr_id are managed in such a way that the WQ is considered full
+        * when (prod+1)%max_wr==cons. We currently don't do that because we
+        * double the number of entries due to an iSER issue that pushes far
+        * more WRs than indicated. If we declined its ib_post_send() we would
+        * get error prints in dmesg that we'd like to avoid.
+        */
+       qp->sq.max_wr = min_t(u32, attrs->cap.max_send_wr * dev->wq_multiplier,
+                             dev->attr.max_sqe);
+
+       qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
+                               GFP_KERNEL);
+       if (!qp->wqe_wr_id) {
+               DP_ERR(dev, "create qp: failed SQ shadow memory allocation\n");
+               return -ENOMEM;
+       }
+
+       /* QP handle to be written in CQE */
+       in_params.qp_handle_lo = lower_32_bits((uintptr_t) qp);
+       in_params.qp_handle_hi = upper_32_bits((uintptr_t) qp);
+
+       /* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
+        * the ring. The ring should allow at least a single WR, even if the
+        * user requested none, due to allocation issues.
+        */
+       qp->rq.max_wr = (u16) max_t(u32, attrs->cap.max_recv_wr, 1);
+
+       /* Allocate driver internal RQ array */
+       qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
+                               GFP_KERNEL);
+       if (!qp->rqe_wr_id) {
+               DP_ERR(dev,
+                      "create qp: failed RQ shadow memory allocation\n");
+               kfree(qp->wqe_wr_id);
+               return -ENOMEM;
+       }
+
+       qedr_init_common_qp_in_params(dev, pd, qp, attrs, true, &in_params);
+
+       n_sq_entries = attrs->cap.max_send_wr;
+       n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
+       n_sq_entries = max_t(u32, n_sq_entries, 1);
+       n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;
+
+       n_rq_elems = qp->rq.max_wr * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
+
+       rc = qedr_roce_create_kernel_qp(dev, qp, &in_params,
+                                       n_sq_elems, n_rq_elems);
+       if (rc)
+               qedr_cleanup_kernel(dev, qp);
+
+       return rc;
+}
+
+struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
+                            struct ib_qp_init_attr *attrs,
+                            struct ib_udata *udata)
+{
+       struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+       struct qedr_pd *pd = get_qedr_pd(ibpd);
+       struct qedr_qp *qp;
+       struct ib_qp *ibqp;
+       int rc = 0;
+
+       DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
+                udata ? "user library" : "kernel", pd);
+
+       rc = qedr_check_qp_attrs(ibpd, dev, attrs);
+       if (rc)
+               return ERR_PTR(rc);
+
+       if (attrs->srq)
+               return ERR_PTR(-EINVAL);
+
+       DP_DEBUG(dev, QEDR_MSG_QP,
+                "create qp: called from %s, event_handler=%p, eepd=%p sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
+                udata ? "user library" : "kernel", attrs->event_handler, pd,
+                get_qedr_cq(attrs->send_cq),
+                get_qedr_cq(attrs->send_cq)->icid,
+                get_qedr_cq(attrs->recv_cq),
+                get_qedr_cq(attrs->recv_cq)->icid);
+
+       qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+       if (!qp) {
+               DP_ERR(dev, "create qp: failed allocating memory\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       qedr_set_common_qp_params(dev, qp, pd, attrs);
+
+       if (attrs->qp_type == IB_QPT_GSI) {
+               ibqp = qedr_create_gsi_qp(dev, attrs, qp);
+               if (IS_ERR(ibqp))
+                       kfree(qp);
+               return ibqp;
+       }
+
+       if (udata)
+               rc = qedr_create_user_qp(dev, qp, ibpd, udata, attrs);
+       else
+               rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs);
+
+       if (rc)
+               goto err;
+
+       qp->ibqp.qp_num = qp->qp_id;
+
+       return &qp->ibqp;
+
+err:
+       kfree(qp);
+
+       return ERR_PTR(rc);
+}
+
+static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
+{
+       switch (qp_state) {
+       case QED_ROCE_QP_STATE_RESET:
+               return IB_QPS_RESET;
+       case QED_ROCE_QP_STATE_INIT:
+               return IB_QPS_INIT;
+       case QED_ROCE_QP_STATE_RTR:
+               return IB_QPS_RTR;
+       case QED_ROCE_QP_STATE_RTS:
+               return IB_QPS_RTS;
+       case QED_ROCE_QP_STATE_SQD:
+               return IB_QPS_SQD;
+       case QED_ROCE_QP_STATE_ERR:
+               return IB_QPS_ERR;
+       case QED_ROCE_QP_STATE_SQE:
+               return IB_QPS_SQE;
+       }
+       return IB_QPS_ERR;
+}
+
+static enum qed_roce_qp_state qedr_get_state_from_ibqp(
+                                       enum ib_qp_state qp_state)
+{
+       switch (qp_state) {
+       case IB_QPS_RESET:
+               return QED_ROCE_QP_STATE_RESET;
+       case IB_QPS_INIT:
+               return QED_ROCE_QP_STATE_INIT;
+       case IB_QPS_RTR:
+               return QED_ROCE_QP_STATE_RTR;
+       case IB_QPS_RTS:
+               return QED_ROCE_QP_STATE_RTS;
+       case IB_QPS_SQD:
+               return QED_ROCE_QP_STATE_SQD;
+       case IB_QPS_ERR:
+               return QED_ROCE_QP_STATE_ERR;
+       default:
+               return QED_ROCE_QP_STATE_ERR;
+       }
+}
+
+static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
+{
+       qed_chain_reset(&qph->pbl);
+       qph->prod = 0;
+       qph->cons = 0;
+       qph->wqe_cons = 0;
+       qph->db_data.data.value = cpu_to_le16(0);
+}
+
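+/* Validate and apply a QP state transition. Only the legal moves of the IB
+ * QP state machine (RESET->INIT->RTR->RTS, plus SQD and ERR excursions) are
+ * accepted; anything else returns -EINVAL.
+ */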
+static int qedr_update_qp_state(struct qedr_dev *dev,
+                               struct qedr_qp *qp,
+                               enum qed_roce_qp_state new_state)
+{
+       int status = 0;
+
+       if (new_state == qp->state)
+               return 0;
+
+       switch (qp->state) {
+       case QED_ROCE_QP_STATE_RESET:
+               switch (new_state) {
+               case QED_ROCE_QP_STATE_INIT:
+                       qp->prev_wqe_size = 0;
+                       qedr_reset_qp_hwq_info(&qp->sq);
+                       qedr_reset_qp_hwq_info(&qp->rq);
+                       break;
+               default:
+                       status = -EINVAL;
+                       break;
+               }
+               break;
+       case QED_ROCE_QP_STATE_INIT:
+               switch (new_state) {
+               case QED_ROCE_QP_STATE_RTR:
+                       /* Update doorbell (in case post_recv was
+                        * done before move to RTR)
+                        */
+                       wmb();
+                       writel(qp->rq.db_data.raw, qp->rq.db);
+                       /* Make sure write takes effect */
+                       mmiowb();
+                       break;
+               case QED_ROCE_QP_STATE_ERR:
+                       break;
+               default:
+                       /* Invalid state change. */
+                       status = -EINVAL;
+                       break;
+               }
+               break;
+       case QED_ROCE_QP_STATE_RTR:
+               /* RTR->XXX */
+               switch (new_state) {
+               case QED_ROCE_QP_STATE_RTS:
+                       break;
+               case QED_ROCE_QP_STATE_ERR:
+                       break;
+               default:
+                       /* Invalid state change. */
+                       status = -EINVAL;
+                       break;
+               }
+               break;
+       case QED_ROCE_QP_STATE_RTS:
+               /* RTS->XXX */
+               switch (new_state) {
+               case QED_ROCE_QP_STATE_SQD:
+                       break;
+               case QED_ROCE_QP_STATE_ERR:
+                       break;
+               default:
+                       /* Invalid state change. */
+                       status = -EINVAL;
+                       break;
+               }
+               break;
+       case QED_ROCE_QP_STATE_SQD:
+               /* SQD->XXX */
+               switch (new_state) {
+               case QED_ROCE_QP_STATE_RTS:
+               case QED_ROCE_QP_STATE_ERR:
+                       break;
+               default:
+                       /* Invalid state change. */
+                       status = -EINVAL;
+                       break;
+               }
+               break;
+       case QED_ROCE_QP_STATE_ERR:
+               /* ERR->XXX */
+               switch (new_state) {
+               case QED_ROCE_QP_STATE_RESET:
+                       if ((qp->rq.prod != qp->rq.cons) ||
+                           (qp->sq.prod != qp->sq.cons)) {
+                               DP_NOTICE(dev,
+                                         "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
+                                         qp->rq.prod, qp->rq.cons, qp->sq.prod,
+                                         qp->sq.cons);
+                               status = -EINVAL;
+                       }
+                       break;
+               default:
+                       status = -EINVAL;
+                       break;
+               }
+               break;
+       default:
+               status = -EINVAL;
+               break;
+       }
+
+       return status;
+}
+
+int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+                  int attr_mask, struct ib_udata *udata)
+{
+       struct qedr_qp *qp = get_qedr_qp(ibqp);
+       struct qed_rdma_modify_qp_in_params qp_params = { 0 };
+       struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
+       enum ib_qp_state old_qp_state, new_qp_state;
+       int rc = 0;
+
+       DP_DEBUG(dev, QEDR_MSG_QP,
+                "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask,
+                attr->qp_state);
+
+       old_qp_state = qedr_get_ibqp_state(qp->state);
+       if (attr_mask & IB_QP_STATE)
+               new_qp_state = attr->qp_state;
+       else
+               new_qp_state = old_qp_state;
+
+       if (!ib_modify_qp_is_ok
+           (old_qp_state, new_qp_state, ibqp->qp_type, attr_mask,
+            IB_LINK_LAYER_ETHERNET)) {
+               DP_ERR(dev,
+                      "modify qp: invalid attribute mask=0x%x specified for\n"
+                      "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
+                      attr_mask, qp->qp_id, ibqp->qp_type, old_qp_state,
+                      new_qp_state);
+               rc = -EINVAL;
+               goto err;
+       }
+
+       /* Translate the masks... */
+       if (attr_mask & IB_QP_STATE) {
+               SET_FIELD(qp_params.modify_flags,
+                         QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
+               qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
+       }
+
+       if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
+               qp_params.sqd_async = true;
+
+       if (attr_mask & IB_QP_PKEY_INDEX) {
+               SET_FIELD(qp_params.modify_flags,
+                         QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
+               if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
+                       rc = -EINVAL;
+                       goto err;
+               }
+
+               qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
+       }
+
+       if (attr_mask & IB_QP_QKEY)
+               qp->qkey = attr->qkey;
+
+       if (attr_mask & IB_QP_ACCESS_FLAGS) {
+               SET_FIELD(qp_params.modify_flags,
+                         QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
+               qp_params.incoming_rdma_read_en = attr->qp_access_flags &
+                                                 IB_ACCESS_REMOTE_READ;
+               qp_params.incoming_rdma_write_en = attr->qp_access_flags &
+                                                  IB_ACCESS_REMOTE_WRITE;
+               qp_params.incoming_atomic_en = attr->qp_access_flags &
+                                              IB_ACCESS_REMOTE_ATOMIC;
+       }
+
+       if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
+               if (attr_mask & IB_QP_PATH_MTU) {
+                       if (attr->path_mtu < IB_MTU_256 ||
+                           attr->path_mtu > IB_MTU_4096) {
+                               pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
+                               rc = -EINVAL;
+                               goto err;
+                       }
+                       qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
+                                     ib_mtu_enum_to_int(iboe_get_mtu
+                                                        (dev->ndev->mtu)));
+               }
+
+               if (!qp->mtu) {
+                       qp->mtu =
+                       ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
+                       pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
+               }
+
+               SET_FIELD(qp_params.modify_flags,
+                         QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
+
+               qp_params.traffic_class_tos = attr->ah_attr.grh.traffic_class;
+               qp_params.flow_label = attr->ah_attr.grh.flow_label;
+               qp_params.hop_limit_ttl = attr->ah_attr.grh.hop_limit;
+
+               qp->sgid_idx = attr->ah_attr.grh.sgid_index;
+
+               rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
+               if (rc) {
+                       DP_ERR(dev,
+                              "modify qp: problems with GID index %d (rc=%d)\n",
+                              attr->ah_attr.grh.sgid_index, rc);
+                       return rc;
+               }
+
+               rc = qedr_get_dmac(dev, &attr->ah_attr,
+                                  qp_params.remote_mac_addr);
+               if (rc)
+                       return rc;
+
+               qp_params.use_local_mac = true;
+               ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);
+
+               DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
+                        qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
+                        qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
+               DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
+                        qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
+                        qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
+               DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
+                        qp_params.remote_mac_addr);
+
+               qp_params.mtu = qp->mtu;
+               qp_params.lb_indication = false;
+       }
+
+       if (!qp_params.mtu) {
+               /* Stay with current MTU */
+               if (qp->mtu)
+                       qp_params.mtu = qp->mtu;
+               else
+                       qp_params.mtu =
+                           ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
+       }
+
+       if (attr_mask & IB_QP_TIMEOUT) {
+               SET_FIELD(qp_params.modify_flags,
+                         QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
+
+               qp_params.ack_timeout = attr->timeout;
+               if (attr->timeout) {
+                       u32 temp;
+
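+                       /* IBTA encodes the ack timeout as 4.096 usec * 2^timeout */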
+                       temp = 4096 * (1UL << attr->timeout) / 1000 / 1000;
+                       /* FW requires [msec] */
+                       qp_params.ack_timeout = temp;
+               } else {
+                       /* Infinite */
+                       qp_params.ack_timeout = 0;
+               }
+       }
+       if (attr_mask & IB_QP_RETRY_CNT) {
+               SET_FIELD(qp_params.modify_flags,
+                         QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
+               qp_params.retry_cnt = attr->retry_cnt;
+       }
+
+       if (attr_mask & IB_QP_RNR_RETRY) {
+               SET_FIELD(qp_params.modify_flags,
+                         QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
+               qp_params.rnr_retry_cnt = attr->rnr_retry;
+       }
+
+       if (attr_mask & IB_QP_RQ_PSN) {
+               SET_FIELD(qp_params.modify_flags,
+                         QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
+               qp_params.rq_psn = attr->rq_psn;
+               qp->rq_psn = attr->rq_psn;
+       }
+
+       if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
+               if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
+                       rc = -EINVAL;
+                       DP_ERR(dev,
+                              "unsupported max_rd_atomic=%d, supported=%d\n",
+                              attr->max_rd_atomic,
+                              dev->attr.max_qp_req_rd_atomic_resc);
+                       goto err;
+               }
+
+               SET_FIELD(qp_params.modify_flags,
+                         QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
+               qp_params.max_rd_atomic_req = attr->max_rd_atomic;
+       }
+
+       if (attr_mask & IB_QP_MIN_RNR_TIMER) {
+               SET_FIELD(qp_params.modify_flags,
+                         QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
+               qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
+       }
+
+       if (attr_mask & IB_QP_SQ_PSN) {
+               SET_FIELD(qp_params.modify_flags,
+                         QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
+               qp_params.sq_psn = attr->sq_psn;
+               qp->sq_psn = attr->sq_psn;
+       }
+
+       if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
+               if (attr->max_dest_rd_atomic >
+                   dev->attr.max_qp_resp_rd_atomic_resc) {
+                       DP_ERR(dev,
+                              "unsupported max_dest_rd_atomic=%d, supported=%d\n",
+                              attr->max_dest_rd_atomic,
+                              dev->attr.max_qp_resp_rd_atomic_resc);
+
+                       rc = -EINVAL;
+                       goto err;
+               }
+
+               SET_FIELD(qp_params.modify_flags,
+                         QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
+               qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
+       }
+
+       if (attr_mask & IB_QP_DEST_QPN) {
+               SET_FIELD(qp_params.modify_flags,
+                         QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);
+
+               qp_params.dest_qp = attr->dest_qp_num;
+               qp->dest_qp_num = attr->dest_qp_num;
+       }
+
+       if (qp->qp_type != IB_QPT_GSI)
+               rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
+                                             qp->qed_qp, &qp_params);
+
+       if (attr_mask & IB_QP_STATE) {
+               if ((qp->qp_type != IB_QPT_GSI) && (!udata))
+                       rc = qedr_update_qp_state(dev, qp, qp_params.new_state);
+               qp->state = qp_params.new_state;
+       }
+
+err:
+       return rc;
+}
+
+static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
+{
+       int ib_qp_acc_flags = 0;
+
+       if (params->incoming_rdma_write_en)
+               ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
+       if (params->incoming_rdma_read_en)
+               ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
+       if (params->incoming_atomic_en)
+               ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
+       ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
+       return ib_qp_acc_flags;
+}
+
+int qedr_query_qp(struct ib_qp *ibqp,
+                 struct ib_qp_attr *qp_attr,
+                 int attr_mask, struct ib_qp_init_attr *qp_init_attr)
+{
+       struct qed_rdma_query_qp_out_params params;
+       struct qedr_qp *qp = get_qedr_qp(ibqp);
+       struct qedr_dev *dev = qp->dev;
+       int rc = 0;
+
+       memset(&params, 0, sizeof(params));
+
+       rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
+       if (rc)
+               goto err;
+
+       memset(qp_attr, 0, sizeof(*qp_attr));
+       memset(qp_init_attr, 0, sizeof(*qp_init_attr));
+
+       qp_attr->qp_state = qedr_get_ibqp_state(params.state);
+       qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
+       qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
+       qp_attr->path_mig_state = IB_MIG_MIGRATED;
+       qp_attr->rq_psn = params.rq_psn;
+       qp_attr->sq_psn = params.sq_psn;
+       qp_attr->dest_qp_num = params.dest_qp;
+
+       qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);
+
+       qp_attr->cap.max_send_wr = qp->sq.max_wr;
+       qp_attr->cap.max_recv_wr = qp->rq.max_wr;
+       qp_attr->cap.max_send_sge = qp->sq.max_sges;
+       qp_attr->cap.max_recv_sge = qp->rq.max_sges;
+       qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
+       qp_init_attr->cap = qp_attr->cap;
+
+       memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0],
+              sizeof(qp_attr->ah_attr.grh.dgid.raw));
+
+       qp_attr->ah_attr.grh.flow_label = params.flow_label;
+       qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
+       qp_attr->ah_attr.grh.hop_limit = params.hop_limit_ttl;
+       qp_attr->ah_attr.grh.traffic_class = params.traffic_class_tos;
+
+       qp_attr->ah_attr.ah_flags = IB_AH_GRH;
+       qp_attr->ah_attr.port_num = 1;
+       qp_attr->ah_attr.sl = 0;
+       qp_attr->timeout = params.timeout;
+       qp_attr->rnr_retry = params.rnr_retry;
+       qp_attr->retry_cnt = params.retry_cnt;
+       qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
+       qp_attr->pkey_index = params.pkey_index;
+       qp_attr->port_num = 1;
+       qp_attr->ah_attr.src_path_bits = 0;
+       qp_attr->ah_attr.static_rate = 0;
+       qp_attr->alt_pkey_index = 0;
+       qp_attr->alt_port_num = 0;
+       qp_attr->alt_timeout = 0;
+       memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
+
+       qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
+       qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
+       qp_attr->max_rd_atomic = params.max_rd_atomic;
+       qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;
+
+       DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
+                qp_attr->cap.max_inline_data);
+
+err:
+       return rc;
+}
+
+int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+       int rc = 0;
+
+       if (qp->qp_type != IB_QPT_GSI) {
+               rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
+               if (rc)
+                       return rc;
+       }
+
+       if (qp->ibqp.uobject && qp->ibqp.uobject->context)
+               qedr_cleanup_user(dev, qp);
+       else
+               qedr_cleanup_kernel(dev, qp);
+
+       return 0;
+}
+
+int qedr_destroy_qp(struct ib_qp *ibqp)
+{
+       struct qedr_qp *qp = get_qedr_qp(ibqp);
+       struct qedr_dev *dev = qp->dev;
+       struct ib_qp_attr attr;
+       int attr_mask = 0;
+       int rc = 0;
+
+       DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
+                qp, qp->qp_type);
+
+       if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
+           (qp->state != QED_ROCE_QP_STATE_ERR) &&
+           (qp->state != QED_ROCE_QP_STATE_INIT)) {
+
+               attr.qp_state = IB_QPS_ERR;
+               attr_mask |= IB_QP_STATE;
+
+               /* Change the QP state to ERROR */
+               qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
+       }
+
+       if (qp->qp_type == IB_QPT_GSI)
+               qedr_destroy_gsi_qp(dev);
+
+       qedr_free_qp_resources(dev, qp);
+
+       kfree(qp);
+
+       return rc;
+}
+
+struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
+{
+       struct qedr_ah *ah;
+
+       ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
+       if (!ah)
+               return ERR_PTR(-ENOMEM);
+
+       ah->attr = *attr;
+
+       return &ah->ibah;
+}
+
+int qedr_destroy_ah(struct ib_ah *ibah)
+{
+       struct qedr_ah *ah = get_qedr_ah(ibah);
+
+       kfree(ah);
+       return 0;
+}
+
+static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
+{
+       struct qedr_pbl *pbl, *tmp;
+
+       if (info->pbl_table)
+               list_add_tail(&info->pbl_table->list_entry,
+                             &info->free_pbl_list);
+
+       if (!list_empty(&info->inuse_pbl_list))
+               list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
+
+       list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
+               list_del(&pbl->list_entry);
+               qedr_free_pbl(dev, &info->pbl_info, pbl);
+       }
+}
+
+static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
+                       size_t page_list_len, bool two_layered)
+{
+       struct qedr_pbl *tmp;
+       int rc;
+
+       INIT_LIST_HEAD(&info->free_pbl_list);
+       INIT_LIST_HEAD(&info->inuse_pbl_list);
+
+       rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
+                                 page_list_len, two_layered);
+       if (rc)
+               goto done;
+
+       info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
+       if (IS_ERR(info->pbl_table)) {
+               rc = PTR_ERR(info->pbl_table);
+               goto done;
+       }
+
+       DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
+                &info->pbl_table->pa);
+
+       /* In the usual case we use 2 PBLs, so we add one to the free
+        * list and allocate another one
+        */
+       tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
+       if (IS_ERR(tmp)) {
+               DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL was not allocated\n");
+               goto done;
+       }
+
+       list_add_tail(&tmp->list_entry, &info->free_pbl_list);
+
+       DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
+
+done:
+       if (rc)
+               free_mr_info(dev, info);
+
+       return rc;
+}
+
+struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
+                              u64 usr_addr, int acc, struct ib_udata *udata)
+{
+       struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+       struct qedr_mr *mr;
+       struct qedr_pd *pd;
+       int rc = -ENOMEM;
+
+       pd = get_qedr_pd(ibpd);
+       DP_DEBUG(dev, QEDR_MSG_MR,
+                "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
+                pd->pd_id, start, len, usr_addr, acc);
+
+       if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
+               return ERR_PTR(-EINVAL);
+
+       mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+       if (!mr)
+               return ERR_PTR(rc);
+
+       mr->type = QEDR_MR_USER;
+
+       mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
+       if (IS_ERR(mr->umem)) {
+               rc = -EFAULT;
+               goto err0;
+       }
+
+       rc = init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1);
+       if (rc)
+               goto err1;
+
+       qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
+                          &mr->info.pbl_info);
+
+       rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
+       if (rc) {
+               DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
+               goto err1;
+       }
+
+       /* Index only, 18 bit long, lkey = itid << 8 | key */
+       mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
+       mr->hw_mr.key = 0;
+       mr->hw_mr.pd = pd->pd_id;
+       mr->hw_mr.local_read = 1;
+       mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
+       mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
+       mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
+       mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
+       mr->hw_mr.mw_bind = false;
+       mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
+       mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
+       mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
+       mr->hw_mr.page_size_log = ilog2(mr->umem->page_size);
+       mr->hw_mr.fbo = ib_umem_offset(mr->umem);
+       mr->hw_mr.length = len;
+       mr->hw_mr.vaddr = usr_addr;
+       mr->hw_mr.zbva = false;
+       mr->hw_mr.phy_mr = false;
+       mr->hw_mr.dma_mr = false;
+
+       rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
+       if (rc) {
+               DP_ERR(dev, "roce register tid returned an error %d\n", rc);
+               goto err2;
+       }
+
+       mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
+       if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
+           mr->hw_mr.remote_atomic)
+               mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
+
+       DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
+                mr->ibmr.lkey);
+       return &mr->ibmr;
+
+err2:
+       dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
+err1:
+       qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
+err0:
+       kfree(mr);
+       return ERR_PTR(rc);
+}
+
+int qedr_dereg_mr(struct ib_mr *ib_mr)
+{
+       struct qedr_mr *mr = get_qedr_mr(ib_mr);
+       struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
+       int rc = 0;
+
+       rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
+       if (rc)
+               return rc;
+
+       dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
+
+       if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR))
+               qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
+
+       /* It could be user-registered memory. */
+       if (mr->umem)
+               ib_umem_release(mr->umem);
+
+       kfree(mr);
+
+       return rc;
+}
+
+static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
+                                      int max_page_list_len)
+{
+       struct qedr_pd *pd = get_qedr_pd(ibpd);
+       struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+       struct qedr_mr *mr;
+       int rc = -ENOMEM;
+
+       DP_DEBUG(dev, QEDR_MSG_MR,
+                "qedr_alloc_frmr pd = %d max_page_list_len = %d\n", pd->pd_id,
+                max_page_list_len);
+
+       mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+       if (!mr)
+               return ERR_PTR(rc);
+
+       mr->dev = dev;
+       mr->type = QEDR_MR_FRMR;
+
+       rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
+       if (rc)
+               goto err0;
+
+       rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
+       if (rc) {
+               DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
+               goto err0;
+       }
+
+       /* Index only, 18 bit long, lkey = itid << 8 | key */
+       mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
+       mr->hw_mr.key = 0;
+       mr->hw_mr.pd = pd->pd_id;
+       mr->hw_mr.local_read = 1;
+       mr->hw_mr.local_write = 0;
+       mr->hw_mr.remote_read = 0;
+       mr->hw_mr.remote_write = 0;
+       mr->hw_mr.remote_atomic = 0;
+       mr->hw_mr.mw_bind = false;
+       mr->hw_mr.pbl_ptr = 0;
+       mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
+       mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
+       mr->hw_mr.fbo = 0;
+       mr->hw_mr.length = 0;
+       mr->hw_mr.vaddr = 0;
+       mr->hw_mr.zbva = false;
+       mr->hw_mr.phy_mr = true;
+       mr->hw_mr.dma_mr = false;
+
+       rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
+       if (rc) {
+               DP_ERR(dev, "roce register tid returned an error %d\n", rc);
+               goto err1;
+       }
+
+       mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
+       mr->ibmr.rkey = mr->ibmr.lkey;
+
+       DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
+       return mr;
+
+err1:
+       dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
+err0:
+       kfree(mr);
+       return ERR_PTR(rc);
+}
+
+struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd,
+                           enum ib_mr_type mr_type, u32 max_num_sg)
+{
+       struct qedr_dev *dev;
+       struct qedr_mr *mr;
+
+       if (mr_type != IB_MR_TYPE_MEM_REG)
+               return ERR_PTR(-EINVAL);
+
+       mr = __qedr_alloc_mr(ibpd, max_num_sg);
+
+       if (IS_ERR(mr))
+               return ERR_PTR(-EINVAL);
+
+       dev = mr->dev;
+
+       return &mr->ibmr;
+}
+
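+/* Write one page address into the MR's page-buffer list: locate the PBL
+ * page that holds entry mr->npages and store the DMA address there as a
+ * little-endian lo/hi pair.
+ */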
+static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
+{
+       struct qedr_mr *mr = get_qedr_mr(ibmr);
+       struct qedr_pbl *pbl_table;
+       struct regpair *pbe;
+       u32 pbes_in_page;
+
+       if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
+               DP_ERR(mr->dev, "qedr_set_page fails when npages=%d\n", mr->npages);
+               return -ENOMEM;
+       }
+
+       DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
+                mr->npages, addr);
+
+       pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
+       pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
+       pbe = (struct regpair *)pbl_table->va;
+       pbe +=  mr->npages % pbes_in_page;
+       pbe->lo = cpu_to_le32((u32)addr);
+       pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
+
+       mr->npages++;
+
+       return 0;
+}
+
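+/* Recycle PBLs of completed fast-reg MRs: move up to
+ * (completed - completed_handled - 1) entries from the in-use list back
+ * to the free list. The most recent completion is deliberately left
+ * in-use, presumably because HW may still reference its PBL.
+ */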
+static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
+{
+       int work = info->completed - info->completed_handled - 1;
+
+       DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
+       while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
+               struct qedr_pbl *pbl;
+
+               /* Free all the page lists that can be freed (all the ones
+                * that were invalidated), under the assumption that if an
+                * FMR completed successfully, any invalidate operation
+                * posted before it has completed as well.
+                */
+               pbl = list_first_entry(&info->inuse_pbl_list,
+                                      struct qedr_pbl, list_entry);
+               list_move_tail(&pbl->list_entry, &info->free_pbl_list);
+               info->completed_handled++;
+       }
+}
+
+int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+                  int sg_nents, unsigned int *sg_offset)
+{
+       struct qedr_mr *mr = get_qedr_mr(ibmr);
+
+       mr->npages = 0;
+
+       handle_completed_mrs(mr->dev, &mr->info);
+       return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
+}
+
+struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
+{
+       struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+       struct qedr_pd *pd = get_qedr_pd(ibpd);
+       struct qedr_mr *mr;
+       int rc;
+
+       mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+       if (!mr)
+               return ERR_PTR(-ENOMEM);
+
+       mr->type = QEDR_MR_DMA;
+
+       rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
+       if (rc) {
+               DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
+               goto err1;
+       }
+
+       /* Index only, 18 bit long, lkey = itid << 8 | key */
+       mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
+       mr->hw_mr.pd = pd->pd_id;
+       mr->hw_mr.local_read = 1;
+       mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
+       mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
+       mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
+       mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
+       mr->hw_mr.dma_mr = true;
+
+       rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
+       if (rc) {
+               DP_ERR(dev, "roce register tid returned an error %d\n", rc);
+               goto err2;
+       }
+
+       mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
+       if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
+           mr->hw_mr.remote_atomic)
+               mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
+
+       DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
+       return &mr->ibmr;
+
+err2:
+       dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
+err1:
+       kfree(mr);
+       return ERR_PTR(rc);
+}
+
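+/* Classic ring-buffer full test: the SQ is full when advancing the
+ * producer by one slot would collide with the consumer, so one slot
+ * always stays unused.
+ */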
+static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
+{
+       return (((wq->prod + 1) % wq->max_wr) == wq->cons);
+}
+
+static int sge_data_len(struct ib_sge *sg_list, int num_sge)
+{
+       int i, len = 0;
+
+       for (i = 0; i < num_sge; i++)
+               len += sg_list[i].length;
+
+       return len;
+}
+
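+/* Byte-swap each 64-bit word of an inline segment.
+ * cpu_to_be64(cpu_to_le64(x)) reduces to an unconditional swab64() on
+ * both little- and big-endian CPUs, which is presumably the byte order
+ * the FW expects for inline payload.
+ */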
+static void swap_wqe_data64(u64 *p)
+{
+       int i;
+
+       for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
+               *p = cpu_to_be64(cpu_to_le64(*p));
+}
+
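+/* Copy a WR's scatter/gather payload directly into SQ WQE elements.
+ * A fresh chain element is produced whenever the current one fills up,
+ * and every fully written element is byte-swapped for the HW.
+ */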
+static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
+                                      struct qedr_qp *qp, u8 *wqe_size,
+                                      struct ib_send_wr *wr,
+                                      struct ib_send_wr **bad_wr, u8 *bits,
+                                      u8 bit)
+{
+       u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
+       char *seg_prt, *wqe;
+       int i, seg_siz;
+
+       if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
+               DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
+               *bad_wr = wr;
+               return 0;
+       }
+
+       if (!data_size)
+               return data_size;
+
+       *bits |= bit;
+
+       seg_prt = NULL;
+       wqe = NULL;
+       seg_siz = 0;
+
+       /* Copy data inline */
+       for (i = 0; i < wr->num_sge; i++) {
+               u32 len = wr->sg_list[i].length;
+               void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
+
+               while (len > 0) {
+                       u32 cur;
+
+                       /* New segment required */
+                       if (!seg_siz) {
+                               wqe = (char *)qed_chain_produce(&qp->sq.pbl);
+                               seg_prt = wqe;
+                               seg_siz = sizeof(struct rdma_sq_common_wqe);
+                               (*wqe_size)++;
+                       }
+
+                       /* Calculate currently allowed length */
+                       cur = min_t(u32, len, seg_siz);
+                       memcpy(seg_prt, src, cur);
+
+                       /* Update segment variables */
+                       seg_prt += cur;
+                       seg_siz -= cur;
+
+                       /* Update sge variables */
+                       src += cur;
+                       len -= cur;
+
+                       /* Swap fully-completed segments */
+                       if (!seg_siz)
+                               swap_wqe_data64((u64 *)wqe);
+               }
+       }
+
+       /* Swap the last, partially filled segment */
+       if (seg_siz)
+               swap_wqe_data64((u64 *)wqe);
+
+       return data_size;
+}
+
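+/* Helpers that fill SGEs and SRQ headers in chain memory, converting
+ * addresses, lengths and keys to little-endian on the way.
+ */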
+#define RQ_SGE_SET(sge, vaddr, vlength, vflags)                        \
+       do {                                                    \
+               DMA_REGPAIR_LE(sge->addr, vaddr);               \
+               (sge)->length = cpu_to_le32(vlength);           \
+               (sge)->flags = cpu_to_le32(vflags);             \
+       } while (0)
+
+#define SRQ_HDR_SET(hdr, vwr_id, num_sge)                      \
+       do {                                                    \
+               DMA_REGPAIR_LE(hdr->wr_id, vwr_id);             \
+               (hdr)->num_sges = num_sge;                      \
+       } while (0)
+
+#define SRQ_SGE_SET(sge, vaddr, vlength, vlkey)                        \
+       do {                                                    \
+               DMA_REGPAIR_LE(sge->addr, vaddr);               \
+               (sge)->length = cpu_to_le32(vlength);           \
+               (sge)->l_key = cpu_to_le32(vlkey);              \
+       } while (0)
+
+static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
+                               struct ib_send_wr *wr)
+{
+       u32 data_size = 0;
+       int i;
+
+       for (i = 0; i < wr->num_sge; i++) {
+               struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);
+
+               DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
+               sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
+               sge->length = cpu_to_le32(wr->sg_list[i].length);
+               data_size += wr->sg_list[i].length;
+       }
+
+       if (wqe_size)
+               *wqe_size += wr->num_sge;
+
+       return data_size;
+}
+
+static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
+                                    struct qedr_qp *qp,
+                                    struct rdma_sq_rdma_wqe_1st *rwqe,
+                                    struct rdma_sq_rdma_wqe_2nd *rwqe2,
+                                    struct ib_send_wr *wr,
+                                    struct ib_send_wr **bad_wr)
+{
+       rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
+       DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
+
+       if (wr->send_flags & IB_SEND_INLINE &&
+           (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
+            wr->opcode == IB_WR_RDMA_WRITE)) {
+               u8 flags = 0;
+
+               SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
+               return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
+                                                  bad_wr, &rwqe->flags, flags);
+       }
+
+       return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
+}
+
+static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
+                                    struct qedr_qp *qp,
+                                    struct rdma_sq_send_wqe_1st *swqe,
+                                    struct rdma_sq_send_wqe_2st *swqe2,
+                                    struct ib_send_wr *wr,
+                                    struct ib_send_wr **bad_wr)
+{
+       memset(swqe2, 0, sizeof(*swqe2));
+       if (wr->send_flags & IB_SEND_INLINE) {
+               u8 flags = 0;
+
+               SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
+               return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
+                                                  bad_wr, &swqe->flags, flags);
+       }
+
+       return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
+}
+
+static int qedr_prepare_reg(struct qedr_qp *qp,
+                           struct rdma_sq_fmr_wqe_1st *fwqe1,
+                           struct ib_reg_wr *wr)
+{
+       struct qedr_mr *mr = get_qedr_mr(wr->mr);
+       struct rdma_sq_fmr_wqe_2nd *fwqe2;
+
+       fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
+       fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
+       fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
+       fwqe1->l_key = wr->key;
+
+       SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
+                  !!(wr->access & IB_ACCESS_REMOTE_READ));
+       SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
+                  !!(wr->access & IB_ACCESS_REMOTE_WRITE));
+       SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
+                  !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
+       SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
+       SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
+                  !!(wr->access & IB_ACCESS_LOCAL_WRITE));
+       fwqe2->fmr_ctrl = 0;
+
+       SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
+                  ilog2(mr->ibmr.page_size) - 12);
+
+       fwqe2->length_hi = 0;
+       fwqe2->length_lo = mr->ibmr.length;
+       fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
+       fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
+
+       qp->wqe_wr_id[qp->sq.prod].mr = mr;
+
+       return 0;
+}
+
+static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
+{
+       switch (opcode) {
+       case IB_WR_RDMA_WRITE:
+       case IB_WR_RDMA_WRITE_WITH_IMM:
+               return IB_WC_RDMA_WRITE;
+       case IB_WR_SEND_WITH_IMM:
+       case IB_WR_SEND:
+       case IB_WR_SEND_WITH_INV:
+               return IB_WC_SEND;
+       case IB_WR_RDMA_READ:
+               return IB_WC_RDMA_READ;
+       case IB_WR_ATOMIC_CMP_AND_SWP:
+               return IB_WC_COMP_SWAP;
+       case IB_WR_ATOMIC_FETCH_AND_ADD:
+               return IB_WC_FETCH_ADD;
+       case IB_WR_REG_MR:
+               return IB_WC_REG_MR;
+       case IB_WR_LOCAL_INV:
+               return IB_WC_LOCAL_INV;
+       default:
+               return IB_WC_SEND;
+       }
+}
+
+static inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
+{
+       int wq_is_full, err_wr, pbl_is_full;
+       struct qedr_dev *dev = qp->dev;
+
+       /* prevent SQ overflow and/or processing of a bad WR */
+       err_wr = wr->num_sge > qp->sq.max_sges;
+       wq_is_full = qedr_wq_is_full(&qp->sq);
+       pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
+                     QEDR_MAX_SQE_ELEMENTS_PER_SQE;
+       if (wq_is_full || err_wr || pbl_is_full) {
+               if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
+                       DP_ERR(dev,
+                              "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
+                              qp);
+                       qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
+               }
+
+               if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
+                       DP_ERR(dev,
+                              "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
+                              qp);
+                       qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
+               }
+
+               if (pbl_is_full &&
+                   !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
+                       DP_ERR(dev,
+                              "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
+                              qp);
+                       qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
+               }
+               return false;
+       }
+       return true;
+}
+
+static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+                    struct ib_send_wr **bad_wr)
+{
+       struct qedr_dev *dev = get_qedr_dev(ibqp->device);
+       struct qedr_qp *qp = get_qedr_qp(ibqp);
+       struct rdma_sq_atomic_wqe_1st *awqe1;
+       struct rdma_sq_atomic_wqe_2nd *awqe2;
+       struct rdma_sq_atomic_wqe_3rd *awqe3;
+       struct rdma_sq_send_wqe_2st *swqe2;
+       struct rdma_sq_local_inv_wqe *iwqe;
+       struct rdma_sq_rdma_wqe_2nd *rwqe2;
+       struct rdma_sq_send_wqe_1st *swqe;
+       struct rdma_sq_rdma_wqe_1st *rwqe;
+       struct rdma_sq_fmr_wqe_1st *fwqe1;
+       struct rdma_sq_common_wqe *wqe;
+       u32 length;
+       int rc = 0;
+       bool comp;
+
+       if (!qedr_can_post_send(qp, wr)) {
+               *bad_wr = wr;
+               return -ENOMEM;
+       }
+
+       wqe = qed_chain_produce(&qp->sq.pbl);
+       qp->wqe_wr_id[qp->sq.prod].signaled =
+               !!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
+
+       wqe->flags = 0;
+       SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
+                  !!(wr->send_flags & IB_SEND_SOLICITED));
+       comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
+       SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
+       SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
+                  !!(wr->send_flags & IB_SEND_FENCE));
+       wqe->prev_wqe_size = qp->prev_wqe_size;
+
+       qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);
+
+       switch (wr->opcode) {
+       case IB_WR_SEND_WITH_IMM:
+               wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
+               swqe = (struct rdma_sq_send_wqe_1st *)wqe;
+               swqe->wqe_size = 2;
+               swqe2 = qed_chain_produce(&qp->sq.pbl);
+
+               swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.imm_data);
+               length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
+                                                  wr, bad_wr);
+               swqe->length = cpu_to_le32(length);
+               qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
+               qp->prev_wqe_size = swqe->wqe_size;
+               qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
+               break;
+       case IB_WR_SEND:
+               wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
+               swqe = (struct rdma_sq_send_wqe_1st *)wqe;
+
+               swqe->wqe_size = 2;
+               swqe2 = qed_chain_produce(&qp->sq.pbl);
+               length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
+                                                  wr, bad_wr);
+               swqe->length = cpu_to_le32(length);
+               qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
+               qp->prev_wqe_size = swqe->wqe_size;
+               qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
+               break;
+       case IB_WR_SEND_WITH_INV:
+               wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
+               swqe = (struct rdma_sq_send_wqe_1st *)wqe;
+               swqe2 = qed_chain_produce(&qp->sq.pbl);
+               swqe->wqe_size = 2;
+               swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
+               length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
+                                                  wr, bad_wr);
+               swqe->length = cpu_to_le32(length);
+               qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
+               qp->prev_wqe_size = swqe->wqe_size;
+               qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
+               break;
+
+       case IB_WR_RDMA_WRITE_WITH_IMM:
+               wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
+               rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
+
+               rwqe->wqe_size = 2;
+               rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
+               rwqe2 = qed_chain_produce(&qp->sq.pbl);
+               length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
+                                                  wr, bad_wr);
+               rwqe->length = cpu_to_le32(length);
+               qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
+               qp->prev_wqe_size = rwqe->wqe_size;
+               qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
+               break;
+       case IB_WR_RDMA_WRITE:
+               wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
+               rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
+
+               rwqe->wqe_size = 2;
+               rwqe2 = qed_chain_produce(&qp->sq.pbl);
+               length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
+                                                  wr, bad_wr);
+               rwqe->length = cpu_to_le32(length);
+               qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
+               qp->prev_wqe_size = rwqe->wqe_size;
+               qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
+               break;
+       case IB_WR_RDMA_READ_WITH_INV:
+               DP_ERR(dev,
+                      "RDMA READ WITH INVALIDATE not supported\n");
+               *bad_wr = wr;
+               rc = -EINVAL;
+               break;
+
+       case IB_WR_RDMA_READ:
+               wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
+               rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
+
+               rwqe->wqe_size = 2;
+               rwqe2 = qed_chain_produce(&qp->sq.pbl);
+               length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
+                                                  wr, bad_wr);
+               rwqe->length = cpu_to_le32(length);
+               qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
+               qp->prev_wqe_size = rwqe->wqe_size;
+               qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
+               break;
+
+       case IB_WR_ATOMIC_CMP_AND_SWP:
+       case IB_WR_ATOMIC_FETCH_AND_ADD:
+               awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
+               awqe1->wqe_size = 4;
+
+               awqe2 = qed_chain_produce(&qp->sq.pbl);
+               DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
+               awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
+
+               awqe3 = qed_chain_produce(&qp->sq.pbl);
+
+               if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
+                       wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
+                       DMA_REGPAIR_LE(awqe3->swap_data,
+                                      atomic_wr(wr)->compare_add);
+               } else {
+                       wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
+                       DMA_REGPAIR_LE(awqe3->swap_data,
+                                      atomic_wr(wr)->swap);
+                       DMA_REGPAIR_LE(awqe3->cmp_data,
+                                      atomic_wr(wr)->compare_add);
+               }
+
+               qedr_prepare_sq_sges(qp, NULL, wr);
+
+               qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
+               qp->prev_wqe_size = awqe1->wqe_size;
+               break;
+
+       case IB_WR_LOCAL_INV:
+               iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
+               iwqe->wqe_size = 1;
+
+               iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
+               iwqe->inv_l_key = wr->ex.invalidate_rkey;
+               qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
+               qp->prev_wqe_size = iwqe->wqe_size;
+               break;
+       case IB_WR_REG_MR:
+               DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
+               wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
+               fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
+               fwqe1->wqe_size = 2;
+
+               rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
+               if (rc) {
+                       DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
+                       *bad_wr = wr;
+                       break;
+               }
+
+               qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
+               qp->prev_wqe_size = fwqe1->wqe_size;
+               break;
+       default:
+               DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
+               rc = -EINVAL;
+               *bad_wr = wr;
+               break;
+       }
+
+       if (*bad_wr) {
+               u16 value;
+
+               /* Restore prod to its position before
+                * this WR was processed
+                */
+               value = le16_to_cpu(qp->sq.db_data.data.value);
+               qed_chain_set_prod(&qp->sq.pbl, value, wqe);
+
+               /* Restore prev_wqe_size */
+               qp->prev_wqe_size = wqe->prev_wqe_size;
+               rc = -EINVAL;
+               DP_ERR(dev, "POST SEND FAILED\n");
+       }
+
+       return rc;
+}
+
+int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+                  struct ib_send_wr **bad_wr)
+{
+       struct qedr_dev *dev = get_qedr_dev(ibqp->device);
+       struct qedr_qp *qp = get_qedr_qp(ibqp);
+       unsigned long flags;
+       int rc = 0;
+
+       *bad_wr = NULL;
+
+       if (qp->qp_type == IB_QPT_GSI)
+               return qedr_gsi_post_send(ibqp, wr, bad_wr);
+
+       spin_lock_irqsave(&qp->q_lock, flags);
+
+       if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
+           (qp->state != QED_ROCE_QP_STATE_ERR) &&
+           (qp->state != QED_ROCE_QP_STATE_SQD)) {
+               spin_unlock_irqrestore(&qp->q_lock, flags);
+               *bad_wr = wr;
+               DP_DEBUG(dev, QEDR_MSG_CQ,
+                        "QP in wrong state! QP icid=0x%x state %d\n",
+                        qp->icid, qp->state);
+               return -EINVAL;
+       }
+
+       while (wr) {
+               rc = __qedr_post_send(ibqp, wr, bad_wr);
+               if (rc)
+                       break;
+
+               qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
+
+               qedr_inc_sw_prod(&qp->sq);
+
+               qp->sq.db_data.data.value++;
+
+               wr = wr->next;
+       }
+
+       /* Trigger the doorbell.
+        * If the first WR failed, the doorbell rings in vain. However, this
+        * is not harmful (as long as the producer value is unchanged). For
+        * performance reasons we avoid checking for this redundant doorbell.
+        */
+       wmb();
+       writel(qp->sq.db_data.raw, qp->sq.db);
+
+       /* Make sure write sticks */
+       mmiowb();
+
+       spin_unlock_irqrestore(&qp->q_lock, flags);
+
+       return rc;
+}
+
+int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+                  struct ib_recv_wr **bad_wr)
+{
+       struct qedr_qp *qp = get_qedr_qp(ibqp);
+       struct qedr_dev *dev = qp->dev;
+       unsigned long flags;
+       int status = 0;
+
+       if (qp->qp_type == IB_QPT_GSI)
+               return qedr_gsi_post_recv(ibqp, wr, bad_wr);
+
+       spin_lock_irqsave(&qp->q_lock, flags);
+
+       if (qp->state == QED_ROCE_QP_STATE_RESET) {
+               spin_unlock_irqrestore(&qp->q_lock, flags);
+               *bad_wr = wr;
+               return -EINVAL;
+       }
+
+       while (wr) {
+               int i;
+
+               if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
+                   QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
+                   wr->num_sge > qp->rq.max_sges) {
+                       DP_ERR(dev, "Can't post WR (%d < %d) || (%d > %d)\n",
+                              qed_chain_get_elem_left_u32(&qp->rq.pbl),
+                              QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
+                              qp->rq.max_sges);
+                       status = -ENOMEM;
+                       *bad_wr = wr;
+                       break;
+               }
+               for (i = 0; i < wr->num_sge; i++) {
+                       u32 flags = 0;
+                       struct rdma_rq_sge *rqe =
+                           qed_chain_produce(&qp->rq.pbl);
+
+                       /* The first SGE must include the number
+                        * of SGEs in the list
+                        */
+                       if (!i)
+                               SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
+                                         wr->num_sge);
+
+                       SET_FIELD(flags, RDMA_RQ_SGE_L_KEY,
+                                 wr->sg_list[i].lkey);
+
+                       RQ_SGE_SET(rqe, wr->sg_list[i].addr,
+                                  wr->sg_list[i].length, flags);
+               }
+
+               /* Special case of no SGEs. FW requires between 1 and 4 SGEs,
+                * so in this case we post one SGE with length zero. This is
+                * because an RDMA write with immediate consumes an RQE.
+                */
+               if (!wr->num_sge) {
+                       u32 flags = 0;
+                       struct rdma_rq_sge *rqe =
+                           qed_chain_produce(&qp->rq.pbl);
+
+                       /* The first (and only) SGE must include
+                        * the number of SGEs in the list
+                        */
+                       SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, 0);
+                       SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
+
+                       RQ_SGE_SET(rqe, 0, 0, flags);
+                       i = 1;
+               }
+
+               qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
+               qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
+
+               qedr_inc_sw_prod(&qp->rq);
+
+               /* Flush all the writes before signalling doorbell */
+               wmb();
+
+               qp->rq.db_data.data.value++;
+
+               writel(qp->rq.db_data.raw, qp->rq.db);
+
+               /* Make sure write sticks */
+               mmiowb();
+
+               wr = wr->next;
+       }
+
+       spin_unlock_irqrestore(&qp->q_lock, flags);
+
+       return status;
+}
+
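+/* A CQE is valid (new) only while its toggle bit matches the CQ's
+ * current pbl_toggle; the bit is expected to flip each time the
+ * producer wraps around the CQ buffer.
+ */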
+static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
+{
+       struct rdma_cqe_requester *resp_cqe = &cqe->req;
+
+       return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
+               cq->pbl_toggle;
+}
+
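+/* The QP's kernel pointer is stashed in the CQE's qp_handle hi/lo pair
+ * when the QP is created; reassemble it here to find the owning QP.
+ */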
+static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
+{
+       struct rdma_cqe_requester *resp_cqe = &cqe->req;
+       struct qedr_qp *qp;
+
+       qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
+                                                  resp_cqe->qp_handle.lo,
+                                                  u64);
+       return qp;
+}
+
+static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
+{
+       struct rdma_cqe_requester *resp_cqe = &cqe->req;
+
+       return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
+}
+
+/* Return latest CQE (needs processing) */
+static union rdma_cqe *get_cqe(struct qedr_cq *cq)
+{
+       return cq->latest_cqe;
+}
+
+/* For FMRs we must bump the 'completed' counter used by the FMR
+ * algorithm that decides whether a PBL can be freed or not.
+ * This must happen whether or not the work request was signaled. For
+ * that purpose this function is called from the path that skips a WR's
+ * completion, to make sure we don't miss it (this FMR operation may
+ * not have been signaled).
+ */
+static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
+{
+       if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
+               qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
+}
+
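+/* Drain SQ completions up to hw_cons: emit a WC for every signaled WR
+ * (or for all WRs when 'force' is set, as on a flush), and consume the
+ * matching chain elements either way.
+ */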
+static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
+                      struct qedr_cq *cq, int num_entries,
+                      struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
+                      int force)
+{
+       u16 cnt = 0;
+
+       while (num_entries && qp->sq.wqe_cons != hw_cons) {
+               if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
+                       qedr_chk_if_fmr(qp);
+                       /* skip WC */
+                       goto next_cqe;
+               }
+
+               /* fill WC */
+               wc->status = status;
+               wc->vendor_err = 0;
+               wc->wc_flags = 0;
+               wc->src_qp = qp->id;
+               wc->qp = &qp->ibqp;
+
+               wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
+               wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
+
+               switch (wc->opcode) {
+               case IB_WC_RDMA_WRITE:
+                       wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
+                       break;
+               case IB_WC_COMP_SWAP:
+               case IB_WC_FETCH_ADD:
+                       wc->byte_len = 8;
+                       break;
+               case IB_WC_REG_MR:
+                       qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
+                       break;
+               default:
+                       break;
+               }
+
+               num_entries--;
+               wc++;
+               cnt++;
+next_cqe:
+               while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
+                       qed_chain_consume(&qp->sq.pbl);
+               qedr_inc_sw_cons(&qp->sq);
+       }
+
+       return cnt;
+}
+
+static int qedr_poll_cq_req(struct qedr_dev *dev,
+                           struct qedr_qp *qp, struct qedr_cq *cq,
+                           int num_entries, struct ib_wc *wc,
+                           struct rdma_cqe_requester *req)
+{
+       int cnt = 0;
+
+       switch (req->status) {
+       case RDMA_CQE_REQ_STS_OK:
+               cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
+                                 IB_WC_SUCCESS, 0);
+               break;
+       case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
+               if (qp->state != QED_ROCE_QP_STATE_ERR)
+                       DP_ERR(dev,
+                              "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+                              cq->icid, qp->icid);
+               cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
+                                 IB_WC_WR_FLUSH_ERR, 1);
+               break;
+       default:
+               /* Process all WQEs before the consumer */
+               qp->state = QED_ROCE_QP_STATE_ERR;
+               cnt = process_req(dev, qp, cq, num_entries, wc,
+                                 req->sq_cons - 1, IB_WC_SUCCESS, 0);
+               wc += cnt;
+               /* if we have extra WC fill it with actual error info */
+               if (cnt < num_entries) {
+                       enum ib_wc_status wc_status;
+
+                       switch (req->status) {
+                       case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
+                               DP_ERR(dev,
+                                      "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+                                      cq->icid, qp->icid);
+                               wc_status = IB_WC_BAD_RESP_ERR;
+                               break;
+                       case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
+                               DP_ERR(dev,
+                                      "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+                                      cq->icid, qp->icid);
+                               wc_status = IB_WC_LOC_LEN_ERR;
+                               break;
+                       case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
+                               DP_ERR(dev,
+                                      "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+                                      cq->icid, qp->icid);
+                               wc_status = IB_WC_LOC_QP_OP_ERR;
+                               break;
+                       case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
+                               DP_ERR(dev,
+                                      "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+                                      cq->icid, qp->icid);
+                               wc_status = IB_WC_LOC_PROT_ERR;
+                               break;
+                       case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
+                               DP_ERR(dev,
+                                      "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+                                      cq->icid, qp->icid);
+                               wc_status = IB_WC_MW_BIND_ERR;
+                               break;
+                       case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
+                               DP_ERR(dev,
+                                      "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+                                      cq->icid, qp->icid);
+                               wc_status = IB_WC_REM_INV_REQ_ERR;
+                               break;
+                       case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
+                               DP_ERR(dev,
+                                      "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+                                      cq->icid, qp->icid);
+                               wc_status = IB_WC_REM_ACCESS_ERR;
+                               break;
+                       case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
+                               DP_ERR(dev,
+                                      "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+                                      cq->icid, qp->icid);
+                               wc_status = IB_WC_REM_OP_ERR;
+                               break;
+                       case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
+                               DP_ERR(dev,
+                                      "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+                                      cq->icid, qp->icid);
+                               wc_status = IB_WC_RNR_RETRY_EXC_ERR;
+                               break;
+                       case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
+                               DP_ERR(dev,
+                                      "Error: POLL CQ with RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+                                      cq->icid, qp->icid);
+                               wc_status = IB_WC_RETRY_EXC_ERR;
+                               break;
+                       default:
+                               DP_ERR(dev,
+                                      "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+                                      cq->icid, qp->icid);
+                               wc_status = IB_WC_GENERAL_ERR;
+                       }
+                       cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
+                                          wc_status, 1);
+               }
+       }
+
+       return cnt;
+}
+
+static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
+                              struct qedr_cq *cq, struct ib_wc *wc,
+                              struct rdma_cqe_responder *resp, u64 wr_id)
+{
+       enum ib_wc_status wc_status = IB_WC_SUCCESS;
+       u8 flags;
+
+       wc->opcode = IB_WC_RECV;
+       wc->wc_flags = 0;
+
+       switch (resp->status) {
+       case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
+               wc_status = IB_WC_LOC_ACCESS_ERR;
+               break;
+       case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
+               wc_status = IB_WC_LOC_LEN_ERR;
+               break;
+       case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
+               wc_status = IB_WC_LOC_QP_OP_ERR;
+               break;
+       case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
+               wc_status = IB_WC_LOC_PROT_ERR;
+               break;
+       case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
+               wc_status = IB_WC_MW_BIND_ERR;
+               break;
+       case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
+               wc_status = IB_WC_REM_INV_RD_REQ_ERR;
+               break;
+       case RDMA_CQE_RESP_STS_OK:
+               wc_status = IB_WC_SUCCESS;
+               wc->byte_len = le32_to_cpu(resp->length);
+
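+               /* QEDR_RESP_RDMA_IMM covers both the RDMA and IMM flag
+                * bits (see the QEDR_RESP_* defines in qedr.h); IMM set
+                * without RDMA denotes a plain receive with immediate.
+                */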
+               flags = resp->flags & QEDR_RESP_RDMA_IMM;
+
+               if (flags == QEDR_RESP_RDMA_IMM)
+                       wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
+
+               if (flags == QEDR_RESP_RDMA_IMM || flags == QEDR_RESP_IMM) {
+                       wc->ex.imm_data =
+                               le32_to_cpu(resp->imm_data_or_inv_r_Key);
+                       wc->wc_flags |= IB_WC_WITH_IMM;
+               }
+               break;
+       default:
+               wc_status = IB_WC_GENERAL_ERR;
+               DP_ERR(dev, "Invalid CQE status detected\n");
+       }
+
+       /* fill WC */
+       wc->status = wc_status;
+       wc->vendor_err = 0;
+       wc->src_qp = qp->id;
+       wc->qp = &qp->ibqp;
+       wc->wr_id = wr_id;
+}
+
+static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
+                           struct qedr_cq *cq, struct ib_wc *wc,
+                           struct rdma_cqe_responder *resp)
+{
+       u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
+
+       __process_resp_one(dev, qp, cq, wc, resp, wr_id);
+
+       while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
+               qed_chain_consume(&qp->rq.pbl);
+       qedr_inc_sw_cons(&qp->rq);
+
+       return 1;
+}
+
+static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
+                             int num_entries, struct ib_wc *wc, u16 hw_cons)
+{
+       u16 cnt = 0;
+
+       while (num_entries && qp->rq.wqe_cons != hw_cons) {
+               /* fill WC */
+               wc->status = IB_WC_WR_FLUSH_ERR;
+               wc->vendor_err = 0;
+               wc->wc_flags = 0;
+               wc->src_qp = qp->id;
+               wc->byte_len = 0;
+               wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
+               wc->qp = &qp->ibqp;
+               num_entries--;
+               wc++;
+               cnt++;
+               while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
+                       qed_chain_consume(&qp->rq.pbl);
+               qedr_inc_sw_cons(&qp->rq);
+       }
+
+       return cnt;
+}
+
+static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
+                                struct rdma_cqe_responder *resp, int *update)
+{
+       if (le16_to_cpu(resp->rq_cons) == qp->rq.wqe_cons) {
+               consume_cqe(cq);
+               *update |= 1;
+       }
+}
+
+static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
+                            struct qedr_cq *cq, int num_entries,
+                            struct ib_wc *wc, struct rdma_cqe_responder *resp,
+                            int *update)
+{
+       int cnt;
+
+       if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
+               cnt = process_resp_flush(qp, cq, num_entries, wc,
+                                        le16_to_cpu(resp->rq_cons));
+               try_consume_resp_cqe(cq, qp, resp, update);
+       } else {
+               cnt = process_resp_one(dev, qp, cq, wc, resp);
+               consume_cqe(cq);
+               *update |= 1;
+       }
+
+       return cnt;
+}
+
+static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
+                               struct rdma_cqe_requester *req, int *update)
+{
+       if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
+               consume_cqe(cq);
+               *update |= 1;
+       }
+}
+
+int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+{
+       struct qedr_dev *dev = get_qedr_dev(ibcq->device);
+       struct qedr_cq *cq = get_qedr_cq(ibcq);
+       union rdma_cqe *cqe = cq->latest_cqe;
+       u32 old_cons, new_cons;
+       unsigned long flags;
+       int update = 0;
+       int done = 0;
+
+       if (cq->cq_type == QEDR_CQ_TYPE_GSI)
+               return qedr_gsi_poll_cq(ibcq, num_entries, wc);
+
+       spin_lock_irqsave(&cq->cq_lock, flags);
+       old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
+       while (num_entries && is_valid_cqe(cq, cqe)) {
+               struct qedr_qp *qp;
+               int cnt = 0;
+
+               /* prevent speculative reads of any field of CQE */
+               rmb();
+
+               qp = cqe_get_qp(cqe);
+               if (!qp) {
+                       WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
+                       break;
+               }
+
+               wc->qp = &qp->ibqp;
+
+               switch (cqe_get_type(cqe)) {
+               case RDMA_CQE_TYPE_REQUESTER:
+                       cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
+                                              &cqe->req);
+                       try_consume_req_cqe(cq, qp, &cqe->req, &update);
+                       break;
+               case RDMA_CQE_TYPE_RESPONDER_RQ:
+                       cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
+                                               &cqe->resp, &update);
+                       break;
+               case RDMA_CQE_TYPE_INVALID:
+               default:
+                       DP_ERR(dev, "Error: invalid CQE type = %d\n",
+                              cqe_get_type(cqe));
+               }
+               num_entries -= cnt;
+               wc += cnt;
+               done += cnt;
+
+               cqe = get_cqe(cq);
+       }
+       new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
+
+       cq->cq_cons += new_cons - old_cons;
+
+       if (update)
+               /* doorbell notifies about the latest VALID entry,
+                * but the chain already points to the next INVALID one
+                */
+               doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
+
+       spin_unlock_irqrestore(&cq->cq_lock, flags);
+       return done;
+}
+
+int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
+                    u8 port_num,
+                    const struct ib_wc *in_wc,
+                    const struct ib_grh *in_grh,
+                    const struct ib_mad_hdr *mad_hdr,
+                    size_t in_mad_size, struct ib_mad_hdr *out_mad,
+                    size_t *out_mad_size, u16 *out_mad_pkey_index)
+{
+       struct qedr_dev *dev = get_qedr_dev(ibdev);
+
+       DP_DEBUG(dev, QEDR_MSG_GSI,
+                "QEDR_PROCESS_MAD in_mad %x %x %x %x %x %x %x %x\n",
+                mad_hdr->attr_id, mad_hdr->base_version, mad_hdr->attr_mod,
+                mad_hdr->class_specific, mad_hdr->class_version,
+                mad_hdr->method, mad_hdr->mgmt_class, mad_hdr->status);
+       return IB_MAD_RESULT_SUCCESS;
+}
+
+int qedr_port_immutable(struct ib_device *ibdev, u8 port_num,
+                       struct ib_port_immutable *immutable)
+{
+       struct ib_port_attr attr;
+       int err;
+
+       err = qedr_query_port(ibdev, port_num, &attr);
+       if (err)
+               return err;
+
+       immutable->pkey_tbl_len = attr.pkey_tbl_len;
+       immutable->gid_tbl_len = attr.gid_tbl_len;
+       immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
+                                   RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+       immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+
+       return 0;
+}
diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h
new file mode 100644 (file)
index 0000000..a9b5e67
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/verbs.h
@@ -0,0 +1,101 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __QEDR_VERBS_H__
+#define __QEDR_VERBS_H__
+
+int qedr_query_device(struct ib_device *ibdev,
+                     struct ib_device_attr *attr, struct ib_udata *udata);
+int qedr_query_port(struct ib_device *, u8 port, struct ib_port_attr *props);
+int qedr_modify_port(struct ib_device *, u8 port, int mask,
+                    struct ib_port_modify *props);
+
+int qedr_query_gid(struct ib_device *, u8 port, int index, union ib_gid *gid);
+
+int qedr_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey);
+
+struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *, struct ib_udata *);
+int qedr_dealloc_ucontext(struct ib_ucontext *);
+
+int qedr_mmap(struct ib_ucontext *, struct vm_area_struct *vma);
+int qedr_del_gid(struct ib_device *device, u8 port_num,
+                unsigned int index, void **context);
+int qedr_add_gid(struct ib_device *device, u8 port_num,
+                unsigned int index, const union ib_gid *gid,
+                const struct ib_gid_attr *attr, void **context);
+struct ib_pd *qedr_alloc_pd(struct ib_device *,
+                           struct ib_ucontext *, struct ib_udata *);
+int qedr_dealloc_pd(struct ib_pd *pd);
+
+struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
+                            const struct ib_cq_init_attr *attr,
+                            struct ib_ucontext *ib_ctx,
+                            struct ib_udata *udata);
+int qedr_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
+int qedr_destroy_cq(struct ib_cq *);
+int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
+struct ib_qp *qedr_create_qp(struct ib_pd *, struct ib_qp_init_attr *attrs,
+                            struct ib_udata *);
+int qedr_modify_qp(struct ib_qp *, struct ib_qp_attr *attr,
+                  int attr_mask, struct ib_udata *udata);
+int qedr_query_qp(struct ib_qp *, struct ib_qp_attr *qp_attr,
+                 int qp_attr_mask, struct ib_qp_init_attr *);
+int qedr_destroy_qp(struct ib_qp *ibqp);
+
+struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr);
+int qedr_destroy_ah(struct ib_ah *ibah);
+
+int qedr_dereg_mr(struct ib_mr *);
+struct ib_mr *qedr_get_dma_mr(struct ib_pd *, int acc);
+
+struct ib_mr *qedr_reg_user_mr(struct ib_pd *, u64 start, u64 length,
+                              u64 virt, int acc, struct ib_udata *);
+
+int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+                  int sg_nents, unsigned int *sg_offset);
+
+struct ib_mr *qedr_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+                           u32 max_num_sg);
+int qedr_poll_cq(struct ib_cq *, int num_entries, struct ib_wc *wc);
+int qedr_post_send(struct ib_qp *, struct ib_send_wr *,
+                  struct ib_send_wr **bad_wr);
+int qedr_post_recv(struct ib_qp *, struct ib_recv_wr *,
+                  struct ib_recv_wr **bad_wr);
+int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
+                    u8 port_num, const struct ib_wc *in_wc,
+                    const struct ib_grh *in_grh,
+                    const struct ib_mad_hdr *in_mad,
+                    size_t in_mad_size, struct ib_mad_hdr *out_mad,
+                    size_t *out_mad_size, u16 *out_mad_pkey_index);
+
+int qedr_port_immutable(struct ib_device *ibdev, u8 port_num,
+                       struct ib_port_immutable *immutable);
+#endif
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index 6ba48406899e75286db243a31985e78c62b4bdc1..73bbf1acd6af86cfdf148e7f379470e4f21ba556 100644 (file)
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -88,6 +88,9 @@ config QED
        ---help---
          This enables the support for ...
 
+config QED_LL2
+       bool
+
 config QED_SRIOV
        bool "QLogic QED 25/40/100Gb SR-IOV support"
        depends on QED && PCI_IOV
@@ -101,7 +104,29 @@ config QED_SRIOV
 config QEDE
        tristate "QLogic QED 25/40/100Gb Ethernet NIC"
        depends on QED
+       imply PTP_1588_CLOCK
        ---help---
          This enables the support for ...
 
+config QED_RDMA
+       bool
+
+config INFINIBAND_QEDR
+       tristate "QLogic qede RoCE sources [debug]"
+       depends on QEDE && 64BIT
+       select QED_LL2
+       select QED_RDMA
+       default n
+       ---help---
+         This provides a temporary node that allows the compilation
+         and logical testing of the InfiniBand over Ethernet support
+         for QLogic QED. This would be replaced by the 'real' option
+         once the QEDR driver is added [+relocated].
+
+config QED_ISCSI
+       bool
+
+config QED_FCOE
+       bool
+
 endif # NET_VENDOR_QLOGIC
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index d1f157e439cf4d7214b46be0ed6b620b54ff7c5d..974929dcc74ea510a81dd85a00d48619651f4ce2 100644 (file)
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -2,5 +2,9 @@ obj-$(CONFIG_QED) := qed.o
 
 qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
         qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o \
-        qed_selftest.o qed_dcbx.o
+        qed_selftest.o qed_dcbx.o qed_debug.o qed_ptp.o
 qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
+qed-$(CONFIG_QED_LL2) += qed_ll2.o
+qed-$(CONFIG_QED_RDMA) += qed_roce.o
+qed-$(CONFIG_QED_ISCSI) += qed_iscsi.o qed_ooo.o
+qed-$(CONFIG_QED_FCOE) += qed_fcoe.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 45ab746765737ae2d9af3d5cb77c5343b9aa0c04..c539ba138db9c65f33b868eeadc0fb2e330dfbd5 100644 (file)
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef _QED_H
 #include <linux/zlib.h>
 #include <linux/hashtable.h>
 #include <linux/qed/qed_if.h>
+#include "qed_debug.h"
 #include "qed_hsi.h"
 
 extern const struct qed_common_ops qed_common_ops_pass;
-#define DRV_MODULE_VERSION "8.7.1.20"
+
+#define QED_MAJOR_VERSION               8
+#define QED_MINOR_VERSION               10
+#define QED_REVISION_VERSION            10
+#define QED_ENGINEERING_VERSION 21
+
+#define QED_VERSION                                             \
+       ((QED_MAJOR_VERSION << 24) | (QED_MINOR_VERSION << 16) | \
+        (QED_REVISION_VERSION << 8) | QED_ENGINEERING_VERSION)
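+/* e.g. 8.10.10.21 packs to 0x080a0a15, major in the most significant byte */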
+
+#define STORM_FW_VERSION                                      \
+       ((FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | \
+        (FW_REVISION_VERSION << 8) | FW_ENGINEERING_VERSION)
 
 #define MAX_HWFNS_PER_DEVICE    (4)
 #define NAME_SIZE 16
@@ -34,6 +71,9 @@ extern const struct qed_common_ops qed_common_ops_pass;
 
 #define QED_WFQ_UNIT   100
 
+#define QED_WID_SIZE            (1024)
+#define QED_PF_DEMS_SIZE        (4)
+
 /* cau states */
 enum qed_coalescing_mode {
        QED_COAL_MODE_DISABLE,
@@ -42,9 +82,28 @@ enum qed_coalescing_mode {
 
 struct qed_eth_cb_ops;
 struct qed_dev_info;
+union qed_mcp_protocol_stats;
+enum qed_mcp_protocol_type;
 
 /* helpers */
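+/* Each MFW field supplies a pre-shifted <field>_MASK and a <field>_SHIFT define */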
+#define QED_MFW_GET_FIELD(name, field) \
+       (((name) & (field ## _MASK)) >> (field ## _SHIFT))
+
+#define QED_MFW_SET_FIELD(name, field, value)                                 \
+       do {                                                                   \
+               (name)  &= ~(field ## _MASK);                                  \
+               (name)  |= (((value) << (field ## _SHIFT)) & (field ## _MASK));\
+       } while (0)
+
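+/* PF doorbell address: DEMS selector plus the CID scaled by QED_PF_DEMS_SIZE */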
 static inline u32 qed_db_addr(u32 cid, u32 DEMS)
+{
+       u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
+                     (cid * QED_PF_DEMS_SIZE);
+
+       return db_addr;
+}
+
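+/* VF doorbells keep the legacy ICID-based address layout */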
+static inline u32 qed_db_addr_vf(u32 cid, u32 DEMS)
 {
        u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
                      FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid);
@@ -69,6 +128,7 @@ struct qed_sb_info;
 struct qed_sb_attn_info;
 struct qed_cxt_mngr;
 struct qed_sb_sp_info;
+struct qed_ll2_info;
 struct qed_mcp_info;
 
 struct qed_rt_data {
@@ -127,6 +187,7 @@ struct qed_tunn_update_params {
  */
 enum qed_pci_personality {
        QED_PCI_ETH,
+       QED_PCI_FCOE,
        QED_PCI_ISCSI,
        QED_PCI_ETH_ROCE,
        QED_PCI_DEFAULT /* default in shmem */
@@ -139,7 +200,10 @@ struct qed_qm_iids {
        u32 tids;
 };
 
-enum QED_RESOURCES {
+/* HW / FW resources, output of features supported below, most information
+ * is received from MFW.
+ */
+enum qed_resources {
        QED_SB,
        QED_L2_QUEUE,
        QED_VPORT,
@@ -148,13 +212,22 @@ enum QED_RESOURCES {
        QED_RL,
        QED_MAC,
        QED_VLAN,
+       QED_RDMA_CNQ_RAM,
        QED_ILT,
+       QED_LL2_QUEUE,
+       QED_CMDQS_CQS,
+       QED_RDMA_STATS_QUEUE,
+       QED_BDQ,
        QED_MAX_RESC,
 };
 
 enum QED_FEATURE {
        QED_PF_L2_QUE,
        QED_VF,
+       QED_RDMA_CNQ,
+       QED_ISCSI_CQ,
+       QED_FCOE_CQ,
+       QED_VF_L2_QUE,
        QED_MAX_FEATURES,
 };
 
@@ -167,15 +240,23 @@ enum QED_PORT_MODE {
        QED_PORT_MODE_DE_4X20G,
        QED_PORT_MODE_DE_1X40G,
        QED_PORT_MODE_DE_2X25G,
-       QED_PORT_MODE_DE_1X25G
+       QED_PORT_MODE_DE_1X25G,
+       QED_PORT_MODE_DE_4X25G,
+       QED_PORT_MODE_DE_2X10G,
 };
 
 enum qed_dev_cap {
        QED_DEV_CAP_ETH,
+       QED_DEV_CAP_FCOE,
        QED_DEV_CAP_ISCSI,
        QED_DEV_CAP_ROCE,
 };
 
+enum qed_wol_support {
+       QED_WOL_SUPPORT_NONE,
+       QED_WOL_SUPPORT_PME,
+};
+
 struct qed_hw_info {
        /* PCI personality */
        enum qed_pci_personality        personality;
@@ -191,9 +272,14 @@ struct qed_hw_info {
                                 RESC_NUM(_p_hwfn, resc))
 #define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])
 
-       u8                              num_tc;
+       /* Amount of traffic classes HW supports */
+       u8 num_hw_tc;
+
+       /* Amount of TCs which should be active according to DCBx or upper
+        * layer driver configuration.
+        */
+       u8 num_active_tc;
        u8                              offload_tc;
-       u8                              non_offload_tc;
 
        u32                             concrete_fid;
        u16                             opaque_fid;
@@ -201,21 +287,19 @@ struct qed_hw_info {
        u32                             part_num[4];
 
        unsigned char                   hw_mac_addr[ETH_ALEN];
+       u64                             node_wwn;
+       u64                             port_wwn;
+
+       u16                             num_fcoe_conns;
 
        struct qed_igu_info             *p_igu_info;
 
        u32                             port_mode;
        u32                             hw_mode;
        unsigned long           device_capabilities;
-};
-
-struct qed_hw_cid_data {
-       u32     cid;
-       bool    b_cid_allocated;
+       u16                             mtu;
 
-       /* Additional identifiers */
-       u16     opaque_fid;
-       u8      vport_id;
+       enum qed_wol_support b_wol_support;
 };
 
 /* maximum size of read/write commands (HW limit) */
@@ -258,15 +342,19 @@ struct qed_qm_info {
        struct init_qm_port_params      *qm_port_params;
        u16                             start_pq;
        u8                              start_vport;
-       u8                              pure_lb_pq;
-       u8                              offload_pq;
-       u8                              pure_ack_pq;
-       u8 ooo_pq;
-       u8                              vf_queues_offset;
+       u16                              pure_lb_pq;
+       u16                             offload_pq;
+       u16                             low_latency_pq;
+       u16                             pure_ack_pq;
+       u16                             ooo_pq;
+       u16                             first_vf_pq;
+       u16                             first_mcos_pq;
+       u16                             first_rl_pq;
        u16                             num_pqs;
        u16                             num_vf_pqs;
        u8                              num_vports;
        u8                              max_phys_tcs_per_port;
+       u8                              ooo_tc;
        bool                            pf_rl_en;
        bool                            pf_wfq_en;
        bool                            vport_rl_en;
@@ -297,6 +385,12 @@ struct qed_fw_data {
        u32                     init_ops_size;
 };
 
+#define DRV_MODULE_VERSION                   \
+       __stringify(QED_MAJOR_VERSION) "."    \
+       __stringify(QED_MINOR_VERSION) "."    \
+       __stringify(QED_REVISION_VERSION) "." \
+       __stringify(QED_ENGINEERING_VERSION)
+
 struct qed_simd_fp_handler {
        void    *token;
        void    (*func)(void *);
@@ -308,7 +402,8 @@ struct qed_hwfn {
 #define IS_LEAD_HWFN(edev)              (!((edev)->my_id))
        u8                              rel_pf_id;      /* Relative to engine*/
        u8                              abs_pf_id;
-#define QED_PATH_ID(_p_hwfn)           ((_p_hwfn)->abs_pf_id & 1)
+#define QED_PATH_ID(_p_hwfn) \
+       (QED_IS_K2((_p_hwfn)->cdev) ? 0 : ((_p_hwfn)->abs_pf_id & 1))
        u8                              port_id;
        bool                            b_active;
 
@@ -357,6 +452,12 @@ struct qed_hwfn {
        struct qed_sb_attn_info         *p_sb_attn;
 
        /* Protocol related */
+       bool                            using_ll2;
+       struct qed_ll2_info             *p_ll2_info;
+       struct qed_ooo_info             *p_ooo_info;
+       struct qed_rdma_info            *p_rdma_info;
+       struct qed_iscsi_info           *p_iscsi_info;
+       struct qed_fcoe_info            *p_fcoe_info;
        struct qed_pf_params            pf_params;
 
        bool b_rdma_enabled_in_prs;
@@ -381,9 +482,6 @@ struct qed_hwfn {
 
        struct qed_dcbx_info            *p_dcbx_info;
 
-       struct qed_hw_cid_data          *p_tx_cids;
-       struct qed_hw_cid_data          *p_rx_cids;
-
        struct qed_dmae_info            dmae_info;
 
        /* QM init */
@@ -393,6 +491,23 @@ struct qed_hwfn {
        /* Buffer for unzipping firmware data */
        void                            *unzip_buf;
 
+       struct dbg_tools_data           dbg_info;
+
+       /* PWM region specific data */
+       u32                             dpi_size;
+       u32                             dpi_count;
+
+       /* This is used to calculate the doorbell address */
+       u32 dpi_start_offset;
+
+       /* If one of the following is set then EDPM shouldn't be used */
+       u8 dcbx_no_edpm;
+       u8 db_bar_no_edpm;
+
+       struct qed_ptt *p_arfs_ptt;
+
+       /* p_ptp_ptt is valid for leading HWFN only */
+       struct qed_ptt *p_ptp_ptt;
        struct qed_simd_fp_handler      simd_proto_handler[64];
 
 #ifdef CONFIG_QED_SRIOV
@@ -402,6 +517,7 @@ struct qed_hwfn {
 #endif
 
        struct z_stream_s               *stream;
+       struct qed_roce_ll2_info        *ll2;
 };
 
 struct pci_params {
@@ -426,6 +542,21 @@ struct qed_int_params {
        bool                    fp_initialized;
        u8                      fp_msix_base;
        u8                      fp_msix_cnt;
+       u8                      rdma_msix_base;
+       u8                      rdma_msix_cnt;
+};
+
+struct qed_dbg_feature {
+       struct dentry *dentry;
+       u8 *dump_buf;
+       u32 buf_size;
+       u32 dumped_dwords;
+};
+
+struct qed_dbg_params {
+       struct qed_dbg_feature features[DBG_FEATURE_NUM];
+       u8 engine_for_debug;
+       bool print_data;
 };
 
 struct qed_dev {
@@ -433,21 +564,24 @@ struct qed_dev {
        u8      dp_level;
        char    name[NAME_SIZE];
 
-       u8      type;
-#define QED_DEV_TYPE_BB (0 << 0)
-#define QED_DEV_TYPE_AH BIT(0)
+       enum    qed_dev_type type;
 /* Translate type/revision combo into the proper conditions */
 #define QED_IS_BB(dev)  ((dev)->type == QED_DEV_TYPE_BB)
 #define QED_IS_BB_A0(dev)       (QED_IS_BB(dev) && \
                                 CHIP_REV_IS_A0(dev))
 #define QED_IS_BB_B0(dev)       (QED_IS_BB(dev) && \
                                 CHIP_REV_IS_B0(dev))
+#define QED_IS_AH(dev)  ((dev)->type == QED_DEV_TYPE_AH)
+#define QED_IS_K2(dev)  QED_IS_AH(dev)
 
 #define QED_GET_TYPE(dev)       (QED_IS_BB_A0(dev) ? CHIP_BB_A0 : \
                                 QED_IS_BB_B0(dev) ? CHIP_BB_B0 : CHIP_K2)
 
        u16     vendor_id;
        u16     device_id;
+#define QED_DEV_ID_MASK                0xff00
+#define QED_DEV_ID_MASK_BB     0x1600
+#define QED_DEV_ID_MASK_AH     0x8000
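+/* High byte of the PCI device ID distinguishes BB (0x16xx) from AH (0x80xx) */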
 
        u16     chip_num;
 #define CHIP_NUM_MASK                   0xffff
@@ -485,7 +619,9 @@ struct qed_dev {
        u8                              mcp_rev;
        u8                              boot_mode;
 
-       u8                              wol;
+       /* WoL related configurations */
+       u8 wol_config;
+       u8 wol_mac[ETH_ALEN];
 
        u32                             int_mode;
        enum qed_coalescing_mode        int_coalescing_mode;
@@ -517,7 +653,6 @@ struct qed_dev {
 
        bool                            b_is_vf;
        u32                             drv_type;
-
        struct qed_eth_stats            *reset_stats;
        struct qed_fw_data              *fw_data;
 
@@ -526,6 +661,8 @@ struct qed_dev {
        /* Linux specific here */
        struct  qede_dev                *edev;
        struct  pci_dev                 *pdev;
+       u32 flags;
+#define QED_FLAG_STORAGE_STARTED       (BIT(0))
        int                             msg_enable;
 
        struct pci_params               pci_params;
@@ -534,21 +671,41 @@ struct qed_dev {
 
        u8                              protocol;
 #define IS_QED_ETH_IF(cdev)     ((cdev)->protocol == QED_PROTOCOL_ETH)
+#define IS_QED_FCOE_IF(cdev)    ((cdev)->protocol == QED_PROTOCOL_FCOE)
 
        /* Callbacks to protocol driver */
        union {
                struct qed_common_cb_ops        *common;
                struct qed_eth_cb_ops           *eth;
+               struct qed_fcoe_cb_ops          *fcoe;
+               struct qed_iscsi_cb_ops         *iscsi;
        } protocol_ops;
        void                            *ops_cookie;
 
+       struct qed_dbg_params           dbg_params;
+
+#ifdef CONFIG_QED_LL2
+       struct qed_cb_ll2_info          *ll2;
+       u8                              ll2_mac_address[ETH_ALEN];
+#endif
+       DECLARE_HASHTABLE(connections, 10);
        const struct firmware           *firmware;
+
+       u32 rdma_max_sge;
+       u32 rdma_max_inline;
+       u32 rdma_max_srq_sge;
 };
 
-#define NUM_OF_VFS(dev)         MAX_NUM_VFS_BB
-#define NUM_OF_L2_QUEUES(dev)  MAX_NUM_L2_QUEUES_BB
-#define NUM_OF_SBS(dev)         MAX_SB_PER_PATH_BB
-#define NUM_OF_ENG_PFS(dev)     MAX_NUM_PFS_BB
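+/* HW resource maximums differ between BB and AH/K2 chips */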
+#define NUM_OF_VFS(dev)         (QED_IS_BB(dev) ? MAX_NUM_VFS_BB \
+                                               : MAX_NUM_VFS_K2)
+#define NUM_OF_L2_QUEUES(dev)   (QED_IS_BB(dev) ? MAX_NUM_L2_QUEUES_BB \
+                                               : MAX_NUM_L2_QUEUES_K2)
+#define NUM_OF_PORTS(dev)       (QED_IS_BB(dev) ? MAX_NUM_PORTS_BB \
+                                               : MAX_NUM_PORTS_K2)
+#define NUM_OF_SBS(dev)         (QED_IS_BB(dev) ? MAX_SB_PER_PATH_BB \
+                                               : MAX_SB_PER_PATH_K2)
+#define NUM_OF_ENG_PFS(dev)     (QED_IS_BB(dev) ? MAX_NUM_PFS_BB \
+                                               : MAX_NUM_PFS_K2)
 
 /**
  * @brief qed_concrete_to_sw_fid - get the sw function id from
@@ -579,9 +736,30 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
 #define OOO_LB_TC 9
 
 int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
-void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate);
+void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
+                                        struct qed_ptt *p_ptt,
+                                        u32 min_pf_rate);
 
 void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+int qed_device_num_engines(struct qed_dev *cdev);
+
+/* Flags for indication of required queues */
+#define PQ_FLAGS_RLS    (BIT(0))
+#define PQ_FLAGS_MCOS   (BIT(1))
+#define PQ_FLAGS_LB     (BIT(2))
+#define PQ_FLAGS_OOO    (BIT(3))
+#define PQ_FLAGS_ACK    (BIT(4))
+#define PQ_FLAGS_OFLD   (BIT(5))
+#define PQ_FLAGS_VFS    (BIT(6))
+#define PQ_FLAGS_LLT    (BIT(7))
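+/* Each PF requests a subset of these PQs based on its personality */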
+
+/* physical queue index for cm context initialization */
+u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags);
+u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc);
+u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf);
+
 #define QED_LEADING_HWFN(dev)   (&dev->hwfns[0])
 
 /* Other Linux specific common definitions */
@@ -606,7 +784,10 @@ void qed_link_update(struct qed_hwfn *hwfn);
 u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
                   u32 input_len, u8 *input_buf,
                   u32 max_size, u8 *unzip_buf);
-
+void qed_get_protocol_stats(struct qed_dev *cdev,
+                           enum qed_mcp_protocol_type type,
+                           union qed_mcp_protocol_stats *stats);
 int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
+void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn);
 
 #endif /* _QED_H */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 1c35f376143e3f4d0e6605937423decacd277f2c..b3aaa985956e4a937af56ccf2410cf9eeb884985 100644 (file)
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #include <linux/types.h>
@@ -47,8 +71,8 @@
 #define TM_ALIGN        BIT(TM_SHIFT)
 #define TM_ELEM_SIZE    4
 
-/* ILT constants */
-#define ILT_DEFAULT_HW_P_SIZE          3
+#define ILT_DEFAULT_HW_P_SIZE  4
+
 #define ILT_PAGE_IN_BYTES(hw_p_size)   (1U << ((hw_p_size) + 12))
 #define ILT_CFG_REG(cli, reg)  PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
 
@@ -65,12 +89,14 @@ union conn_context {
        struct core_conn_context core_ctx;
        struct eth_conn_context eth_ctx;
        struct iscsi_conn_context iscsi_ctx;
+       struct fcoe_conn_context fcoe_ctx;
        struct roce_conn_context roce_ctx;
 };
 
-/* TYPE-0 task context - iSCSI */
+/* TYPE-0 task context - iSCSI, FCOE */
 union type0_task_context {
        struct iscsi_task_context iscsi_ctx;
+       struct fcoe_task_context fcoe_ctx;
 };
 
 /* TYPE-1 task context - ROCE */
@@ -193,9 +219,6 @@ struct qed_cxt_mngr {
         */
        u32                             vf_count;
 
-       /* total number of SRQ's for this hwfn */
-       u32 srq_count;
-
        /* Acquired CIDs */
        struct qed_cid_acquired_map     acquired[MAX_CONN_TYPES];
 
@@ -211,19 +234,31 @@ struct qed_cxt_mngr {
        u32 t2_num_pages;
        u64 first_free;
        u64 last_free;
+
+       /* total number of SRQ's for this hwfn */
+       u32 srq_count;
+
+       /* Maximal number of L2 steering filters */
+       u32 arfs_count;
 };
 static bool src_proto(enum protocol_type type)
 {
        return type == PROTOCOLID_ISCSI ||
-              type == PROTOCOLID_ROCE;
+              type == PROTOCOLID_FCOE;
 }
 
 static bool tm_cid_proto(enum protocol_type type)
 {
        return type == PROTOCOLID_ISCSI ||
+              type == PROTOCOLID_FCOE ||
               type == PROTOCOLID_ROCE;
 }
 
+static bool tm_tid_proto(enum protocol_type type)
+{
+       return type == PROTOCOLID_FCOE;
+}
+
 /* counts the iids for the CDU/CDUC ILT client configuration */
 struct qed_cdu_iids {
        u32 pf_cids;
@@ -259,6 +294,9 @@ static void qed_cxt_src_iids(struct qed_cxt_mngr *p_mngr,
                iids->pf_cids += p_mngr->conn_cfg[i].cid_count;
                iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf;
        }
+
+       /* Add L2 filtering filters in addition */
+       iids->pf_cids += p_mngr->arfs_count;
 }
 
 /* counts the iids for the Timers block configuration */
@@ -270,18 +308,52 @@ struct qed_tm_iids {
        u32 per_vf_tids;
 };
 
-static void qed_cxt_tm_iids(struct qed_cxt_mngr *p_mngr,
+static void qed_cxt_tm_iids(struct qed_hwfn *p_hwfn,
+                           struct qed_cxt_mngr *p_mngr,
                            struct qed_tm_iids *iids)
 {
-       u32 i, j;
-
-       for (i = 0; i < MAX_CONN_TYPES; i++) {
+       bool tm_vf_required = false;
+       bool tm_required = false;
+       int i, j;
+
+       /* Timers are a special case -> we don't count how many cids require
+        * timers but what's the max cid that will be used by the timer block.
+        * Therefore we traverse in reverse order, and once we hit a protocol
+        * that requires the timers memory, we'll sum all the protocols up
+        * to that one.
+        */
+       for (i = MAX_CONN_TYPES - 1; i >= 0; i--) {
                struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i];
 
-               if (tm_cid_proto(i)) {
+               if (tm_cid_proto(i) || tm_required) {
+                       if (p_cfg->cid_count)
+                               tm_required = true;
+
                        iids->pf_cids += p_cfg->cid_count;
+               }
+
+               if (tm_cid_proto(i) || tm_vf_required) {
+                       if (p_cfg->cids_per_vf)
+                               tm_vf_required = true;
+
                        iids->per_vf_cids += p_cfg->cids_per_vf;
                }
+
+               if (tm_tid_proto(i)) {
+                       struct qed_tid_seg *segs = p_cfg->tid_seg;
+
+                       /* for each segment there is at most one
+                        * protocol for which count is not 0.
+                        */
+                       for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
+                               iids->pf_tids[j] += segs[j].count;
+
+                       /* The last array element is for the VFs. As for PF
+                        * segments there can be only one protocol for
+                        * which this value is not 0.
+                        */
+                       iids->per_vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
+               }
        }
 
        iids->pf_cids = roundup(iids->pf_cids, TM_ALIGN);
@@ -343,14 +415,14 @@ static struct qed_tid_seg *qed_cxt_tid_seg_info(struct qed_hwfn *p_hwfn,
        return NULL;
 }
 
-void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs)
+static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs)
 {
        struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
 
        p_mgr->srq_count = num_srqs;
 }
 
-u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn)
+static u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn)
 {
        struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
 
@@ -372,14 +444,14 @@ static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
                u32 page_sz = p_mgr->clients[ILT_CLI_CDUC].p_size.val;
                u32 cxt_size = CONN_CXT_SIZE(p_hwfn);
                u32 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+               u32 align = elems_per_page * DQ_RANGE_ALIGN;
 
-               p_conn->cid_count = roundup(p_conn->cid_count, elems_per_page);
+               p_conn->cid_count = roundup(p_conn->cid_count, align);
        }
 }
 
-u32 qed_cxt_get_proto_cid_count(struct qed_hwfn                *p_hwfn,
-                               enum protocol_type      type,
-                               u32                     *vf_cid)
+u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
+                               enum protocol_type type, u32 *vf_cid)
 {
        if (vf_cid)
                *vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf;
@@ -405,10 +477,10 @@ u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
        return cnt;
 }
 
-static void
-qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn,
-                           enum protocol_type proto,
-                           u8 seg, u8 seg_type, u32 count, bool has_fl)
+static void qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn,
+                                       enum protocol_type proto,
+                                       u8 seg,
+                                       u8 seg_type, u32 count, bool has_fl)
 {
        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        struct qed_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg];
@@ -420,8 +492,7 @@ qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn,
 
 static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
                                 struct qed_ilt_cli_blk *p_blk,
-                                u32 start_line, u32 total_size,
-                                u32 elem_size)
+                                u32 start_line, u32 total_size, u32 elem_size)
 {
        u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
 
@@ -448,8 +519,7 @@ static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn,
                p_cli->first.val = *p_line;
 
        p_cli->active = true;
-       *p_line += DIV_ROUND_UP(p_blk->total_size,
-                               p_blk->real_size_in_page);
+       *p_line += DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
        p_cli->last.val = *p_line - 1;
 
        DP_VERBOSE(p_hwfn, QED_MSG_ILT,
@@ -479,7 +549,22 @@ static u32 qed_ilt_get_dynamic_line_cnt(struct qed_hwfn *p_hwfn,
        return lines_to_skip;
 }
 
-int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
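+/* ILT computation may run more than once (e.g. after shrinking RDMA tasks
+ * to fit), so reset client/block state before refilling it.
+ */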
+static struct qed_ilt_client_cfg *qed_cxt_set_cli(struct qed_ilt_client_cfg
+                                                 *p_cli)
+{
+       p_cli->active = false;
+       p_cli->first.val = 0;
+       p_cli->last.val = 0;
+       return p_cli;
+}
+
+static struct qed_ilt_cli_blk *qed_cxt_set_blk(struct qed_ilt_cli_blk *p_blk)
+{
+       p_blk->total_size = 0;
+       return p_blk;
+}
+
+int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
 {
        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        u32 curr_line, total, i, task_size, line;
@@ -503,7 +588,8 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
                   p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);
 
        /* CDUC */
-       p_cli = &p_mngr->clients[ILT_CLI_CDUC];
+       p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUC]);
+
        curr_line = p_mngr->pf_start_line;
 
        /* CDUC PF */
@@ -512,7 +598,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
        /* get the counters for the CDUC and QM clients  */
        qed_cxt_cdu_iids(p_mngr, &cdu_iids);
 
-       p_blk = &p_cli->pf_blks[CDUC_BLK];
+       p_blk = qed_cxt_set_blk(&p_cli->pf_blks[CDUC_BLK]);
 
        total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn);
 
@@ -526,7 +612,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
                                                               ILT_CLI_CDUC);
 
        /* CDUC VF */
-       p_blk = &p_cli->vf_blks[CDUC_BLK];
+       p_blk = qed_cxt_set_blk(&p_cli->vf_blks[CDUC_BLK]);
        total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);
 
        qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
@@ -540,7 +626,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
                                     ILT_CLI_CDUC);
 
        /* CDUT PF */
-       p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+       p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUT]);
        p_cli->first.val = curr_line;
 
        /* first the 'working' task memory */
@@ -549,7 +635,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
                if (!p_seg || p_seg->count == 0)
                        continue;
 
-               p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)];
+               p_blk = qed_cxt_set_blk(&p_cli->pf_blks[CDUT_SEG_BLK(i)]);
                total = p_seg->count * p_mngr->task_type_size[p_seg->type];
                qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total,
                                     p_mngr->task_type_size[p_seg->type]);
@@ -564,7 +650,8 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
                if (!p_seg || p_seg->count == 0)
                        continue;
 
-               p_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)];
+               p_blk =
+                   qed_cxt_set_blk(&p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)]);
 
                if (!p_seg->has_fl_mem) {
                        /* The segment is active (total size pf 'working'
@@ -609,7 +696,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
                /* 'working' memory */
                total = p_seg->count * p_mngr->task_type_size[p_seg->type];
 
-               p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
+               p_blk = qed_cxt_set_blk(&p_cli->vf_blks[CDUT_SEG_BLK(0)]);
                qed_ilt_cli_blk_fill(p_cli, p_blk,
                                     curr_line, total,
                                     p_mngr->task_type_size[p_seg->type]);
@@ -618,7 +705,8 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
                                     ILT_CLI_CDUT);
 
                /* 'init' memory */
-               p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
+               p_blk =
+                   qed_cxt_set_blk(&p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)]);
                if (!p_seg->has_fl_mem) {
                        /* see comment above */
                        line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line;
@@ -646,8 +734,8 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
        }
 
        /* QM */
-       p_cli = &p_mngr->clients[ILT_CLI_QM];
-       p_blk = &p_cli->pf_blks[0];
+       p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_QM]);
+       p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
 
        qed_cxt_qm_iids(p_hwfn, &qm_iids);
        total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids,
@@ -671,7 +759,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
        p_cli->pf_total_lines = curr_line - p_blk->start_line;
 
        /* SRC */
-       p_cli = &p_mngr->clients[ILT_CLI_SRC];
+       p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_SRC]);
        qed_cxt_src_iids(p_mngr, &src_iids);
 
        /* Both the PF and VFs searcher connections are stored in the per PF
@@ -685,7 +773,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
 
                total = roundup_pow_of_two(local_max);
 
-               p_blk = &p_cli->pf_blks[0];
+               p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
                qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
                                     total * sizeof(struct src_ent),
                                     sizeof(struct src_ent));
@@ -696,11 +784,11 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
        }
 
        /* TM PF */
-       p_cli = &p_mngr->clients[ILT_CLI_TM];
-       qed_cxt_tm_iids(p_mngr, &tm_iids);
+       p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TM]);
+       qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
        total = tm_iids.pf_cids + tm_iids.pf_tids_total;
        if (total) {
-               p_blk = &p_cli->pf_blks[0];
+               p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
                qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
                                     total * TM_ELEM_SIZE, TM_ELEM_SIZE);
 
@@ -712,14 +800,14 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
        /* TM VF */
        total = tm_iids.per_vf_cids + tm_iids.per_vf_tids;
        if (total) {
-               p_blk = &p_cli->vf_blks[0];
+               p_blk = qed_cxt_set_blk(&p_cli->vf_blks[0]);
                qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
                                     total * TM_ELEM_SIZE, TM_ELEM_SIZE);
 
                qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
                                     ILT_CLI_TM);
-               p_cli->pf_total_lines = curr_line - p_blk->start_line;
 
+               p_cli->vf_total_lines = curr_line - p_blk->start_line;
                for (i = 1; i < p_mngr->vf_count; i++)
                        qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
                                             ILT_CLI_TM);
@@ -729,8 +817,8 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
        total = qed_cxt_get_srq_count(p_hwfn);
 
        if (total) {
-               p_cli = &p_mngr->clients[ILT_CLI_TSDM];
-               p_blk = &p_cli->pf_blks[SRQ_BLK];
+               p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TSDM]);
+               p_blk = qed_cxt_set_blk(&p_cli->pf_blks[SRQ_BLK]);
                qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
                                     total * SRQ_CXT_SIZE, SRQ_CXT_SIZE);
 
@@ -739,13 +827,50 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
                p_cli->pf_total_lines = curr_line - p_blk->start_line;
        }
 
+       *line_count = curr_line - p_hwfn->p_cxt_mngr->pf_start_line;
+
        if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
-           RESC_NUM(p_hwfn, QED_ILT)) {
-               DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n",
-                      curr_line - p_hwfn->p_cxt_mngr->pf_start_line);
+           RESC_NUM(p_hwfn, QED_ILT))
                return -EINVAL;
+
+       return 0;
+}
+
+u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines)
+{
+       struct qed_ilt_client_cfg *p_cli;
+       u32 excess_lines, available_lines;
+       struct qed_cxt_mngr *p_mngr;
+       u32 ilt_page_size, elem_size;
+       struct qed_tid_seg *p_seg;
+       int i;
+
+       available_lines = RESC_NUM(p_hwfn, QED_ILT);
+       excess_lines = used_lines - available_lines;
+
+       if (!excess_lines)
+               return 0;
+
+       if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
+               return 0;
+
+       p_mngr = p_hwfn->p_cxt_mngr;
+       p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+       ilt_page_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
+
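+       /* Each excess line holds ilt_page_size / elem_size task contexts;
+        * return how many TIDs must be dropped to fit the available ILT.
+        */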
+       for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+               p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
+               if (!p_seg || p_seg->count == 0)
+                       continue;
+
+               elem_size = p_mngr->task_type_size[p_seg->type];
+               if (!elem_size)
+                       continue;
+
+               return (ilt_page_size / elem_size) * excess_lines;
        }
 
+       DP_NOTICE(p_hwfn, "failed computing excess ILT lines\n");
        return 0;
 }
 
@@ -795,10 +920,9 @@ static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
        p_mngr->t2_num_pages = DIV_ROUND_UP(total_size, psz);
 
        /* allocate t2 */
-       p_mngr->t2 = kzalloc(p_mngr->t2_num_pages * sizeof(struct qed_dma_mem),
+       p_mngr->t2 = kcalloc(p_mngr->t2_num_pages, sizeof(struct qed_dma_mem),
                             GFP_KERNEL);
        if (!p_mngr->t2) {
-               DP_NOTICE(p_hwfn, "Failed to allocate t2 table\n");
                rc = -ENOMEM;
                goto t2_fail;
        }
@@ -926,12 +1050,9 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
                void *p_virt;
                u32 size;
 
-               size = min_t(u32, sz_left,
-                            p_blk->real_size_in_page);
+               size = min_t(u32, sz_left, p_blk->real_size_in_page);
                p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
-                                           size,
-                                           &p_phys,
-                                           GFP_KERNEL);
+                                           size, &p_phys, GFP_KERNEL);
                if (!p_virt)
                        return -ENOMEM;
                memset(p_virt, 0, size);
@@ -963,7 +1084,6 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
        p_mngr->ilt_shadow = kcalloc(size, sizeof(struct qed_dma_mem),
                                     GFP_KERNEL);
        if (!p_mngr->ilt_shadow) {
-               DP_NOTICE(p_hwfn, "Failed to allocate ilt shadow table\n");
                rc = -ENOMEM;
                goto ilt_shadow_fail;
        }
@@ -976,7 +1096,7 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
                for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
                        p_blk = &clients[i].pf_blks[j];
                        rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
-                       if (rc != 0)
+                       if (rc)
                                goto ilt_shadow_fail;
                }
                for (k = 0; k < p_mngr->vf_count; k++) {
@@ -985,7 +1105,7 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
 
                                p_blk = &clients[i].vf_blks[j];
                                rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, lines);
-                               if (rc != 0)
+                               if (rc)
                                        goto ilt_shadow_fail;
                        }
                }
@@ -1056,10 +1176,8 @@ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
        u32 i;
 
        p_mngr = kzalloc(sizeof(*p_mngr), GFP_KERNEL);
-       if (!p_mngr) {
-               DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_cxt_mngr'\n");
+       if (!p_mngr)
                return -ENOMEM;
-       }
 
        /* Initialize ILT client registers */
        clients = p_mngr->clients;
@@ -1086,7 +1204,7 @@ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
        clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT);
        clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
        clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);
-       /* default ILT page size for all clients is 32K */
+       /* default ILT page size for all clients is 64K */
        for (i = 0; i < ILT_CLI_MAX; i++)
                p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
 
@@ -1111,24 +1229,18 @@ int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn)
 
        /* Allocate the ILT shadow table */
        rc = qed_ilt_shadow_alloc(p_hwfn);
-       if (rc) {
-               DP_NOTICE(p_hwfn, "Failed to allocate ilt memory\n");
+       if (rc)
                goto tables_alloc_fail;
-       }
 
        /* Allocate the T2  table */
        rc = qed_cxt_src_t2_alloc(p_hwfn);
-       if (rc) {
-               DP_NOTICE(p_hwfn, "Failed to allocate T2 memory\n");
+       if (rc)
                goto tables_alloc_fail;
-       }
 
        /* Allocate and initialize the acquired cids bitmaps */
        rc = qed_cid_map_alloc(p_hwfn);
-       if (rc) {
-               DP_NOTICE(p_hwfn, "Failed to allocate cid maps\n");
+       if (rc)
                goto tables_alloc_fail;
-       }
 
        return 0;
 
@@ -1332,7 +1444,7 @@ static void qed_cdu_init_pf(struct qed_hwfn *p_hwfn)
        }
 }
 
-void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
        struct qed_qm_pf_rt_init_params params;
        struct qed_qm_info *qm_info = &p_hwfn->qm_info;
@@ -1358,22 +1470,15 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
        params.pq_params = qm_info->qm_pq_params;
        params.vport_params = qm_info->qm_vport_params;
 
-       qed_qm_pf_rt_init(p_hwfn, p_hwfn->p_main_ptt, &params);
+       qed_qm_pf_rt_init(p_hwfn, p_ptt, &params);
 }
 
 /* CM PF */
-static int qed_cm_init_pf(struct qed_hwfn *p_hwfn)
+void qed_cm_init_pf(struct qed_hwfn *p_hwfn)
 {
-       union qed_qm_pq_params pq_params;
-       u16 pq;
-
        /* XCM pure-LB queue */
-       memset(&pq_params, 0, sizeof(pq_params));
-       pq_params.core.tc = LB_TC;
-       pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
-       STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET, pq);
-
-       return 0;
+       STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET,
+                    qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB));
 }
 
 /* DQ PF */
@@ -1605,7 +1710,7 @@ static void qed_tm_init_pf(struct qed_hwfn *p_hwfn)
        u8 i;
 
        memset(&tm_iids, 0, sizeof(tm_iids));
-       qed_cxt_tm_iids(p_mngr, &tm_iids);
+       qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
 
        /* @@@TBD No pre-scan for now */
 
@@ -1672,7 +1777,7 @@ static void qed_tm_init_pf(struct qed_hwfn *p_hwfn)
                     p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i);
 
                STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
-               active_seg_mask |= (tm_iids.pf_tids[i] ? (1 << i) : 0);
+               active_seg_mask |= (tm_iids.pf_tids[i] ? BIT(i) : 0);
 
                tm_offset += tm_iids.pf_tids[i];
        }
@@ -1685,25 +1790,58 @@ static void qed_tm_init_pf(struct qed_hwfn *p_hwfn)
        /* @@@TBD how to enable the scan for the VFs */
 }
 
+static void qed_prs_init_common(struct qed_hwfn *p_hwfn)
+{
+       if ((p_hwfn->hw_info.personality == QED_PCI_FCOE) &&
+           p_hwfn->pf_params.fcoe_pf_params.is_target)
+               STORE_RT_REG(p_hwfn,
+                            PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET, 0);
+}
+
+static void qed_prs_init_pf(struct qed_hwfn *p_hwfn)
+{
+       struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       struct qed_conn_type_cfg *p_fcoe;
+       struct qed_tid_seg *p_tid;
+
+       p_fcoe = &p_mngr->conn_cfg[PROTOCOLID_FCOE];
+
+       /* If FCoE is active, set the MAX OX_ID (tid) in the Parser */
+       if (!p_fcoe->cid_count)
+               return;
+
+       p_tid = &p_fcoe->tid_seg[QED_CXT_FCOE_TID_SEG];
+       if (p_hwfn->pf_params.fcoe_pf_params.is_target) {
+               STORE_RT_REG_AGG(p_hwfn,
+                                PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET,
+                                p_tid->count);
+       } else {
+               STORE_RT_REG_AGG(p_hwfn,
+                                PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET,
+                                p_tid->count);
+       }
+}
+
 void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
 {
        qed_cdu_init_common(p_hwfn);
+       qed_prs_init_common(p_hwfn);
 }
 
-void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn)
+void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-       qed_qm_init_pf(p_hwfn);
+       qed_qm_init_pf(p_hwfn, p_ptt);
        qed_cm_init_pf(p_hwfn);
        qed_dq_init_pf(p_hwfn);
        qed_cdu_init_pf(p_hwfn);
        qed_ilt_init_pf(p_hwfn);
        qed_src_init_pf(p_hwfn);
        qed_tm_init_pf(p_hwfn);
+       qed_prs_init_pf(p_hwfn);
 }
 
 int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
-                       enum protocol_type type,
-                       u32 *p_cid)
+                       enum protocol_type type, u32 *p_cid)
 {
        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        u32 rel_cid;
@@ -1717,8 +1855,7 @@ int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
                                      p_mngr->acquired[type].max_count);
 
        if (rel_cid >= p_mngr->acquired[type].max_count) {
-               DP_NOTICE(p_hwfn, "no CID available for protocol %d\n",
-                         type);
+               DP_NOTICE(p_hwfn, "no CID available for protocol %d\n", type);
                return -EINVAL;
        }
 
@@ -1730,8 +1867,7 @@ int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
 }
 
 static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
-                                     u32 cid,
-                                     enum protocol_type *p_type)
+                                     u32 cid, enum protocol_type *p_type)
 {
        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        struct qed_cid_acquired_map *p_map;
@@ -1763,8 +1899,7 @@ static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
        return true;
 }
 
-void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
-                        u32 cid)
+void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid)
 {
        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        enum protocol_type type;
@@ -1781,8 +1916,7 @@ void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
        __clear_bit(rel_cid, p_mngr->acquired[type].cid_map);
 }
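
qed_cxt_acquire_cid() and qed_cxt_release_cid() treat each protocol's CID space as a bitmap: acquisition scans for the first clear bit and sets it, release clears it again. A self-contained sketch of the same pattern, with plain C bit operations standing in for the kernel's find_first_zero_bit()/__set_bit()/__clear_bit():

    #include <stdint.h>
    #include <stdio.h>

    struct cid_map {
            uint64_t bits;            /* 1 = acquired */
            uint32_t max_count;       /* number of valid CIDs (<= 64 here) */
    };

    /* Acquire: find the first free CID and mark it used; -1 if exhausted. */
    static int cid_acquire(struct cid_map *m)
    {
            for (uint32_t i = 0; i < m->max_count; i++) {
                    if (!(m->bits & (1ULL << i))) {
                            m->bits |= 1ULL << i;
                            return (int)i;
                    }
            }
            return -1;                /* no CID available */
    }

    /* Release: clear the bit so the CID can be handed out again. */
    static void cid_release(struct cid_map *m, uint32_t cid)
    {
            if (cid < m->max_count)
                    m->bits &= ~(1ULL << cid);
    }

    int main(void)
    {
            struct cid_map m = { .bits = 0, .max_count = 8 };
            int cid = cid_acquire(&m);

            printf("acquired CID %d\n", cid);
            cid_release(&m, (uint32_t)cid);
            return 0;
    }
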
 
-int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
-                        struct qed_cxt_info *p_info)
+int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
 {
        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
@@ -1819,14 +1953,13 @@ int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
        return 0;
 }
 
-void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
-                           struct qed_rdma_pf_params *p_params)
+static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
+                                  struct qed_rdma_pf_params *p_params,
+                                  u32 num_tasks)
 {
-       u32 num_cons, num_tasks, num_qps, num_mrs, num_srqs;
+       u32 num_cons, num_qps, num_srqs;
        enum protocol_type proto;
 
-       num_mrs = min_t(u32, RDMA_MAX_TIDS, p_params->num_mrs);
-       num_tasks = num_mrs;    /* each mr uses a single task id */
        num_srqs = min_t(u32, 32 * 1024, p_params->num_srqs);
 
        switch (p_hwfn->hw_info.personality) {
@@ -1855,19 +1988,22 @@ void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
        }
 }
 
-int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
+int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks)
 {
        /* Set the number of required CORE connections */
        u32 core_cids = 1; /* SPQ */
 
+       if (p_hwfn->using_ll2)
+               core_cids += 4;
        qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);
 
        switch (p_hwfn->hw_info.personality) {
        case QED_PCI_ETH_ROCE:
        {
-               qed_rdma_set_pf_params(p_hwfn,
-                                      &p_hwfn->
-                                      pf_params.rdma_pf_params);
+               qed_rdma_set_pf_params(p_hwfn,
+                                      &p_hwfn->
+                                      pf_params.rdma_pf_params,
+                                      rdma_tasks);
                /* no need for break since RoCE coexists with Ethernet */
        }
        case QED_PCI_ETH:
@@ -1877,6 +2013,28 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
 
                qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
                                            p_params->num_cons, 1);
+               p_hwfn->p_cxt_mngr->arfs_count = p_params->num_arfs_filters;
+               break;
+       }
+       case QED_PCI_FCOE:
+       {
+               struct qed_fcoe_pf_params *p_params;
+
+               p_params = &p_hwfn->pf_params.fcoe_pf_params;
+
+               if (p_params->num_cons && p_params->num_tasks) {
+                       qed_cxt_set_proto_cid_count(p_hwfn,
+                                                   PROTOCOLID_FCOE,
+                                                   p_params->num_cons,
+                                                   0);
+
+                       qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_FCOE,
+                                                   QED_CXT_FCOE_TID_SEG, 0,
+                                                   p_params->num_tasks, true);
+               } else {
+                       DP_INFO(p_hwfn->cdev,
+                               "Fcoe personality used without setting params!\n");
+               }
                break;
        }
        case QED_PCI_ISCSI:
@@ -1921,6 +2079,10 @@ int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
 
        /* Verify the personality */
        switch (p_hwfn->hw_info.personality) {
+       case QED_PCI_FCOE:
+               proto = PROTOCOLID_FCOE;
+               seg = QED_CXT_FCOE_TID_SEG;
+               break;
        case QED_PCI_ISCSI:
                proto = PROTOCOLID_ISCSI;
                seg = QED_CXT_ISCSI_TID_SEG;
@@ -2209,15 +2371,19 @@ int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
 {
        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        struct qed_ilt_client_cfg *p_cli;
-       struct qed_ilt_cli_blk *p_seg;
        struct qed_tid_seg *p_seg_info;
-       u32 proto, seg;
-       u32 total_lines;
-       u32 tid_size, ilt_idx;
+       struct qed_ilt_cli_blk *p_seg;
        u32 num_tids_per_block;
+       u32 tid_size, ilt_idx;
+       u32 total_lines;
+       u32 proto, seg;
 
        /* Verify the personality */
        switch (p_hwfn->hw_info.personality) {
+       case QED_PCI_FCOE:
+               proto = PROTOCOLID_FCOE;
+               seg = QED_CXT_FCOE_TID_SEG;
+               break;
        case QED_PCI_ISCSI:
                proto = PROTOCOLID_ISCSI;
                seg = QED_CXT_ISCSI_TID_SEG;
index c6f6f2e8192df4784314c5aa1d41fb7b7266388a..53ad532dc21223e4a6fa15039e5ab17acb5e6a01 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef _QED_CXT_H
@@ -67,6 +91,7 @@ int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
 
 #define QED_CXT_ISCSI_TID_SEG  PROTOCOLID_ISCSI
 #define QED_CXT_ROCE_TID_SEG   PROTOCOLID_ROCE
+#define QED_CXT_FCOE_TID_SEG   PROTOCOLID_FCOE
 enum qed_cxt_elem_type {
        QED_ELEM_CXT,
        QED_ELEM_SRQ,
@@ -80,19 +105,28 @@ u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
  * @brief qed_cxt_set_pf_params - Set the PF params for cxt init
  *
  * @param p_hwfn
- *
+ * @param rdma_tasks - requested maximum number of RDMA tasks
  * @return int
  */
-int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn);
+int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks);
 
 /**
  * @brief qed_cxt_cfg_ilt_compute - compute ILT init parameters
  *
  * @param p_hwfn
+ * @param last_line
  *
  * @return int
  */
-int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn);
+int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *last_line);
+
+/**
+ * @brief qed_cxt_cfg_ilt_compute_excess - how many ILT lines can be freed
+ *
+ * @param p_hwfn
+ * @param used_lines
+ */
+u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines);
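
qed_cxt_cfg_ilt_compute() now reports the last ILT line a configuration needs, and qed_cxt_cfg_ilt_compute_excess() answers how far the RDMA task count must shrink when that exceeds the PF's budget. A self-contained sketch of the resulting retry loop, under a toy cost model (the helper names and the line/task conversion here are assumptions; the real caller lives in the driver's resource-allocation path):

    #include <stdint.h>
    #include <stdio.h>

    /* Toy stand-ins for the driver's ILT sizing helpers. */
    static uint32_t compute_ilt_lines(uint32_t rdma_tasks)
    {
            return 100 + rdma_tasks / 8;      /* toy cost model */
    }

    static uint32_t compute_excess_tasks(uint32_t excess_lines)
    {
            return excess_lines * 8;          /* inverse of the model above */
    }

    /* Shrink the requested task count until the ILT footprint fits. */
    static int fit_rdma_tasks(uint32_t requested, uint32_t budget_lines,
                              uint32_t *granted)
    {
            uint32_t tasks = requested;

            while (tasks) {
                    uint32_t last_line = compute_ilt_lines(tasks);

                    if (last_line <= budget_lines) {
                            *granted = tasks;
                            return 0;
                    }
                    uint32_t excess =
                        compute_excess_tasks(last_line - budget_lines);
                    tasks = (excess < tasks) ? tasks - excess : 0;
            }
            return -1;    /* even zero RDMA tasks would not fit */
    }

    int main(void)
    {
            uint32_t granted;

            if (!fit_rdma_tasks(4096, 300, &granted))
                    printf("granted %u RDMA tasks\n", granted);
            return 0;
    }
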
 
 /**
  * @brief qed_cxt_mngr_alloc - Allocate and init the context manager struct
@@ -138,19 +172,18 @@ void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn);
 /**
  * @brief qed_cxt_hw_init_pf - Initialize ILT and DQ, PF phase, per path.
  *
- *
- *
  * @param p_hwfn
+ * @param p_ptt
  */
-void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn);
+void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
 
 /**
  * @brief qed_qm_init_pf - Initialize the QM PF phase, per path
  *
  * @param p_hwfn
+ * @param p_ptt
  */
-
-void qed_qm_init_pf(struct qed_hwfn *p_hwfn);
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
 
 /**
  * @brief Reconfigures QM pf on the fly
@@ -170,7 +203,16 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
  */
 void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
                         u32 cid);
+int qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
+                             enum qed_cxt_elem_type elem_type, u32 iid);
+u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
+                               enum protocol_type type);
+u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn,
+                               enum protocol_type type);
+int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto);
 
 #define QED_CTX_WORKING_MEM 0
 #define QED_CTX_FL_MEM 1
+int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
+                        u32 tid, u8 ctx_type, void **task_ctx);
 #endif
index 3656d2fd673d28a37d1bfd4bb54c8046b6f45987..cca9f641a82596ca6429036a28c56ee08edff2dc 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #include <linux/types.h>
        ((u32)(prio_tc_tbl >> ((7 - prio) * 4)) & 0x7)
 
 static const struct qed_dcbx_app_metadata qed_dcbx_app_update[] = {
-       {DCBX_PROTOCOL_ISCSI, "ISCSI", QED_PCI_DEFAULT},
-       {DCBX_PROTOCOL_FCOE, "FCOE", QED_PCI_DEFAULT},
-       {DCBX_PROTOCOL_ROCE, "ROCE", QED_PCI_DEFAULT},
-       {DCBX_PROTOCOL_ROCE_V2, "ROCE_V2", QED_PCI_DEFAULT},
+       {DCBX_PROTOCOL_ISCSI, "ISCSI", QED_PCI_ISCSI},
+       {DCBX_PROTOCOL_FCOE, "FCOE", QED_PCI_FCOE},
+       {DCBX_PROTOCOL_ROCE, "ROCE", QED_PCI_ETH_ROCE},
+       {DCBX_PROTOCOL_ROCE_V2, "ROCE_V2", QED_PCI_ETH_ROCE},
        {DCBX_PROTOCOL_ETH, "ETH", QED_PCI_ETH}
 };
 
@@ -159,7 +183,7 @@ qed_dcbx_dp_protocol(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data)
                           "%s info: update %d, enable %d, prio %d, tc %d, num_tc %d\n",
                           qed_dcbx_app_update[i].name, p_data->arr[id].update,
                           p_data->arr[id].enable, p_data->arr[id].priority,
-                          p_data->arr[id].tc, p_hwfn->hw_info.num_tc);
+                          p_data->arr[id].tc, p_hwfn->hw_info.num_active_tc);
        }
 }
 
@@ -180,12 +204,8 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data,
        p_data->arr[type].tc = tc;
 
        /* QM reconf data */
-       if (p_info->personality == personality) {
-               if (personality == QED_PCI_ETH)
-                       p_info->non_offload_tc = tc;
-               else
-                       p_info->offload_tc = tc;
-       }
+       if (p_info->personality == personality)
+               p_info->offload_tc = tc;
 }
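
With the app-update table above now mapping each DCBX protocol to a concrete personality instead of QED_PCI_DEFAULT, the update collapses to a single offload_tc applied only on the matching function. A compact, self-contained sketch of that personality-gated update (enum and field names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    enum personality { PERS_ETH, PERS_FCOE, PERS_ISCSI, PERS_ETH_ROCE };

    struct hw_info_sketch {
            enum personality personality;
            uint8_t offload_tc;
    };

    /* Apply a negotiated TC only when the protocol's personality
     * matches this function's personality.
     */
    static void set_offload_tc(struct hw_info_sketch *info,
                               enum personality proto_pers, uint8_t tc)
    {
            if (info->personality == proto_pers)
                    info->offload_tc = tc;
    }

    int main(void)
    {
            struct hw_info_sketch info = { PERS_ETH_ROCE, 0 };

            set_offload_tc(&info, PERS_ISCSI, 4);     /* ignored */
            set_offload_tc(&info, PERS_ETH_ROCE, 3);  /* applied */
            printf("offload_tc = %u\n", info.offload_tc);
            return 0;
    }
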
 
 /* Update app protocol data and hw_info fields with the TLV info */
@@ -352,7 +372,9 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn)
        if (rc)
                return rc;
 
-       p_info->num_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS);
+       p_info->num_active_tc = QED_MFW_GET_FIELD(p_ets->flags,
+                                                 DCBX_ETS_MAX_TCS);
+       p_hwfn->qm_info.ooo_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_OOO_TC);
        data.pf_id = p_hwfn->rel_pf_id;
        data.dcbx_enabled = !!dcbx_version;
 
@@ -408,7 +430,6 @@ qed_dcbx_copy_mib(struct qed_hwfn *p_hwfn,
        return rc;
 }
 
-#ifdef CONFIG_DCB
 static void
 qed_dcbx_get_priority_info(struct qed_hwfn *p_hwfn,
                           struct qed_dcbx_app_prio *p_prio,
@@ -725,7 +746,6 @@ qed_dcbx_get_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
 
        return 0;
 }
-#endif
 
 static int
 qed_dcbx_read_local_lldp_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
@@ -840,6 +860,15 @@ static int qed_dcbx_read_mib(struct qed_hwfn *p_hwfn,
        return rc;
 }
 
+void qed_dcbx_aen(struct qed_hwfn *hwfn, u32 mib_type)
+{
+       struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
+       void *cookie = hwfn->cdev->ops_cookie;
+
+       if (cookie && op->dcbx_aen)
+               op->dcbx_aen(cookie, &hwfn->p_dcbx_info->get, mib_type);
+}
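
qed_dcbx_aen() pushes the freshly read DCBX parameters up to whichever consumer registered common protocol ops, guarding against a missing cookie or hook. A minimal, self-contained sketch of that notify-through-optional-callback pattern (struct and field names below are illustrative, in the spirit of qed_common_cb_ops):

    #include <stdint.h>
    #include <stdio.h>

    struct dcbx_get_sketch { int dummy; };

    /* Optional upper-layer callbacks. */
    struct cb_ops_sketch {
            void (*dcbx_aen)(void *cookie, struct dcbx_get_sketch *get,
                             uint32_t mib_type);
    };

    /* Notify only if a consumer registered both a cookie and the hook. */
    static void dcbx_aen_notify(const struct cb_ops_sketch *ops, void *cookie,
                                struct dcbx_get_sketch *get, uint32_t mib_type)
    {
            if (cookie && ops && ops->dcbx_aen)
                    ops->dcbx_aen(cookie, get, mib_type);
    }

    static void my_aen(void *cookie, struct dcbx_get_sketch *get, uint32_t mib)
    {
            (void)cookie; (void)get;
            printf("DCBX MIB %u changed\n", mib);
    }

    int main(void)
    {
            struct cb_ops_sketch ops = { .dcbx_aen = my_aen };
            struct dcbx_get_sketch get = { 0 };
            int cookie;

            dcbx_aen_notify(&ops, &cookie, &get, 1);
            return 0;
    }
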
+
 /* Read updated MIB.
  * Reconfigure QM and invoke PF update ramrod command if operational MIB
  * change is detected.
@@ -866,6 +895,8 @@ qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn,
                        qed_sp_pf_update(p_hwfn);
                }
        }
+       qed_dcbx_get_params(p_hwfn, p_ptt, &p_hwfn->p_dcbx_info->get, type);
+       qed_dcbx_aen(p_hwfn, type);
 
        return rc;
 }
@@ -875,11 +906,8 @@ int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn)
        int rc = 0;
 
        p_hwfn->p_dcbx_info = kzalloc(sizeof(*p_hwfn->p_dcbx_info), GFP_KERNEL);
-       if (!p_hwfn->p_dcbx_info) {
-               DP_NOTICE(p_hwfn,
-                         "Failed to allocate 'struct qed_dcbx_info'\n");
+       if (!p_hwfn->p_dcbx_info)
                rc = -ENOMEM;
-       }
 
        return rc;
 }
@@ -1190,11 +1218,10 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
        }
 
        dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL);
-       if (!dcbx_info) {
-               DP_ERR(p_hwfn, "Failed to allocate struct qed_dcbx_info\n");
+       if (!dcbx_info)
                return -ENOMEM;
-       }
 
+       memset(dcbx_info, 0, sizeof(*dcbx_info));
        rc = qed_dcbx_query_params(p_hwfn, dcbx_info, QED_DCBX_OPERATIONAL_MIB);
        if (rc) {
                kfree(dcbx_info);
@@ -1227,11 +1254,10 @@ static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
        struct qed_dcbx_get *dcbx_info;
 
        dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL);
-       if (!dcbx_info) {
-               DP_ERR(hwfn->cdev, "Failed to allocate memory for dcbx_info\n");
+       if (!dcbx_info)
                return NULL;
-       }
 
+       memset(dcbx_info, 0, sizeof(*dcbx_info));
        if (qed_dcbx_query_params(hwfn, dcbx_info, type)) {
                kfree(dcbx_info);
                return NULL;
@@ -1982,6 +2008,7 @@ static int qed_dcbnl_get_ieee_pfc(struct qed_dev *cdev,
 
        if (!dcbx_info->operational.ieee) {
                DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
+               kfree(dcbx_info);
                return -EINVAL;
        }
 
@@ -2150,17 +2177,19 @@ static int qed_dcbnl_ieee_setets(struct qed_dev *cdev, struct ieee_ets *ets)
        return rc;
 }
 
-int qed_dcbnl_ieee_peer_getets(struct qed_dev *cdev, struct ieee_ets *ets)
+static int
+qed_dcbnl_ieee_peer_getets(struct qed_dev *cdev, struct ieee_ets *ets)
 {
        return qed_dcbnl_get_ieee_ets(cdev, ets, true);
 }
 
-int qed_dcbnl_ieee_peer_getpfc(struct qed_dev *cdev, struct ieee_pfc *pfc)
+static int
+qed_dcbnl_ieee_peer_getpfc(struct qed_dev *cdev, struct ieee_pfc *pfc)
 {
        return qed_dcbnl_get_ieee_pfc(cdev, pfc, true);
 }
 
-int qed_dcbnl_ieee_getapp(struct qed_dev *cdev, struct dcb_app *app)
+static int qed_dcbnl_ieee_getapp(struct qed_dev *cdev, struct dcb_app *app)
 {
        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
        struct qed_dcbx_get *dcbx_info;
@@ -2204,7 +2233,7 @@ int qed_dcbnl_ieee_getapp(struct qed_dev *cdev, struct dcb_app *app)
        return 0;
 }
 
-int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app)
+static int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app)
 {
        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
        struct qed_dcbx_get *dcbx_info;
index 9ba681643d058a3ab8510f933d388b9190796eb6..2eb988fe1298dfd043fbd8c18d49187930a5c970 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef _QED_DCBX_H
@@ -33,7 +57,6 @@ struct qed_dcbx_app_data {
        u8 tc;                  /* Traffic Class */
 };
 
-#ifdef CONFIG_DCB
 #define QED_DCBX_VERSION_DISABLED       0
 #define QED_DCBX_VERSION_IEEE           1
 #define QED_DCBX_VERSION_CEE            2
@@ -49,7 +72,6 @@ struct qed_dcbx_set {
        struct qed_dcbx_admin_params config;
        u32 ver_num;
 };
-#endif
 
 struct qed_dcbx_results {
        bool dcbx_enabled;
@@ -63,9 +85,6 @@ struct qed_dcbx_app_metadata {
        enum qed_pci_personality personality;
 };
 
-#define QED_MFW_GET_FIELD(name, field) \
-       (((name) & (field ## _MASK)) >> (field ## _SHIFT))
-
 struct qed_dcbx_info {
        struct lldp_status_params_s lldp_remote[LLDP_MAX_LLDP_AGENTS];
        struct lldp_config_params_s lldp_local[LLDP_MAX_LLDP_AGENTS];
@@ -73,9 +92,8 @@ struct qed_dcbx_info {
        struct qed_dcbx_results results;
        struct dcbx_mib operational;
        struct dcbx_mib remote;
-#ifdef CONFIG_DCB
        struct qed_dcbx_set set;
-#endif
+       struct qed_dcbx_get get;
        u8 dcbx_cap;
 };
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
new file mode 100644 (file)
index 0000000..483241b
--- /dev/null
@@ -0,0 +1,7203 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/crc32.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+
+/* Chip IDs enum */
+enum chip_ids {
+       CHIP_BB_B0,
+       CHIP_K2,
+       MAX_CHIP_IDS
+};
+
+/* Memory groups enum */
+enum mem_groups {
+       MEM_GROUP_PXP_MEM,
+       MEM_GROUP_DMAE_MEM,
+       MEM_GROUP_CM_MEM,
+       MEM_GROUP_QM_MEM,
+       MEM_GROUP_TM_MEM,
+       MEM_GROUP_BRB_RAM,
+       MEM_GROUP_BRB_MEM,
+       MEM_GROUP_PRS_MEM,
+       MEM_GROUP_SDM_MEM,
+       MEM_GROUP_PBUF,
+       MEM_GROUP_IOR,
+       MEM_GROUP_RAM,
+       MEM_GROUP_BTB_RAM,
+       MEM_GROUP_RDIF_CTX,
+       MEM_GROUP_TDIF_CTX,
+       MEM_GROUP_CFC_MEM,
+       MEM_GROUP_CONN_CFC_MEM,
+       MEM_GROUP_TASK_CFC_MEM,
+       MEM_GROUP_CAU_PI,
+       MEM_GROUP_CAU_MEM,
+       MEM_GROUP_PXP_ILT,
+       MEM_GROUP_MULD_MEM,
+       MEM_GROUP_BTB_MEM,
+       MEM_GROUP_IGU_MEM,
+       MEM_GROUP_IGU_MSIX,
+       MEM_GROUP_CAU_SB,
+       MEM_GROUP_BMB_RAM,
+       MEM_GROUP_BMB_MEM,
+       MEM_GROUPS_NUM
+};
+
+/* Memory groups names */
+static const char * const s_mem_group_names[] = {
+       "PXP_MEM",
+       "DMAE_MEM",
+       "CM_MEM",
+       "QM_MEM",
+       "TM_MEM",
+       "BRB_RAM",
+       "BRB_MEM",
+       "PRS_MEM",
+       "SDM_MEM",
+       "PBUF",
+       "IOR",
+       "RAM",
+       "BTB_RAM",
+       "RDIF_CTX",
+       "TDIF_CTX",
+       "CFC_MEM",
+       "CONN_CFC_MEM",
+       "TASK_CFC_MEM",
+       "CAU_PI",
+       "CAU_MEM",
+       "PXP_ILT",
+       "MULD_MEM",
+       "BTB_MEM",
+       "IGU_MEM",
+       "IGU_MSIX",
+       "CAU_SB",
+       "BMB_RAM",
+       "BMB_MEM",
+};
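
s_mem_group_names[] must stay in lockstep with enum mem_groups above, or dump output silently mislabels memory regions. A guard in that spirit, if one wanted it (the in-kernel idiom would be BUILD_BUG_ON(); a C11 _Static_assert is shown so the check stands on its own):

    /* Fail the build, rather than mislabel dumps at run time, if the
     * name table and the enum ever drift apart.
     */
    #define SKETCH_ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    _Static_assert(SKETCH_ARRAY_SIZE(s_mem_group_names) == MEM_GROUPS_NUM,
                   "s_mem_group_names out of sync with enum mem_groups");
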
+
+/* Idle check conditions */
+static u32 cond4(const u32 *r, const u32 *imm)
+{
+       return ((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]);
+}
+
+static u32 cond6(const u32 *r, const u32 *imm)
+{
+       return ((r[0] >> imm[0]) & imm[1]) != imm[2];
+}
+
+static u32 cond5(const u32 *r, const u32 *imm)
+{
+       return (r[0] & imm[0]) != imm[1];
+}
+
+static u32 cond8(const u32 *r, const u32 *imm)
+{
+       return ((r[0] & imm[0]) >> imm[1]) !=
+           (((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]));
+}
+
+static u32 cond9(const u32 *r, const u32 *imm)
+{
+       return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]);
+}
+
+static u32 cond1(const u32 *r, const u32 *imm)
+{
+       return (r[0] & ~imm[0]) != imm[1];
+}
+
+static u32 cond0(const u32 *r, const u32 *imm)
+{
+       return r[0] != imm[0];
+}
+
+static u32 cond10(const u32 *r, const u32 *imm)
+{
+       return r[0] != r[1] && r[2] == imm[0];
+}
+
+static u32 cond11(const u32 *r, const u32 *imm)
+{
+       return r[0] != r[1] && r[2] > imm[0];
+}
+
+static u32 cond3(const u32 *r, const u32 *imm)
+{
+       return r[0] != r[1];
+}
+
+static u32 cond12(const u32 *r, const u32 *imm)
+{
+       return r[0] & imm[0];
+}
+
+static u32 cond7(const u32 *r, const u32 *imm)
+{
+       return r[0] < (r[1] - imm[0]);
+}
+
+static u32 cond2(const u32 *r, const u32 *imm)
+{
+       return r[0] > imm[0];
+}
+
+/* Array of Idle Check conditions */
+static u32 (*cond_arr[])(const u32 *r, const u32 *imm) = {
+       cond0,
+       cond1,
+       cond2,
+       cond3,
+       cond4,
+       cond5,
+       cond6,
+       cond7,
+       cond8,
+       cond9,
+       cond10,
+       cond11,
+       cond12,
+};
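
The idle-check rules themselves live in data tables: each rule carries a condition index, and evaluating it is a single indexed call through cond_arr. A minimal sketch of that table-driven dispatch (the rule layout below is an assumption; the real one is struct dbg_idle_chk_rule):

    /* A rule "fires" when the condition selected by cond_id returns
     * nonzero for the sampled register values and immediate operands.
     */
    struct idle_chk_rule_sketch {
            u16 cond_id;        /* index into cond_arr */
            const u32 *regs;    /* sampled register values */
            const u32 *imms;    /* immediate operands */
    };

    static bool rule_fires(const struct idle_chk_rule_sketch *rule)
    {
            return cond_arr[rule->cond_id](rule->regs, rule->imms) != 0;
    }
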
+
+/******************************* Data Types **********************************/
+
+enum platform_ids {
+       PLATFORM_ASIC,
+       PLATFORM_RESERVED,
+       PLATFORM_RESERVED2,
+       PLATFORM_RESERVED3,
+       MAX_PLATFORM_IDS
+};
+
+struct dbg_array {
+       const u32 *ptr;
+       u32 size_in_dwords;
+};
+
+struct chip_platform_defs {
+       u8 num_ports;
+       u8 num_pfs;
+       u8 num_vfs;
+};
+
+/* Chip constant definitions */
+struct chip_defs {
+       const char *name;
+       struct chip_platform_defs per_platform[MAX_PLATFORM_IDS];
+};
+
+/* Platform constant definitions */
+struct platform_defs {
+       const char *name;
+       u32 delay_factor;
+};
+
+/* Storm constant definitions */
+struct storm_defs {
+       char letter;
+       enum block_id block_id;
+       enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
+       bool has_vfc;
+       u32 sem_fast_mem_addr;
+       u32 sem_frame_mode_addr;
+       u32 sem_slow_enable_addr;
+       u32 sem_slow_mode_addr;
+       u32 sem_slow_mode1_conf_addr;
+       u32 sem_sync_dbg_empty_addr;
+       u32 sem_slow_dbg_empty_addr;
+       u32 cm_ctx_wr_addr;
+       u32 cm_conn_ag_ctx_lid_size; /* In quad-regs */
+       u32 cm_conn_ag_ctx_rd_addr;
+       u32 cm_conn_st_ctx_lid_size; /* In quad-regs */
+       u32 cm_conn_st_ctx_rd_addr;
+       u32 cm_task_ag_ctx_lid_size; /* In quad-regs */
+       u32 cm_task_ag_ctx_rd_addr;
+       u32 cm_task_st_ctx_lid_size; /* In quad-regs */
+       u32 cm_task_st_ctx_rd_addr;
+};
+
+/* Block constant definitions */
+struct block_defs {
+       const char *name;
+       bool has_dbg_bus[MAX_CHIP_IDS];
+       bool associated_to_storm;
+       u32 storm_id; /* Valid only if associated_to_storm is true */
+       enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
+       u32 dbg_select_addr;
+       u32 dbg_cycle_enable_addr;
+       u32 dbg_shift_addr;
+       u32 dbg_force_valid_addr;
+       u32 dbg_force_frame_addr;
+       bool has_reset_bit;
+       bool unreset; /* If true, the block is taken out of reset before dump */
+       enum dbg_reset_regs reset_reg;
+       u8 reset_bit_offset; /* Bit offset in reset register */
+};
+
+/* Reset register definitions */
+struct reset_reg_defs {
+       u32 addr;
+       u32 unreset_val;
+       bool exists[MAX_CHIP_IDS];
+};
+
+struct grc_param_defs {
+       u32 default_val[MAX_CHIP_IDS];
+       u32 min;
+       u32 max;
+       bool is_preset;
+       u32 exclude_all_preset_val;
+       u32 crash_preset_val;
+};
+
+struct rss_mem_defs {
+       const char *mem_name;
+       const char *type_name;
+       u32 addr; /* In 128b units */
+       u32 num_entries[MAX_CHIP_IDS];
+       u32 entry_width[MAX_CHIP_IDS]; /* In bits */
+};
+
+struct vfc_ram_defs {
+       const char *mem_name;
+       const char *type_name;
+       u32 base_row;
+       u32 num_rows;
+};
+
+struct big_ram_defs {
+       const char *instance_name;
+       enum mem_groups mem_group_id;
+       enum mem_groups ram_mem_group_id;
+       enum dbg_grc_params grc_param;
+       u32 addr_reg_addr;
+       u32 data_reg_addr;
+       u32 num_of_blocks[MAX_CHIP_IDS];
+};
+
+struct phy_defs {
+       const char *phy_name;
+       u32 base_addr;
+       u32 tbus_addr_lo_addr;
+       u32 tbus_addr_hi_addr;
+       u32 tbus_data_lo_addr;
+       u32 tbus_data_hi_addr;
+};
+
+/******************************** Constants **********************************/
+
+#define MAX_LCIDS                      320
+#define MAX_LTIDS                      320
+#define NUM_IOR_SETS                   2
+#define IORS_PER_SET                   176
+#define IOR_SET_OFFSET(set_id)         ((set_id) * 256)
+#define BYTES_IN_DWORD                 sizeof(u32)
+
+/* In the macros below, size and offset are specified in bits */
+#define CEIL_DWORDS(size)              DIV_ROUND_UP(size, 32)
+#define FIELD_BIT_OFFSET(type, field)  type ## _ ## field ## _ ## OFFSET
+#define FIELD_BIT_SIZE(type, field)    type ## _ ## field ## _ ## SIZE
+#define FIELD_DWORD_OFFSET(type, field) \
+        (int)(FIELD_BIT_OFFSET(type, field) / 32)
+#define FIELD_DWORD_SHIFT(type, field) (FIELD_BIT_OFFSET(type, field) % 32)
+#define FIELD_BIT_MASK(type, field) \
+       (((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
+        FIELD_DWORD_SHIFT(type, field))
+#define SET_VAR_FIELD(var, type, field, val) \
+       do { \
+               var[FIELD_DWORD_OFFSET(type, field)] &= \
+               (~FIELD_BIT_MASK(type, field)); \
+               var[FIELD_DWORD_OFFSET(type, field)] |= \
+               (val) << FIELD_DWORD_SHIFT(type, field); \
+       } while (0)
+#define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
+       do { \
+               for (i = 0; i < (arr_size); i++) \
+                       qed_wr(dev, ptt, addr,  (arr)[i]); \
+       } while (0)
+#define ARR_REG_RD(dev, ptt, addr, arr, arr_size) \
+       do { \
+               for (i = 0; i < (arr_size); i++) \
+                       (arr)[i] = qed_rd(dev, ptt, addr); \
+       } while (0)
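
Note that ARR_REG_WR/ARR_REG_RD expand to a for loop over a bare `i`, so every caller must declare that counter in the enclosing scope. An illustrative call site (the function and parameter names here are assumptions, not part of the driver):

    /* Read `len` dwords starting at `addr` into `buf`; the macro's
     * hidden loop consumes the local `i` declared below.
     */
    static void fifo_read_sketch(struct qed_hwfn *dev, struct qed_ptt *ptt,
                                 u32 addr, u32 *buf, u32 len)
    {
            u32 i;      /* required by ARR_REG_RD's expansion */

            ARR_REG_RD(dev, ptt, addr, buf, len);
    }
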
+
+#define DWORDS_TO_BYTES(dwords)                ((dwords) * BYTES_IN_DWORD)
+#define BYTES_TO_DWORDS(bytes)         ((bytes) / BYTES_IN_DWORD)
+#define RAM_LINES_TO_DWORDS(lines)     ((lines) * 2)
+#define RAM_LINES_TO_BYTES(lines) \
+       DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))
+#define REG_DUMP_LEN_SHIFT             24
+#define MEM_DUMP_ENTRY_SIZE_DWORDS \
+       BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))
+#define IDLE_CHK_RULE_SIZE_DWORDS \
+       BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))
+#define IDLE_CHK_RESULT_HDR_DWORDS \
+       BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))
+#define IDLE_CHK_RESULT_REG_HDR_DWORDS \
+       BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))
+#define IDLE_CHK_MAX_ENTRIES_SIZE      32
+
+/* The sizes and offsets below are specified in bits */
+#define VFC_CAM_CMD_STRUCT_SIZE                64
+#define VFC_CAM_CMD_ROW_OFFSET         48
+#define VFC_CAM_CMD_ROW_SIZE           9
+#define VFC_CAM_ADDR_STRUCT_SIZE       16
+#define VFC_CAM_ADDR_OP_OFFSET         0
+#define VFC_CAM_ADDR_OP_SIZE           4
+#define VFC_CAM_RESP_STRUCT_SIZE       256
+#define VFC_RAM_ADDR_STRUCT_SIZE       16
+#define VFC_RAM_ADDR_OP_OFFSET         0
+#define VFC_RAM_ADDR_OP_SIZE           2
+#define VFC_RAM_ADDR_ROW_OFFSET                2
+#define VFC_RAM_ADDR_ROW_SIZE          10
+#define VFC_RAM_RESP_STRUCT_SIZE       256
+#define VFC_CAM_CMD_DWORDS             CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
+#define VFC_CAM_ADDR_DWORDS            CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
+#define VFC_CAM_RESP_DWORDS            CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
+#define VFC_RAM_CMD_DWORDS             VFC_CAM_CMD_DWORDS
+#define VFC_RAM_ADDR_DWORDS            CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
+#define VFC_RAM_RESP_DWORDS            CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)
+#define NUM_VFC_RAM_TYPES              4
+#define VFC_CAM_NUM_ROWS               512
+#define VFC_OPCODE_CAM_RD              14
+#define VFC_OPCODE_RAM_RD              0
+#define NUM_RSS_MEM_TYPES              5
+#define NUM_BIG_RAM_TYPES              3
+#define BIG_RAM_BLOCK_SIZE_BYTES       128
+#define BIG_RAM_BLOCK_SIZE_DWORDS \
+       BYTES_TO_DWORDS(BIG_RAM_BLOCK_SIZE_BYTES)
+#define NUM_PHY_TBUS_ADDRESSES         2048
+#define PHY_DUMP_SIZE_DWORDS           (NUM_PHY_TBUS_ADDRESSES / 2)
+#define RESET_REG_UNRESET_OFFSET       4
+#define STALL_DELAY_MS                 500
+#define STATIC_DEBUG_LINE_DWORDS       9
+#define NUM_DBG_BUS_LINES              256
+#define NUM_COMMON_GLOBAL_PARAMS       8
+#define FW_IMG_MAIN                    1
+#define REG_FIFO_DEPTH_ELEMENTS                32
+#define REG_FIFO_ELEMENT_DWORDS                2
+#define REG_FIFO_DEPTH_DWORDS \
+       (REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
+#define IGU_FIFO_DEPTH_ELEMENTS                64
+#define IGU_FIFO_ELEMENT_DWORDS                4
+#define IGU_FIFO_DEPTH_DWORDS \
+       (IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
+#define PROTECTION_OVERRIDE_DEPTH_ELEMENTS     20
+#define PROTECTION_OVERRIDE_ELEMENT_DWORDS     2
+#define PROTECTION_OVERRIDE_DEPTH_DWORDS \
+       (PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
+        PROTECTION_OVERRIDE_ELEMENT_DWORDS)
+#define MCP_SPAD_TRACE_OFFSIZE_ADDR \
+       (MCP_REG_SCRATCH + \
+        offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))
+#define MCP_TRACE_META_IMAGE_SIGNATURE  0x669955aa
+#define EMPTY_FW_VERSION_STR           "???_???_???_???"
+#define EMPTY_FW_IMAGE_STR             "???????????????"
+
+/***************************** Constant Arrays *******************************/
+
+/* Debug arrays */
+static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {0} };
+
+/* Chip constant definitions array */
+static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
+       { "bb_b0",
+         { {MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB}, {0, 0, 0},
+           {0, 0, 0}, {0, 0, 0} } },
+       { "k2",
+         { {MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2}, {0, 0, 0},
+           {0, 0, 0}, {0, 0, 0} } }
+};
+
+/* Storm constant definitions array */
+static struct storm_defs s_storm_defs[] = {
+       /* Tstorm */
+       {'T', BLOCK_TSEM,
+        {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, true,
+        TSEM_REG_FAST_MEMORY,
+        TSEM_REG_DBG_FRAME_MODE, TSEM_REG_SLOW_DBG_ACTIVE,
+        TSEM_REG_SLOW_DBG_MODE, TSEM_REG_DBG_MODE1_CFG,
+        TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY,
+        TCM_REG_CTX_RBC_ACCS,
+        4, TCM_REG_AGG_CON_CTX,
+        16, TCM_REG_SM_CON_CTX,
+        2, TCM_REG_AGG_TASK_CTX,
+        4, TCM_REG_SM_TASK_CTX},
+       /* Mstorm */
+       {'M', BLOCK_MSEM,
+        {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, false,
+        MSEM_REG_FAST_MEMORY,
+        MSEM_REG_DBG_FRAME_MODE, MSEM_REG_SLOW_DBG_ACTIVE,
+        MSEM_REG_SLOW_DBG_MODE, MSEM_REG_DBG_MODE1_CFG,
+        MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY,
+        MCM_REG_CTX_RBC_ACCS,
+        1, MCM_REG_AGG_CON_CTX,
+        10, MCM_REG_SM_CON_CTX,
+        2, MCM_REG_AGG_TASK_CTX,
+        7, MCM_REG_SM_TASK_CTX},
+       /* Ustorm */
+       {'U', BLOCK_USEM,
+        {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, false,
+        USEM_REG_FAST_MEMORY,
+        USEM_REG_DBG_FRAME_MODE, USEM_REG_SLOW_DBG_ACTIVE,
+        USEM_REG_SLOW_DBG_MODE, USEM_REG_DBG_MODE1_CFG,
+        USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY,
+        UCM_REG_CTX_RBC_ACCS,
+        2, UCM_REG_AGG_CON_CTX,
+        13, UCM_REG_SM_CON_CTX,
+        3, UCM_REG_AGG_TASK_CTX,
+        3, UCM_REG_SM_TASK_CTX},
+       /* Xstorm */
+       {'X', BLOCK_XSEM,
+        {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, false,
+        XSEM_REG_FAST_MEMORY,
+        XSEM_REG_DBG_FRAME_MODE, XSEM_REG_SLOW_DBG_ACTIVE,
+        XSEM_REG_SLOW_DBG_MODE, XSEM_REG_DBG_MODE1_CFG,
+        XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY,
+        XCM_REG_CTX_RBC_ACCS,
+        9, XCM_REG_AGG_CON_CTX,
+        15, XCM_REG_SM_CON_CTX,
+        0, 0,
+        0, 0},
+       /* Ystorm */
+       {'Y', BLOCK_YSEM,
+        {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, false,
+        YSEM_REG_FAST_MEMORY,
+        YSEM_REG_DBG_FRAME_MODE, YSEM_REG_SLOW_DBG_ACTIVE,
+        YSEM_REG_SLOW_DBG_MODE, YSEM_REG_DBG_MODE1_CFG,
+        YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY,
+        YCM_REG_CTX_RBC_ACCS,
+        2, YCM_REG_AGG_CON_CTX,
+        3, YCM_REG_SM_CON_CTX,
+        2, YCM_REG_AGG_TASK_CTX,
+        12, YCM_REG_SM_TASK_CTX},
+       /* Pstorm */
+       {'P', BLOCK_PSEM,
+        {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, true,
+        PSEM_REG_FAST_MEMORY,
+        PSEM_REG_DBG_FRAME_MODE, PSEM_REG_SLOW_DBG_ACTIVE,
+        PSEM_REG_SLOW_DBG_MODE, PSEM_REG_DBG_MODE1_CFG,
+        PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY,
+        PCM_REG_CTX_RBC_ACCS,
+        0, 0,
+        10, PCM_REG_SM_CON_CTX,
+        0, 0,
+        0, 0}
+};
+
+/* Block definitions array */
+static struct block_defs block_grc_defs = {
+       "grc",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
+       GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
+       GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
+       GRC_REG_DBG_FORCE_FRAME,
+       true, false, DBG_RESET_REG_MISC_PL_UA, 1
+};
+
+static struct block_defs block_miscs_defs = {
+       "miscs", {false, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       0, 0, 0, 0, 0,
+       false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_misc_defs = {
+       "misc", {false, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       0, 0, 0, 0, 0,
+       false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_dbu_defs = {
+       "dbu", {false, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       0, 0, 0, 0, 0,
+       false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_pglue_b_defs = {
+       "pglue_b",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
+       PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
+       PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
+       PGLUE_B_REG_DBG_FORCE_FRAME,
+       true, false, DBG_RESET_REG_MISCS_PL_HV, 1
+};
+
+static struct block_defs block_cnig_defs = {
+       "cnig",
+       {false, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
+       CNIG_REG_DBG_SELECT_K2, CNIG_REG_DBG_DWORD_ENABLE_K2,
+       CNIG_REG_DBG_SHIFT_K2, CNIG_REG_DBG_FORCE_VALID_K2,
+       CNIG_REG_DBG_FORCE_FRAME_K2,
+       true, false, DBG_RESET_REG_MISCS_PL_HV, 0
+};
+
+static struct block_defs block_cpmu_defs = {
+       "cpmu", {false, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       0, 0, 0, 0, 0,
+       true, false, DBG_RESET_REG_MISCS_PL_HV, 8
+};
+
+static struct block_defs block_ncsi_defs = {
+       "ncsi",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+       NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
+       NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
+       NCSI_REG_DBG_FORCE_FRAME,
+       true, false, DBG_RESET_REG_MISCS_PL_HV, 5
+};
+
+static struct block_defs block_opte_defs = {
+       "opte", {false, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       0, 0, 0, 0, 0,
+       true, false, DBG_RESET_REG_MISCS_PL_HV, 4
+};
+
+static struct block_defs block_bmb_defs = {
+       "bmb",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB},
+       BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
+       BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
+       BMB_REG_DBG_FORCE_FRAME,
+       true, false, DBG_RESET_REG_MISCS_PL_UA, 7
+};
+
+static struct block_defs block_pcie_defs = {
+       "pcie",
+       {false, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+       PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
+       PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
+       PCIE_REG_DBG_COMMON_FORCE_FRAME,
+       false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_mcp_defs = {
+       "mcp", {false, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       0, 0, 0, 0, 0,
+       false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_mcp2_defs = {
+       "mcp2",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+       MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
+       MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
+       MCP2_REG_DBG_FORCE_FRAME,
+       false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_pswhst_defs = {
+       "pswhst",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
+       PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
+       PSWHST_REG_DBG_FORCE_FRAME,
+       true, false, DBG_RESET_REG_MISC_PL_HV, 0
+};
+
+static struct block_defs block_pswhst2_defs = {
+       "pswhst2",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
+       PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
+       PSWHST2_REG_DBG_FORCE_FRAME,
+       true, false, DBG_RESET_REG_MISC_PL_HV, 0
+};
+
+static struct block_defs block_pswrd_defs = {
+       "pswrd",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
+       PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
+       PSWRD_REG_DBG_FORCE_FRAME,
+       true, false, DBG_RESET_REG_MISC_PL_HV, 2
+};
+
+static struct block_defs block_pswrd2_defs = {
+       "pswrd2",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
+       PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
+       PSWRD2_REG_DBG_FORCE_FRAME,
+       true, false, DBG_RESET_REG_MISC_PL_HV, 2
+};
+
+static struct block_defs block_pswwr_defs = {
+       "pswwr",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
+       PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
+       PSWWR_REG_DBG_FORCE_FRAME,
+       true, false, DBG_RESET_REG_MISC_PL_HV, 3
+};
+
+static struct block_defs block_pswwr2_defs = {
+       "pswwr2", {false, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       0, 0, 0, 0, 0,
+       true, false, DBG_RESET_REG_MISC_PL_HV, 3
+};
+
+static struct block_defs block_pswrq_defs = {
+       "pswrq",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
+       PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
+       PSWRQ_REG_DBG_FORCE_FRAME,
+       true, false, DBG_RESET_REG_MISC_PL_HV, 1
+};
+
+static struct block_defs block_pswrq2_defs = {
+       "pswrq2",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
+       PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
+       PSWRQ2_REG_DBG_FORCE_FRAME,
+       true, false, DBG_RESET_REG_MISC_PL_HV, 1
+};
+
+static struct block_defs block_pglcs_defs = {
+       "pglcs",
+       {false, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+       PGLCS_REG_DBG_SELECT, PGLCS_REG_DBG_DWORD_ENABLE,
+       PGLCS_REG_DBG_SHIFT, PGLCS_REG_DBG_FORCE_VALID,
+       PGLCS_REG_DBG_FORCE_FRAME,
+       true, false, DBG_RESET_REG_MISCS_PL_HV, 2
+};
+
+static struct block_defs block_ptu_defs = {
+       "ptu",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
+       PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
+       PTU_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 20
+};
+
+static struct block_defs block_dmae_defs = {
+       "dmae",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
+       DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
+       DMAE_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 28
+};
+
+static struct block_defs block_tcm_defs = {
+       "tcm",
+       {true, true}, true, DBG_TSTORM_ID,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+       TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
+       TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
+       TCM_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 5
+};
+
+static struct block_defs block_mcm_defs = {
+       "mcm",
+       {true, true}, true, DBG_MSTORM_ID,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+       MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
+       MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
+       MCM_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 3
+};
+
+static struct block_defs block_ucm_defs = {
+       "ucm",
+       {true, true}, true, DBG_USTORM_ID,
+       {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+       UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
+       UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
+       UCM_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 8
+};
+
+static struct block_defs block_xcm_defs = {
+       "xcm",
+       {true, true}, true, DBG_XSTORM_ID,
+       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+       XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
+       XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
+       XCM_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 19
+};
+
+static struct block_defs block_ycm_defs = {
+       "ycm",
+       {true, true}, true, DBG_YSTORM_ID,
+       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+       YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
+       YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
+       YCM_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 5
+};
+
+static struct block_defs block_pcm_defs = {
+       "pcm",
+       {true, true}, true, DBG_PSTORM_ID,
+       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+       PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
+       PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
+       PCM_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 4
+};
+
+static struct block_defs block_qm_defs = {
+       "qm",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ},
+       QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
+       QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
+       QM_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 16
+};
+
+static struct block_defs block_tm_defs = {
+       "tm",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+       TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
+       TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
+       TM_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 17
+};
+
+static struct block_defs block_dorq_defs = {
+       "dorq",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+       DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
+       DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
+       DORQ_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 18
+};
+
+static struct block_defs block_brb_defs = {
+       "brb",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
+       BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
+       BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
+       BRB_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 0
+};
+
+static struct block_defs block_src_defs = {
+       "src",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+       SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
+       SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
+       SRC_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 2
+};
+
+static struct block_defs block_prs_defs = {
+       "prs",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
+       PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
+       PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
+       PRS_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 1
+};
+
+static struct block_defs block_tsdm_defs = {
+       "tsdm",
+       {true, true}, true, DBG_TSTORM_ID,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+       TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
+       TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
+       TSDM_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 3
+};
+
+static struct block_defs block_msdm_defs = {
+       "msdm",
+       {true, true}, true, DBG_MSTORM_ID,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+       MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
+       MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
+       MSDM_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 6
+};
+
+static struct block_defs block_usdm_defs = {
+       "usdm",
+       {true, true}, true, DBG_USTORM_ID,
+       {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+       USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
+       USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
+       USDM_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 7
+};
+
+static struct block_defs block_xsdm_defs = {
+       "xsdm",
+       {true, true}, true, DBG_XSTORM_ID,
+       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+       XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
+       XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
+       XSDM_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 20
+};
+
+static struct block_defs block_ysdm_defs = {
+       "ysdm",
+       {true, true}, true, DBG_YSTORM_ID,
+       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+       YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
+       YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
+       YSDM_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 8
+};
+
+static struct block_defs block_psdm_defs = {
+       "psdm",
+       {true, true}, true, DBG_PSTORM_ID,
+       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+       PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
+       PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
+       PSDM_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 7
+};
+
+static struct block_defs block_tsem_defs = {
+       "tsem",
+       {true, true}, true, DBG_TSTORM_ID,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+       TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
+       TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
+       TSEM_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 4
+};
+
+static struct block_defs block_msem_defs = {
+       "msem",
+       {true, true}, true, DBG_MSTORM_ID,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+       MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
+       MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
+       MSEM_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 9
+};
+
+static struct block_defs block_usem_defs = {
+       "usem",
+       {true, true}, true, DBG_USTORM_ID,
+       {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+       USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
+       USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
+       USEM_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 9
+};
+
+static struct block_defs block_xsem_defs = {
+       "xsem",
+       {true, true}, true, DBG_XSTORM_ID,
+       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+       XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
+       XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
+       XSEM_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 21
+};
+
+static struct block_defs block_ysem_defs = {
+       "ysem",
+       {true, true}, true, DBG_YSTORM_ID,
+       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+       YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
+       YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
+       YSEM_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 11
+};
+
+static struct block_defs block_psem_defs = {
+       "psem",
+       {true, true}, true, DBG_PSTORM_ID,
+       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+       PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
+       PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
+       PSEM_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 10
+};
+
+static struct block_defs block_rss_defs = {
+       "rss",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+       RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
+       RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
+       RSS_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 18
+};
+
+static struct block_defs block_tmld_defs = {
+       "tmld",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+       TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
+       TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
+       TMLD_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 13
+};
+
+static struct block_defs block_muld_defs = {
+       "muld",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+       MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
+       MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
+       MULD_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 14
+};
+
+static struct block_defs block_yuld_defs = {
+       "yuld",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+       YULD_REG_DBG_SELECT, YULD_REG_DBG_DWORD_ENABLE,
+       YULD_REG_DBG_SHIFT, YULD_REG_DBG_FORCE_VALID,
+       YULD_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 15
+};
+
+static struct block_defs block_xyld_defs = {
+       "xyld",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+       XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
+       XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
+       XYLD_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12
+};
+
+static struct block_defs block_prm_defs = {
+       "prm",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+       PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
+       PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
+       PRM_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 21
+};
+
+static struct block_defs block_pbf_pb1_defs = {
+       "pbf_pb1",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+       PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
+       PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
+       PBF_PB1_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 11
+};
+
+static struct block_defs block_pbf_pb2_defs = {
+       "pbf_pb2",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+       PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
+       PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
+       PBF_PB2_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 12
+};
+
+static struct block_defs block_rpb_defs = {
+       "rpb",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+       RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
+       RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
+       RPB_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 13
+};
+
+static struct block_defs block_btb_defs = {
+       "btb",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV},
+       BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
+       BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
+       BTB_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 10
+};
+
+static struct block_defs block_pbf_defs = {
+       "pbf",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+       PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
+       PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
+       PBF_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 15
+};
+
+static struct block_defs block_rdif_defs = {
+       "rdif",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+       RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
+       RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
+       RDIF_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 16
+};
+
+static struct block_defs block_tdif_defs = {
+       "tdif",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+       TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
+       TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
+       TDIF_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 17
+};
+
+static struct block_defs block_cdu_defs = {
+       "cdu",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+       CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
+       CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
+       CDU_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 23
+};
+
+static struct block_defs block_ccfc_defs = {
+       "ccfc",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+       CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
+       CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
+       CCFC_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 24
+};
+
+static struct block_defs block_tcfc_defs = {
+       "tcfc",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+       TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
+       TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
+       TCFC_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 25
+};
+
+static struct block_defs block_igu_defs = {
+       "igu",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
+       IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
+       IGU_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 27
+};
+
+static struct block_defs block_cau_defs = {
+       "cau",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
+       CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
+       CAU_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19
+};
+
+static struct block_defs block_umac_defs = {
+       "umac",
+       {false, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
+       UMAC_REG_DBG_SELECT, UMAC_REG_DBG_DWORD_ENABLE,
+       UMAC_REG_DBG_SHIFT, UMAC_REG_DBG_FORCE_VALID,
+       UMAC_REG_DBG_FORCE_FRAME,
+       true, false, DBG_RESET_REG_MISCS_PL_HV, 6
+};
+
+static struct block_defs block_xmac_defs = {
+       "xmac", {false, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       0, 0, 0, 0, 0,
+       false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_dbg_defs = {
+       "dbg", {false, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       0, 0, 0, 0, 0,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3
+};
+
+static struct block_defs block_nig_defs = {
+       "nig",
+       {true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
+       NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
+       NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
+       NIG_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 0
+};
+
+static struct block_defs block_wol_defs = {
+       "wol",
+       {false, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
+       WOL_REG_DBG_SELECT, WOL_REG_DBG_DWORD_ENABLE,
+       WOL_REG_DBG_SHIFT, WOL_REG_DBG_FORCE_VALID,
+       WOL_REG_DBG_FORCE_FRAME,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7
+};
+
+static struct block_defs block_bmbn_defs = {
+       "bmbn",
+       {false, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB},
+       BMBN_REG_DBG_SELECT, BMBN_REG_DBG_DWORD_ENABLE,
+       BMBN_REG_DBG_SHIFT, BMBN_REG_DBG_FORCE_VALID,
+       BMBN_REG_DBG_FORCE_FRAME,
+       false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_ipc_defs = {
+       "ipc", {false, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       0, 0, 0, 0, 0,
+       true, false, DBG_RESET_REG_MISCS_PL_UA, 8
+};
+
+static struct block_defs block_nwm_defs = {
+       "nwm",
+       {false, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
+       NWM_REG_DBG_SELECT, NWM_REG_DBG_DWORD_ENABLE,
+       NWM_REG_DBG_SHIFT, NWM_REG_DBG_FORCE_VALID,
+       NWM_REG_DBG_FORCE_FRAME,
+       true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0
+};
+
+static struct block_defs block_nws_defs = {
+       "nws",
+       {false, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
+       NWS_REG_DBG_SELECT, NWS_REG_DBG_DWORD_ENABLE,
+       NWS_REG_DBG_SHIFT, NWS_REG_DBG_FORCE_VALID,
+       NWS_REG_DBG_FORCE_FRAME,
+       true, false, DBG_RESET_REG_MISCS_PL_HV, 12
+};
+
+static struct block_defs block_ms_defs = {
+       "ms",
+       {false, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
+       MS_REG_DBG_SELECT, MS_REG_DBG_DWORD_ENABLE,
+       MS_REG_DBG_SHIFT, MS_REG_DBG_FORCE_VALID,
+       MS_REG_DBG_FORCE_FRAME,
+       true, false, DBG_RESET_REG_MISCS_PL_HV, 13
+};
+
+static struct block_defs block_phy_pcie_defs = {
+       "phy_pcie",
+       {false, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+       PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
+       PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
+       PCIE_REG_DBG_COMMON_FORCE_FRAME,
+       false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_led_defs = {
+       "led", {false, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       0, 0, 0, 0, 0,
+       true, false, DBG_RESET_REG_MISCS_PL_HV, 14
+};
+
+static struct block_defs block_avs_wrap_defs = {
+       "avs_wrap", {false, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       0, 0, 0, 0, 0,
+       true, false, DBG_RESET_REG_MISCS_PL_UA, 11
+};
+
+static struct block_defs block_rgfs_defs = {
+       "rgfs", {false, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       0, 0, 0, 0, 0,
+       false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_tgfs_defs = {
+       "tgfs", {false, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       0, 0, 0, 0, 0,
+       false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_ptld_defs = {
+       "ptld", {false, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       0, 0, 0, 0, 0,
+       false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_ypld_defs = {
+       "ypld", {false, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       0, 0, 0, 0, 0,
+       false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_misc_aeu_defs = {
+       "misc_aeu", {false, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       0, 0, 0, 0, 0,
+       false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_bar0_map_defs = {
+       "bar0_map", {false, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       0, 0, 0, 0, 0,
+       false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
+       &block_grc_defs,
+       &block_miscs_defs,
+       &block_misc_defs,
+       &block_dbu_defs,
+       &block_pglue_b_defs,
+       &block_cnig_defs,
+       &block_cpmu_defs,
+       &block_ncsi_defs,
+       &block_opte_defs,
+       &block_bmb_defs,
+       &block_pcie_defs,
+       &block_mcp_defs,
+       &block_mcp2_defs,
+       &block_pswhst_defs,
+       &block_pswhst2_defs,
+       &block_pswrd_defs,
+       &block_pswrd2_defs,
+       &block_pswwr_defs,
+       &block_pswwr2_defs,
+       &block_pswrq_defs,
+       &block_pswrq2_defs,
+       &block_pglcs_defs,
+       &block_dmae_defs,
+       &block_ptu_defs,
+       &block_tcm_defs,
+       &block_mcm_defs,
+       &block_ucm_defs,
+       &block_xcm_defs,
+       &block_ycm_defs,
+       &block_pcm_defs,
+       &block_qm_defs,
+       &block_tm_defs,
+       &block_dorq_defs,
+       &block_brb_defs,
+       &block_src_defs,
+       &block_prs_defs,
+       &block_tsdm_defs,
+       &block_msdm_defs,
+       &block_usdm_defs,
+       &block_xsdm_defs,
+       &block_ysdm_defs,
+       &block_psdm_defs,
+       &block_tsem_defs,
+       &block_msem_defs,
+       &block_usem_defs,
+       &block_xsem_defs,
+       &block_ysem_defs,
+       &block_psem_defs,
+       &block_rss_defs,
+       &block_tmld_defs,
+       &block_muld_defs,
+       &block_yuld_defs,
+       &block_xyld_defs,
+       &block_prm_defs,
+       &block_pbf_pb1_defs,
+       &block_pbf_pb2_defs,
+       &block_rpb_defs,
+       &block_btb_defs,
+       &block_pbf_defs,
+       &block_rdif_defs,
+       &block_tdif_defs,
+       &block_cdu_defs,
+       &block_ccfc_defs,
+       &block_tcfc_defs,
+       &block_igu_defs,
+       &block_cau_defs,
+       &block_umac_defs,
+       &block_xmac_defs,
+       &block_dbg_defs,
+       &block_nig_defs,
+       &block_wol_defs,
+       &block_bmbn_defs,
+       &block_ipc_defs,
+       &block_nwm_defs,
+       &block_nws_defs,
+       &block_ms_defs,
+       &block_phy_pcie_defs,
+       &block_led_defs,
+       &block_avs_wrap_defs,
+       &block_rgfs_defs,
+       &block_tgfs_defs,
+       &block_ptld_defs,
+       &block_ypld_defs,
+       &block_misc_aeu_defs,
+       &block_bar0_map_defs,
+};
+
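+/* Reading the block_defs initializers above (field order per struct
+ * block_defs, defined earlier in this file): block name; debug bus
+ * availability per chip {BB, K2}; whether the block is associated to a
+ * Storm, and the Storm ID; debug bus client per chip; the five debug bus
+ * control registers (select, dword enable, shift, force valid, force
+ * frame); and the reset info - has a reset bit, take out of reset before
+ * dumping, reset register, bit offset. E.g. block_igu_defs: debug bus on
+ * both chips via client RBCP, not Storm-associated, reset bit 27 of
+ * DBG_RESET_REG_MISC_PL_PDA_VMAIN_1.
+ */
+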
+static struct platform_defs s_platform_defs[] = {
+       {"asic", 1},
+       {"reserved", 0},
+       {"reserved2", 0},
+       {"reserved3", 0}
+};
+
+static struct grc_param_defs s_grc_param_defs[] = {
+       {{1, 1}, 0, 1, false, 1, 1},    /* DBG_GRC_PARAM_DUMP_TSTORM */
+       {{1, 1}, 0, 1, false, 1, 1},    /* DBG_GRC_PARAM_DUMP_MSTORM */
+       {{1, 1}, 0, 1, false, 1, 1},    /* DBG_GRC_PARAM_DUMP_USTORM */
+       {{1, 1}, 0, 1, false, 1, 1},    /* DBG_GRC_PARAM_DUMP_XSTORM */
+       {{1, 1}, 0, 1, false, 1, 1},    /* DBG_GRC_PARAM_DUMP_YSTORM */
+       {{1, 1}, 0, 1, false, 1, 1},    /* DBG_GRC_PARAM_DUMP_PSTORM */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_REGS */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_RAM */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_PBUF */
+       {{0, 0}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_IOR */
+       {{0, 0}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_VFC */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_CM_CTX */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_ILT */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_RSS */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_CAU */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_QM */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_MCP */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_RESERVED */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_CFC */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_IGU */
+       {{0, 0}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_BRB */
+       {{0, 0}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_BTB */
+       {{0, 0}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_BMB */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_NIG */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_MULD */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_PRS */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_DMAE */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_TM */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_SDM */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_DIF */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_STATIC */
+       {{0, 0}, 0, 1, false, 0, 0},    /* DBG_GRC_PARAM_UNSTALL */
+       {{MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
+        MAX_LCIDS},                    /* DBG_GRC_PARAM_NUM_LCIDS */
+       {{MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
+        MAX_LTIDS},                    /* DBG_GRC_PARAM_NUM_LTIDS */
+       {{0, 0}, 0, 1, true, 0, 0},     /* DBG_GRC_PARAM_EXCLUDE_ALL */
+       {{0, 0}, 0, 1, true, 0, 0},     /* DBG_GRC_PARAM_CRASH */
+       {{0, 0}, 0, 1, false, 1, 0},    /* DBG_GRC_PARAM_PARITY_SAFE */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_CM */
+       {{1, 1}, 0, 1, false, 0, 1},    /* DBG_GRC_PARAM_DUMP_PHY */
+       {{0, 0}, 0, 1, false, 0, 0},    /* DBG_GRC_PARAM_NO_MCP */
+       {{0, 0}, 0, 1, false, 0, 0}     /* DBG_GRC_PARAM_NO_FW_VER */
+};
+
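+/* Each s_grc_param_defs row reads: {default value per chip {BB, K2}}, min,
+ * max, is_preset, and the values applied by the EXCLUDE_ALL and CRASH
+ * presets (field order assumed from struct grc_param_defs). For example,
+ * DBG_GRC_PARAM_DUMP_REGS defaults to 1 on both chips, is bounded to 0..1,
+ * is cleared by EXCLUDE_ALL and set back to 1 by the CRASH preset.
+ */
+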
+static struct rss_mem_defs s_rss_mem_defs[] = {
+       { "rss_mem_cid", "rss_cid", 0,
+         {256, 320},
+         {32, 32} },
+       { "rss_mem_key_msb", "rss_key", 1024,
+         {128, 208},
+         {256, 256} },
+       { "rss_mem_key_lsb", "rss_key", 2048,
+         {128, 208},
+         {64, 64} },
+       { "rss_mem_info", "rss_info", 3072,
+         {128, 208},
+         {16, 16} },
+       { "rss_mem_ind", "rss_ind", 4096,
+         {(128 * 128), (128 * 208)},
+         {16, 16} }
+};
+
+static struct vfc_ram_defs s_vfc_ram_defs[] = {
+       {"vfc_ram_tt1", "vfc_ram", 0, 512},
+       {"vfc_ram_mtt2", "vfc_ram", 512, 128},
+       {"vfc_ram_stt2", "vfc_ram", 640, 32},
+       {"vfc_ram_ro_vect", "vfc_ram", 672, 32}
+};
+
+static struct big_ram_defs s_big_ram_defs[] = {
+       { "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
+         BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
+         {4800, 5632} },
+       { "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
+         BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
+         {2880, 3680} },
+       { "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
+         BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
+         {1152, 1152} }
+};
+
+static struct reset_reg_defs s_reset_regs_defs[] = {
+       { MISCS_REG_RESET_PL_UA, 0x0,
+         {true, true} },               /* DBG_RESET_REG_MISCS_PL_UA */
+       { MISCS_REG_RESET_PL_HV, 0x0,
+         {true, true} },               /* DBG_RESET_REG_MISCS_PL_HV */
+       { MISCS_REG_RESET_PL_HV_2, 0x0,
+         {false, true} },      /* DBG_RESET_REG_MISCS_PL_HV_2 */
+       { MISC_REG_RESET_PL_UA, 0x0,
+         {true, true} },               /* DBG_RESET_REG_MISC_PL_UA */
+       { MISC_REG_RESET_PL_HV, 0x0,
+         {true, true} },               /* DBG_RESET_REG_MISC_PL_HV */
+       { MISC_REG_RESET_PL_PDA_VMAIN_1, 0x4404040,
+         {true, true} },               /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
+       { MISC_REG_RESET_PL_PDA_VMAIN_2, 0x7c00007,
+         {true, true} },               /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
+       { MISC_REG_RESET_PL_PDA_VAUX, 0x2,
+         {true, true} },               /* DBG_RESET_REG_MISC_PL_PDA_VAUX */
+};
+
+static struct phy_defs s_phy_defs[] = {
+       {"nw_phy", NWS_REG_NWS_CMU, PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0,
+        PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8,
+        PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0,
+        PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8},
+       {"sgmii_phy", MS_REG_MS_CMU, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132,
+        PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133,
+        PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130,
+        PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131},
+       {"pcie_phy0", PHY_PCIE_REG_PHY0, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132,
+        PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133,
+        PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130,
+        PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131},
+       {"pcie_phy1", PHY_PCIE_REG_PHY1, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132,
+        PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133,
+        PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130,
+        PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131},
+};
+
+/**************************** Private Functions ******************************/
+
+/* Reads and returns a single dword from the specified unaligned buffer */
+static u32 qed_read_unaligned_dword(u8 *buf)
+{
+       u32 dword;
+
+       memcpy((u8 *)&dword, buf, sizeof(dword));
+       return dword;
+}
+
+/* Returns the value of the specified GRC param */
+static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
+                            enum dbg_grc_params grc_param)
+{
+       struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
+       return dev_data->grc.param_val[grc_param];
+}
+
+/* Initializes the GRC parameters */
+static void qed_dbg_grc_init_params(struct qed_hwfn *p_hwfn)
+{
+       struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
+       if (!dev_data->grc.params_initialized) {
+               qed_dbg_grc_set_params_default(p_hwfn);
+               dev_data->grc.params_initialized = 1;
+       }
+}
+
+/* Initializes debug data for the specified device */
+static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
+                                       struct qed_ptt *p_ptt)
+{
+       struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
+       if (dev_data->initialized)
+               return DBG_STATUS_OK;
+
+       if (QED_IS_K2(p_hwfn->cdev)) {
+               dev_data->chip_id = CHIP_K2;
+               dev_data->mode_enable[MODE_K2] = 1;
+       } else if (QED_IS_BB_B0(p_hwfn->cdev)) {
+               dev_data->chip_id = CHIP_BB_B0;
+               dev_data->mode_enable[MODE_BB] = 1;
+       } else {
+               return DBG_STATUS_UNKNOWN_CHIP;
+       }
+
+       dev_data->platform_id = PLATFORM_ASIC;
+       dev_data->mode_enable[MODE_ASIC] = 1;
+
+       /* Initializes the GRC parameters */
+       qed_dbg_grc_init_params(p_hwfn);
+
+       dev_data->initialized = true;
+       return DBG_STATUS_OK;
+}
+
+/* Reads the FW info structure for the specified Storm from the chip,
+ * and writes it to the specified fw_info pointer.
+ */
+static void qed_read_fw_info(struct qed_hwfn *p_hwfn,
+                            struct qed_ptt *p_ptt,
+                            u8 storm_id, struct fw_info *fw_info)
+{
+       /* First read the address that points to the fw_info location.
+        * The address is stored in the last line of the Storm RAM.
+        */
+       u32 addr = s_storm_defs[storm_id].sem_fast_mem_addr +
+                  SEM_FAST_REG_INT_RAM +
+                  DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) -
+                  sizeof(struct fw_info_location);
+       struct fw_info_location fw_info_location;
+       u32 *dest = (u32 *)&fw_info_location;
+       u32 i;
+
+       memset(&fw_info_location, 0, sizeof(fw_info_location));
+       memset(fw_info, 0, sizeof(*fw_info));
+       for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location));
+            i++, addr += BYTES_IN_DWORD)
+               dest[i] = qed_rd(p_hwfn, p_ptt, addr);
+       if (fw_info_location.size > 0 && fw_info_location.size <=
+           sizeof(*fw_info)) {
+               /* Read FW version info from Storm RAM */
+               addr = fw_info_location.grc_addr;
+               dest = (u32 *)fw_info;
+               for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size);
+                    i++, addr += BYTES_IN_DWORD)
+                       dest[i] = qed_rd(p_hwfn, p_ptt, addr);
+       }
+}
+
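+/* Note: the struct fw_info_location at the end of the Storm's INT_RAM acts
+ * as a pointer. It is read first, sanity-checked against sizeof(struct
+ * fw_info), and only then is its grc_addr dereferenced, dword by dword, to
+ * fetch the fw_info itself.
+ */
+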
+/* Dumps the specified string to the specified buffer. Returns the dumped size
+ * in bytes (string length + 1 for the terminating null character).
+ */
+static u32 qed_dump_str(char *dump_buf, bool dump, const char *str)
+{
+       if (dump)
+               strcpy(dump_buf, str);
+       return (u32)strlen(str) + 1;
+}
+
+/* Dumps zeros to align the specified buffer to dwords. Returns the dumped size
+ * in bytes.
+ */
+static u32 qed_dump_align(char *dump_buf, bool dump, u32 byte_offset)
+{
+       u8 offset_in_dword = (u8)(byte_offset & 0x3), align_size;
+
+       align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
+
+       if (dump && align_size)
+               memset(dump_buf, 0, align_size);
+       return align_size;
+}
+
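+/* E.g. a byte_offset of 6 has offset_in_dword 2, so 2 zero bytes are dumped
+ * to reach the next dword boundary; an already-aligned offset dumps nothing.
+ */
+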
+/* Writes the specified string param to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_dump_str_param(u32 *dump_buf,
+                             bool dump,
+                             const char *param_name, const char *param_val)
+{
+       char *char_buf = (char *)dump_buf;
+       u32 offset = 0;
+
+       /* Dump param name */
+       offset += qed_dump_str(char_buf + offset, dump, param_name);
+
+       /* Indicate a string param value */
+       if (dump)
+               *(char_buf + offset) = 1;
+       offset++;
+
+       /* Dump param value */
+       offset += qed_dump_str(char_buf + offset, dump, param_val);
+
+       /* Align buffer to next dword */
+       offset += qed_dump_align(char_buf + offset, dump, offset);
+       return BYTES_TO_DWORDS(offset);
+}
+
+/* Writes the specified numeric param to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_dump_num_param(u32 *dump_buf,
+                             bool dump, const char *param_name, u32 param_val)
+{
+       char *char_buf = (char *)dump_buf;
+       u32 offset = 0;
+
+       /* Dump param name */
+       offset += qed_dump_str(char_buf + offset, dump, param_name);
+
+       /* Indicate a numeric param value */
+       if (dump)
+               *(char_buf + offset) = 0;
+       offset++;
+
+       /* Align buffer to next dword */
+       offset += qed_dump_align(char_buf + offset, dump, offset);
+
+       /* Dump param value (and change offset from bytes to dwords) */
+       offset = BYTES_TO_DWORDS(offset);
+       if (dump)
+               *(dump_buf + offset) = param_val;
+       offset++;
+       return offset;
+}
+
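+/* Dump params are thus self-describing: a null-terminated param name, a
+ * one-byte type tag (1 = string, 0 = numeric), then either a null-terminated
+ * string value or, after dword alignment, a raw dword value. A hypothetical
+ * param "name" = "val" would be laid out as:
+ *   'n' 'a' 'm' 'e' '\0' 0x01 'v' 'a' 'l' '\0' <zero pad to next dword>
+ */
+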
+/* Reads the FW version and writes it as a param to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
+                                struct qed_ptt *p_ptt,
+                                u32 *dump_buf, bool dump)
+{
+       struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+       char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
+       char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
+       struct fw_info fw_info = { {0}, {0} };
+       int printed_chars;
+       u32 offset = 0;
+
+       if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
+               /* Read FW image/version from PRAM in a non-reset SEMI */
+               bool found = false;
+               u8 storm_id;
+
+               for (storm_id = 0; storm_id < MAX_DBG_STORMS && !found;
+                    storm_id++) {
+                       /* Read FW version/image */
+                       if (!dev_data->block_in_reset
+                           [s_storm_defs[storm_id].block_id]) {
+                               /* read FW info for the current Storm */
+                               qed_read_fw_info(p_hwfn,
+                                                p_ptt, storm_id, &fw_info);
+
+                               /* Create FW version/image strings */
+                               printed_chars =
+                                   snprintf(fw_ver_str,
+                                            sizeof(fw_ver_str),
+                                            "%d_%d_%d_%d",
+                                            fw_info.ver.num.major,
+                                            fw_info.ver.num.minor,
+                                            fw_info.ver.num.rev,
+                                            fw_info.ver.num.eng);
+                               if (printed_chars < 0 || printed_chars >=
+                                   sizeof(fw_ver_str))
+                                       DP_NOTICE(p_hwfn,
+                                                 "Unexpected debug error: invalid FW version string\n");
+                               switch (fw_info.ver.image_id) {
+                               case FW_IMG_MAIN:
+                                       strcpy(fw_img_str, "main");
+                                       break;
+                               default:
+                                       strcpy(fw_img_str, "unknown");
+                                       break;
+                               }
+
+                               found = true;
+                       }
+               }
+       }
+
+       /* Dump FW version, image and timestamp */
+       offset += qed_dump_str_param(dump_buf + offset,
+                                    dump, "fw-version", fw_ver_str);
+       offset += qed_dump_str_param(dump_buf + offset,
+                                    dump, "fw-image", fw_img_str);
+       offset += qed_dump_num_param(dump_buf + offset,
+                                    dump,
+                                    "fw-timestamp", fw_info.ver.timestamp);
+       return offset;
+}
+
+/* Reads the MFW version and writes it as a param to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
+                                 struct qed_ptt *p_ptt,
+                                 u32 *dump_buf, bool dump)
+{
+       char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
+
+       if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
+               u32 global_section_offsize, global_section_addr, mfw_ver;
+               u32 public_data_addr, global_section_offsize_addr;
+               int printed_chars;
+
+               /* Find MCP public data GRC address.
+                * Needs to be ORed with MCP_REG_SCRATCH due to a HW bug.
+                */
+               public_data_addr = qed_rd(p_hwfn, p_ptt,
+                                         MISC_REG_SHARED_MEM_ADDR) |
+                                         MCP_REG_SCRATCH;
+
+               /* Find MCP public global section offset */
+               global_section_offsize_addr = public_data_addr +
+                                             offsetof(struct mcp_public_data,
+                                                      sections) +
+                                             sizeof(offsize_t) * PUBLIC_GLOBAL;
+               global_section_offsize = qed_rd(p_hwfn, p_ptt,
+                                               global_section_offsize_addr);
+               global_section_addr = MCP_REG_SCRATCH +
+                                     (global_section_offsize &
+                                      OFFSIZE_OFFSET_MASK) * 4;
+
+               /* Read MFW version from MCP public global section */
+               mfw_ver = qed_rd(p_hwfn, p_ptt,
+                                global_section_addr +
+                                offsetof(struct public_global, mfw_ver));
+
+               /* Dump MFW version param */
+               printed_chars = snprintf(mfw_ver_str, sizeof(mfw_ver_str),
+                                        "%d_%d_%d_%d",
+                                        (u8) (mfw_ver >> 24),
+                                        (u8) (mfw_ver >> 16),
+                                        (u8) (mfw_ver >> 8),
+                                        (u8) mfw_ver);
+               if (printed_chars < 0 || printed_chars >= sizeof(mfw_ver_str))
+                       DP_NOTICE(p_hwfn,
+                                 "Unexpected debug error: invalid MFW version string\n");
+       }
+
+       return qed_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
+}
+
+/* Writes a section header to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_dump_section_hdr(u32 *dump_buf,
+                               bool dump, const char *name, u32 num_params)
+{
+       return qed_dump_num_param(dump_buf, dump, name, num_params);
+}
+
+/* Writes the common global params to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
+                                        struct qed_ptt *p_ptt,
+                                        u32 *dump_buf,
+                                        bool dump,
+                                        u8 num_specific_global_params)
+{
+       u8 num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params;
+       struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+       u32 offset = 0;
+
+       /* Dump the global params section header */
+       offset += qed_dump_section_hdr(dump_buf + offset,
+                                      dump, "global_params", num_params);
+
+       /* Store params */
+       offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
+       offset += qed_dump_mfw_ver_param(p_hwfn,
+                                        p_ptt, dump_buf + offset, dump);
+       offset += qed_dump_num_param(dump_buf + offset,
+                                    dump, "tools-version", TOOLS_VERSION);
+       offset += qed_dump_str_param(dump_buf + offset,
+                                    dump,
+                                    "chip",
+                                    s_chip_defs[dev_data->chip_id].name);
+       offset += qed_dump_str_param(dump_buf + offset,
+                                    dump,
+                                    "platform",
+                                    s_platform_defs[dev_data->platform_id].
+                                    name);
+       offset +=
+           qed_dump_num_param(dump_buf + offset, dump, "pci-func",
+                              p_hwfn->abs_pf_id);
+       return offset;
+}
+
+/* Writes the last section to the specified buffer at the given offset.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
+{
+       u32 start_offset = offset, crc = ~0;
+
+       /* Dump CRC section header */
+       offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
+
+       /* Calculate CRC32 and add it to the dword following the "last" section.
+        */
+       if (dump)
+               *(dump_buf + offset) = ~crc32(crc, (u8 *)dump_buf,
+                                             DWORDS_TO_BYTES(offset));
+       offset++;
+       return offset - start_offset;
+}
+
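+/* A complete dump therefore ends with a "last" section header followed by a
+ * single dword holding the bitwise NOT of the CRC32 of all preceding dwords;
+ * a parser can validate a dump by recomputing that CRC over everything up to
+ * (but not including) the final dword.
+ */
+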
+/* Update blocks reset state */
+static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn,
+                                         struct qed_ptt *p_ptt)
+{
+       struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+       u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
+       u32 i;
+
+       /* Read reset registers */
+       for (i = 0; i < MAX_DBG_RESET_REGS; i++)
+               if (s_reset_regs_defs[i].exists[dev_data->chip_id])
+                       reg_val[i] = qed_rd(p_hwfn,
+                                           p_ptt, s_reset_regs_defs[i].addr);
+
+       /* Check if blocks are in reset */
+       for (i = 0; i < MAX_BLOCK_ID; i++)
+               dev_data->block_in_reset[i] =
+                   s_block_defs[i]->has_reset_bit &&
+                   !(reg_val[s_block_defs[i]->reset_reg] &
+                     BIT(s_block_defs[i]->reset_bit_offset));
+}
+
+/* Enable / disable the Debug block */
+static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn,
+                                    struct qed_ptt *p_ptt, bool enable)
+{
+       qed_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0);
+}
+
+/* Resets the Debug block */
+static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn,
+                                   struct qed_ptt *p_ptt)
+{
+       u32 dbg_reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
+
+       dbg_reset_reg_addr =
+               s_reset_regs_defs[s_block_defs[BLOCK_DBG]->reset_reg].addr;
+       old_reset_reg_val = qed_rd(p_hwfn, p_ptt, dbg_reset_reg_addr);
+       new_reset_reg_val = old_reset_reg_val &
+                           ~BIT(s_block_defs[BLOCK_DBG]->reset_bit_offset);
+
+       qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, new_reset_reg_val);
+       qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, old_reset_reg_val);
+}
+
+static void qed_bus_set_framing_mode(struct qed_hwfn *p_hwfn,
+                                    struct qed_ptt *p_ptt,
+                                    enum dbg_bus_frame_modes mode)
+{
+       qed_wr(p_hwfn, p_ptt, DBG_REG_FRAMING_MODE, (u8)mode);
+}
+
+/* Enable / disable Debug Bus clients according to the specified mask.
+ * (1 = enable, 0 = disable)
+ */
+static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn,
+                                  struct qed_ptt *p_ptt, u32 client_mask)
+{
+       qed_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
+}
+
+static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
+{
+       const u32 *ptr = s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
+       struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+       u8 tree_val = ((u8 *)ptr)[(*modes_buf_offset)++];
+       bool arg1, arg2;
+
+       switch (tree_val) {
+       case INIT_MODE_OP_NOT:
+               return !qed_is_mode_match(p_hwfn, modes_buf_offset);
+       case INIT_MODE_OP_OR:
+       case INIT_MODE_OP_AND:
+               arg1 = qed_is_mode_match(p_hwfn, modes_buf_offset);
+               arg2 = qed_is_mode_match(p_hwfn, modes_buf_offset);
+               return (tree_val == INIT_MODE_OP_OR) ? (arg1 ||
+                                                       arg2) : (arg1 && arg2);
+       default:
+               return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
+       }
+}
+
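+/* The mode tree is a prefix-notation boolean expression over the enabled
+ * modes: each byte is either an operator (INIT_MODE_OP_NOT takes one
+ * operand, INIT_MODE_OP_OR/AND take two) or a leaf encoded as
+ * mode_id + MAX_INIT_MODE_OPS. For example, the byte sequence
+ * {INIT_MODE_OP_AND, MODE_ASIC + MAX_INIT_MODE_OPS, INIT_MODE_OP_NOT,
+ * MODE_K2 + MAX_INIT_MODE_OPS} evaluates to
+ * mode_enable[MODE_ASIC] && !mode_enable[MODE_K2].
+ */
+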
+/* Returns true if the specified entity (indicated by GRC param) should be
+ * included in the dump, false otherwise.
+ */
+static bool qed_grc_is_included(struct qed_hwfn *p_hwfn,
+                               enum dbg_grc_params grc_param)
+{
+       return qed_grc_get_param(p_hwfn, grc_param) > 0;
+}
+
+/* Returns true if the specified Storm should be included in the dump, false
+ * otherwise.
+ */
+static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn,
+                                     enum dbg_storms storm)
+{
+       return qed_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
+}
+
+/* Returns true if the specified memory should be included in the dump, false
+ * otherwise.
+ */
+static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
+                                   enum block_id block_id, u8 mem_group_id)
+{
+       u8 i;
+
+       /* Check Storm match */
+       if (s_block_defs[block_id]->associated_to_storm &&
+           !qed_grc_is_storm_included(p_hwfn,
+                       (enum dbg_storms)s_block_defs[block_id]->storm_id))
+               return false;
+
+       for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
+               if (mem_group_id == s_big_ram_defs[i].mem_group_id ||
+                   mem_group_id == s_big_ram_defs[i].ram_mem_group_id)
+                       return qed_grc_is_included(p_hwfn,
+                                                  s_big_ram_defs[i].grc_param);
+       if (mem_group_id == MEM_GROUP_PXP_ILT || mem_group_id ==
+           MEM_GROUP_PXP_MEM)
+               return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
+       if (mem_group_id == MEM_GROUP_RAM)
+               return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
+       if (mem_group_id == MEM_GROUP_PBUF)
+               return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
+       if (mem_group_id == MEM_GROUP_CAU_MEM ||
+           mem_group_id == MEM_GROUP_CAU_SB ||
+           mem_group_id == MEM_GROUP_CAU_PI)
+               return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
+       if (mem_group_id == MEM_GROUP_QM_MEM)
+               return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
+       if (mem_group_id == MEM_GROUP_CONN_CFC_MEM ||
+           mem_group_id == MEM_GROUP_TASK_CFC_MEM)
+               return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC);
+       if (mem_group_id == MEM_GROUP_IGU_MEM || mem_group_id ==
+           MEM_GROUP_IGU_MSIX)
+               return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
+       if (mem_group_id == MEM_GROUP_MULD_MEM)
+               return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
+       if (mem_group_id == MEM_GROUP_PRS_MEM)
+               return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
+       if (mem_group_id == MEM_GROUP_DMAE_MEM)
+               return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
+       if (mem_group_id == MEM_GROUP_TM_MEM)
+               return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
+       if (mem_group_id == MEM_GROUP_SDM_MEM)
+               return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
+       if (mem_group_id == MEM_GROUP_TDIF_CTX || mem_group_id ==
+           MEM_GROUP_RDIF_CTX)
+               return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
+       if (mem_group_id == MEM_GROUP_CM_MEM)
+               return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
+       if (mem_group_id == MEM_GROUP_IOR)
+               return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
+
+       return true;
+}
+
+/* Stalls or unstalls all Storms, as indicated by the stall argument */
+static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
+                                struct qed_ptt *p_ptt, bool stall)
+{
+       u8 reg_val = stall ? 1 : 0;
+       u8 storm_id;
+
+       for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+               if (qed_grc_is_storm_included(p_hwfn,
+                                             (enum dbg_storms)storm_id)) {
+                       u32 reg_addr =
+                           s_storm_defs[storm_id].sem_fast_mem_addr +
+                           SEM_FAST_REG_STALL_0;
+
+                       qed_wr(p_hwfn, p_ptt, reg_addr, reg_val);
+               }
+       }
+
+       msleep(STALL_DELAY_MS);
+}
+
+/* Takes all blocks out of reset */
+static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
+                                  struct qed_ptt *p_ptt)
+{
+       struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+       u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
+       u32 i;
+
+       /* Fill reset regs values */
+       for (i = 0; i < MAX_BLOCK_ID; i++)
+               if (s_block_defs[i]->has_reset_bit && s_block_defs[i]->unreset)
+                       reg_val[s_block_defs[i]->reset_reg] |=
+                           BIT(s_block_defs[i]->reset_bit_offset);
+
+       /* Write reset registers */
+       for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
+               if (s_reset_regs_defs[i].exists[dev_data->chip_id]) {
+                       reg_val[i] |= s_reset_regs_defs[i].unreset_val;
+                       if (reg_val[i])
+                               qed_wr(p_hwfn,
+                                      p_ptt,
+                                      s_reset_regs_defs[i].addr +
+                                      RESET_REG_UNRESET_OFFSET, reg_val[i]);
+               }
+       }
+}
+
+/* Returns the attention block data of the specified block */
+static const struct dbg_attn_block_type_data *
+qed_get_block_attn_data(enum block_id block_id, enum dbg_attn_type attn_type)
+{
+       const struct dbg_attn_block *base_attn_block_arr =
+               (const struct dbg_attn_block *)
+               s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;
+
+       return &base_attn_block_arr[block_id].per_type_data[attn_type];
+}
+
+/* Returns the attention registers of the specified block */
+static const struct dbg_attn_reg *
+qed_get_block_attn_regs(enum block_id block_id, enum dbg_attn_type attn_type,
+                       u8 *num_attn_regs)
+{
+       const struct dbg_attn_block_type_data *block_type_data =
+               qed_get_block_attn_data(block_id, attn_type);
+
+       *num_attn_regs = block_type_data->num_regs;
+       return &((const struct dbg_attn_reg *)
+                s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)[block_type_data->
+                                                         regs_offset];
+}
+
+/* For each block, clear the status of all parities */
+static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
+                                  struct qed_ptt *p_ptt)
+{
+       struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+       u8 reg_idx, num_attn_regs;
+       u32 block_id;
+
+       for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
+               const struct dbg_attn_reg *attn_reg_arr;
+
+               if (dev_data->block_in_reset[block_id])
+                       continue;
+
+               attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
+                                                      ATTN_TYPE_PARITY,
+                                                      &num_attn_regs);
+               for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
+                       const struct dbg_attn_reg *reg_data =
+                               &attn_reg_arr[reg_idx];
+
+                       /* Check mode */
+                       bool eval_mode = GET_FIELD(reg_data->mode.data,
+                                                  DBG_MODE_HDR_EVAL_MODE) > 0;
+                       u16 modes_buf_offset =
+                               GET_FIELD(reg_data->mode.data,
+                                         DBG_MODE_HDR_MODES_BUF_OFFSET);
+
+                       if (!eval_mode ||
+                           qed_is_mode_match(p_hwfn, &modes_buf_offset))
+                               /* Mode match - read parity status read-clear
+                                * register.
+                                */
+                               qed_rd(p_hwfn, p_ptt,
+                                      DWORDS_TO_BYTES(reg_data->
+                                                      sts_clr_address));
+               }
+       }
+}
+
+/* Dumps GRC registers section header. Returns the dumped size in dwords.
+ * The following parameters are dumped:
+ * - 'count' = num_dumped_entries
+ * - 'split' = split_type
+ * - 'id' = split_id (dumped only if split_id >= 0)
+ * - 'param_name' = param_val (user param, dumped only if param_name != NULL and
+ *     param_val != NULL)
+ */
+static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
+                                bool dump,
+                                u32 num_reg_entries,
+                                const char *split_type,
+                                int split_id,
+                                const char *param_name, const char *param_val)
+{
+       u8 num_params = 2 + (split_id >= 0 ? 1 : 0) + (param_name ? 1 : 0);
+       u32 offset = 0;
+
+       offset += qed_dump_section_hdr(dump_buf + offset,
+                                      dump, "grc_regs", num_params);
+       offset += qed_dump_num_param(dump_buf + offset,
+                                    dump, "count", num_reg_entries);
+       offset += qed_dump_str_param(dump_buf + offset,
+                                    dump, "split", split_type);
+       if (split_id >= 0)
+               offset += qed_dump_num_param(dump_buf + offset,
+                                            dump, "id", split_id);
+       if (param_name && param_val)
+               offset += qed_dump_str_param(dump_buf + offset,
+                                            dump, param_name, param_val);
+       return offset;
+}
+
+/* Dumps the GRC registers in the specified address range.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
+                                  struct qed_ptt *p_ptt, u32 *dump_buf,
+                                  bool dump, u32 addr, u32 len)
+{
+       u32 byte_addr = DWORDS_TO_BYTES(addr), offset = 0, i;
+
+       if (dump)
+               for (i = 0; i < len; i++, byte_addr += BYTES_IN_DWORD, offset++)
+                       *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr);
+       else
+               offset += len;
+       return offset;
+}
+
+/* Dumps GRC registers sequence header. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf, bool dump, u32 addr,
+                                     u32 len)
+{
+       if (dump)
+               *dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
+       return 1;
+}
+
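+/* A dumped register sequence is thus a single header dword packing the GRC
+ * address (in dwords) in the low bits and the length in the bits from
+ * REG_DUMP_LEN_SHIFT upward, followed by the raw register values. E.g.
+ * addr 0x100 with len 4 is encoded as 0x100 | (4 << REG_DUMP_LEN_SHIFT).
+ */
+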
+/* Dumps GRC registers sequence. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
+                                 struct qed_ptt *p_ptt, u32 *dump_buf,
+                                 bool dump, u32 addr, u32 len)
+{
+       u32 offset = 0;
+
+       offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
+       offset += qed_grc_dump_addr_range(p_hwfn,
+                                         p_ptt,
+                                         dump_buf + offset, dump, addr, len);
+       return offset;
+}
+
+/* Dumps GRC registers sequence with skip cycle.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
+                                      struct qed_ptt *p_ptt, u32 *dump_buf,
+                                      bool dump, u32 addr, u32 total_len,
+                                      u32 read_len, u32 skip_len)
+{
+       u32 offset = 0, reg_offset = 0;
+
+       offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
+       if (dump) {
+               while (reg_offset < total_len) {
+                       u32 curr_len = min_t(u32,
+                                            read_len,
+                                            total_len - reg_offset);
+                       offset += qed_grc_dump_addr_range(p_hwfn,
+                                                         p_ptt,
+                                                         dump_buf + offset,
+                                                         dump, addr, curr_len);
+                       reg_offset += curr_len;
+                       addr += curr_len;
+                       if (reg_offset < total_len) {
+                               /* Clamp the zero-filled skip to the
+                                * remaining length.
+                                */
+                               curr_len = min_t(u32,
+                                                skip_len,
+                                                total_len - reg_offset);
+                               memset(dump_buf + offset, 0,
+                                      DWORDS_TO_BYTES(curr_len));
+                               offset += curr_len;
+                               reg_offset += curr_len;
+                               addr += curr_len;
+                       }
+               }
+       } else {
+               offset += total_len;
+       }
+
+       return offset;
+}
+
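+/* Skip cycles alternate real reads with zero filler. E.g. with total_len 8,
+ * read_len 2 and skip_len 2, the payload is: 2 dwords read from the chip,
+ * 2 zero dwords, 2 read, 2 zero. The header still advertises the full
+ * 8-dword range, so offsets in the parsed dump remain address-accurate
+ * while the unreadable gaps simply read as zero.
+ */
+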
+/* Dumps GRC registers entries. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
+                                    struct qed_ptt *p_ptt,
+                                    struct dbg_array input_regs_arr,
+                                    u32 *dump_buf,
+                                    bool dump,
+                                    bool block_enable[MAX_BLOCK_ID],
+                                    u32 *num_dumped_reg_entries)
+{
+       u32 i, offset = 0, input_offset = 0;
+       bool mode_match = true;
+
+       *num_dumped_reg_entries = 0;
+       while (input_offset < input_regs_arr.size_in_dwords) {
+               const struct dbg_dump_cond_hdr *cond_hdr =
+                   (const struct dbg_dump_cond_hdr *)
+                   &input_regs_arr.ptr[input_offset++];
+               bool eval_mode = GET_FIELD(cond_hdr->mode.data,
+                                          DBG_MODE_HDR_EVAL_MODE) > 0;
+
+               /* Check mode/block */
+               if (eval_mode) {
+                       u16 modes_buf_offset =
+                               GET_FIELD(cond_hdr->mode.data,
+                                         DBG_MODE_HDR_MODES_BUF_OFFSET);
+                       mode_match = qed_is_mode_match(p_hwfn,
+                                                      &modes_buf_offset);
+               }
+
+               if (mode_match && block_enable[cond_hdr->block_id]) {
+                       for (i = 0; i < cond_hdr->data_size;
+                            i++, input_offset++) {
+                               const struct dbg_dump_reg *reg =
+                                   (const struct dbg_dump_reg *)
+                                   &input_regs_arr.ptr[input_offset];
+                               u32 addr, len;
+
+                               addr = GET_FIELD(reg->data,
+                                                DBG_DUMP_REG_ADDRESS);
+                               len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH);
+                               offset +=
+                                   qed_grc_dump_reg_entry(p_hwfn, p_ptt,
+                                                          dump_buf + offset,
+                                                          dump,
+                                                          addr,
+                                                          len);
+                               (*num_dumped_reg_entries)++;
+                       }
+               } else {
+                       input_offset += cond_hdr->data_size;
+               }
+       }
+
+       return offset;
+}
+
+/* Dumps GRC registers entries for the specified split type and split ID.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
+                                  struct qed_ptt *p_ptt,
+                                  struct dbg_array input_regs_arr,
+                                  u32 *dump_buf,
+                                  bool dump,
+                                  bool block_enable[MAX_BLOCK_ID],
+                                  const char *split_type_name,
+                                  u32 split_id,
+                                  const char *param_name,
+                                  const char *param_val)
+{
+       u32 num_dumped_reg_entries, offset;
+
+       /* Calculate register dump header size (and skip it for now) */
+       offset = qed_grc_dump_regs_hdr(dump_buf,
+                                      false,
+                                      0,
+                                      split_type_name,
+                                      split_id, param_name, param_val);
+
+       /* Dump registers */
+       offset += qed_grc_dump_regs_entries(p_hwfn,
+                                           p_ptt,
+                                           input_regs_arr,
+                                           dump_buf + offset,
+                                           dump,
+                                           block_enable,
+                                           &num_dumped_reg_entries);
+
+       /* Write register dump header */
+       if (dump && num_dumped_reg_entries > 0)
+               qed_grc_dump_regs_hdr(dump_buf,
+                                     dump,
+                                     num_dumped_reg_entries,
+                                     split_type_name,
+                                     split_id, param_name, param_val);
+
+       return num_dumped_reg_entries > 0 ? offset : 0;
+}
+
+/* Dumps registers according to the input registers array.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
+                                 struct qed_ptt *p_ptt,
+                                 u32 *dump_buf,
+                                 bool dump,
+                                 bool block_enable[MAX_BLOCK_ID],
+                                 const char *param_name, const char *param_val)
+{
+       struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+       struct chip_platform_defs *p_platform_defs;
+       u32 offset = 0, input_offset = 0;
+       struct chip_defs *p_chip_defs;
+       u8 port_id, pf_id, vf_id;
+       u16 fid;
+
+       p_chip_defs = &s_chip_defs[dev_data->chip_id];
+       p_platform_defs = &p_chip_defs->per_platform[dev_data->platform_id];
+
+       if (dump)
+               DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Dumping registers...\n");
+       while (input_offset <
+              s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
+               const struct dbg_dump_split_hdr *split_hdr =
+                       (const struct dbg_dump_split_hdr *)
+                       &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++];
+               u8 split_type_id = GET_FIELD(split_hdr->hdr,
+                                            DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
+               u32 split_data_size = GET_FIELD(split_hdr->hdr,
+                                               DBG_DUMP_SPLIT_HDR_DATA_SIZE);
+               struct dbg_array curr_input_regs_arr = {
+                       &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset],
+                       split_data_size};
+
+               switch (split_type_id) {
+               case SPLIT_TYPE_NONE:
+                       offset += qed_grc_dump_split_data(p_hwfn,
+                                                         p_ptt,
+                                                         curr_input_regs_arr,
+                                                         dump_buf + offset,
+                                                         dump,
+                                                         block_enable,
+                                                         "eng",
+                                                         (u32)(-1),
+                                                         param_name,
+                                                         param_val);
+                       break;
+               case SPLIT_TYPE_PORT:
+                       for (port_id = 0; port_id < p_platform_defs->num_ports;
+                            port_id++) {
+                               if (dump)
+                                       qed_port_pretend(p_hwfn, p_ptt,
+                                                        port_id);
+                               offset +=
+                                   qed_grc_dump_split_data(p_hwfn, p_ptt,
+                                                           curr_input_regs_arr,
+                                                           dump_buf + offset,
+                                                           dump, block_enable,
+                                                           "port", port_id,
+                                                           param_name,
+                                                           param_val);
+                       }
+                       break;
+               case SPLIT_TYPE_PF:
+               case SPLIT_TYPE_PORT_PF:
+                       for (pf_id = 0; pf_id < p_platform_defs->num_pfs;
+                            pf_id++) {
+                               u8 pfid_shift =
+                                       PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
+
+                               if (dump) {
+                                       fid = pf_id << pfid_shift;
+                                       qed_fid_pretend(p_hwfn, p_ptt, fid);
+                               }
+
+                               offset +=
+                                   qed_grc_dump_split_data(p_hwfn, p_ptt,
+                                                           curr_input_regs_arr,
+                                                           dump_buf + offset,
+                                                           dump, block_enable,
+                                                           "pf", pf_id,
+                                                           param_name,
+                                                           param_val);
+                       }
+                       break;
+               case SPLIT_TYPE_VF:
+                       for (vf_id = 0; vf_id < p_platform_defs->num_vfs;
+                            vf_id++) {
+                               u8 vfvalid_shift =
+                                       PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT;
+                               u8 vfid_shift =
+                                       PXP_PRETEND_CONCRETE_FID_VFID_SHIFT;
+
+                               if (dump) {
+                                       fid = BIT(vfvalid_shift) |
+                                             (vf_id << vfid_shift);
+                                       qed_fid_pretend(p_hwfn, p_ptt, fid);
+                               }
+
+                               offset +=
+                                   qed_grc_dump_split_data(p_hwfn, p_ptt,
+                                                           curr_input_regs_arr,
+                                                           dump_buf + offset,
+                                                           dump, block_enable,
+                                                           "vf", vf_id,
+                                                           param_name,
+                                                           param_val);
+                       }
+                       break;
+               default:
+                       break;
+               }
+
+               input_offset += split_data_size;
+       }
+
+       /* Pretend to original PF */
+       if (dump) {
+               fid = p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
+               qed_fid_pretend(p_hwfn, p_ptt, fid);
+       }
+
+       return offset;
+}
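+
+/* Illustrative sketch, not part of this patch: restricting
+ * qed_grc_dump_registers() to a single block via block_enable[], with an
+ * optional param name/value pair that is emitted into the section header
+ * (qed_grc_dump_mcp() below uses the same pattern in-tree).
+ */
+#if 0  /* example only */
+static u32 example_dump_mcp_regs(struct qed_hwfn *p_hwfn,
+                                struct qed_ptt *p_ptt, u32 *dump_buf)
+{
+       bool block_enable[MAX_BLOCK_ID] = { 0 };
+
+       block_enable[BLOCK_MCP] = true;
+       return qed_grc_dump_registers(p_hwfn, p_ptt, dump_buf, true,
+                                     block_enable, "block", "MCP");
+}
+#endif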
+
+/* Dump reset registers. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
+                                  struct qed_ptt *p_ptt,
+                                  u32 *dump_buf, bool dump)
+{
+       struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+       u32 i, offset = 0, num_regs = 0;
+
+       /* Calculate header size */
+       offset += qed_grc_dump_regs_hdr(dump_buf,
+                                       false, 0, "eng", -1, NULL, NULL);
+
+       /* Write reset registers */
+       for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
+               if (s_reset_regs_defs[i].exists[dev_data->chip_id]) {
+                       u32 addr = BYTES_TO_DWORDS(s_reset_regs_defs[i].addr);
+
+                       offset += qed_grc_dump_reg_entry(p_hwfn,
+                                                        p_ptt,
+                                                        dump_buf + offset,
+                                                        dump,
+                                                        addr,
+                                                        1);
+                       num_regs++;
+               }
+       }
+
+       /* Write header */
+       if (dump)
+               qed_grc_dump_regs_hdr(dump_buf,
+                                     true, num_regs, "eng", -1, NULL, NULL);
+       return offset;
+}
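+
+/* Note on the pattern above (shared by the other dump routines): the
+ * header is first emitted with dump=false purely to advance the offset,
+ * the entries are written while being counted, and the header is then
+ * rewritten in place at the buffer start with the final entry count.
+ */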
+
+/* Dump registers that are modified during GRC Dump and therefore must be dumped
+ * first. Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
+                                     struct qed_ptt *p_ptt,
+                                     u32 *dump_buf, bool dump)
+{
+       struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+       u32 offset = 0, num_reg_entries = 0, block_id;
+       u8 storm_id, reg_idx, num_attn_regs;
+
+       /* Calculate header size */
+       offset += qed_grc_dump_regs_hdr(dump_buf,
+                                       false, 0, "eng", -1, NULL, NULL);
+
+       /* Write parity registers */
+       for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
+               const struct dbg_attn_reg *attn_reg_arr;
+
+               if (dev_data->block_in_reset[block_id] && dump)
+                       continue;
+
+               attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
+                                                      ATTN_TYPE_PARITY,
+                                                      &num_attn_regs);
+               for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
+                       const struct dbg_attn_reg *reg_data =
+                               &attn_reg_arr[reg_idx];
+                       u16 modes_buf_offset;
+                       bool eval_mode;
+                       u32 addr;
+
+                       /* Check mode */
+                       eval_mode = GET_FIELD(reg_data->mode.data,
+                                             DBG_MODE_HDR_EVAL_MODE) > 0;
+                       modes_buf_offset =
+                               GET_FIELD(reg_data->mode.data,
+                                         DBG_MODE_HDR_MODES_BUF_OFFSET);
+                       if (!eval_mode ||
+                           qed_is_mode_match(p_hwfn, &modes_buf_offset)) {
+                               /* Mode match - read and dump registers */
+                               addr = reg_data->mask_address;
+                               offset +=
+                                   qed_grc_dump_reg_entry(p_hwfn,
+                                                          p_ptt,
+                                                          dump_buf + offset,
+                                                          dump,
+                                                          addr,
+                                                          1);
+                               addr = GET_FIELD(reg_data->data,
+                                                DBG_ATTN_REG_STS_ADDRESS);
+                               offset +=
+                                   qed_grc_dump_reg_entry(p_hwfn,
+                                                          p_ptt,
+                                                          dump_buf + offset,
+                                                          dump,
+                                                          addr,
+                                                          1);
+                               num_reg_entries += 2;
+                       }
+               }
+       }
+
+       /* Write storm stall status registers */
+       for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+               u32 addr;
+
+               if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id] &&
+                   dump)
+                       continue;
+
+               addr =
+                   BYTES_TO_DWORDS(s_storm_defs[storm_id].sem_fast_mem_addr +
+                                   SEM_FAST_REG_STALLED);
+               offset += qed_grc_dump_reg_entry(p_hwfn,
+                                                p_ptt,
+                                                dump_buf + offset,
+                                                dump,
+                                                addr,
+                                                1);
+               num_reg_entries++;
+       }
+
+       /* Write header */
+       if (dump)
+               qed_grc_dump_regs_hdr(dump_buf,
+                                     true,
+                                     num_reg_entries, "eng", -1, NULL, NULL);
+       return offset;
+}
+
+/* Dumps registers that can't be represented in the debug arrays */
+static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
+                                    struct qed_ptt *p_ptt,
+                                    u32 *dump_buf, bool dump)
+{
+       u32 offset = 0, addr;
+
+       offset += qed_grc_dump_regs_hdr(dump_buf,
+                                       dump, 2, "eng", -1, NULL, NULL);
+
+       /* Dump R/TDIF_REG_DEBUG_ERROR_INFO (every 8th register is
+        * skipped).
+        */
+       addr = BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO);
+       offset += qed_grc_dump_reg_entry_skip(p_hwfn,
+                                             p_ptt,
+                                             dump_buf + offset,
+                                             dump,
+                                             addr,
+                                             RDIF_REG_DEBUG_ERROR_INFO_SIZE,
+                                             7,
+                                             1);
+       addr = BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO);
+       offset +=
+           qed_grc_dump_reg_entry_skip(p_hwfn,
+                                       p_ptt,
+                                       dump_buf + offset,
+                                       dump,
+                                       addr,
+                                       TDIF_REG_DEBUG_ERROR_INFO_SIZE,
+                                       7,
+                                       1);
+
+       return offset;
+}
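+
+/* Note: the trailing 7, 1 arguments to qed_grc_dump_reg_entry_skip()
+ * appear to select a read-7/skip-1 cycle, which realizes the "every 8th
+ * register is skipped" rule from the comment above.
+ */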
+
+/* Dumps a GRC memory header (section and params).
+ * The following parameters are dumped:
+ * name - name is dumped only if it's not NULL.
+ * addr - addr is dumped only if name is NULL.
+ * len - len is always dumped.
+ * width - bit_width is dumped if it's not zero.
+ * packed - packed=1 is dumped if it's not false.
+ * mem_group - mem_group is always dumped.
+ * is_storm - true only if the memory is related to a Storm.
+ * storm_letter - storm letter (valid only if is_storm is true).
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
+                               u32 *dump_buf,
+                               bool dump,
+                               const char *name,
+                               u32 addr,
+                               u32 len,
+                               u32 bit_width,
+                               bool packed,
+                               const char *mem_group,
+                               bool is_storm, char storm_letter)
+{
+       u8 num_params = 3;
+       u32 offset = 0;
+       char buf[64];
+
+       if (!len)
+               DP_NOTICE(p_hwfn,
+                         "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
+       if (bit_width)
+               num_params++;
+       if (packed)
+               num_params++;
+
+       /* Dump section header */
+       offset += qed_dump_section_hdr(dump_buf + offset,
+                                      dump, "grc_mem", num_params);
+       if (name) {
+               /* Dump name */
+               if (is_storm) {
+                       strcpy(buf, "?STORM_");
+                       buf[0] = storm_letter;
+                       strcpy(buf + strlen(buf), name);
+               } else {
+                       strcpy(buf, name);
+               }
+
+               offset += qed_dump_str_param(dump_buf + offset,
+                                            dump, "name", buf);
+               if (dump)
+                       DP_VERBOSE(p_hwfn,
+                                  QED_MSG_DEBUG,
+                                  "Dumping %d registers from %s...\n",
+                                  len, buf);
+       } else {
+               /* Dump address */
+               offset += qed_dump_num_param(dump_buf + offset,
+                                            dump, "addr",
+                                            DWORDS_TO_BYTES(addr));
+               if (dump && len > 64)
+                       DP_VERBOSE(p_hwfn,
+                                  QED_MSG_DEBUG,
+                                  "Dumping %d registers from address 0x%x...\n",
+                                  len, (u32)DWORDS_TO_BYTES(addr));
+       }
+
+       /* Dump len */
+       offset += qed_dump_num_param(dump_buf + offset, dump, "len", len);
+
+       /* Dump bit width */
+       if (bit_width)
+               offset += qed_dump_num_param(dump_buf + offset,
+                                            dump, "width", bit_width);
+
+       /* Dump packed */
+       if (packed)
+               offset += qed_dump_num_param(dump_buf + offset,
+                                            dump, "packed", 1);
+
+       /* Dump reg type */
+       if (is_storm) {
+               strcpy(buf, "?STORM_");
+               buf[0] = storm_letter;
+               strcpy(buf + strlen(buf), mem_group);
+       } else {
+               strcpy(buf, mem_group);
+       }
+
+       offset += qed_dump_str_param(dump_buf + offset, dump, "type", buf);
+       return offset;
+}
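+
+/* For illustration: a named Storm memory yields a "grc_mem" section with
+ * params such as name=TSTORM_<name>, len=<dwords>, width=<bits> and
+ * type=TSTORM_<mem_group>, while an unnamed memory is identified by
+ * addr=<byte address> instead of name (placeholder values, not taken
+ * from a real dump).
+ */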
+
+/* Dumps a single GRC memory. If name is NULL, the memory is stored by address.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
+                           struct qed_ptt *p_ptt,
+                           u32 *dump_buf,
+                           bool dump,
+                           const char *name,
+                           u32 addr,
+                           u32 len,
+                           u32 bit_width,
+                           bool packed,
+                           const char *mem_group,
+                           bool is_storm, char storm_letter)
+{
+       u32 offset = 0;
+
+       offset += qed_grc_dump_mem_hdr(p_hwfn,
+                                      dump_buf + offset,
+                                      dump,
+                                      name,
+                                      addr,
+                                      len,
+                                      bit_width,
+                                      packed,
+                                      mem_group, is_storm, storm_letter);
+       offset += qed_grc_dump_addr_range(p_hwfn,
+                                         p_ptt,
+                                         dump_buf + offset, dump, addr, len);
+       return offset;
+}
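+
+/* Illustrative sketch, not part of this patch: dumping an arbitrary GRC
+ * range by address (name=NULL), as the MCP scratchpad dump below does.
+ * The byte address and length here are hypothetical.
+ */
+#if 0  /* example only */
+static u32 example_dump_by_addr(struct qed_hwfn *p_hwfn,
+                               struct qed_ptt *p_ptt, u32 *dump_buf)
+{
+       /* 16 dwords starting at hypothetical byte address 0x1000 */
+       return qed_grc_dump_mem(p_hwfn, p_ptt, dump_buf, true, NULL,
+                               BYTES_TO_DWORDS(0x1000), 16, 0, false,
+                               "example", false, 0);
+}
+#endif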
+
+/* Dumps GRC memories entries. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
+                                   struct qed_ptt *p_ptt,
+                                   struct dbg_array input_mems_arr,
+                                   u32 *dump_buf, bool dump)
+{
+       u32 i, offset = 0, input_offset = 0;
+       bool mode_match = true;
+
+       while (input_offset < input_mems_arr.size_in_dwords) {
+               const struct dbg_dump_cond_hdr *cond_hdr;
+               u32 num_entries;
+               bool eval_mode;
+
+               cond_hdr = (const struct dbg_dump_cond_hdr *)
+                          &input_mems_arr.ptr[input_offset++];
+               eval_mode = GET_FIELD(cond_hdr->mode.data,
+                                     DBG_MODE_HDR_EVAL_MODE) > 0;
+
+               /* Check required mode */
+               if (eval_mode) {
+                       u16 modes_buf_offset =
+                               GET_FIELD(cond_hdr->mode.data,
+                                         DBG_MODE_HDR_MODES_BUF_OFFSET);
+
+                       mode_match = qed_is_mode_match(p_hwfn,
+                                                      &modes_buf_offset);
+               }
+
+               if (!mode_match) {
+                       input_offset += cond_hdr->data_size;
+                       continue;
+               }
+
+               num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;
+               for (i = 0; i < num_entries;
+                    i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
+                       const struct dbg_dump_mem *mem =
+                               (const struct dbg_dump_mem *)
+                               &input_mems_arr.ptr[input_offset];
+                       u8 mem_group_id;
+
+                       mem_group_id = GET_FIELD(mem->dword0,
+                                                DBG_DUMP_MEM_MEM_GROUP_ID);
+                       if (mem_group_id >= MEM_GROUPS_NUM) {
+                               DP_NOTICE(p_hwfn, "Invalid mem_group_id\n");
+                               return 0;
+                       }
+
+                       if (qed_grc_is_mem_included(p_hwfn,
+                                       (enum block_id)cond_hdr->block_id,
+                                       mem_group_id)) {
+                               u32 mem_addr = GET_FIELD(mem->dword0,
+                                                        DBG_DUMP_MEM_ADDRESS);
+                               u32 mem_len = GET_FIELD(mem->dword1,
+                                                       DBG_DUMP_MEM_LENGTH);
+                               enum dbg_grc_params grc_param;
+                               char storm_letter = 'a';
+                               bool is_storm = false;
+
+                               /* Update memory length for CCFC/TCFC memories
+                                * according to number of LCIDs/LTIDs.
+                                */
+                               if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) {
+                                       if (mem_len % MAX_LCIDS != 0) {
+                                               DP_NOTICE(p_hwfn,
+                                                         "Invalid CCFC connection memory size\n");
+                                               return 0;
+                                       }
+
+                                       grc_param = DBG_GRC_PARAM_NUM_LCIDS;
+                                       mem_len = qed_grc_get_param(p_hwfn,
+                                                                   grc_param) *
+                                                 (mem_len / MAX_LCIDS);
+                               } else if (mem_group_id ==
+                                          MEM_GROUP_TASK_CFC_MEM) {
+                                       if (mem_len % MAX_LTIDS != 0) {
+                                               DP_NOTICE(p_hwfn,
+                                                         "Invalid TCFC task memory size\n");
+                                               return 0;
+                                       }
+
+                                       grc_param = DBG_GRC_PARAM_NUM_LTIDS;
+                                       mem_len = qed_grc_get_param(p_hwfn,
+                                                                   grc_param) *
+                                                 (mem_len / MAX_LTIDS);
+                               }
+
+                               /* If memory is associated with Storm, update
+                                * Storm details.
+                                */
+                               if (s_block_defs[cond_hdr->block_id]->
+                                                       associated_to_storm) {
+                                       is_storm = true;
+                                       storm_letter =
+                                               s_storm_defs[s_block_defs[
+                                               cond_hdr->block_id]->
+                                               storm_id].letter;
+                               }
+
+                               /* Dump memory */
+                               offset += qed_grc_dump_mem(p_hwfn, p_ptt,
+                                               dump_buf + offset, dump, NULL,
+                                               mem_addr, mem_len, 0,
+                                               false,
+                                               s_mem_group_names[mem_group_id],
+                                               is_storm, storm_letter);
+                       }
+               }
+       }
+
+       return offset;
+}
+
+/* Dumps GRC memories according to the input array dump_mem.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
+                                struct qed_ptt *p_ptt,
+                                u32 *dump_buf, bool dump)
+{
+       u32 offset = 0, input_offset = 0;
+
+       while (input_offset <
+              s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) {
+               const struct dbg_dump_split_hdr *split_hdr =
+                       (const struct dbg_dump_split_hdr *)
+                       &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++];
+               u8 split_type_id = GET_FIELD(split_hdr->hdr,
+                                            DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
+               u32 split_data_size = GET_FIELD(split_hdr->hdr,
+                                               DBG_DUMP_SPLIT_HDR_DATA_SIZE);
+               struct dbg_array curr_input_mems_arr = {
+                       &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset],
+                       split_data_size};
+
+               switch (split_type_id) {
+               case SPLIT_TYPE_NONE:
+                       offset += qed_grc_dump_mem_entries(p_hwfn,
+                                                          p_ptt,
+                                                          curr_input_mems_arr,
+                                                          dump_buf + offset,
+                                                          dump);
+                       break;
+               default:
+                       DP_NOTICE(p_hwfn,
+                                 "Dumping split memories is currently not supported\n");
+                       break;
+               }
+
+               input_offset += split_data_size;
+       }
+
+       return offset;
+}
+
+/* Dumps GRC context data for the specified Storm.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
+                                struct qed_ptt *p_ptt,
+                                u32 *dump_buf,
+                                bool dump,
+                                const char *name,
+                                u32 num_lids,
+                                u32 lid_size,
+                                u32 rd_reg_addr,
+                                u8 storm_id)
+{
+       u32 i, lid, total_size;
+       u32 offset = 0;
+
+       if (!lid_size)
+               return 0;
+       lid_size *= BYTES_IN_DWORD;
+       total_size = num_lids * lid_size;
+       offset += qed_grc_dump_mem_hdr(p_hwfn,
+                                      dump_buf + offset,
+                                      dump,
+                                      name,
+                                      0,
+                                      total_size,
+                                      lid_size * 32,
+                                      false,
+                                      name,
+                                      true, s_storm_defs[storm_id].letter);
+
+       /* Dump context data */
+       if (dump) {
+               for (lid = 0; lid < num_lids; lid++) {
+                       for (i = 0; i < lid_size; i++, offset++) {
+                               qed_wr(p_hwfn,
+                                      p_ptt,
+                                      s_storm_defs[storm_id].cm_ctx_wr_addr,
+                                      BIT(9) | lid);
+                               *(dump_buf + offset) = qed_rd(p_hwfn,
+                                                             p_ptt,
+                                                             rd_reg_addr);
+                       }
+               }
+       } else {
+               offset += total_size;
+       }
+
+       return offset;
+}
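+
+/* Note on the read loop above: each LID is fetched by writing the LID to
+ * the Storm's CM context write address (bit 9 appears to serve as a
+ * read-trigger/valid bit) and then reading lid_size dwords back from
+ * rd_reg_addr.
+ */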
+
+/* Dumps GRC contexts. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
+                           struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+       u32 offset = 0;
+       u8 storm_id;
+
+       for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+               if (!qed_grc_is_storm_included(p_hwfn,
+                                              (enum dbg_storms)storm_id))
+                       continue;
+
+               /* Dump Conn AG context size */
+               offset +=
+                       qed_grc_dump_ctx_data(p_hwfn,
+                                             p_ptt,
+                                             dump_buf + offset,
+                                             dump,
+                                             "CONN_AG_CTX",
+                                             qed_grc_get_param(p_hwfn,
+                                                   DBG_GRC_PARAM_NUM_LCIDS),
+                                             s_storm_defs[storm_id].
+                                                   cm_conn_ag_ctx_lid_size,
+                                             s_storm_defs[storm_id].
+                                                   cm_conn_ag_ctx_rd_addr,
+                                             storm_id);
+
+               /* Dump Conn ST context size */
+               offset +=
+                       qed_grc_dump_ctx_data(p_hwfn,
+                                             p_ptt,
+                                             dump_buf + offset,
+                                             dump,
+                                             "CONN_ST_CTX",
+                                             qed_grc_get_param(p_hwfn,
+                                                   DBG_GRC_PARAM_NUM_LCIDS),
+                                             s_storm_defs[storm_id].
+                                                   cm_conn_st_ctx_lid_size,
+                                             s_storm_defs[storm_id].
+                                                   cm_conn_st_ctx_rd_addr,
+                                             storm_id);
+
+               /* Dump Task AG context size */
+               offset +=
+                       qed_grc_dump_ctx_data(p_hwfn,
+                                             p_ptt,
+                                             dump_buf + offset,
+                                             dump,
+                                             "TASK_AG_CTX",
+                                             qed_grc_get_param(p_hwfn,
+                                                   DBG_GRC_PARAM_NUM_LTIDS),
+                                             s_storm_defs[storm_id].
+                                                   cm_task_ag_ctx_lid_size,
+                                             s_storm_defs[storm_id].
+                                                   cm_task_ag_ctx_rd_addr,
+                                             storm_id);
+
+               /* Dump Task ST context size */
+               offset +=
+                       qed_grc_dump_ctx_data(p_hwfn,
+                                             p_ptt,
+                                             dump_buf + offset,
+                                             dump,
+                                             "TASK_ST_CTX",
+                                             qed_grc_get_param(p_hwfn,
+                                                   DBG_GRC_PARAM_NUM_LTIDS),
+                                             s_storm_defs[storm_id].
+                                                   cm_task_st_ctx_lid_size,
+                                             s_storm_defs[storm_id].
+                                                   cm_task_st_ctx_rd_addr,
+                                             storm_id);
+       }
+
+       return offset;
+}
+
+/* Dumps GRC IORs data. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn,
+                            struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+       char buf[10] = "IOR_SET_?";
+       u8 storm_id, set_id;
+       u32 offset = 0;
+
+       for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+               struct storm_defs *storm = &s_storm_defs[storm_id];
+
+               if (!qed_grc_is_storm_included(p_hwfn,
+                                              (enum dbg_storms)storm_id))
+                       continue;
+
+               for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
+                       u32 reg_file_addr, addr;
+
+                       /* Byte address of the Storm's fast-memory register
+                        * file; converted to a dword address for the dump.
+                        */
+                       reg_file_addr = storm->sem_fast_mem_addr +
+                                       SEM_FAST_REG_STORM_REG_FILE;
+                       addr = BYTES_TO_DWORDS(reg_file_addr) +
+                              IOR_SET_OFFSET(set_id);
+                       buf[strlen(buf) - 1] = '0' + set_id;
+                       offset += qed_grc_dump_mem(p_hwfn,
+                                                  p_ptt,
+                                                  dump_buf + offset,
+                                                  dump,
+                                                  buf,
+                                                  addr,
+                                                  IORS_PER_SET,
+                                                  32,
+                                                  false,
+                                                  "ior",
+                                                  true,
+                                                  storm->letter);
+               }
+       }
+
+       return offset;
+}
+
+/* Dump VFC CAM. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
+                               struct qed_ptt *p_ptt,
+                               u32 *dump_buf, bool dump, u8 storm_id)
+{
+       u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
+       u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
+       u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
+       u32 offset = 0;
+       u32 row, i;
+
+       offset += qed_grc_dump_mem_hdr(p_hwfn,
+                                      dump_buf + offset,
+                                      dump,
+                                      "vfc_cam",
+                                      0,
+                                      total_size,
+                                      256,
+                                      false,
+                                      "vfc_cam",
+                                      true, s_storm_defs[storm_id].letter);
+       if (dump) {
+               /* Prepare CAM address */
+               SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
+               for (row = 0; row < VFC_CAM_NUM_ROWS;
+                    row++, offset += VFC_CAM_RESP_DWORDS) {
+                       /* Write VFC CAM command */
+                       SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
+                       ARR_REG_WR(p_hwfn,
+                                  p_ptt,
+                                  s_storm_defs[storm_id].sem_fast_mem_addr +
+                                  SEM_FAST_REG_VFC_DATA_WR,
+                                  cam_cmd, VFC_CAM_CMD_DWORDS);
+
+                       /* Write VFC CAM address */
+                       ARR_REG_WR(p_hwfn,
+                                  p_ptt,
+                                  s_storm_defs[storm_id].sem_fast_mem_addr +
+                                  SEM_FAST_REG_VFC_ADDR,
+                                  cam_addr, VFC_CAM_ADDR_DWORDS);
+
+                       /* Read VFC CAM read response */
+                       ARR_REG_RD(p_hwfn,
+                                  p_ptt,
+                                  s_storm_defs[storm_id].sem_fast_mem_addr +
+                                  SEM_FAST_REG_VFC_DATA_RD,
+                                  dump_buf + offset, VFC_CAM_RESP_DWORDS);
+               }
+       } else {
+               offset += total_size;
+       }
+
+       return offset;
+}
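+
+/* Note: each VFC CAM row is read with a three-step sequence - write the
+ * CAM_RD command to SEM_FAST_REG_VFC_DATA_WR, write the address to
+ * SEM_FAST_REG_VFC_ADDR, then read the response back from
+ * SEM_FAST_REG_VFC_DATA_RD. qed_grc_dump_vfc_ram() below follows the
+ * same command/address/response protocol.
+ */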
+
+/* Dump VFC RAM. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
+                               struct qed_ptt *p_ptt,
+                               u32 *dump_buf,
+                               bool dump,
+                               u8 storm_id, struct vfc_ram_defs *ram_defs)
+{
+       u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
+       u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
+       u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
+       u32 offset = 0;
+       u32 row, i;
+
+       offset += qed_grc_dump_mem_hdr(p_hwfn,
+                                      dump_buf + offset,
+                                      dump,
+                                      ram_defs->mem_name,
+                                      0,
+                                      total_size,
+                                      256,
+                                      false,
+                                      ram_defs->type_name,
+                                      true, s_storm_defs[storm_id].letter);
+
+       /* Prepare RAM address */
+       SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
+
+       if (!dump)
+               return offset + total_size;
+
+       for (row = ram_defs->base_row;
+            row < ram_defs->base_row + ram_defs->num_rows;
+            row++, offset += VFC_RAM_RESP_DWORDS) {
+               /* Write VFC RAM command */
+               ARR_REG_WR(p_hwfn,
+                          p_ptt,
+                          s_storm_defs[storm_id].sem_fast_mem_addr +
+                          SEM_FAST_REG_VFC_DATA_WR,
+                          ram_cmd, VFC_RAM_CMD_DWORDS);
+
+               /* Write VFC RAM address */
+               SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
+               ARR_REG_WR(p_hwfn,
+                          p_ptt,
+                          s_storm_defs[storm_id].sem_fast_mem_addr +
+                          SEM_FAST_REG_VFC_ADDR,
+                          ram_addr, VFC_RAM_ADDR_DWORDS);
+
+               /* Read VFC RAM read response */
+               ARR_REG_RD(p_hwfn,
+                          p_ptt,
+                          s_storm_defs[storm_id].sem_fast_mem_addr +
+                          SEM_FAST_REG_VFC_DATA_RD,
+                          dump_buf + offset, VFC_RAM_RESP_DWORDS);
+       }
+
+       return offset;
+}
+
+/* Dumps GRC VFC data. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_vfc(struct qed_hwfn *p_hwfn,
+                           struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+       struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+       u8 storm_id, i;
+       u32 offset = 0;
+
+       for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+               if (qed_grc_is_storm_included(p_hwfn,
+                                             (enum dbg_storms)storm_id) &&
+                   s_storm_defs[storm_id].has_vfc &&
+                   (storm_id != DBG_PSTORM_ID ||
+                    dev_data->platform_id == PLATFORM_ASIC)) {
+                       /* Read CAM */
+                       offset += qed_grc_dump_vfc_cam(p_hwfn,
+                                                      p_ptt,
+                                                      dump_buf + offset,
+                                                      dump, storm_id);
+
+                       /* Read RAM */
+                       for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
+                               offset += qed_grc_dump_vfc_ram(p_hwfn,
+                                                              p_ptt,
+                                                              dump_buf +
+                                                              offset,
+                                                              dump,
+                                                              storm_id,
+                                                              &s_vfc_ram_defs
+                                                              [i]);
+               }
+       }
+
+       return offset;
+}
+
+/* Dumps GRC RSS data. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
+                           struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+       struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+       u32 offset = 0;
+       u8 rss_mem_id;
+
+       for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
+               struct rss_mem_defs *rss_defs = &s_rss_mem_defs[rss_mem_id];
+               u32 num_entries = rss_defs->num_entries[dev_data->chip_id];
+               u32 entry_width = rss_defs->entry_width[dev_data->chip_id];
+               u32 total_dwords = (num_entries * entry_width) / 32;
+               u32 size = RSS_REG_RSS_RAM_DATA_SIZE;
+               bool packed = (entry_width == 16);
+               u32 rss_addr = rss_defs->addr;
+               u32 i, addr;
+
+               offset += qed_grc_dump_mem_hdr(p_hwfn,
+                                              dump_buf + offset,
+                                              dump,
+                                              rss_defs->mem_name,
+                                              0,
+                                              total_dwords,
+                                              entry_width,
+                                              packed,
+                                              rss_defs->type_name, false, 0);
+
+               if (!dump) {
+                       offset += total_dwords;
+                       continue;
+               }
+
+               /* Dump RSS data */
+               for (i = 0; i < total_dwords;
+                    i += RSS_REG_RSS_RAM_DATA_SIZE, rss_addr++) {
+                       addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
+                       qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
+                       offset += qed_grc_dump_addr_range(p_hwfn,
+                                                         p_ptt,
+                                                         dump_buf + offset,
+                                                         dump,
+                                                         addr,
+                                                         size);
+               }
+       }
+
+       return offset;
+}
+
+/* Dumps GRC Big RAM. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
+                               struct qed_ptt *p_ptt,
+                               u32 *dump_buf, bool dump, u8 big_ram_id)
+{
+       struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+       u32 total_blocks, ram_size, offset = 0, i;
+       char mem_name[12] = "???_BIG_RAM";
+       char type_name[8] = "???_RAM";
+       struct big_ram_defs *big_ram;
+
+       big_ram = &s_big_ram_defs[big_ram_id];
+       total_blocks = big_ram->num_of_blocks[dev_data->chip_id];
+       ram_size = total_blocks * BIG_RAM_BLOCK_SIZE_DWORDS;
+
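+       /* Overwrite only the "???" placeholder in the name templates:
+        * strncpy() bounded by strlen(instance_name) copies the instance
+        * name over the prefix and leaves the rest of the template (and its
+        * NUL terminator) intact. This assumes the instance name is exactly
+        * 3 characters long.
+        */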
+       strncpy(type_name, big_ram->instance_name,
+               strlen(big_ram->instance_name));
+       strncpy(mem_name, big_ram->instance_name,
+               strlen(big_ram->instance_name));
+
+       /* Dump memory header */
+       offset += qed_grc_dump_mem_hdr(p_hwfn,
+                                      dump_buf + offset,
+                                      dump,
+                                      mem_name,
+                                      0,
+                                      ram_size,
+                                      BIG_RAM_BLOCK_SIZE_BYTES * 8,
+                                      false, type_name, false, 0);
+
+       if (!dump)
+               return offset + ram_size;
+
+       /* Read and dump Big RAM data */
+       for (i = 0; i < total_blocks / 2; i++) {
+               u32 addr, len;
+
+               qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
+               addr = BYTES_TO_DWORDS(big_ram->data_reg_addr);
+               len = 2 * BIG_RAM_BLOCK_SIZE_DWORDS;
+               offset += qed_grc_dump_addr_range(p_hwfn,
+                                                 p_ptt,
+                                                 dump_buf + offset,
+                                                 dump,
+                                                 addr,
+                                                 len);
+       }
+
+       return offset;
+}
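+
+/* Note: the Big RAM loop above runs total_blocks / 2 iterations and reads
+ * 2 * BIG_RAM_BLOCK_SIZE_DWORDS per iteration, i.e. each write to
+ * addr_reg_addr apparently exposes two consecutive blocks through
+ * data_reg_addr, so the full ram_size is still covered.
+ */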
+
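+/* Dumps GRC MCP data. Returns the dumped size in dwords. */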
+static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
+                           struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+       bool block_enable[MAX_BLOCK_ID] = { 0 };
+       u32 offset = 0, addr;
+       bool halted = false;
+
+       /* Halt MCP */
+       if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
+               halted = !qed_mcp_halt(p_hwfn, p_ptt);
+               if (!halted)
+                       DP_NOTICE(p_hwfn, "MCP halt failed!\n");
+       }
+
+       /* Dump MCP scratchpad */
+       offset += qed_grc_dump_mem(p_hwfn,
+                                  p_ptt,
+                                  dump_buf + offset,
+                                  dump,
+                                  NULL,
+                                  BYTES_TO_DWORDS(MCP_REG_SCRATCH),
+                                  MCP_REG_SCRATCH_SIZE,
+                                  0, false, "MCP", false, 0);
+
+       /* Dump MCP cpu_reg_file */
+       offset += qed_grc_dump_mem(p_hwfn,
+                                  p_ptt,
+                                  dump_buf + offset,
+                                  dump,
+                                  NULL,
+                                  BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
+                                  MCP_REG_CPU_REG_FILE_SIZE,
+                                  0, false, "MCP", false, 0);
+
+       /* Dump MCP registers */
+       block_enable[BLOCK_MCP] = true;
+       offset += qed_grc_dump_registers(p_hwfn,
+                                        p_ptt,
+                                        dump_buf + offset,
+                                        dump, block_enable, "block", "MCP");
+
+       /* Dump required non-MCP registers */
+       offset += qed_grc_dump_regs_hdr(dump_buf + offset,
+                                       dump, 1, "eng", -1, "block", "MCP");
+       addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR);
+       offset += qed_grc_dump_reg_entry(p_hwfn,
+                                        p_ptt,
+                                        dump_buf + offset,
+                                        dump,
+                                        addr,
+                                        1);
+
+       /* Release MCP */
+       if (halted && qed_mcp_resume(p_hwfn, p_ptt))
+               DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
+       return offset;
+}
+
+/* Dumps the tbus indirect memory for all PHYs.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
+                           struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+       u32 offset = 0, tbus_lo_offset, tbus_hi_offset;
+       char mem_name[32];
+       u8 phy_id;
+
+       for (phy_id = 0; phy_id < ARRAY_SIZE(s_phy_defs); phy_id++) {
+               struct phy_defs *phy_defs = &s_phy_defs[phy_id];
+               int printed_chars;
+
+               printed_chars = snprintf(mem_name, sizeof(mem_name), "tbus_%s",
+                                        phy_defs->phy_name);
+               if (printed_chars < 0 || printed_chars >= sizeof(mem_name))
+                       DP_NOTICE(p_hwfn,
+                                 "Unexpected debug error: invalid PHY memory name\n");
+               offset += qed_grc_dump_mem_hdr(p_hwfn,
+                                              dump_buf + offset,
+                                              dump,
+                                              mem_name,
+                                              0,
+                                              PHY_DUMP_SIZE_DWORDS,
+                                              16, true, mem_name, false, 0);
+               if (dump) {
+                       u32 addr_lo_addr = phy_defs->base_addr +
+                                          phy_defs->tbus_addr_lo_addr;
+                       u32 addr_hi_addr = phy_defs->base_addr +
+                                          phy_defs->tbus_addr_hi_addr;
+                       u32 data_lo_addr = phy_defs->base_addr +
+                                          phy_defs->tbus_data_lo_addr;
+                       u32 data_hi_addr = phy_defs->base_addr +
+                                          phy_defs->tbus_data_hi_addr;
+                       u8 *bytes_buf = (u8 *)(dump_buf + offset);
+
+                       for (tbus_hi_offset = 0;
+                            tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
+                            tbus_hi_offset++) {
+                               qed_wr(p_hwfn,
+                                      p_ptt, addr_hi_addr, tbus_hi_offset);
+                               for (tbus_lo_offset = 0; tbus_lo_offset < 256;
+                                    tbus_lo_offset++) {
+                                       qed_wr(p_hwfn,
+                                              p_ptt,
+                                              addr_lo_addr, tbus_lo_offset);
+                                       *(bytes_buf++) =
+                                               (u8)qed_rd(p_hwfn, p_ptt,
+                                                          data_lo_addr);
+                                       *(bytes_buf++) =
+                                               (u8)qed_rd(p_hwfn, p_ptt,
+                                                          data_hi_addr);
+                               }
+                       }
+               }
+
+               offset += PHY_DUMP_SIZE_DWORDS;
+       }
+
+       return offset;
+}
+
+static void qed_config_dbg_line(struct qed_hwfn *p_hwfn,
+                               struct qed_ptt *p_ptt,
+                               enum block_id block_id,
+                               u8 line_id,
+                               u8 cycle_en,
+                               u8 right_shift, u8 force_valid, u8 force_frame)
+{
+       struct block_defs *p_block_defs = s_block_defs[block_id];
+
+       qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_select_addr, line_id);
+       qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_cycle_enable_addr, cycle_en);
+       qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_shift_addr, right_shift);
+       qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_force_valid_addr, force_valid);
+       qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_force_frame_addr, force_frame);
+}
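+
+/* Illustrative sketch, not part of this patch: selecting debug line 0 of
+ * a block with cycle_en=0xf (assumed to enable all lanes) and no shift or
+ * force bits, mirroring the 0xf, 0, 0, 0 arguments that
+ * qed_grc_dump_static_debug() below passes for every line.
+ */
+#if 0  /* example only */
+static void example_select_line0(struct qed_hwfn *p_hwfn,
+                                struct qed_ptt *p_ptt)
+{
+       qed_config_dbg_line(p_hwfn, p_ptt, BLOCK_MCP, 0, 0xf, 0, 0, 0);
+}
+#endif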
+
+/* Dumps Static Debug data. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
+                                    struct qed_ptt *p_ptt,
+                                    u32 *dump_buf, bool dump)
+{
+       u32 block_dwords = NUM_DBG_BUS_LINES * STATIC_DEBUG_LINE_DWORDS;
+       struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+       u32 offset = 0, block_id, line_id;
+       struct block_defs *p_block_defs;
+
+       if (dump) {
+               DP_VERBOSE(p_hwfn,
+                          QED_MSG_DEBUG, "Dumping static debug data...\n");
+
+               /* Disable all blocks debug output */
+               for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
+                       p_block_defs = s_block_defs[block_id];
+
+                       if (p_block_defs->has_dbg_bus[dev_data->chip_id])
+                               qed_wr(p_hwfn, p_ptt,
+                                      p_block_defs->dbg_cycle_enable_addr, 0);
+               }
+
+               qed_bus_reset_dbg_block(p_hwfn, p_ptt);
+               qed_bus_set_framing_mode(p_hwfn,
+                                        p_ptt, DBG_BUS_FRAME_MODE_8HW_0ST);
+               qed_wr(p_hwfn,
+                      p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
+               qed_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
+               qed_bus_enable_dbg_block(p_hwfn, p_ptt, true);
+       }
+
+       /* Dump all static debug lines for each relevant block */
+       for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
+               p_block_defs = s_block_defs[block_id];
+
+               if (!p_block_defs->has_dbg_bus[dev_data->chip_id])
+                       continue;
+
+               /* Dump static section params */
+               offset += qed_grc_dump_mem_hdr(p_hwfn,
+                                              dump_buf + offset,
+                                              dump,
+                                              p_block_defs->name, 0,
+                                              block_dwords, 32, false,
+                                              "STATIC", false, 0);
+
+               if (dump && !dev_data->block_in_reset[block_id]) {
+                       u8 dbg_client_id =
+                               p_block_defs->dbg_client_id[dev_data->chip_id];
+                       u32 addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
+                       u32 len = STATIC_DEBUG_LINE_DWORDS;
+
+                       /* Enable block's client */
+                       qed_bus_enable_clients(p_hwfn, p_ptt,
+                                              BIT(dbg_client_id));
+
+                       for (line_id = 0; line_id < NUM_DBG_BUS_LINES;
+                            line_id++) {
+                               /* Configure debug line ID */
+                               qed_config_dbg_line(p_hwfn,
+                                                   p_ptt,
+                                                   (enum block_id)block_id,
+                                                   (u8)line_id,
+                                                   0xf, 0, 0, 0);
+
+                               /* Read debug line info */
+                               offset +=
+                                   qed_grc_dump_addr_range(p_hwfn,
+                                                           p_ptt,
+                                                           dump_buf + offset,
+                                                           dump,
+                                                           addr,
+                                                           len);
+                       }
+
+                       /* Disable block's client and debug output */
+                       qed_bus_enable_clients(p_hwfn, p_ptt, 0);
+                       qed_wr(p_hwfn, p_ptt,
+                              p_block_defs->dbg_cycle_enable_addr, 0);
+               } else {
+                       /* All lines are invalid - dump zeros */
+                       if (dump)
+                               memset(dump_buf + offset, 0,
+                                      DWORDS_TO_BYTES(block_dwords));
+                       offset += block_dwords;
+               }
+       }
+
+       if (dump) {
+               qed_bus_enable_dbg_block(p_hwfn, p_ptt, false);
+               qed_bus_enable_clients(p_hwfn, p_ptt, 0);
+       }
+
+       return offset;
+}
+
+/* Performs GRC Dump to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
+                                   struct qed_ptt *p_ptt,
+                                   u32 *dump_buf,
+                                   bool dump, u32 *num_dumped_dwords)
+{
+       struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+       bool parities_masked = false;
+       u8 i, port_mode = 0;
+       u32 offset = 0;
+
+       *num_dumped_dwords = 0;
+
+       /* Find port mode */
+       if (dump) {
+               switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
+               case 0:
+                       port_mode = 1;
+                       break;
+               case 1:
+                       port_mode = 2;
+                       break;
+               case 2:
+                       port_mode = 4;
+                       break;
+               }
+       }
+
+       /* Update reset state */
+       if (dump)
+               qed_update_blocks_reset_state(p_hwfn, p_ptt);
+
+       /* Dump global params */
+       offset += qed_dump_common_global_params(p_hwfn,
+                                               p_ptt,
+                                               dump_buf + offset, dump, 4);
+       offset += qed_dump_str_param(dump_buf + offset,
+                                    dump, "dump-type", "grc-dump");
+       offset += qed_dump_num_param(dump_buf + offset,
+                                    dump,
+                                    "num-lcids",
+                                    qed_grc_get_param(p_hwfn,
+                                               DBG_GRC_PARAM_NUM_LCIDS));
+       offset += qed_dump_num_param(dump_buf + offset,
+                                    dump,
+                                    "num-ltids",
+                                    qed_grc_get_param(p_hwfn,
+                                               DBG_GRC_PARAM_NUM_LTIDS));
+       offset += qed_dump_num_param(dump_buf + offset,
+                                    dump, "num-ports", port_mode);
+
+       /* Dump reset registers (dumped before taking blocks out of reset) */
+       if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
+               offset += qed_grc_dump_reset_regs(p_hwfn,
+                                                 p_ptt,
+                                                 dump_buf + offset, dump);
+
+       /* Take all blocks out of reset (using reset registers) */
+       if (dump) {
+               qed_grc_unreset_blocks(p_hwfn, p_ptt);
+               qed_update_blocks_reset_state(p_hwfn, p_ptt);
+       }
+
+       /* Disable all parities using MFW command */
+       if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
+               parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1);
+               if (!parities_masked) {
+                       DP_NOTICE(p_hwfn,
+                                 "Failed to mask parities using MFW\n");
+                       if (qed_grc_get_param(p_hwfn,
+                                             DBG_GRC_PARAM_PARITY_SAFE))
+                               return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
+               }
+       }
+
+       /* Dump modified registers (dumped before modifying them) */
+       if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
+               offset += qed_grc_dump_modified_regs(p_hwfn,
+                                                    p_ptt,
+                                                    dump_buf + offset, dump);
+
+       /* Stall storms */
+       if (dump &&
+           (qed_grc_is_included(p_hwfn,
+                                DBG_GRC_PARAM_DUMP_IOR) ||
+            qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)))
+               qed_grc_stall_storms(p_hwfn, p_ptt, true);
+
+       /* Dump all registers */
+       if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
+               /* Dump all blocks except MCP */
+               bool block_enable[MAX_BLOCK_ID];
+
+               for (i = 0; i < MAX_BLOCK_ID; i++)
+                       block_enable[i] = true;
+               block_enable[BLOCK_MCP] = false;
+               offset += qed_grc_dump_registers(p_hwfn,
+                                                p_ptt,
+                                                dump_buf +
+                                                offset,
+                                                dump,
+                                                block_enable, NULL, NULL);
+
+               /* Dump special registers */
+               offset += qed_grc_dump_special_regs(p_hwfn,
+                                                   p_ptt,
+                                                   dump_buf + offset, dump);
+       }
+
+       /* Dump memories */
+       offset += qed_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump);
+
+       /* Dump MCP */
+       if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP))
+               offset += qed_grc_dump_mcp(p_hwfn,
+                                          p_ptt, dump_buf + offset, dump);
+
+       /* Dump context */
+       if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX))
+               offset += qed_grc_dump_ctx(p_hwfn,
+                                          p_ptt, dump_buf + offset, dump);
+
+       /* Dump RSS memories */
+       if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS))
+               offset += qed_grc_dump_rss(p_hwfn,
+                                          p_ptt, dump_buf + offset, dump);
+
+       /* Dump Big RAM */
+       for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
+               if (qed_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param))
+                       offset += qed_grc_dump_big_ram(p_hwfn,
+                                                      p_ptt,
+                                                      dump_buf + offset,
+                                                      dump, i);
+
+       /* Dump IORs */
+       if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR))
+               offset += qed_grc_dump_iors(p_hwfn,
+                                           p_ptt, dump_buf + offset, dump);
+
+       /* Dump VFC */
+       if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC))
+               offset += qed_grc_dump_vfc(p_hwfn,
+                                          p_ptt, dump_buf + offset, dump);
+
+       /* Dump PHY tbus */
+       if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PHY) &&
+           dev_data->chip_id == CHIP_K2 &&
+           dev_data->platform_id == PLATFORM_ASIC)
+               offset += qed_grc_dump_phy(p_hwfn,
+                                          p_ptt, dump_buf + offset, dump);
+
+       /* Dump static debug data */
+       if (qed_grc_is_included(p_hwfn,
+                               DBG_GRC_PARAM_DUMP_STATIC) &&
+           dev_data->bus.state == DBG_BUS_STATE_IDLE)
+               offset += qed_grc_dump_static_debug(p_hwfn,
+                                                   p_ptt,
+                                                   dump_buf + offset, dump);
+
+       /* Dump last section */
+       offset += qed_dump_last_section(dump_buf, offset, dump);
+       if (dump) {
+               /* Unstall storms */
+               if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
+                       qed_grc_stall_storms(p_hwfn, p_ptt, false);
+
+               /* Clear parity status */
+               qed_grc_clear_all_prty(p_hwfn, p_ptt);
+
+               /* Enable all parities using MFW command */
+               if (parities_masked)
+                       qed_mcp_mask_parities(p_hwfn, p_ptt, 0);
+       }
+
+       *num_dumped_dwords = offset;
+
+       return DBG_STATUS_OK;
+}
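+
+/* Note on the pattern used by qed_grc_dump() above and the dump functions
+ * below: each can be called with dump == false, in which case nothing is
+ * written and only the output offset is advanced. The same routine thus
+ * doubles as a buffer-size calculator, which is how the public
+ * qed_dbg_*_get_dump_buf_size() functions reuse these with a NULL buffer.
+ */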
+
+/* Writes the specified failing Idle Check rule to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
+                                    struct qed_ptt *p_ptt,
+                                    u32 *dump_buf,
+                                    bool dump,
+                                    u16 rule_id,
+                                    const struct dbg_idle_chk_rule *rule,
+                                    u16 fail_entry_id, u32 *cond_reg_values)
+{
+       const union dbg_idle_chk_reg *regs =
+               &((const union dbg_idle_chk_reg *)
+                 s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)[rule->reg_offset];
+       const struct dbg_idle_chk_cond_reg *cond_regs = &regs[0].cond_reg;
+       struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+       struct dbg_idle_chk_result_hdr *hdr =
+               (struct dbg_idle_chk_result_hdr *)dump_buf;
+       const struct dbg_idle_chk_info_reg *info_regs =
+               &regs[rule->num_cond_regs].info_reg;
+       u32 next_reg_offset = 0, i, offset = 0;
+       u8 reg_id;
+
+       /* Dump rule data */
+       if (dump) {
+               memset(hdr, 0, sizeof(*hdr));
+               hdr->rule_id = rule_id;
+               hdr->mem_entry_id = fail_entry_id;
+               hdr->severity = rule->severity;
+               hdr->num_dumped_cond_regs = rule->num_cond_regs;
+       }
+
+       offset += IDLE_CHK_RESULT_HDR_DWORDS;
+
+       /* Dump condition register values */
+       for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
+               const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
+
+               /* Write register header */
+               if (dump) {
+                       struct dbg_idle_chk_result_reg_hdr *reg_hdr =
+                           (struct dbg_idle_chk_result_reg_hdr *)(dump_buf +
+                                                                  offset);
+                       offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
+                       memset(reg_hdr, 0,
+                              sizeof(struct dbg_idle_chk_result_reg_hdr));
+                       reg_hdr->start_entry = reg->start_entry;
+                       reg_hdr->size = reg->entry_size;
+                       SET_FIELD(reg_hdr->data,
+                                 DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM,
+                                 reg->num_entries > 1 || reg->start_entry > 0
+                                 ? 1 : 0);
+                       SET_FIELD(reg_hdr->data,
+                                 DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);
+
+                       /* Write register values */
+                       for (i = 0; i < reg_hdr->size;
+                            i++, next_reg_offset++, offset++)
+                               dump_buf[offset] =
+                                   cond_reg_values[next_reg_offset];
+               } else {
+                       offset += IDLE_CHK_RESULT_REG_HDR_DWORDS +
+                           reg->entry_size;
+               }
+       }
+
+       /* Dump info register values */
+       for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) {
+               const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
+               u32 block_id;
+
+               if (!dump) {
+                       offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
+                       continue;
+               }
+
+               /* Check if register's block is in reset */
+               block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
+               if (block_id >= MAX_BLOCK_ID) {
+                       DP_NOTICE(p_hwfn, "Invalid block_id\n");
+                       return 0;
+               }
+
+               if (!dev_data->block_in_reset[block_id]) {
+                       bool eval_mode = GET_FIELD(reg->mode.data,
+                                                  DBG_MODE_HDR_EVAL_MODE) > 0;
+                       bool mode_match = true;
+
+                       /* Check mode */
+                       if (eval_mode) {
+                               u16 modes_buf_offset =
+                                       GET_FIELD(reg->mode.data,
+                                               DBG_MODE_HDR_MODES_BUF_OFFSET);
+                               mode_match =
+                                       qed_is_mode_match(p_hwfn,
+                                                         &modes_buf_offset);
+                       }
+
+                       if (mode_match) {
+                               u32 addr =
+                                   GET_FIELD(reg->data,
+                                             DBG_IDLE_CHK_INFO_REG_ADDRESS);
+
+                               /* Write register header */
+                               struct dbg_idle_chk_result_reg_hdr *reg_hdr =
+                                       (struct dbg_idle_chk_result_reg_hdr *)
+                                       (dump_buf + offset);
+
+                               offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
+                               hdr->num_dumped_info_regs++;
+                               memset(reg_hdr, 0, sizeof(*reg_hdr));
+                               reg_hdr->size = reg->size;
+                               SET_FIELD(reg_hdr->data,
+                                         DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
+                                         rule->num_cond_regs + reg_id);
+
+                               /* Write register values */
+                               offset +=
+                                   qed_grc_dump_addr_range(p_hwfn,
+                                                           p_ptt,
+                                                           dump_buf + offset,
+                                                           dump,
+                                                           addr,
+                                                           reg->size);
+                       }
+               }
+       }
+
+       return offset;
+}
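+
+/* Layout of a single failure record produced by qed_idle_chk_dump_failure():
+ * a dbg_idle_chk_result_hdr, then for each condition register a
+ * dbg_idle_chk_result_reg_hdr followed by its entry values, then for each
+ * info register whose block is out of reset and whose mode matches, another
+ * reg hdr followed by reg->size value dwords.
+ */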
+
+/* Dumps idle check rule entries. Returns the dumped size in dwords. */
+static u32
+qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+                              u32 *dump_buf, bool dump,
+                              const struct dbg_idle_chk_rule *input_rules,
+                              u32 num_input_rules, u32 *num_failing_rules)
+{
+       struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+       u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
+       u32 i, offset = 0;
+       u16 entry_id;
+       u8 reg_id;
+
+       *num_failing_rules = 0;
+       for (i = 0; i < num_input_rules; i++) {
+               const struct dbg_idle_chk_cond_reg *cond_regs;
+               const struct dbg_idle_chk_rule *rule;
+               const union dbg_idle_chk_reg *regs;
+               u16 num_reg_entries = 1;
+               bool check_rule = true;
+               const u32 *imm_values;
+
+               rule = &input_rules[i];
+               regs = &((const union dbg_idle_chk_reg *)
+                        s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)
+                       [rule->reg_offset];
+               cond_regs = &regs[0].cond_reg;
+               imm_values = &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr
+                            [rule->imm_offset];
+
+               /* Check if all condition register blocks are out of reset, and
+                * find the maximal number of entries (all condition registers
+                * that
+                * are memories must have the same size, which is > 1).
+                */
+               for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule;
+                    reg_id++) {
+                       u32 block_id = GET_FIELD(cond_regs[reg_id].data,
+                                               DBG_IDLE_CHK_COND_REG_BLOCK_ID);
+
+                       if (block_id >= MAX_BLOCK_ID) {
+                               DP_NOTICE(p_hwfn, "Invalid block_id\n");
+                               return 0;
+                       }
+
+                       check_rule = !dev_data->block_in_reset[block_id];
+                       if (cond_regs[reg_id].num_entries > num_reg_entries)
+                               num_reg_entries = cond_regs[reg_id].num_entries;
+               }
+
+               if (!check_rule && dump)
+                       continue;
+
+               if (!dump) {
+                       u32 entry_dump_size =
+                               qed_idle_chk_dump_failure(p_hwfn,
+                                                         p_ptt,
+                                                         dump_buf + offset,
+                                                         false,
+                                                         rule->rule_id,
+                                                         rule,
+                                                         0,
+                                                         NULL);
+
+                       offset += num_reg_entries * entry_dump_size;
+                       (*num_failing_rules) += num_reg_entries;
+                       continue;
+               }
+
+               /* Go over all register entries (number of entries is the same
+                * for all condition registers).
+                */
+               for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
+                       /* Read current entry of all condition registers */
+                       u32 next_reg_offset = 0;
+
+                       for (reg_id = 0; reg_id < rule->num_cond_regs;
+                            reg_id++) {
+                               const struct dbg_idle_chk_cond_reg *reg =
+                                       &cond_regs[reg_id];
+
+                               /* Find GRC address (if it's a memory, the
+                                * address of the specific entry is calculated).
+                                */
+                               u32 addr =
+                                   GET_FIELD(reg->data,
+                                             DBG_IDLE_CHK_COND_REG_ADDRESS);
+
+                               if (reg->num_entries > 1 ||
+                                   reg->start_entry > 0) {
+                                       u32 padded_entry_size =
+                                          reg->entry_size > 1 ?
+                                          roundup_pow_of_two(reg->entry_size) :
+                                          1;
+
+                                       addr += (reg->start_entry + entry_id) *
+                                               padded_entry_size;
+                               }
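+                               /* Example: entry_size == 3 dwords is padded
+                                * to 4 in GRC, so entry N starts at
+                                * addr + (start_entry + N) * 4.
+                                */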
+
+                               /* Read registers */
+                               if (next_reg_offset + reg->entry_size >=
+                                   IDLE_CHK_MAX_ENTRIES_SIZE) {
+                                       DP_NOTICE(p_hwfn,
+                                                 "idle check registers entry is too large\n");
+                                       return 0;
+                               }
+
+                               next_reg_offset +=
+                                   qed_grc_dump_addr_range(p_hwfn,
+                                                           p_ptt,
+                                                           cond_reg_values +
+                                                           next_reg_offset,
+                                                           dump, addr,
+                                                           reg->entry_size);
+                       }
+
+                       /* Call rule's condition function - a return value of
+                        * true indicates failure.
+                        */
+                       if ((*cond_arr[rule->cond_id])(cond_reg_values,
+                                                      imm_values)) {
+                               offset +=
+                                   qed_idle_chk_dump_failure(p_hwfn,
+                                                             p_ptt,
+                                                             dump_buf + offset,
+                                                             dump,
+                                                             rule->rule_id,
+                                                             rule,
+                                                             entry_id,
+                                                             cond_reg_values);
+                               (*num_failing_rules)++;
+                               break;
+                       }
+               }
+       }
+
+       return offset;
+}
+
+/* Performs Idle Check Dump to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
+                            struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+       u32 offset = 0, input_offset = 0, num_failing_rules = 0;
+       u32 num_failing_rules_offset;
+
+       /* Dump global params */
+       offset += qed_dump_common_global_params(p_hwfn,
+                                               p_ptt,
+                                               dump_buf + offset, dump, 1);
+       offset += qed_dump_str_param(dump_buf + offset,
+                                    dump, "dump-type", "idle-chk");
+
+       /* Dump idle check section header with a single parameter */
+       offset += qed_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
+       num_failing_rules_offset = offset;
+       offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0);
+       while (input_offset <
+              s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].size_in_dwords) {
+               const struct dbg_idle_chk_cond_hdr *cond_hdr =
+                       (const struct dbg_idle_chk_cond_hdr *)
+                       &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr
+                       [input_offset++];
+               bool eval_mode = GET_FIELD(cond_hdr->mode.data,
+                                          DBG_MODE_HDR_EVAL_MODE) > 0;
+               bool mode_match = true;
+
+               /* Check mode */
+               if (eval_mode) {
+                       u16 modes_buf_offset =
+                               GET_FIELD(cond_hdr->mode.data,
+                                         DBG_MODE_HDR_MODES_BUF_OFFSET);
+
+                       mode_match = qed_is_mode_match(p_hwfn,
+                                                      &modes_buf_offset);
+               }
+
+               if (mode_match) {
+                       u32 curr_failing_rules;
+
+                       offset +=
+                           qed_idle_chk_dump_rule_entries(p_hwfn,
+                               p_ptt,
+                               dump_buf + offset,
+                               dump,
+                               (const struct dbg_idle_chk_rule *)
+                               &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].
+                               ptr[input_offset],
+                               cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS,
+                               &curr_failing_rules);
+                       num_failing_rules += curr_failing_rules;
+               }
+
+               input_offset += cond_hdr->data_size;
+       }
+
+       /* Overwrite num_rules parameter */
+       if (dump)
+               qed_dump_num_param(dump_buf + num_failing_rules_offset,
+                                  dump, "num_rules", num_failing_rules);
+
+       return offset;
+}
+
+/* Finds the meta data image in NVRAM. */
+static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
+                                           struct qed_ptt *p_ptt,
+                                           u32 image_type,
+                                           u32 *nvram_offset_bytes,
+                                           u32 *nvram_size_bytes)
+{
+       u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
+       struct mcp_file_att file_att;
+
+       /* Call NVRAM get file command */
+       int nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
+                                           p_ptt,
+                                           DRV_MSG_CODE_NVM_GET_FILE_ATT,
+                                           image_type,
+                                           &ret_mcp_resp,
+                                           &ret_mcp_param,
+                                           &ret_txn_size,
+                                           (u32 *)&file_att);
+
+       /* Check response */
+       if (nvm_result ||
+           (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
+               return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
+
+       /* Update return values */
+       *nvram_offset_bytes = file_att.nvm_start_addr;
+       *nvram_size_bytes = file_att.len;
+       DP_VERBOSE(p_hwfn,
+                  QED_MSG_DEBUG,
+                  "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
+                  image_type, *nvram_offset_bytes, *nvram_size_bytes);
+
+       /* Check alignment */
+       if (*nvram_size_bytes & 0x3)
+               return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
+       return DBG_STATUS_OK;
+}
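+
+/* The dword-alignment check above matters because the image is later read
+ * into a u32 buffer and its size is converted with BYTES_TO_DWORDS().
+ */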
+
+static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
+                                     struct qed_ptt *p_ptt,
+                                     u32 nvram_offset_bytes,
+                                     u32 nvram_size_bytes, u32 *ret_buf)
+{
+       u32 ret_mcp_resp, ret_mcp_param, ret_read_size;
+       u32 bytes_to_copy, read_offset = 0;
+       s32 bytes_left = nvram_size_bytes;
+
+       DP_VERBOSE(p_hwfn,
+                  QED_MSG_DEBUG,
+                  "nvram_read: reading image of size %d bytes from NVRAM\n",
+                  nvram_size_bytes);
+       do {
+               bytes_to_copy = (bytes_left > MCP_DRV_NVM_BUF_LEN) ?
+                               MCP_DRV_NVM_BUF_LEN : bytes_left;
+
+               /* Call NVRAM read command */
+               if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
+                                      DRV_MSG_CODE_NVM_READ_NVRAM,
+                                      (nvram_offset_bytes +
+                                       read_offset) |
+                                      (bytes_to_copy <<
+                                       DRV_MB_PARAM_NVM_LEN_SHIFT),
+                                      &ret_mcp_resp, &ret_mcp_param,
+                                      &ret_read_size,
+                                      (u32 *)((u8 *)ret_buf +
+                                              read_offset)) != 0)
+                       return DBG_STATUS_NVRAM_READ_FAILED;
+
+               /* Check response */
+               if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
+                       return DBG_STATUS_NVRAM_READ_FAILED;
+
+               /* Update read offset */
+               read_offset += ret_read_size;
+               bytes_left -= ret_read_size;
+       } while (bytes_left > 0);
+
+       return DBG_STATUS_OK;
+}
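+
+/* The NVRAM read above is chunked to MCP_DRV_NVM_BUF_LEN bytes per MCP
+ * command; the 32-bit command parameter packs the byte offset in its low
+ * bits and the chunk length above DRV_MB_PARAM_NVM_LEN_SHIFT:
+ *
+ *   param = (offset + read_offset) | (len << DRV_MB_PARAM_NVM_LEN_SHIFT)
+ */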
+
+/* Get info on the MCP Trace data in the scratchpad:
+ * - trace_data_grc_addr - the GRC address of the trace data
+ * - trace_data_size_bytes - the size in bytes of the MCP Trace data (without
+ *     the header)
+ */
+static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn,
+                                                  struct qed_ptt *p_ptt,
+                                                  u32 *trace_data_grc_addr,
+                                                  u32 *trace_data_size_bytes)
+{
+       /* Read MCP trace section offsize structure from MCP scratchpad */
+       u32 spad_trace_offsize = qed_rd(p_hwfn,
+                                       p_ptt,
+                                       MCP_SPAD_TRACE_OFFSIZE_ADDR);
+       u32 signature;
+
+       /* Extract MCP trace section GRC address from offsize structure (within
+        * scratchpad).
+        */
+       *trace_data_grc_addr =
+               MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize);
+
+       /* Read signature from MCP trace section */
+       signature = qed_rd(p_hwfn, p_ptt,
+                          *trace_data_grc_addr +
+                          offsetof(struct mcp_trace, signature));
+       if (signature != MFW_TRACE_SIGNATURE)
+               return DBG_STATUS_INVALID_TRACE_SIGNATURE;
+
+       /* Read trace size from MCP trace section */
+       *trace_data_size_bytes = qed_rd(p_hwfn,
+                                       p_ptt,
+                                       *trace_data_grc_addr +
+                                       offsetof(struct mcp_trace, size));
+       return DBG_STATUS_OK;
+}
+
+/* Reads MCP trace meta data image from NVRAM.
+ * - running_bundle_id (OUT) - the running bundle ID (invalid when loaded from
+ *     file)
+ * - trace_meta_offset_bytes (OUT) - the NVRAM offset in bytes in which the MCP
+ *     Trace meta data starts (invalid when loaded from file)
+ * - trace_meta_size_bytes (OUT) - the size in bytes of the MCP Trace meta data
+ */
+static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
+                                                  struct qed_ptt *p_ptt,
+                                                  u32 trace_data_size_bytes,
+                                                  u32 *running_bundle_id,
+                                                  u32 *trace_meta_offset_bytes,
+                                                  u32 *trace_meta_size_bytes)
+{
+       /* Read MCP trace section offsize structure from MCP scratchpad */
+       u32 spad_trace_offsize = qed_rd(p_hwfn,
+                                       p_ptt,
+                                       MCP_SPAD_TRACE_OFFSIZE_ADDR);
+
+       /* Find running bundle ID */
+       u32 running_mfw_addr =
+               MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) +
+               QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
+       u32 nvram_image_type;
+
+       *running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr);
+       if (*running_bundle_id > 1)
+               return DBG_STATUS_INVALID_NVRAM_BUNDLE;
+
+       /* Find image in NVRAM */
+       nvram_image_type = (*running_bundle_id == DIR_ID_1) ?
+                          NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
+
+       return qed_find_nvram_image(p_hwfn,
+                                   p_ptt,
+                                   nvram_image_type,
+                                   trace_meta_offset_bytes,
+                                   trace_meta_size_bytes);
+}
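+
+/* Addresses used by the two helpers above, where offsize is the dword read
+ * from MCP_SPAD_TRACE_OFFSIZE_ADDR:
+ *
+ *   trace data:     MCP_REG_SCRATCH + SECTION_OFFSET(offsize)
+ *                   (starts with a struct mcp_trace header)
+ *   running bundle: trace data address + QED_SECTION_SIZE(offsize) +
+ *                   trace_data_size_bytes
+ */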
+
+/* Reads the MCP Trace meta data (from NVRAM or buffer) into the specified
+ * buffer.
+ */
+static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
+                                              struct qed_ptt *p_ptt,
+                                              u32 nvram_offset_in_bytes,
+                                              u32 size_in_bytes, u32 *buf)
+{
+       u8 *byte_buf = (u8 *)buf;
+       u8 modules_num, i;
+       u32 signature;
+
+       /* Read meta data from NVRAM */
+       enum dbg_status status = qed_nvram_read(p_hwfn,
+                                               p_ptt,
+                                               nvram_offset_in_bytes,
+                                               size_in_bytes,
+                                               buf);
+
+       if (status != DBG_STATUS_OK)
+               return status;
+
+       /* Extract and check first signature */
+       signature = qed_read_unaligned_dword(byte_buf);
+       byte_buf += sizeof(u32);
+       if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
+               return DBG_STATUS_INVALID_TRACE_SIGNATURE;
+
+       /* Extract number of modules */
+       modules_num = *(byte_buf++);
+
+       /* Skip all modules */
+       for (i = 0; i < modules_num; i++) {
+               u8 module_len = *(byte_buf++);
+
+               byte_buf += module_len;
+       }
+
+       /* Extract and check second signature */
+       signature = qed_read_unaligned_dword(byte_buf);
+       byte_buf += sizeof(u32);
+       if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
+               return DBG_STATUS_INVALID_TRACE_SIGNATURE;
+       return DBG_STATUS_OK;
+}
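+
+/* Binary layout of the meta image, as parsed by the function above:
+ *
+ *   u32 signature            (MCP_TRACE_META_IMAGE_SIGNATURE)
+ *   u8  modules_num
+ *   per module: u8 len followed by len name bytes
+ *   u32 signature            (repeated)
+ *
+ * The remainder of the image is not validated here.
+ */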
+
+/* Dump MCP Trace */
+static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
+                                         struct qed_ptt *p_ptt,
+                                         u32 *dump_buf,
+                                         bool dump, u32 *num_dumped_dwords)
+{
+       u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
+       u32 trace_meta_size_dwords = 0, running_bundle_id, offset = 0;
+       u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0;
+       enum dbg_status status;
+       bool mcp_access;
+       int halted = 0;
+
+       mcp_access = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
+
+       *num_dumped_dwords = 0;
+
+       /* Get trace data info */
+       status = qed_mcp_trace_get_data_info(p_hwfn,
+                                            p_ptt,
+                                            &trace_data_grc_addr,
+                                            &trace_data_size_bytes);
+       if (status != DBG_STATUS_OK)
+               return status;
+
+       /* Dump global params */
+       offset += qed_dump_common_global_params(p_hwfn,
+                                               p_ptt,
+                                               dump_buf + offset, dump, 1);
+       offset += qed_dump_str_param(dump_buf + offset,
+                                    dump, "dump-type", "mcp-trace");
+
+       /* Halt MCP while reading from scratchpad so the read data will be
+        * consistent. If the halt fails, the MCP trace is taken anyway, with
+        * a small risk that it may be corrupt.
+        */
+       if (dump && mcp_access) {
+               halted = !qed_mcp_halt(p_hwfn, p_ptt);
+               if (!halted)
+                       DP_NOTICE(p_hwfn, "MCP halt failed!\n");
+       }
+
+       /* Find trace data size */
+       trace_data_size_dwords =
+               DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace),
+                            BYTES_IN_DWORD);
+
+       /* Dump trace data section header and param */
+       offset += qed_dump_section_hdr(dump_buf + offset,
+                                      dump, "mcp_trace_data", 1);
+       offset += qed_dump_num_param(dump_buf + offset,
+                                    dump, "size", trace_data_size_dwords);
+
+       /* Read trace data from scratchpad into dump buffer */
+       offset += qed_grc_dump_addr_range(p_hwfn,
+                                         p_ptt,
+                                         dump_buf + offset,
+                                         dump,
+                                         BYTES_TO_DWORDS(trace_data_grc_addr),
+                                         trace_data_size_dwords);
+
+       /* Resume MCP (only if halt succeeded) */
+       if (halted && qed_mcp_resume(p_hwfn, p_ptt) != 0)
+               DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
+
+       /* Dump trace meta section header */
+       offset += qed_dump_section_hdr(dump_buf + offset,
+                                      dump, "mcp_trace_meta", 1);
+
+       /* Read trace meta info */
+       if (mcp_access) {
+               status = qed_mcp_trace_get_meta_info(p_hwfn,
+                                                    p_ptt,
+                                                    trace_data_size_bytes,
+                                                    &running_bundle_id,
+                                                    &trace_meta_offset_bytes,
+                                                    &trace_meta_size_bytes);
+               if (status == DBG_STATUS_OK)
+                       trace_meta_size_dwords =
+                               BYTES_TO_DWORDS(trace_meta_size_bytes);
+       }
+
+       /* Dump trace meta size param */
+       offset += qed_dump_num_param(dump_buf + offset,
+                                    dump, "size", trace_meta_size_dwords);
+
+       /* Read trace meta image into dump buffer */
+       if (dump && trace_meta_size_dwords)
+               status = qed_mcp_trace_read_meta(p_hwfn,
+                                                p_ptt,
+                                                trace_meta_offset_bytes,
+                                                trace_meta_size_bytes,
+                                                dump_buf + offset);
+       if (status == DBG_STATUS_OK)
+               offset += trace_meta_size_dwords;
+
+       *num_dumped_dwords = offset;
+
+       /* If there was no MCP access, indicate that the dump doesn't contain
+        * the meta data from NVRAM.
+        */
+       return mcp_access ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
+}
+
+/* Dump GRC FIFO */
+static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
+                                        struct qed_ptt *p_ptt,
+                                        u32 *dump_buf,
+                                        bool dump, u32 *num_dumped_dwords)
+{
+       u32 offset = 0, dwords_read, size_param_offset;
+       bool fifo_has_data;
+
+       *num_dumped_dwords = 0;
+
+       /* Dump global params */
+       offset += qed_dump_common_global_params(p_hwfn,
+                                               p_ptt,
+                                               dump_buf + offset, dump, 1);
+       offset += qed_dump_str_param(dump_buf + offset,
+                                    dump, "dump-type", "reg-fifo");
+
+       /* Dump fifo data section header and param. The size param is 0 for now,
+        * and is overwritten after reading the FIFO.
+        */
+       offset += qed_dump_section_hdr(dump_buf + offset,
+                                      dump, "reg_fifo_data", 1);
+       size_param_offset = offset;
+       offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
+
+       if (!dump) {
+               /* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to
+                * test how much data is available, except by reading it.
+                */
+               offset += REG_FIFO_DEPTH_DWORDS;
+               *num_dumped_dwords = offset;
+               return DBG_STATUS_OK;
+       }
+
+       fifo_has_data = qed_rd(p_hwfn, p_ptt,
+                              GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
+
+       /* Pull available data from the FIFO. Use DMAE since this is widebus
+        * memory and must be accessed atomically. Keep dwords_read within the
+        * buffer size, since more entries could be added to the FIFO while we
+        * are emptying it.
+        */
+       for (dwords_read = 0;
+            fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS;
+            dwords_read += REG_FIFO_ELEMENT_DWORDS,
+            offset += REG_FIFO_ELEMENT_DWORDS) {
+               if (qed_dmae_grc2host(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO,
+                                     (u64)(uintptr_t)(&dump_buf[offset]),
+                                     REG_FIFO_ELEMENT_DWORDS, 0))
+                       return DBG_STATUS_DMAE_FAILED;
+               fifo_has_data = qed_rd(p_hwfn, p_ptt,
+                                      GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
+       }
+
+       qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
+                          dwords_read);
+
+       *num_dumped_dwords = offset;
+       return DBG_STATUS_OK;
+}
+
+/* Dump IGU FIFO */
+static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
+                                        struct qed_ptt *p_ptt,
+                                        u32 *dump_buf,
+                                        bool dump, u32 *num_dumped_dwords)
+{
+       u32 offset = 0, dwords_read, size_param_offset;
+       bool fifo_has_data;
+
+       *num_dumped_dwords = 0;
+
+       /* Dump global params */
+       offset += qed_dump_common_global_params(p_hwfn,
+                                               p_ptt,
+                                               dump_buf + offset, dump, 1);
+       offset += qed_dump_str_param(dump_buf + offset,
+                                    dump, "dump-type", "igu-fifo");
+
+       /* Dump fifo data section header and param. The size param is 0 for now,
+        * and is overwritten after reading the FIFO.
+        */
+       offset += qed_dump_section_hdr(dump_buf + offset,
+                                      dump, "igu_fifo_data", 1);
+       size_param_offset = offset;
+       offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
+
+       if (!dump) {
+               /* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to
+                * test how much data is available, except by reading it.
+                */
+               offset += IGU_FIFO_DEPTH_DWORDS;
+               *num_dumped_dwords = offset;
+               return DBG_STATUS_OK;
+       }
+
+       fifo_has_data = qed_rd(p_hwfn, p_ptt,
+                              IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
+
+       /* Pull available data from the FIFO. Use DMAE since this is widebus
+        * memory and must be accessed atomically. Keep dwords_read within the
+        * buffer size, since more entries could be added to the FIFO while we
+        * are emptying it.
+        */
+       for (dwords_read = 0;
+            fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS;
+            dwords_read += IGU_FIFO_ELEMENT_DWORDS,
+            offset += IGU_FIFO_ELEMENT_DWORDS) {
+               if (qed_dmae_grc2host(p_hwfn, p_ptt,
+                                     IGU_REG_ERROR_HANDLING_MEMORY,
+                                     (u64)(uintptr_t)(&dump_buf[offset]),
+                                     IGU_FIFO_ELEMENT_DWORDS, 0))
+                       return DBG_STATUS_DMAE_FAILED;
+               fifo_has_data = qed_rd(p_hwfn, p_ptt,
+                                      IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
+       }
+
+       qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
+                          dwords_read);
+
+       *num_dumped_dwords = offset;
+       return DBG_STATUS_OK;
+}
+
+/* Protection Override dump */
+static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
+                                                   struct qed_ptt *p_ptt,
+                                                   u32 *dump_buf,
+                                                   bool dump,
+                                                   u32 *num_dumped_dwords)
+{
+       u32 offset = 0, size_param_offset, override_window_dwords;
+
+       *num_dumped_dwords = 0;
+
+       /* Dump global params */
+       offset += qed_dump_common_global_params(p_hwfn,
+                                               p_ptt,
+                                               dump_buf + offset, dump, 1);
+       offset += qed_dump_str_param(dump_buf + offset,
+                                    dump, "dump-type", "protection-override");
+
+       /* Dump data section header and param. The size param is 0 for now, and
+        * is overwritten after reading the data.
+        */
+       offset += qed_dump_section_hdr(dump_buf + offset,
+                                      dump, "protection_override_data", 1);
+       size_param_offset = offset;
+       offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
+
+       if (!dump) {
+               offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
+               *num_dumped_dwords = offset;
+               return DBG_STATUS_OK;
+       }
+
+       /* Add override window info to buffer */
+       override_window_dwords =
+               qed_rd(p_hwfn, p_ptt,
+                      GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
+                      PROTECTION_OVERRIDE_ELEMENT_DWORDS;
+       if (qed_dmae_grc2host(p_hwfn, p_ptt,
+                             GRC_REG_PROTECTION_OVERRIDE_WINDOW,
+                             (u64)(uintptr_t)(dump_buf + offset),
+                             override_window_dwords, 0))
+               return DBG_STATUS_DMAE_FAILED;
+       offset += override_window_dwords;
+       qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
+                          override_window_dwords);
+
+       *num_dumped_dwords = offset;
+       return DBG_STATUS_OK;
+}
+
+/* Performs FW Asserts Dump to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
+                              struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+       struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+       struct fw_asserts_ram_section *asserts;
+       char storm_letter_str[2] = "?";
+       struct fw_info fw_info;
+       u32 offset = 0;
+       u8 storm_id;
+
+       /* Dump global params */
+       offset += qed_dump_common_global_params(p_hwfn,
+                                               p_ptt,
+                                               dump_buf + offset, dump, 1);
+       offset += qed_dump_str_param(dump_buf + offset,
+                                    dump, "dump-type", "fw-asserts");
+       for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+               u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx;
+               u32 last_list_idx, addr;
+
+               if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id])
+                       continue;
+
+               /* Read FW info for the current Storm */
+               qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
+
+               asserts = &fw_info.fw_asserts_section;
+
+               /* Dump FW Asserts section header and params */
+               storm_letter_str[0] = s_storm_defs[storm_id].letter;
+               offset += qed_dump_section_hdr(dump_buf + offset, dump,
+                                              "fw_asserts", 2);
+               offset += qed_dump_str_param(dump_buf + offset, dump, "storm",
+                                            storm_letter_str);
+               offset += qed_dump_num_param(dump_buf + offset, dump, "size",
+                                            asserts->list_element_dword_size);
+
+               if (!dump) {
+                       offset += asserts->list_element_dword_size;
+                       continue;
+               }
+
+               /* Read and dump FW Asserts data */
+               fw_asserts_section_addr =
+                       s_storm_defs[storm_id].sem_fast_mem_addr +
+                       SEM_FAST_REG_INT_RAM +
+                       RAM_LINES_TO_BYTES(asserts->section_ram_line_offset);
+               next_list_idx_addr =
+                       fw_asserts_section_addr +
+                       DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
+               next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
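+               /* next_list_idx is the index of the next entry to be written,
+                * so the entry just before it (wrapping to the end of the
+                * list when it is 0) is the most recently recorded assert.
+                */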
+               last_list_idx = (next_list_idx > 0
+                                ? next_list_idx
+                                : asserts->list_num_elements) - 1;
+               addr = BYTES_TO_DWORDS(fw_asserts_section_addr) +
+                      asserts->list_dword_offset +
+                      last_list_idx * asserts->list_element_dword_size;
+               offset +=
+                   qed_grc_dump_addr_range(p_hwfn, p_ptt,
+                                           dump_buf + offset,
+                                           dump, addr,
+                                           asserts->list_element_dword_size);
+       }
+
+       /* Dump last section */
+       offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
+       return offset;
+}
+
+/***************************** Public Functions *******************************/
+
+enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
+{
+       /* Convert binary data to debug arrays */
+       struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
+       u8 buf_id;
+
+       for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
+               s_dbg_arrays[buf_id].ptr =
+                   (u32 *)(bin_ptr + buf_array[buf_id].offset);
+               s_dbg_arrays[buf_id].size_in_dwords =
+                   BYTES_TO_DWORDS(buf_array[buf_id].length);
+       }
+
+       return DBG_STATUS_OK;
+}
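+
+/* The blob passed to qed_dbg_set_bin_ptr() starts with an array of
+ * struct bin_buffer_hdr indexed by the BIN_BUF_DBG_* IDs; each entry holds
+ * the byte offset and length of the corresponding debug array within the
+ * blob itself.
+ */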
+
+/* Assign default GRC param values */
+void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
+{
+       struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+       u32 i;
+
+       for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
+               dev_data->grc.param_val[i] =
+                   s_grc_param_defs[i].default_val[dev_data->chip_id];
+}
+
+enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+                                             struct qed_ptt *p_ptt,
+                                             u32 *buf_size)
+{
+       enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+
+       *buf_size = 0;
+       if (status != DBG_STATUS_OK)
+               return status;
+       if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
+           !s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr ||
+           !s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
+           !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
+           !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
+               return DBG_STATUS_DBG_ARRAY_NOT_SET;
+       return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size);
+}
+
+enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
+                                struct qed_ptt *p_ptt,
+                                u32 *dump_buf,
+                                u32 buf_size_in_dwords,
+                                u32 *num_dumped_dwords)
+{
+       u32 needed_buf_size_in_dwords;
+       enum dbg_status status;
+
+       status = qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt,
+                                              &needed_buf_size_in_dwords);
+
+       *num_dumped_dwords = 0;
+       if (status != DBG_STATUS_OK)
+               return status;
+       if (buf_size_in_dwords < needed_buf_size_in_dwords)
+               return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+       /* GRC Dump */
+       status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
+
+       /* Revert GRC params to their default */
+       qed_dbg_grc_set_params_default(p_hwfn);
+
+       return status;
+}
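+
+/* Typical two-call usage of the dump API above (illustrative sketch; the
+ * allocation strategy is the caller's choice, vzalloc() is just an example):
+ *
+ *   u32 size, written;
+ *   u32 *buf;
+ *
+ *   if (qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, &size) !=
+ *       DBG_STATUS_OK)
+ *           return;
+ *   buf = vzalloc(size * sizeof(u32));
+ *   if (buf) {
+ *           qed_dbg_grc_dump(p_hwfn, p_ptt, buf, size, &written);
+ *           vfree(buf);
+ *   }
+ *
+ * The same pattern applies to the idle-chk, mcp-trace, reg-fifo, igu-fifo,
+ * protection-override and fw-asserts dumps below.
+ */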
+
+enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+                                                  struct qed_ptt *p_ptt,
+                                                  u32 *buf_size)
+{
+       enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+       struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
+       *buf_size = 0;
+       if (status != DBG_STATUS_OK)
+               return status;
+       if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
+           !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
+           !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr ||
+           !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
+               return DBG_STATUS_DBG_ARRAY_NOT_SET;
+       if (!dev_data->idle_chk.buf_size_set) {
+               dev_data->idle_chk.buf_size = qed_idle_chk_dump(p_hwfn,
+                                                               p_ptt,
+                                                               NULL, false);
+               dev_data->idle_chk.buf_size_set = true;
+       }
+
+       *buf_size = dev_data->idle_chk.buf_size;
+       return DBG_STATUS_OK;
+}
+
+enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
+                                     struct qed_ptt *p_ptt,
+                                     u32 *dump_buf,
+                                     u32 buf_size_in_dwords,
+                                     u32 *num_dumped_dwords)
+{
+       u32 needed_buf_size_in_dwords;
+       enum dbg_status status;
+
+       status = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt,
+                                                   &needed_buf_size_in_dwords);
+
+       *num_dumped_dwords = 0;
+       if (status != DBG_STATUS_OK)
+               return status;
+       if (buf_size_in_dwords < needed_buf_size_in_dwords)
+               return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+       /* Update reset state */
+       qed_update_blocks_reset_state(p_hwfn, p_ptt);
+
+       /* Idle Check Dump */
+       *num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
+
+       /* Revert GRC params to their default */
+       qed_dbg_grc_set_params_default(p_hwfn);
+
+       return DBG_STATUS_OK;
+}
+
+enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+                                                   struct qed_ptt *p_ptt,
+                                                   u32 *buf_size)
+{
+       enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+
+       *buf_size = 0;
+       if (status != DBG_STATUS_OK)
+               return status;
+       return qed_mcp_trace_dump(p_hwfn, p_ptt, NULL, false, buf_size);
+}
+
+enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
+                                      struct qed_ptt *p_ptt,
+                                      u32 *dump_buf,
+                                      u32 buf_size_in_dwords,
+                                      u32 *num_dumped_dwords)
+{
+       u32 needed_buf_size_in_dwords;
+       enum dbg_status status;
+
+       /* Validate buffer size */
+       status =
+           qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt,
+                                               &needed_buf_size_in_dwords);
+
+       if (status != DBG_STATUS_OK &&
+           status != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
+               return status;
+
+       if (buf_size_in_dwords < needed_buf_size_in_dwords)
+               return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+       /* Update reset state */
+       qed_update_blocks_reset_state(p_hwfn, p_ptt);
+
+       /* Perform dump */
+       status = qed_mcp_trace_dump(p_hwfn,
+                                   p_ptt, dump_buf, true, num_dumped_dwords);
+
+       /* Revert GRC params to their default */
+       qed_dbg_grc_set_params_default(p_hwfn);
+
+       return status;
+}
+
+enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+                                                  struct qed_ptt *p_ptt,
+                                                  u32 *buf_size)
+{
+       enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+
+       *buf_size = 0;
+       if (status != DBG_STATUS_OK)
+               return status;
+       return qed_reg_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
+}
+
+enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
+                                     struct qed_ptt *p_ptt,
+                                     u32 *dump_buf,
+                                     u32 buf_size_in_dwords,
+                                     u32 *num_dumped_dwords)
+{
+       u32 needed_buf_size_in_dwords;
+       enum dbg_status status;
+
+       status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn, p_ptt,
+                                                   &needed_buf_size_in_dwords);
+
+       *num_dumped_dwords = 0;
+       if (status != DBG_STATUS_OK)
+               return status;
+       if (buf_size_in_dwords < needed_buf_size_in_dwords)
+               return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+       /* Update reset state */
+       qed_update_blocks_reset_state(p_hwfn, p_ptt);
+
+       status = qed_reg_fifo_dump(p_hwfn,
+                                  p_ptt, dump_buf, true, num_dumped_dwords);
+
+       /* Revert GRC params to their default */
+       qed_dbg_grc_set_params_default(p_hwfn);
+
+       return status;
+}
+
+enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+                                                  struct qed_ptt *p_ptt,
+                                                  u32 *buf_size)
+{
+       enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+
+       *buf_size = 0;
+       if (status != DBG_STATUS_OK)
+               return status;
+       return qed_igu_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
+}
+
+enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
+                                     struct qed_ptt *p_ptt,
+                                     u32 *dump_buf,
+                                     u32 buf_size_in_dwords,
+                                     u32 *num_dumped_dwords)
+{
+       u32 needed_buf_size_in_dwords;
+       enum dbg_status status;
+
+       status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn, p_ptt,
+                                                   &needed_buf_size_in_dwords);
+
+       *num_dumped_dwords = 0;
+       if (status != DBG_STATUS_OK)
+               return status;
+       if (buf_size_in_dwords < needed_buf_size_in_dwords)
+               return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+       /* Update reset state */
+       qed_update_blocks_reset_state(p_hwfn, p_ptt);
+
+       status = qed_igu_fifo_dump(p_hwfn,
+                                  p_ptt, dump_buf, true, num_dumped_dwords);
+       /* Revert GRC params to their default */
+       qed_dbg_grc_set_params_default(p_hwfn);
+
+       return status;
+}
+
+enum dbg_status
+qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+                                             struct qed_ptt *p_ptt,
+                                             u32 *buf_size)
+{
+       enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+
+       *buf_size = 0;
+       if (status != DBG_STATUS_OK)
+               return status;
+       return qed_protection_override_dump(p_hwfn,
+                                           p_ptt, NULL, false, buf_size);
+}
+
+enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
+                                                struct qed_ptt *p_ptt,
+                                                u32 *dump_buf,
+                                                u32 buf_size_in_dwords,
+                                                u32 *num_dumped_dwords)
+{
+       u32 needed_buf_size_in_dwords;
+       enum dbg_status status;
+
+       status = qed_dbg_protection_override_get_dump_buf_size(p_hwfn, p_ptt,
+                                               &needed_buf_size_in_dwords);
+
+       *num_dumped_dwords = 0;
+       if (status != DBG_STATUS_OK)
+               return status;
+       if (buf_size_in_dwords < needed_buf_size_in_dwords)
+               return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+       /* Update reset state */
+       qed_update_blocks_reset_state(p_hwfn, p_ptt);
+
+       status = qed_protection_override_dump(p_hwfn,
+                                             p_ptt,
+                                             dump_buf,
+                                             true, num_dumped_dwords);
+
+       /* Revert GRC params to their default */
+       qed_dbg_grc_set_params_default(p_hwfn);
+
+       return status;
+}
+
+enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+                                                    struct qed_ptt *p_ptt,
+                                                    u32 *buf_size)
+{
+       enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+
+       *buf_size = 0;
+       if (status != DBG_STATUS_OK)
+               return status;
+
+       /* Update reset state */
+       qed_update_blocks_reset_state(p_hwfn, p_ptt);
+       *buf_size = qed_fw_asserts_dump(p_hwfn, p_ptt, NULL, false);
+       return DBG_STATUS_OK;
+}
+
+enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
+                                       struct qed_ptt *p_ptt,
+                                       u32 *dump_buf,
+                                       u32 buf_size_in_dwords,
+                                       u32 *num_dumped_dwords)
+{
+       u32 needed_buf_size_in_dwords;
+       enum dbg_status status;
+
+       status = qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn, p_ptt,
+                                               &needed_buf_size_in_dwords);
+
+       *num_dumped_dwords = 0;
+       if (status != DBG_STATUS_OK)
+               return status;
+       if (buf_size_in_dwords < needed_buf_size_in_dwords)
+               return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+       *num_dumped_dwords = qed_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true);
+       return DBG_STATUS_OK;
+}
+
+/******************************* Data Types **********************************/
+
+struct mcp_trace_format {
+       u32 data;
+#define MCP_TRACE_FORMAT_MODULE_MASK   0x0000ffff
+#define MCP_TRACE_FORMAT_MODULE_SHIFT  0
+#define MCP_TRACE_FORMAT_LEVEL_MASK    0x00030000
+#define MCP_TRACE_FORMAT_LEVEL_SHIFT   16
+#define MCP_TRACE_FORMAT_P1_SIZE_MASK  0x000c0000
+#define MCP_TRACE_FORMAT_P1_SIZE_SHIFT 18
+#define MCP_TRACE_FORMAT_P2_SIZE_MASK  0x00300000
+#define MCP_TRACE_FORMAT_P2_SIZE_SHIFT 20
+#define MCP_TRACE_FORMAT_P3_SIZE_MASK  0x00c00000
+#define MCP_TRACE_FORMAT_P3_SIZE_SHIFT 22
+#define MCP_TRACE_FORMAT_LEN_MASK      0xff000000
+#define MCP_TRACE_FORMAT_LEN_SHIFT     24
+       char *format_str;
+};
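+
+/* Editor's note (illustrative, not in the original patch): the masks above
+ * are absolute rather than pre-shifted, so fields are extracted manually as
+ * (data & MASK) >> SHIFT (see qed_mcp_trace_alloc_meta() below). For
+ * example, data = 0x0a190003 decodes as:
+ *   module  = (data & 0x0000ffff) >> 0  = 0x0003
+ *   level   = (data & 0x00030000) >> 16 = 1 (TRACE)
+ *   p1 size = (data & 0x000c0000) >> 18 = 2 (bytes)
+ *   p2 size = (data & 0x00300000) >> 20 = 1 (byte)
+ *   len     = (data & 0xff000000) >> 24 = 0x0a
+ * Each param-size field is 2 bits wide, which is why
+ * MCP_TRACE_FORMAT_PARAM_WIDTH (defined below) evaluates to 20 - 18 = 2.
+ */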
+
+struct mcp_trace_meta {
+       u32 modules_num;
+       char **modules;
+       u32 formats_num;
+       struct mcp_trace_format *formats;
+};
+
+/* Reg fifo element */
+struct reg_fifo_element {
+       u64 data;
+#define REG_FIFO_ELEMENT_ADDRESS_SHIFT         0
+#define REG_FIFO_ELEMENT_ADDRESS_MASK          0x7fffff
+#define REG_FIFO_ELEMENT_ACCESS_SHIFT          23
+#define REG_FIFO_ELEMENT_ACCESS_MASK           0x1
+#define REG_FIFO_ELEMENT_PF_SHIFT              24
+#define REG_FIFO_ELEMENT_PF_MASK               0xf
+#define REG_FIFO_ELEMENT_VF_SHIFT              28
+#define REG_FIFO_ELEMENT_VF_MASK               0xff
+#define REG_FIFO_ELEMENT_PORT_SHIFT            36
+#define REG_FIFO_ELEMENT_PORT_MASK             0x3
+#define REG_FIFO_ELEMENT_PRIVILEGE_SHIFT       38
+#define REG_FIFO_ELEMENT_PRIVILEGE_MASK                0x3
+#define REG_FIFO_ELEMENT_PROTECTION_SHIFT      40
+#define REG_FIFO_ELEMENT_PROTECTION_MASK       0x7
+#define REG_FIFO_ELEMENT_MASTER_SHIFT          43
+#define REG_FIFO_ELEMENT_MASTER_MASK           0xf
+#define REG_FIFO_ELEMENT_ERROR_SHIFT           47
+#define REG_FIFO_ELEMENT_ERROR_MASK            0x1f
+};
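+
+/* Editor's note (not in the original patch): unlike the absolute masks in
+ * struct mcp_trace_format above, these masks apply after the shift
+ * (GET_FIELD style). The ADDRESS field is dword-granular, so the parser
+ * below multiplies it by REG_FIFO_ELEMENT_ADDR_FACTOR (4) to obtain a byte
+ * address, and a VF field equal to REG_FIFO_ELEMENT_IS_PF_VF_VAL (127)
+ * indicates the access came from a PF rather than a VF.
+ */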
+
+/* IGU fifo element */
+struct igu_fifo_element {
+       u32 dword0;
+#define IGU_FIFO_ELEMENT_DWORD0_FID_SHIFT              0
+#define IGU_FIFO_ELEMENT_DWORD0_FID_MASK               0xff
+#define IGU_FIFO_ELEMENT_DWORD0_IS_PF_SHIFT            8
+#define IGU_FIFO_ELEMENT_DWORD0_IS_PF_MASK             0x1
+#define IGU_FIFO_ELEMENT_DWORD0_SOURCE_SHIFT           9
+#define IGU_FIFO_ELEMENT_DWORD0_SOURCE_MASK            0xf
+#define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_SHIFT         13
+#define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_MASK          0xf
+#define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_SHIFT         17
+#define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_MASK          0x7fff
+       u32 dword1;
+       u32 dword2;
+#define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_SHIFT       0
+#define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_MASK                0x1
+#define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_SHIFT         1
+#define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_MASK          0xffffffff
+       u32 reserved;
+};
+
+struct igu_fifo_wr_data {
+       u32 data;
+#define IGU_FIFO_WR_DATA_PROD_CONS_SHIFT               0
+#define IGU_FIFO_WR_DATA_PROD_CONS_MASK                        0xffffff
+#define IGU_FIFO_WR_DATA_UPDATE_FLAG_SHIFT             24
+#define IGU_FIFO_WR_DATA_UPDATE_FLAG_MASK              0x1
+#define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_SHIFT       25
+#define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_MASK                0x3
+#define IGU_FIFO_WR_DATA_SEGMENT_SHIFT                 27
+#define IGU_FIFO_WR_DATA_SEGMENT_MASK                  0x1
+#define IGU_FIFO_WR_DATA_TIMER_MASK_SHIFT              28
+#define IGU_FIFO_WR_DATA_TIMER_MASK_MASK               0x1
+#define IGU_FIFO_WR_DATA_CMD_TYPE_SHIFT                        31
+#define IGU_FIFO_WR_DATA_CMD_TYPE_MASK                 0x1
+};
+
+struct igu_fifo_cleanup_wr_data {
+       u32 data;
+#define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_SHIFT                0
+#define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_MASK         0x7ffffff
+#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_SHIFT     27
+#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_MASK      0x1
+#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_SHIFT    28
+#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_MASK     0x7
+#define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_SHIFT                31
+#define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_MASK         0x1
+};
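+
+/* Editor's note (not in the original patch): igu_fifo_wr_data and
+ * igu_fifo_cleanup_wr_data are two decodings of the same 32-bit write-data
+ * field; bit 31 (the CMD_TYPE field in both layouts) selects between them,
+ * as done in qed_parse_igu_fifo_dump() below.
+ */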
+
+/* Protection override element */
+struct protection_override_element {
+       u64 data;
+#define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_SHIFT              0
+#define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_MASK               0x7fffff
+#define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_SHIFT          23
+#define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_MASK           0xffffff
+#define PROTECTION_OVERRIDE_ELEMENT_READ_SHIFT                 47
+#define PROTECTION_OVERRIDE_ELEMENT_READ_MASK                  0x1
+#define PROTECTION_OVERRIDE_ELEMENT_WRITE_SHIFT                        48
+#define PROTECTION_OVERRIDE_ELEMENT_WRITE_MASK                 0x1
+#define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_SHIFT      49
+#define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_MASK       0x7
+#define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_SHIFT     52
+#define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_MASK      0x7
+};
+
+enum igu_fifo_sources {
+       IGU_SRC_PXP0,
+       IGU_SRC_PXP1,
+       IGU_SRC_PXP2,
+       IGU_SRC_PXP3,
+       IGU_SRC_PXP4,
+       IGU_SRC_PXP5,
+       IGU_SRC_PXP6,
+       IGU_SRC_PXP7,
+       IGU_SRC_CAU,
+       IGU_SRC_ATTN,
+       IGU_SRC_GRC
+};
+
+enum igu_fifo_addr_types {
+       IGU_ADDR_TYPE_MSIX_MEM,
+       IGU_ADDR_TYPE_WRITE_PBA,
+       IGU_ADDR_TYPE_WRITE_INT_ACK,
+       IGU_ADDR_TYPE_WRITE_ATTN_BITS,
+       IGU_ADDR_TYPE_READ_INT,
+       IGU_ADDR_TYPE_WRITE_PROD_UPDATE,
+       IGU_ADDR_TYPE_RESERVED
+};
+
+struct igu_fifo_addr_data {
+       u16 start_addr;
+       u16 end_addr;
+       char *desc;
+       char *vf_desc;
+       enum igu_fifo_addr_types type;
+};
+
+/******************************** Constants **********************************/
+
+#define MAX_MSG_LEN                            1024
+#define MCP_TRACE_MAX_MODULE_LEN               8
+#define MCP_TRACE_FORMAT_MAX_PARAMS            3
+#define MCP_TRACE_FORMAT_PARAM_WIDTH \
+       (MCP_TRACE_FORMAT_P2_SIZE_SHIFT - MCP_TRACE_FORMAT_P1_SIZE_SHIFT)
+#define REG_FIFO_ELEMENT_ADDR_FACTOR           4
+#define REG_FIFO_ELEMENT_IS_PF_VF_VAL          127
+#define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR        4
+
+/********************************* Macros ************************************/
+
+#define BYTES_TO_DWORDS(bytes)                 ((bytes) / BYTES_IN_DWORD)
+
+/***************************** Constant Arrays *******************************/
+
+/* Status string array */
+static const char * const s_status_str[] = {
+       "Operation completed successfully",
+       "Debug application version wasn't set",
+       "Unsupported debug application version",
+       "The debug block wasn't reset since the last recording",
+       "Invalid arguments",
+       "The debug output was already set",
+       "Invalid PCI buffer size",
+       "PCI buffer allocation failed",
+       "A PCI buffer wasn't allocated",
+       "Too many inputs were enabled. Enabled less inputs, or set 'unifyInputs' to true",
+       "GRC/Timestamp input overlap in cycle dword 0",
+       "Cannot record Storm data since the entire recording cycle is used by HW",
+       "The Storm was already enabled",
+       "The specified Storm wasn't enabled",
+       "The block was already enabled",
+       "The specified block wasn't enabled",
+       "No input was enabled for recording",
+       "Filters and triggers are not allowed when recording in 64b units",
+       "The filter was already enabled",
+       "The trigger was already enabled",
+       "The trigger wasn't enabled",
+       "A constraint can be added only after a filter was enabled or a trigger state was added",
+       "Cannot add more than 3 trigger states",
+       "Cannot add more than 4 constraints per filter or trigger state",
+       "The recording wasn't started",
+       "A trigger was configured, but it didn't trigger",
+       "No data was recorded",
+       "Dump buffer is too small",
+       "Dumped data is not aligned to chunks",
+       "Unknown chip",
+       "Failed allocating virtual memory",
+       "The input block is in reset",
+       "Invalid MCP trace signature found in NVRAM",
+       "Invalid bundle ID found in NVRAM",
+       "Failed getting NVRAM image",
+       "NVRAM image is not dword-aligned",
+       "Failed reading from NVRAM",
+       "Idle check parsing failed",
+       "MCP Trace data is corrupt",
+       "Dump doesn't contain meta data - it must be provided in an image file",
+       "Failed to halt MCP",
+       "Failed to resume MCP after halt",
+       "DMAE transaction failed",
+       "Failed to empty SEMI sync FIFO",
+       "IGU FIFO data is corrupt",
+       "MCP failed to mask parities",
+       "FW Asserts parsing failed",
+       "GRC FIFO data is corrupt",
+       "Protection Override data is corrupt",
+       "Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)",
+       "When a block is filtered, no other blocks can be recorded unless inputs are unified (due to a HW bug)"
+};
+
+/* Idle check severity names array */
+static const char * const s_idle_chk_severity_str[] = {
+       "Error",
+       "Error if no traffic",
+       "Warning"
+};
+
+/* MCP Trace level names array */
+static const char * const s_mcp_trace_level_str[] = {
+       "ERROR",
+       "TRACE",
+       "DEBUG"
+};
+
+/* Parsing strings */
+static const char * const s_access_strs[] = {
+       "read",
+       "write"
+};
+
+static const char * const s_privilege_strs[] = {
+       "VF",
+       "PDA",
+       "HV",
+       "UA"
+};
+
+static const char * const s_protection_strs[] = {
+       "(default)",
+       "(default)",
+       "(default)",
+       "(default)",
+       "override VF",
+       "override PDA",
+       "override HV",
+       "override UA"
+};
+
+static const char * const s_master_strs[] = {
+       "???",
+       "pxp",
+       "mcp",
+       "msdm",
+       "psdm",
+       "ysdm",
+       "usdm",
+       "tsdm",
+       "xsdm",
+       "dbu",
+       "dmae",
+       "???",
+       "???",
+       "???",
+       "???",
+       "???"
+};
+
+static const char * const s_reg_fifo_error_strs[] = {
+       "grc timeout",
+       "address doesn't belong to any block",
+       "reserved address in block or write to read-only address",
+       "privilege/protection mismatch",
+       "path isolation error"
+};
+
+static const char * const s_igu_fifo_source_strs[] = {
+       "TSTORM",
+       "MSTORM",
+       "USTORM",
+       "XSTORM",
+       "YSTORM",
+       "PSTORM",
+       "PCIE",
+       "NIG_QM_PBF",
+       "CAU",
+       "ATTN",
+       "GRC",
+};
+
+static const char * const s_igu_fifo_error_strs[] = {
+       "no error",
+       "length error",
+       "function disabled",
+       "VF sent command to attnetion address",
+       "host sent prod update command",
+       "read of during interrupt register while in MIMD mode",
+       "access to PXP BAR reserved address",
+       "producer update command to attention index",
+       "unknown error",
+       "SB index not valid",
+       "SB relative index and FID not found",
+       "FID not match",
+       "command with error flag asserted (PCI error or CAU discard)",
+       "VF sent cleanup and RF cleanup is disabled",
+       "cleanup command on type bigger than 4"
+};
+
+/* IGU FIFO address data */
+static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = {
+       {0x0, 0x101, "MSI-X Memory", NULL, IGU_ADDR_TYPE_MSIX_MEM},
+       {0x102, 0x1ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED},
+       {0x200, 0x200, "Write PBA[0:63]", NULL, IGU_ADDR_TYPE_WRITE_PBA},
+       {0x201, 0x201, "Write PBA[64:127]", "reserved",
+        IGU_ADDR_TYPE_WRITE_PBA},
+       {0x202, 0x202, "Write PBA[128]", "reserved", IGU_ADDR_TYPE_WRITE_PBA},
+       {0x203, 0x3ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED},
+       {0x400, 0x5ef, "Write interrupt acknowledgment", NULL,
+        IGU_ADDR_TYPE_WRITE_INT_ACK},
+       {0x5f0, 0x5f0, "Attention bits update", NULL,
+        IGU_ADDR_TYPE_WRITE_ATTN_BITS},
+       {0x5f1, 0x5f1, "Attention bits set", NULL,
+        IGU_ADDR_TYPE_WRITE_ATTN_BITS},
+       {0x5f2, 0x5f2, "Attention bits clear", NULL,
+        IGU_ADDR_TYPE_WRITE_ATTN_BITS},
+       {0x5f3, 0x5f3, "Read interrupt 0:63 with mask", NULL,
+        IGU_ADDR_TYPE_READ_INT},
+       {0x5f4, 0x5f4, "Read interrupt 0:31 with mask", NULL,
+        IGU_ADDR_TYPE_READ_INT},
+       {0x5f5, 0x5f5, "Read interrupt 32:63 with mask", NULL,
+        IGU_ADDR_TYPE_READ_INT},
+       {0x5f6, 0x5f6, "Read interrupt 0:63 without mask", NULL,
+        IGU_ADDR_TYPE_READ_INT},
+       {0x5f7, 0x5ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED},
+       {0x600, 0x7ff, "Producer update", NULL, IGU_ADDR_TYPE_WRITE_PROD_UPDATE}
+};
+
+/******************************** Variables **********************************/
+
+/* MCP Trace meta data - used in case the dump doesn't contain the meta data
+ * (e.g. due to no NVRAM access).
+ */
+static struct dbg_array s_mcp_trace_meta = { NULL, 0 };
+
+/* Temporary buffer, used for print size calculations */
+static char s_temp_buf[MAX_MSG_LEN];
+
+/***************************** Public Functions *******************************/
+
+enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr)
+{
+       /* Convert binary data to debug arrays */
+       struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
+       u8 buf_id;
+
+       for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
+               s_dbg_arrays[buf_id].ptr =
+                   (u32 *)(bin_ptr + buf_array[buf_id].offset);
+               s_dbg_arrays[buf_id].size_in_dwords =
+                   BYTES_TO_DWORDS(buf_array[buf_id].length);
+       }
+
+       return DBG_STATUS_OK;
+}
+
+static u32 qed_cyclic_add(u32 a, u32 b, u32 size)
+{
+       return (a + b) % size;
+}
+
+static u32 qed_cyclic_sub(u32 a, u32 b, u32 size)
+{
+       return (size + a - b) % size;
+}
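+
+/* Editor's note (illustrative, not in the original patch): both helpers
+ * implement modular arithmetic on cyclic-buffer offsets. For a 16-byte
+ * buffer:
+ *   qed_cyclic_add(14, 4, 16) == 2   (wraps past the end)
+ *   qed_cyclic_sub(2, 5, 16)  == 13  (the "size +" term keeps the dividend
+ *                                     non-negative before the modulo)
+ */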
+
+/* Reads the specified number of bytes from the specified cyclic buffer (up to 4
+ * bytes) and returns them as a dword value. The specified buffer offset is
+ * updated.
+ */
+static u32 qed_read_from_cyclic_buf(void *buf,
+                                   u32 *offset,
+                                   u32 buf_size, u8 num_bytes_to_read)
+{
+       u8 *bytes_buf = (u8 *)buf;
+       u8 *val_ptr;
+       u32 val = 0;
+       u8 i;
+
+       val_ptr = (u8 *)&val;
+
+       for (i = 0; i < num_bytes_to_read; i++) {
+               val_ptr[i] = bytes_buf[*offset];
+               *offset = qed_cyclic_add(*offset, 1, buf_size);
+       }
+
+       return val;
+}
+
+/* Reads and returns the next byte from the specified buffer.
+ * The specified buffer offset is updated.
+ */
+static u8 qed_read_byte_from_buf(void *buf, u32 *offset)
+{
+       return ((u8 *)buf)[(*offset)++];
+}
+
+/* Reads and returns the next dword from the specified buffer.
+ * The specified buffer offset is updated.
+ */
+static u32 qed_read_dword_from_buf(void *buf, u32 *offset)
+{
+       u32 dword_val = *(u32 *)&((u8 *)buf)[*offset];
+
+       *offset += 4;
+       return dword_val;
+}
+
+/* Reads the next string from the specified buffer, and copies it to the
+ * specified pointer. The specified buffer offset is updated.
+ */
+static void qed_read_str_from_buf(void *buf, u32 *offset, u32 size, char *dest)
+{
+       const char *source_str = &((const char *)buf)[*offset];
+
+       strncpy(dest, source_str, size);
+       dest[size - 1] = '\0';
+       *offset += size;
+}
+
+/* Returns a pointer to the specified offset (in bytes) of the specified buffer.
+ * If the specified buffer is NULL, a temporary buffer pointer is returned.
+ */
+static char *qed_get_buf_ptr(void *buf, u32 offset)
+{
+       return buf ? (char *)buf + offset : s_temp_buf;
+}
+
+/* Reads a param from the specified buffer. Returns the number of dwords read.
+ * If the returned param_str_val is NULL, the param is numeric and its value
+ * is returned in param_num_val.
+ * Otherwise, the param is a string and its pointer is returned in
+ * param_str_val.
+ */
+static u32 qed_read_param(u32 *dump_buf,
+                         const char **param_name,
+                         const char **param_str_val, u32 *param_num_val)
+{
+       char *char_buf = (char *)dump_buf;
+       u32 offset = 0; /* In bytes */
+
+       /* Extract param name */
+       *param_name = char_buf;
+       offset += strlen(*param_name) + 1;
+
+       /* Check param type */
+       if (*(char_buf + offset++)) {
+               /* String param */
+               *param_str_val = char_buf + offset;
+               offset += strlen(*param_str_val) + 1;
+               if (offset & 0x3)
+                       offset += (4 - (offset & 0x3));
+       } else {
+               /* Numeric param */
+               *param_str_val = NULL;
+               if (offset & 0x3)
+                       offset += (4 - (offset & 0x3));
+               *param_num_val = *(u32 *)(char_buf + offset);
+               offset += 4;
+       }
+
+       return offset / 4;
+}
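+
+/* Editor's note (illustrative, not in the original patch): a dumped param is
+ * laid out as a NUL-terminated name, a one-byte type flag, and then either a
+ * NUL-terminated string value or a dword-aligned numeric value. For example,
+ * a numeric param "size" with value 5 occupies three dwords:
+ *   bytes 0..4:  's' 'i' 'z' 'e' '\0'
+ *   byte  5:     0x00          (type flag: numeric)
+ *   bytes 6..7:  padding up to a dword boundary
+ *   bytes 8..11: 0x00000005    (value, CPU-endian)
+ * so qed_read_param() would return 12 / 4 = 3.
+ */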
+
+/* Reads a section header from the specified buffer.
+ * Returns the number of dwords read.
+ */
+static u32 qed_read_section_hdr(u32 *dump_buf,
+                               const char **section_name,
+                               u32 *num_section_params)
+{
+       const char *param_str_val;
+
+       return qed_read_param(dump_buf,
+                             section_name, &param_str_val, num_section_params);
+}
+
+/* Reads section params from the specified buffer and prints them to the results
+ * buffer. Returns the number of dwords read.
+ */
+static u32 qed_print_section_params(u32 *dump_buf,
+                                   u32 num_section_params,
+                                   char *results_buf, u32 *num_chars_printed)
+{
+       u32 i, dump_offset = 0, results_offset = 0;
+
+       for (i = 0; i < num_section_params; i++) {
+               const char *param_name;
+               const char *param_str_val;
+               u32 param_num_val = 0;
+
+               dump_offset += qed_read_param(dump_buf + dump_offset,
+                                             &param_name,
+                                             &param_str_val, &param_num_val);
+               if (param_str_val)
+                       /* String param */
+                       results_offset +=
+                               sprintf(qed_get_buf_ptr(results_buf,
+                                                       results_offset),
+                                       "%s: %s\n", param_name, param_str_val);
+               else if (strcmp(param_name, "fw-timestamp"))
+                       /* Numeric param */
+                       results_offset +=
+                               sprintf(qed_get_buf_ptr(results_buf,
+                                                       results_offset),
+                                       "%s: %d\n", param_name, param_num_val);
+       }
+
+       results_offset +=
+           sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
+       *num_chars_printed = results_offset;
+       return dump_offset;
+}
+
+const char *qed_dbg_get_status_str(enum dbg_status status)
+{
+       return (status < MAX_DBG_STATUS) ?
+              s_status_str[status] : "Invalid debug status";
+}
+
+/* Parses the idle check rules and returns the number of characters printed.
+ * In case of parsing error, returns 0.
+ */
+static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
+                                        u32 *dump_buf,
+                                        u32 *dump_buf_end,
+                                        u32 num_rules,
+                                        bool print_fw_idle_chk,
+                                        char *results_buf,
+                                        u32 *num_errors, u32 *num_warnings)
+{
+       u32 rule_idx, results_offset = 0; /* Offset in results_buf in bytes */
+       u16 i, j;
+
+       *num_errors = 0;
+       *num_warnings = 0;
+
+       /* Go over dumped results */
+       for (rule_idx = 0; rule_idx < num_rules && dump_buf < dump_buf_end;
+            rule_idx++) {
+               const struct dbg_idle_chk_rule_parsing_data *rule_parsing_data;
+               struct dbg_idle_chk_result_hdr *hdr;
+               const char *parsing_str;
+               u32 parsing_str_offset;
+               const char *lsi_msg;
+               u8 curr_reg_id = 0;
+               bool has_fw_msg;
+
+               hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
+               rule_parsing_data =
+                       (const struct dbg_idle_chk_rule_parsing_data *)
+                       &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].
+                       ptr[hdr->rule_id];
+               parsing_str_offset =
+                       GET_FIELD(rule_parsing_data->data,
+                                 DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET);
+               has_fw_msg =
+                       GET_FIELD(rule_parsing_data->data,
+                               DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0;
+               parsing_str = &((const char *)
+                               s_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
+                               [parsing_str_offset];
+               lsi_msg = parsing_str;
+
+               if (hdr->severity >= MAX_DBG_IDLE_CHK_SEVERITY_TYPES)
+                       return 0;
+
+               /* Skip rule header */
+               dump_buf += (sizeof(struct dbg_idle_chk_result_hdr) / 4);
+
+               /* Update errors/warnings count */
+               if (hdr->severity == IDLE_CHK_SEVERITY_ERROR ||
+                   hdr->severity == IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC)
+                       (*num_errors)++;
+               else
+                       (*num_warnings)++;
+
+               /* Print rule severity */
+               results_offset +=
+                   sprintf(qed_get_buf_ptr(results_buf,
+                                           results_offset), "%s: ",
+                           s_idle_chk_severity_str[hdr->severity]);
+
+               /* Print rule message */
+               if (has_fw_msg)
+                       parsing_str += strlen(parsing_str) + 1;
+               results_offset +=
+                   sprintf(qed_get_buf_ptr(results_buf,
+                                           results_offset), "%s.",
+                           has_fw_msg &&
+                           print_fw_idle_chk ? parsing_str : lsi_msg);
+               parsing_str += strlen(parsing_str) + 1;
+
+               /* Print register values */
+               results_offset +=
+                   sprintf(qed_get_buf_ptr(results_buf,
+                                           results_offset), " Registers:");
+               for (i = 0;
+                    i < hdr->num_dumped_cond_regs + hdr->num_dumped_info_regs;
+                    i++) {
+                       struct dbg_idle_chk_result_reg_hdr *reg_hdr
+                           = (struct dbg_idle_chk_result_reg_hdr *)
+                           dump_buf;
+                       bool is_mem =
+                               GET_FIELD(reg_hdr->data,
+                                         DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM);
+                       u8 reg_id =
+                               GET_FIELD(reg_hdr->data,
+                                         DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID);
+
+                       /* Skip reg header */
+                       dump_buf +=
+                           (sizeof(struct dbg_idle_chk_result_reg_hdr) / 4);
+
+                       /* Skip register names until the required reg_id is
+                        * reached.
+                        */
+                       for (; reg_id > curr_reg_id;
+                            curr_reg_id++,
+                            parsing_str += strlen(parsing_str) + 1);
+
+                       results_offset +=
+                           sprintf(qed_get_buf_ptr(results_buf,
+                                                   results_offset), " %s",
+                                   parsing_str);
+                       if (i < hdr->num_dumped_cond_regs && is_mem)
+                               results_offset +=
+                                   sprintf(qed_get_buf_ptr(results_buf,
+                                                           results_offset),
+                                           "[%d]", hdr->mem_entry_id +
+                                           reg_hdr->start_entry);
+                       results_offset +=
+                           sprintf(qed_get_buf_ptr(results_buf,
+                                                   results_offset), "=");
+                       for (j = 0; j < reg_hdr->size; j++, dump_buf++) {
+                               results_offset +=
+                                   sprintf(qed_get_buf_ptr(results_buf,
+                                                           results_offset),
+                                           "0x%x", *dump_buf);
+                               if (j < reg_hdr->size - 1)
+                                       results_offset +=
+                                           sprintf(qed_get_buf_ptr
+                                                   (results_buf,
+                                                    results_offset), ",");
+                       }
+               }
+
+               results_offset +=
+                   sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
+       }
+
+       /* Check if end of dump buffer was exceeded */
+       if (dump_buf > dump_buf_end)
+               return 0;
+       return results_offset;
+}
+
+/* Parses an idle check dump buffer.
+ * If results_buf is not NULL, the idle check results are printed to it.
+ * In any case, the required results buffer size is assigned to
+ * parsed_results_bytes.
+ * The parsing status is returned.
+ */
+static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
+                                              u32 *dump_buf,
+                                              u32 num_dumped_dwords,
+                                              char *results_buf,
+                                              u32 *parsed_results_bytes,
+                                              u32 *num_errors,
+                                              u32 *num_warnings)
+{
+       const char *section_name, *param_name, *param_str_val;
+       u32 *dump_buf_end = dump_buf + num_dumped_dwords;
+       u32 num_section_params = 0, num_rules;
+       u32 results_offset = 0; /* Offset in results_buf in bytes */
+
+       *parsed_results_bytes = 0;
+       *num_errors = 0;
+       *num_warnings = 0;
+       if (!s_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
+           !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
+               return DBG_STATUS_DBG_ARRAY_NOT_SET;
+
+       /* Read global_params section */
+       dump_buf += qed_read_section_hdr(dump_buf,
+                                        &section_name, &num_section_params);
+       if (strcmp(section_name, "global_params"))
+               return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
+
+       /* Print global params */
+       dump_buf += qed_print_section_params(dump_buf,
+                                            num_section_params,
+                                            results_buf, &results_offset);
+
+       /* Read idle_chk section */
+       dump_buf += qed_read_section_hdr(dump_buf,
+                                        &section_name, &num_section_params);
+       if (strcmp(section_name, "idle_chk") || num_section_params != 1)
+               return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
+
+       dump_buf += qed_read_param(dump_buf,
+                                  &param_name, &param_str_val, &num_rules);
+       if (strcmp(param_name, "num_rules") != 0)
+               return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
+
+       if (num_rules) {
+               u32 rules_print_size;
+
+               /* Print FW output */
+               results_offset +=
+                   sprintf(qed_get_buf_ptr(results_buf,
+                                           results_offset),
+                           "FW_IDLE_CHECK:\n");
+               rules_print_size =
+                       qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf,
+                                                     dump_buf_end, num_rules,
+                                                     true,
+                                                     results_buf ?
+                                                     results_buf +
+                                                     results_offset : NULL,
+                                                     num_errors, num_warnings);
+               results_offset += rules_print_size;
+               if (rules_print_size == 0)
+                       return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
+
+               /* Print LSI output */
+               results_offset +=
+                   sprintf(qed_get_buf_ptr(results_buf,
+                                           results_offset),
+                           "\nLSI_IDLE_CHECK:\n");
+               rules_print_size =
+                       qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf,
+                                                     dump_buf_end, num_rules,
+                                                     false,
+                                                     results_buf ?
+                                                     results_buf +
+                                                     results_offset : NULL,
+                                                     num_errors, num_warnings);
+               results_offset += rules_print_size;
+               if (rules_print_size == 0)
+                       return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
+       }
+
+       /* Print errors/warnings count */
+       if (*num_errors) {
+               results_offset +=
+                   sprintf(qed_get_buf_ptr(results_buf,
+                                           results_offset),
+                           "\nIdle Check failed!!! (with %d errors and %d warnings)\n",
+                           *num_errors, *num_warnings);
+       } else if (*num_warnings) {
+               results_offset +=
+                   sprintf(qed_get_buf_ptr(results_buf,
+                                           results_offset),
+                           "\nIdle Check completed successfuly (with %d warnings)\n",
+                           *num_warnings);
+       } else {
+               results_offset +=
+                   sprintf(qed_get_buf_ptr(results_buf,
+                                           results_offset),
+                           "\nIdle Check completed successfuly\n");
+       }
+
+       /* Add 1 for string NULL termination */
+       *parsed_results_bytes = results_offset + 1;
+       return DBG_STATUS_OK;
+}
+
+enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
+                                                 u32 *dump_buf,
+                                                 u32 num_dumped_dwords,
+                                                 u32 *results_buf_size)
+{
+       u32 num_errors, num_warnings;
+
+       return qed_parse_idle_chk_dump(p_hwfn,
+                                      dump_buf,
+                                      num_dumped_dwords,
+                                      NULL,
+                                      results_buf_size,
+                                      &num_errors, &num_warnings);
+}
+
+enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
+                                          u32 *dump_buf,
+                                          u32 num_dumped_dwords,
+                                          char *results_buf,
+                                          u32 *num_errors, u32 *num_warnings)
+{
+       u32 parsed_buf_size;
+
+       return qed_parse_idle_chk_dump(p_hwfn,
+                                      dump_buf,
+                                      num_dumped_dwords,
+                                      results_buf,
+                                      &parsed_buf_size,
+                                      num_errors, num_warnings);
+}
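+
+/* Editor's note: a minimal, hypothetical sketch (not part of the original
+ * patch) of the intended calling sequence for the two functions above,
+ * assuming dump_buf/num_dumped_dwords came from a prior idle check dump:
+ *
+ *     u32 buf_size, num_errors, num_warnings;
+ *     char *results;
+ *
+ *     if (qed_get_idle_chk_results_buf_size(p_hwfn, dump_buf,
+ *                                           num_dumped_dwords,
+ *                                           &buf_size) != DBG_STATUS_OK)
+ *             return;
+ *     results = vzalloc(buf_size);
+ *     if (results)
+ *             qed_print_idle_chk_results(p_hwfn, dump_buf,
+ *                                        num_dumped_dwords, results,
+ *                                        &num_errors, &num_warnings);
+ *     vfree(results);
+ *
+ * The same size-then-print pattern applies to the MCP Trace, Reg FIFO and
+ * IGU FIFO parsers below.
+ */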
+
+/* Frees the specified MCP Trace meta data */
+static void qed_mcp_trace_free_meta(struct qed_hwfn *p_hwfn,
+                                   struct mcp_trace_meta *meta)
+{
+       u32 i;
+
+       /* Release modules */
+       if (meta->modules) {
+               for (i = 0; i < meta->modules_num; i++)
+                       kfree(meta->modules[i]);
+               kfree(meta->modules);
+       }
+
+       /* Release formats */
+       if (meta->formats) {
+               for (i = 0; i < meta->formats_num; i++)
+                       kfree(meta->formats[i].format_str);
+               kfree(meta->formats);
+       }
+}
+
+/* Allocates and fills MCP Trace meta data based on the specified meta data
+ * dump buffer.
+ * Returns debug status code.
+ */
+static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn,
+                                               const u32 *meta_buf,
+                                               struct mcp_trace_meta *meta)
+{
+       u8 *meta_buf_bytes = (u8 *)meta_buf;
+       u32 offset = 0, signature, i;
+
+       memset(meta, 0, sizeof(*meta));
+
+       /* Read first signature */
+       signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
+       if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
+               return DBG_STATUS_INVALID_TRACE_SIGNATURE;
+
+       /* Read number of modules and allocate memory for all the modules
+        * pointers.
+        */
+       meta->modules_num = qed_read_byte_from_buf(meta_buf_bytes, &offset);
+       meta->modules = kzalloc(meta->modules_num * sizeof(char *), GFP_KERNEL);
+       if (!meta->modules)
+               return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+
+       /* Allocate and read all module strings */
+       for (i = 0; i < meta->modules_num; i++) {
+               u8 module_len = qed_read_byte_from_buf(meta_buf_bytes, &offset);
+
+               *(meta->modules + i) = kzalloc(module_len, GFP_KERNEL);
+               if (!(*(meta->modules + i))) {
+                       /* Update number of modules to be released: modules
+                        * 0..i-1 were allocated successfully.
+                        */
+                       meta->modules_num = i;
+                       return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+               }
+
+               qed_read_str_from_buf(meta_buf_bytes, &offset, module_len,
+                                     *(meta->modules + i));
+               if (module_len > MCP_TRACE_MAX_MODULE_LEN)
+                       (*(meta->modules + i))[MCP_TRACE_MAX_MODULE_LEN] = '\0';
+       }
+
+       /* Read second signature */
+       signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
+       if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
+               return DBG_STATUS_INVALID_TRACE_SIGNATURE;
+
+       /* Read number of formats and allocate memory for all formats */
+       meta->formats_num = qed_read_dword_from_buf(meta_buf_bytes, &offset);
+       meta->formats = kzalloc(meta->formats_num *
+                               sizeof(struct mcp_trace_format),
+                               GFP_KERNEL);
+       if (!meta->formats)
+               return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+
+       /* Allocate and read all strings */
+       for (i = 0; i < meta->formats_num; i++) {
+               struct mcp_trace_format *format_ptr = &meta->formats[i];
+               u8 format_len;
+
+               format_ptr->data = qed_read_dword_from_buf(meta_buf_bytes,
+                                                          &offset);
+               format_len =
+                   (format_ptr->data &
+                    MCP_TRACE_FORMAT_LEN_MASK) >> MCP_TRACE_FORMAT_LEN_SHIFT;
+               format_ptr->format_str = kzalloc(format_len, GFP_KERNEL);
+               if (!format_ptr->format_str) {
+                       /* Update number of formats to be released: formats
+                        * 0..i-1 were allocated successfully.
+                        */
+                       meta->formats_num = i;
+                       return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+               }
+
+               qed_read_str_from_buf(meta_buf_bytes,
+                                     &offset,
+                                     format_len, format_ptr->format_str);
+       }
+
+       return DBG_STATUS_OK;
+}
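+
+/* Editor's note (a summary of the parsing above, not in the original patch):
+ * the meta image consumed by qed_mcp_trace_alloc_meta() is laid out as:
+ *   dword:      MCP_TRACE_META_IMAGE_SIGNATURE
+ *   byte:       modules_num
+ *   per module: a length byte, then a string of that many bytes (incl. NUL)
+ *   dword:      MCP_TRACE_META_IMAGE_SIGNATURE (repeated)
+ *   dword:      formats_num
+ *   per format: a packed mcp_trace_format data dword (whose LEN field holds
+ *               the format-string length), then the format string itself
+ */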
+
+/* Parses an MCP Trace dump buffer.
+ * If results_buf is not NULL, the MCP Trace results are printed to it.
+ * In any case, the required results buffer size is assigned to
+ * parsed_results_bytes.
+ * The parsing status is returned.
+ */
+static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
+                                               u32 *dump_buf,
+                                               u32 num_dumped_dwords,
+                                               char *results_buf,
+                                               u32 *parsed_results_bytes)
+{
+       u32 results_offset = 0, param_mask, param_shift, param_num_val;
+       u32 num_section_params, offset, end_offset, bytes_left;
+       const char *section_name, *param_name, *param_str_val;
+       u32 trace_data_dwords, trace_meta_dwords;
+       struct mcp_trace_meta meta;
+       struct mcp_trace *trace;
+       enum dbg_status status;
+       const u32 *meta_buf;
+       u8 *trace_buf;
+
+       *parsed_results_bytes = 0;
+
+       /* Read global_params section */
+       dump_buf += qed_read_section_hdr(dump_buf,
+                                        &section_name, &num_section_params);
+       if (strcmp(section_name, "global_params"))
+               return DBG_STATUS_MCP_TRACE_BAD_DATA;
+
+       /* Print global params */
+       dump_buf += qed_print_section_params(dump_buf,
+                                            num_section_params,
+                                            results_buf, &results_offset);
+
+       /* Read trace_data section */
+       dump_buf += qed_read_section_hdr(dump_buf,
+                                        &section_name, &num_section_params);
+       if (strcmp(section_name, "mcp_trace_data") || num_section_params != 1)
+               return DBG_STATUS_MCP_TRACE_BAD_DATA;
+       dump_buf += qed_read_param(dump_buf,
+                                  &param_name, &param_str_val, &param_num_val);
+       if (strcmp(param_name, "size"))
+               return DBG_STATUS_MCP_TRACE_BAD_DATA;
+       trace_data_dwords = param_num_val;
+
+       /* Prepare trace info */
+       trace = (struct mcp_trace *)dump_buf;
+       trace_buf = (u8 *)dump_buf + sizeof(struct mcp_trace);
+       offset = trace->trace_oldest;
+       end_offset = trace->trace_prod;
+       bytes_left = qed_cyclic_sub(end_offset, offset, trace->size);
+       dump_buf += trace_data_dwords;
+
+       /* Read meta_data section */
+       dump_buf += qed_read_section_hdr(dump_buf,
+                                        &section_name, &num_section_params);
+       if (strcmp(section_name, "mcp_trace_meta"))
+               return DBG_STATUS_MCP_TRACE_BAD_DATA;
+       dump_buf += qed_read_param(dump_buf,
+                                  &param_name, &param_str_val, &param_num_val);
+       if (strcmp(param_name, "size") != 0)
+               return DBG_STATUS_MCP_TRACE_BAD_DATA;
+       trace_meta_dwords = param_num_val;
+
+       /* Choose meta data buffer */
+       if (!trace_meta_dwords) {
+               /* Dump doesn't include meta data */
+               if (!s_mcp_trace_meta.ptr)
+                       return DBG_STATUS_MCP_TRACE_NO_META;
+               meta_buf = s_mcp_trace_meta.ptr;
+       } else {
+               /* Dump includes meta data */
+               meta_buf = dump_buf;
+       }
+
+       /* Allocate meta data memory */
+       status = qed_mcp_trace_alloc_meta(p_hwfn, meta_buf, &meta);
+       if (status != DBG_STATUS_OK)
+               goto free_mem;
+
+       /* Ignore the level and modules masks - just print everything that is
+        * already in the buffer.
+        */
+       while (bytes_left) {
+               struct mcp_trace_format *format_ptr;
+               u8 format_level, format_module;
+               u32 params[3] = { 0, 0, 0 };
+               u32 header, format_idx, i;
+
+               if (bytes_left < MFW_TRACE_ENTRY_SIZE) {
+                       status = DBG_STATUS_MCP_TRACE_BAD_DATA;
+                       goto free_mem;
+               }
+
+               header = qed_read_from_cyclic_buf(trace_buf,
+                                                 &offset,
+                                                 trace->size,
+                                                 MFW_TRACE_ENTRY_SIZE);
+               bytes_left -= MFW_TRACE_ENTRY_SIZE;
+               format_idx = header & MFW_TRACE_EVENTID_MASK;
+
+               /* Skip message if its index doesn't exist in the meta data */
+               if (format_idx >= meta.formats_num) {
+                       u8 format_size =
+                           (u8)((header &
+                                 MFW_TRACE_PRM_SIZE_MASK) >>
+                                MFW_TRACE_PRM_SIZE_SHIFT);
+
+                       if (bytes_left < format_size) {
+                               status = DBG_STATUS_MCP_TRACE_BAD_DATA;
+                               goto free_mem;
+                       }
+
+                       offset = qed_cyclic_add(offset,
+                                               format_size, trace->size);
+                       bytes_left -= format_size;
+                       continue;
+               }
+
+               format_ptr = &meta.formats[format_idx];
+               for (i = 0,
+                    param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, param_shift =
+                    MCP_TRACE_FORMAT_P1_SIZE_SHIFT;
+                    i < MCP_TRACE_FORMAT_MAX_PARAMS;
+                    i++, param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH,
+                    param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) {
+                       /* Extract param size (0..3) */
+                       u8 param_size =
+                           (u8)((format_ptr->data &
+                                 param_mask) >> param_shift);
+
+                       /* If the param size is zero, there are no other
+                        * parameters.
+                        */
+                       if (!param_size)
+                               break;
+
+                       /* Size is encoded using 2 bits, where 3 is used to
+                        * encode 4.
+                        */
+                       if (param_size == 3)
+                               param_size = 4;
+                       if (bytes_left < param_size) {
+                               status = DBG_STATUS_MCP_TRACE_BAD_DATA;
+                               goto free_mem;
+                       }
+
+                       params[i] = qed_read_from_cyclic_buf(trace_buf,
+                                                            &offset,
+                                                            trace->size,
+                                                            param_size);
+                       bytes_left -= param_size;
+               }
+
+               format_level =
+                   (u8)((format_ptr->data &
+                         MCP_TRACE_FORMAT_LEVEL_MASK) >>
+                         MCP_TRACE_FORMAT_LEVEL_SHIFT);
+               format_module =
+                   (u8)((format_ptr->data &
+                         MCP_TRACE_FORMAT_MODULE_MASK) >>
+                        MCP_TRACE_FORMAT_MODULE_SHIFT);
+               if (format_level >= ARRAY_SIZE(s_mcp_trace_level_str)) {
+                       status = DBG_STATUS_MCP_TRACE_BAD_DATA;
+                       goto free_mem;
+               }
+
+               /* Print current message to results buffer */
+               results_offset +=
+                   sprintf(qed_get_buf_ptr(results_buf,
+                                           results_offset), "%s %-8s: ",
+                           s_mcp_trace_level_str[format_level],
+                           meta.modules[format_module]);
+               results_offset +=
+                   sprintf(qed_get_buf_ptr(results_buf,
+                                           results_offset),
+                           format_ptr->format_str, params[0], params[1],
+                           params[2]);
+       }
+
+free_mem:
+       *parsed_results_bytes = results_offset + 1;
+       qed_mcp_trace_free_meta(p_hwfn, &meta);
+       return status;
+}
+
+enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
+                                                  u32 *dump_buf,
+                                                  u32 num_dumped_dwords,
+                                                  u32 *results_buf_size)
+{
+       return qed_parse_mcp_trace_dump(p_hwfn,
+                                       dump_buf,
+                                       num_dumped_dwords,
+                                       NULL, results_buf_size);
+}
+
+enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
+                                           u32 *dump_buf,
+                                           u32 num_dumped_dwords,
+                                           char *results_buf)
+{
+       u32 parsed_buf_size;
+
+       return qed_parse_mcp_trace_dump(p_hwfn,
+                                       dump_buf,
+                                       num_dumped_dwords,
+                                       results_buf, &parsed_buf_size);
+}
+
+/* Parses a Reg FIFO dump buffer.
+ * If results_buf is not NULL, the Reg FIFO results are printed to it.
+ * In any case, the required results buffer size is assigned to
+ * parsed_results_bytes.
+ * The parsing status is returned.
+ */
+static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn,
+                                              u32 *dump_buf,
+                                              u32 num_dumped_dwords,
+                                              char *results_buf,
+                                              u32 *parsed_results_bytes)
+{
+       u32 results_offset = 0, param_num_val, num_section_params, num_elements;
+       const char *section_name, *param_name, *param_str_val;
+       struct reg_fifo_element *elements;
+       u32 i;
+       u8 j, err_val, vf_val;
+       char vf_str[4];
+
+       /* Read global_params section */
+       dump_buf += qed_read_section_hdr(dump_buf,
+                                        &section_name, &num_section_params);
+       if (strcmp(section_name, "global_params"))
+               return DBG_STATUS_REG_FIFO_BAD_DATA;
+
+       /* Print global params */
+       dump_buf += qed_print_section_params(dump_buf,
+                                            num_section_params,
+                                            results_buf, &results_offset);
+
+       /* Read reg_fifo_data section */
+       dump_buf += qed_read_section_hdr(dump_buf,
+                                        &section_name, &num_section_params);
+       if (strcmp(section_name, "reg_fifo_data"))
+               return DBG_STATUS_REG_FIFO_BAD_DATA;
+       dump_buf += qed_read_param(dump_buf,
+                                  &param_name, &param_str_val, &param_num_val);
+       if (strcmp(param_name, "size"))
+               return DBG_STATUS_REG_FIFO_BAD_DATA;
+       if (param_num_val % REG_FIFO_ELEMENT_DWORDS)
+               return DBG_STATUS_REG_FIFO_BAD_DATA;
+       num_elements = param_num_val / REG_FIFO_ELEMENT_DWORDS;
+       elements = (struct reg_fifo_element *)dump_buf;
+
+       /* Decode elements */
+       for (i = 0; i < num_elements; i++) {
+               bool err_printed = false;
+
+               /* Discover if element belongs to a VF or a PF */
+               vf_val = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_VF);
+               if (vf_val == REG_FIFO_ELEMENT_IS_PF_VF_VAL)
+                       sprintf(vf_str, "%s", "N/A");
+               else
+                       sprintf(vf_str, "%d", vf_val);
+
+               /* Add parsed element to parsed buffer */
+               results_offset +=
+                   sprintf(qed_get_buf_ptr(results_buf,
+                                           results_offset),
+                           "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, errors: ",
+                           elements[i].data,
+                           (u32)GET_FIELD(elements[i].data,
+                                     REG_FIFO_ELEMENT_ADDRESS) *
+                                     REG_FIFO_ELEMENT_ADDR_FACTOR,
+                                     s_access_strs[GET_FIELD(elements[i].data,
+                                                   REG_FIFO_ELEMENT_ACCESS)],
+                           (u32)GET_FIELD(elements[i].data,
+                                          REG_FIFO_ELEMENT_PF), vf_str,
+                           (u32)GET_FIELD(elements[i].data,
+                                     REG_FIFO_ELEMENT_PORT),
+                                     s_privilege_strs[GET_FIELD(elements[i].
+                                     data,
+                                     REG_FIFO_ELEMENT_PRIVILEGE)],
+                           s_protection_strs[GET_FIELD(elements[i].data,
+                                               REG_FIFO_ELEMENT_PROTECTION)],
+                           s_master_strs[GET_FIELD(elements[i].data,
+                                               REG_FIFO_ELEMENT_MASTER)]);
+
+               /* Print errors */
+               for (j = 0,
+                    err_val = GET_FIELD(elements[i].data,
+                                        REG_FIFO_ELEMENT_ERROR);
+                    j < ARRAY_SIZE(s_reg_fifo_error_strs);
+                    j++, err_val >>= 1) {
+                       if (!(err_val & 0x1))
+                               continue;
+                       if (err_printed)
+                               results_offset +=
+                                       sprintf(qed_get_buf_ptr(results_buf,
+                                                               results_offset),
+                                               ", ");
+                       results_offset +=
+                               sprintf(qed_get_buf_ptr(results_buf,
+                                                       results_offset), "%s",
+                                       s_reg_fifo_error_strs[j]);
+                       err_printed = true;
+               }
+
+               results_offset +=
+                   sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
+       }
+
+       results_offset += sprintf(qed_get_buf_ptr(results_buf,
+                                                 results_offset),
+                                 "fifo contained %d elements", num_elements);
+
+       /* Add 1 for string NULL termination */
+       *parsed_results_bytes = results_offset + 1;
+       return DBG_STATUS_OK;
+}
+
+enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
+                                                 u32 *dump_buf,
+                                                 u32 num_dumped_dwords,
+                                                 u32 *results_buf_size)
+{
+       return qed_parse_reg_fifo_dump(p_hwfn,
+                                      dump_buf,
+                                      num_dumped_dwords,
+                                      NULL, results_buf_size);
+}
+
+enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
+                                          u32 *dump_buf,
+                                          u32 num_dumped_dwords,
+                                          char *results_buf)
+{
+       u32 parsed_buf_size;
+
+       return qed_parse_reg_fifo_dump(p_hwfn,
+                                      dump_buf,
+                                      num_dumped_dwords,
+                                      results_buf, &parsed_buf_size);
+}
+
+/* Parses an IGU FIFO dump buffer.
+ * If results_buf is not NULL, the IGU FIFO results are printed to it.
+ * In any case, the required results buffer size is assigned to
+ * parsed_results_bytes.
+ * The parsing status is returned.
+ */
+static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn,
+                                              u32 *dump_buf,
+                                              u32 num_dumped_dwords,
+                                              char *results_buf,
+                                              u32 *parsed_results_bytes)
+{
+       u32 results_offset = 0, param_num_val, num_section_params, num_elements;
+       const char *section_name, *param_name, *param_str_val;
+       struct igu_fifo_element *elements;
+       char parsed_addr_data[32];
+       char parsed_wr_data[256];
+       u32 i;
+       u8 j;
+
+       /* Read global_params section */
+       dump_buf += qed_read_section_hdr(dump_buf,
+                                        &section_name, &num_section_params);
+       if (strcmp(section_name, "global_params"))
+               return DBG_STATUS_IGU_FIFO_BAD_DATA;
+
+       /* Print global params */
+       dump_buf += qed_print_section_params(dump_buf,
+                                            num_section_params,
+                                            results_buf, &results_offset);
+
+       /* Read igu_fifo_data section */
+       dump_buf += qed_read_section_hdr(dump_buf,
+                                        &section_name, &num_section_params);
+       if (strcmp(section_name, "igu_fifo_data"))
+               return DBG_STATUS_IGU_FIFO_BAD_DATA;
+       dump_buf += qed_read_param(dump_buf,
+                                  &param_name, &param_str_val, &param_num_val);
+       if (strcmp(param_name, "size"))
+               return DBG_STATUS_IGU_FIFO_BAD_DATA;
+       if (param_num_val % IGU_FIFO_ELEMENT_DWORDS)
+               return DBG_STATUS_IGU_FIFO_BAD_DATA;
+       num_elements = param_num_val / IGU_FIFO_ELEMENT_DWORDS;
+       elements = (struct igu_fifo_element *)dump_buf;
+
+       /* Decode elements */
+       for (i = 0; i < num_elements; i++) {
+               /* dword12 (dword index 1 and 2) contains bits 32..95 of the
+                * FIFO element.
+                */
+               u64 dword12 =
+                   ((u64)elements[i].dword2 << 32) | elements[i].dword1;
+               bool is_wr_cmd = GET_FIELD(dword12,
+                                          IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD);
+               bool is_pf = GET_FIELD(elements[i].dword0,
+                                      IGU_FIFO_ELEMENT_DWORD0_IS_PF);
+               u16 cmd_addr = GET_FIELD(elements[i].dword0,
+                                        IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR);
+               u8 source = GET_FIELD(elements[i].dword0,
+                                     IGU_FIFO_ELEMENT_DWORD0_SOURCE);
+               u8 err_type = GET_FIELD(elements[i].dword0,
+                                       IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE);
+               const struct igu_fifo_addr_data *addr_data = NULL;
+
+               if (source >= ARRAY_SIZE(s_igu_fifo_source_strs))
+                       return DBG_STATUS_IGU_FIFO_BAD_DATA;
+               if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs))
+                       return DBG_STATUS_IGU_FIFO_BAD_DATA;
+
+               /* Find address data */
+               for (j = 0; j < ARRAY_SIZE(s_igu_fifo_addr_data) && !addr_data;
+                    j++)
+                       if (cmd_addr >= s_igu_fifo_addr_data[j].start_addr &&
+                           cmd_addr <= s_igu_fifo_addr_data[j].end_addr)
+                               addr_data = &s_igu_fifo_addr_data[j];
+               if (!addr_data)
+                       return DBG_STATUS_IGU_FIFO_BAD_DATA;
+
+               /* Prepare parsed address data */
+               switch (addr_data->type) {
+               case IGU_ADDR_TYPE_MSIX_MEM:
+                       sprintf(parsed_addr_data,
+                               " vector_num=0x%x", cmd_addr / 2);
+                       break;
+               case IGU_ADDR_TYPE_WRITE_INT_ACK:
+               case IGU_ADDR_TYPE_WRITE_PROD_UPDATE:
+                       sprintf(parsed_addr_data,
+                               " SB=0x%x", cmd_addr - addr_data->start_addr);
+                       break;
+               default:
+                       parsed_addr_data[0] = '\0';
+               }
+
+               /* Prepare parsed write data */
+               if (is_wr_cmd) {
+                       u32 wr_data = GET_FIELD(dword12,
+                                       IGU_FIFO_ELEMENT_DWORD12_WR_DATA);
+                       u32 prod_cons = GET_FIELD(wr_data,
+                                                 IGU_FIFO_WR_DATA_PROD_CONS);
+                       u8 is_cleanup = GET_FIELD(wr_data,
+                                                 IGU_FIFO_WR_DATA_CMD_TYPE);
+
+                       if (source == IGU_SRC_ATTN) {
+                               sprintf(parsed_wr_data,
+                                       "prod: 0x%x, ", prod_cons);
+                       } else {
+                               if (is_cleanup) {
+                                       u8 cleanup_val = GET_FIELD(wr_data,
+                                                                  IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL);
+                                       u8 cleanup_type = GET_FIELD(wr_data,
+                                                                   IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE);
+
+                                       sprintf(parsed_wr_data,
+                                               "cmd_type: cleanup, cleanup_val: %s, cleanup_type: %d, ",
+                                               cleanup_val ? "set" : "clear",
+                                               cleanup_type);
+                               } else {
+                                       u8 update_flag = GET_FIELD(wr_data,
+                                                                  IGU_FIFO_WR_DATA_UPDATE_FLAG);
+                                       u8 en_dis_int_for_sb =
+                                           GET_FIELD(wr_data,
+                                                     IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB);
+                                       u8 segment = GET_FIELD(wr_data,
+                                                              IGU_FIFO_WR_DATA_SEGMENT);
+                                       u8 timer_mask = GET_FIELD(wr_data,
+                                                                 IGU_FIFO_WR_DATA_TIMER_MASK);
+
+                                       sprintf(parsed_wr_data,
+                                               "cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb: %s, segment: %s, timer_mask=%d, ",
+                                               prod_cons,
+                                               update_flag ? "update" : "nop",
+                                               en_dis_int_for_sb
+                                               ? (en_dis_int_for_sb ==
+                                                  1 ? "disable" : "nop") :
+                                               "enable",
+                                               segment ? "attn" : "regular",
+                                               timer_mask);
+                               }
+                       }
+               } else {
+                       parsed_wr_data[0] = '\0';
+               }
+
+               /* Add parsed element to parsed buffer */
+               results_offset +=
+                   sprintf(qed_get_buf_ptr(results_buf,
+                                           results_offset),
+                           "raw: 0x%01x%08x%08x, %s: %d, source: %s, type: %s, cmd_addr: 0x%x (%s%s), %serror: %s\n",
+                           elements[i].dword2, elements[i].dword1,
+                           elements[i].dword0,
+                           is_pf ? "pf" : "vf",
+                           GET_FIELD(elements[i].dword0,
+                                     IGU_FIFO_ELEMENT_DWORD0_FID),
+                           s_igu_fifo_source_strs[source],
+                           is_wr_cmd ? "wr" : "rd", cmd_addr,
+                           (!is_pf && addr_data->vf_desc)
+                           ? addr_data->vf_desc : addr_data->desc,
+                           parsed_addr_data, parsed_wr_data,
+                           s_igu_fifo_error_strs[err_type]);
+       }
+
+       results_offset += sprintf(qed_get_buf_ptr(results_buf,
+                                                 results_offset),
+                                 "fifo contained %d elements", num_elements);
+
+       /* Add 1 for string NULL termination */
+       *parsed_results_bytes = results_offset + 1;
+       return DBG_STATUS_OK;
+}
+
+enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
+                                                 u32 *dump_buf,
+                                                 u32 num_dumped_dwords,
+                                                 u32 *results_buf_size)
+{
+       return qed_parse_igu_fifo_dump(p_hwfn,
+                                      dump_buf,
+                                      num_dumped_dwords,
+                                      NULL, results_buf_size);
+}
+
+enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
+                                          u32 *dump_buf,
+                                          u32 num_dumped_dwords,
+                                          char *results_buf)
+{
+       u32 parsed_buf_size;
+
+       return qed_parse_igu_fifo_dump(p_hwfn,
+                                      dump_buf,
+                                      num_dumped_dwords,
+                                      results_buf, &parsed_buf_size);
+}
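+
+/* The pair above follows the two-pass pattern used throughout this file:
+ * query the required buffer size, allocate, then print. A minimal caller
+ * sketch (the buffer handling, "num_dwords" and the error path are
+ * illustrative only, not part of this patch):
+ *
+ *     u32 size;
+ *     char *buf;
+ *
+ *     if (qed_get_igu_fifo_results_buf_size(p_hwfn, dump_buf, num_dwords,
+ *                                           &size) != DBG_STATUS_OK)
+ *             return;
+ *     buf = vzalloc(size);
+ *     if (buf && qed_print_igu_fifo_results(p_hwfn, dump_buf, num_dwords,
+ *                                           buf) == DBG_STATUS_OK)
+ *             pr_info("%s", buf);
+ *     vfree(buf);
+ */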
+
+static enum dbg_status
+qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn,
+                                  u32 *dump_buf,
+                                  u32 num_dumped_dwords,
+                                  char *results_buf,
+                                  u32 *parsed_results_bytes)
+{
+       u32 results_offset = 0, param_num_val, num_section_params, num_elements;
+       const char *section_name, *param_name, *param_str_val;
+       struct protection_override_element *elements;
+       u8 i;
+
+       /* Read global_params section */
+       dump_buf += qed_read_section_hdr(dump_buf,
+                                        &section_name, &num_section_params);
+       if (strcmp(section_name, "global_params"))
+               return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
+
+       /* Print global params */
+       dump_buf += qed_print_section_params(dump_buf,
+                                            num_section_params,
+                                            results_buf, &results_offset);
+
+       /* Read protection_override_data section */
+       dump_buf += qed_read_section_hdr(dump_buf,
+                                        &section_name, &num_section_params);
+       if (strcmp(section_name, "protection_override_data"))
+               return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
+       dump_buf += qed_read_param(dump_buf,
+                                  &param_name, &param_str_val, &param_num_val);
+       if (strcmp(param_name, "size"))
+               return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
+       if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS != 0)
+               return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
+       num_elements = param_num_val / PROTECTION_OVERRIDE_ELEMENT_DWORDS;
+       elements = (struct protection_override_element *)dump_buf;
+
+       /* Decode elements */
+       for (i = 0; i < num_elements; i++) {
+               u32 address = GET_FIELD(elements[i].data,
+                                       PROTECTION_OVERRIDE_ELEMENT_ADDRESS) *
+                                       PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR;
+
+               results_offset +=
+                   sprintf(qed_get_buf_ptr(results_buf,
+                                           results_offset),
+                           "window %2d, address: 0x%07x, size: %7d regs, read: %d, write: %d, read protection: %-12s, write protection: %-12s\n",
+                           i, address,
+                           (u32)GET_FIELD(elements[i].data,
+                                     PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE),
+                           (u32)GET_FIELD(elements[i].data,
+                                     PROTECTION_OVERRIDE_ELEMENT_READ),
+                           (u32)GET_FIELD(elements[i].data,
+                                     PROTECTION_OVERRIDE_ELEMENT_WRITE),
+                           s_protection_strs[GET_FIELD(elements[i].data,
+                               PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)],
+                           s_protection_strs[GET_FIELD(elements[i].data,
+                               PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION)]);
+       }
+
+       results_offset += sprintf(qed_get_buf_ptr(results_buf,
+                                                 results_offset),
+                                 "protection override contained %d elements",
+                                 num_elements);
+
+       /* Add 1 for string NULL termination */
+       *parsed_results_bytes = results_offset + 1;
+       return DBG_STATUS_OK;
+}
+
+enum dbg_status
+qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
+                                            u32 *dump_buf,
+                                            u32 num_dumped_dwords,
+                                            u32 *results_buf_size)
+{
+       return qed_parse_protection_override_dump(p_hwfn,
+                                                 dump_buf,
+                                                 num_dumped_dwords,
+                                                 NULL, results_buf_size);
+}
+
+enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
+                                                     u32 *dump_buf,
+                                                     u32 num_dumped_dwords,
+                                                     char *results_buf)
+{
+       u32 parsed_buf_size;
+
+       return qed_parse_protection_override_dump(p_hwfn,
+                                                 dump_buf,
+                                                 num_dumped_dwords,
+                                                 results_buf,
+                                                 &parsed_buf_size);
+}
+
+/* Parses a FW Asserts dump buffer.
+ * If results_buf is not NULL, the FW Asserts results are printed to it.
+ * In any case, the required results buffer size is assigned to
+ * parsed_results_bytes.
+ * The parsing status is returned.
+ */
+static enum dbg_status qed_parse_fw_asserts_dump(struct qed_hwfn *p_hwfn,
+                                                u32 *dump_buf,
+                                                u32 num_dumped_dwords,
+                                                char *results_buf,
+                                                u32 *parsed_results_bytes)
+{
+       u32 results_offset = 0, num_section_params, param_num_val, i;
+       const char *param_name, *param_str_val, *section_name;
+       bool last_section_found = false;
+
+       *parsed_results_bytes = 0;
+
+       /* Read global_params section */
+       dump_buf += qed_read_section_hdr(dump_buf,
+                                        &section_name, &num_section_params);
+       if (strcmp(section_name, "global_params"))
+               return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
+
+       /* Print global params */
+       dump_buf += qed_print_section_params(dump_buf,
+                                            num_section_params,
+                                            results_buf, &results_offset);
+       while (!last_section_found) {
+               const char *storm_letter = NULL;
+               u32 storm_dump_size = 0;
+
+               dump_buf += qed_read_section_hdr(dump_buf,
+                                                &section_name,
+                                                &num_section_params);
+               if (!strcmp(section_name, "last")) {
+                       last_section_found = true;
+                       continue;
+               } else if (strcmp(section_name, "fw_asserts")) {
+                       return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
+               }
+
+               /* Extract params */
+               for (i = 0; i < num_section_params; i++) {
+                       dump_buf += qed_read_param(dump_buf,
+                                                  &param_name,
+                                                  &param_str_val,
+                                                  &param_num_val);
+                       if (!strcmp(param_name, "storm"))
+                               storm_letter = param_str_val;
+                       else if (!strcmp(param_name, "size"))
+                               storm_dump_size = param_num_val;
+                       else
+                               return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
+               }
+
+               if (!storm_letter || !storm_dump_size)
+                       return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
+
+               /* Print data */
+               results_offset += sprintf(qed_get_buf_ptr(results_buf,
+                                                         results_offset),
+                                         "\n%sSTORM_ASSERT: size=%d\n",
+                                         storm_letter, storm_dump_size);
+               for (i = 0; i < storm_dump_size; i++, dump_buf++)
+                       results_offset +=
+                           sprintf(qed_get_buf_ptr(results_buf,
+                                                   results_offset),
+                                   "%08x\n", *dump_buf);
+       }
+
+       /* Add 1 for string NULL termination */
+       *parsed_results_bytes = results_offset + 1;
+       return DBG_STATUS_OK;
+}
+
+enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
+                                                   u32 *dump_buf,
+                                                   u32 num_dumped_dwords,
+                                                   u32 *results_buf_size)
+{
+       return qed_parse_fw_asserts_dump(p_hwfn,
+                                        dump_buf,
+                                        num_dumped_dwords,
+                                        NULL, results_buf_size);
+}
+
+enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
+                                            u32 *dump_buf,
+                                            u32 num_dumped_dwords,
+                                            char *results_buf)
+{
+       u32 parsed_buf_size;
+
+       return qed_parse_fw_asserts_dump(p_hwfn,
+                                        dump_buf,
+                                        num_dumped_dwords,
+                                        results_buf, &parsed_buf_size);
+}
+
+/* Wrapper for unifying the idle_chk and mcp_trace APIs */
+static enum dbg_status
+qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
+                                  u32 *dump_buf,
+                                  u32 num_dumped_dwords,
+                                  char *results_buf)
+{
+       u32 num_errors, num_warnings;
+
+       return qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords,
+                                         results_buf, &num_errors,
+                                         &num_warnings);
+}
+
+/* Feature meta data lookup table */
+static struct {
+       char *name;
+       enum dbg_status (*get_size)(struct qed_hwfn *p_hwfn,
+                                   struct qed_ptt *p_ptt, u32 *size);
+       enum dbg_status (*perform_dump)(struct qed_hwfn *p_hwfn,
+                                       struct qed_ptt *p_ptt, u32 *dump_buf,
+                                       u32 buf_size, u32 *dumped_dwords);
+       enum dbg_status (*print_results)(struct qed_hwfn *p_hwfn,
+                                        u32 *dump_buf, u32 num_dumped_dwords,
+                                        char *results_buf);
+       enum dbg_status (*results_buf_size)(struct qed_hwfn *p_hwfn,
+                                           u32 *dump_buf,
+                                           u32 num_dumped_dwords,
+                                           u32 *results_buf_size);
+} qed_features_lookup[] = {
+       {"grc", qed_dbg_grc_get_dump_buf_size,
+        qed_dbg_grc_dump, NULL, NULL},
+       {"idle_chk", qed_dbg_idle_chk_get_dump_buf_size,
+        qed_dbg_idle_chk_dump,
+        qed_print_idle_chk_results_wrapper,
+        qed_get_idle_chk_results_buf_size},
+       {"mcp_trace", qed_dbg_mcp_trace_get_dump_buf_size,
+        qed_dbg_mcp_trace_dump, qed_print_mcp_trace_results,
+        qed_get_mcp_trace_results_buf_size},
+       {"reg_fifo", qed_dbg_reg_fifo_get_dump_buf_size,
+        qed_dbg_reg_fifo_dump, qed_print_reg_fifo_results,
+        qed_get_reg_fifo_results_buf_size},
+       {"igu_fifo", qed_dbg_igu_fifo_get_dump_buf_size,
+        qed_dbg_igu_fifo_dump, qed_print_igu_fifo_results,
+        qed_get_igu_fifo_results_buf_size},
+       {"protection_override",
+        qed_dbg_protection_override_get_dump_buf_size,
+        qed_dbg_protection_override_dump,
+        qed_print_protection_override_results,
+        qed_get_protection_override_results_buf_size},
+       {"fw_asserts", qed_dbg_fw_asserts_get_dump_buf_size,
+        qed_dbg_fw_asserts_dump,
+        qed_print_fw_asserts_results,
+        qed_get_fw_asserts_results_buf_size},
+};
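+
+/* The rows above are indexed by enum qed_dbg_features (qed_debug.h), so
+ * their order must match the enum order. A sketch of how one row is
+ * consumed (mirroring format_feature() below; "size" is illustrative):
+ *
+ *     rc = qed_features_lookup[DBG_FEATURE_MCP_TRACE].get_size(p_hwfn,
+ *                                                              p_ptt, &size);
+ */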
+
+static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
+{
+       u32 i, precision = 80;
+
+       if (!p_text_buf)
+               return;
+
+       pr_notice("\n%.*s", precision, p_text_buf);
+       for (i = precision; i < text_size; i += precision)
+               pr_cont("%.*s", precision, p_text_buf + i);
+       pr_cont("\n");
+}
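+
+/* For example, a 200 byte buffer is emitted as one pr_notice() of the first
+ * 80 characters followed by pr_cont() chunks at offsets 80 and 160, so no
+ * single printk exceeds the 80 character precision set above.
+ */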
+
+#define QED_RESULTS_BUF_MIN_SIZE 16
+/* Generic function for decoding debug feature info */
+static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
+                                     enum qed_dbg_features feature_idx)
+{
+       struct qed_dbg_feature *feature =
+           &p_hwfn->cdev->dbg_params.features[feature_idx];
+       u32 text_size_bytes, null_char_pos, i;
+       enum dbg_status rc;
+       char *text_buf;
+
+       /* Check if feature supports formatting capability */
+       if (!qed_features_lookup[feature_idx].results_buf_size)
+               return DBG_STATUS_OK;
+
+       /* Obtain size of formatted output */
+       rc = qed_features_lookup[feature_idx].
+               results_buf_size(p_hwfn, (u32 *)feature->dump_buf,
+                                feature->dumped_dwords, &text_size_bytes);
+       if (rc != DBG_STATUS_OK)
+               return rc;
+
+       /* Make sure that the allocated size is a multiple of dword (4 bytes) */
+       null_char_pos = text_size_bytes - 1;
+       text_size_bytes = (text_size_bytes + 3) & ~0x3;
+
+       if (text_size_bytes < QED_RESULTS_BUF_MIN_SIZE) {
+               DP_NOTICE(p_hwfn->cdev,
+                         "formatted size of feature was too small %d. Aborting\n",
+                         text_size_bytes);
+               return DBG_STATUS_INVALID_ARGS;
+       }
+
+       /* Allocate temp text buf */
+       text_buf = vzalloc(text_size_bytes);
+       if (!text_buf)
+               return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+
+       /* Decode feature opcodes to string on temp buf */
+       rc = qed_features_lookup[feature_idx].
+               print_results(p_hwfn, (u32 *)feature->dump_buf,
+                             feature->dumped_dwords, text_buf);
+       if (rc != DBG_STATUS_OK) {
+               vfree(text_buf);
+               return rc;
+       }
+
+       /* Replace the original null character with a '\n' character.
+        * The bytes that were added as a result of the dword alignment are also
+        * padded with '\n' characters.
+        */
+       for (i = null_char_pos; i < text_size_bytes; i++)
+               text_buf[i] = '\n';
+
+       /* Dump printable feature to log */
+       if (p_hwfn->cdev->dbg_params.print_data)
+               qed_dbg_print_feature(text_buf, text_size_bytes);
+
+       /* Free the old dump_buf and point dump_buf at the newly allocated
+        * and formatted text buffer.
+        */
+       vfree(feature->dump_buf);
+       feature->dump_buf = text_buf;
+       feature->buf_size = text_size_bytes;
+       feature->dumped_dwords = text_size_bytes / 4;
+       return rc;
+}
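+
+/* Alignment example for the logic above: a reported text_size_bytes of 81
+ * gives null_char_pos = 80 and a rounded-up size of 84; bytes 80..83 are
+ * then overwritten with '\n', leaving a printable, dword-aligned buffer.
+ */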
+
+/* Generic function for performing the dump of a debug feature. */
+static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
+                                   struct qed_ptt *p_ptt,
+                                   enum qed_dbg_features feature_idx)
+{
+       struct qed_dbg_feature *feature =
+           &p_hwfn->cdev->dbg_params.features[feature_idx];
+       u32 buf_size_dwords;
+       enum dbg_status rc;
+
+       DP_NOTICE(p_hwfn->cdev, "Collecting a debug feature [\"%s\"]\n",
+                 qed_features_lookup[feature_idx].name);
+
+       /* If dump_buf was already allocated, free it (this can happen if a
+        * dump was triggered but the file was never read).
+        * We can't reuse the buffer as is, since its size may have changed.
+        */
+       if (feature->dump_buf) {
+               vfree(feature->dump_buf);
+               feature->dump_buf = NULL;
+       }
+
+       /* Get buffer size from hsi, allocate accordingly, and perform the
+        * dump.
+        */
+       rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
+                                                      &buf_size_dwords);
+       if (rc != DBG_STATUS_OK && rc != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
+               return rc;
+       feature->buf_size = buf_size_dwords * sizeof(u32);
+       feature->dump_buf = vmalloc(feature->buf_size);
+       if (!feature->dump_buf)
+               return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+
+       rc = qed_features_lookup[feature_idx].
+               perform_dump(p_hwfn, p_ptt, (u32 *)feature->dump_buf,
+                            feature->buf_size / sizeof(u32),
+                            &feature->dumped_dwords);
+
+       /* If the MCP is stuck, we get a DBG_STATUS_NVRAM_GET_IMAGE_FAILED
+        * error. In this case the buffer holds valid binary data, but we
+        * won't be able to parse it (since parsing relies on data in NVRAM
+        * which is only accessible when the MFW is responsive). Skip the
+        * formatting but return success so that the binary data is provided.
+        */
+       if (rc == DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
+               return DBG_STATUS_OK;
+
+       if (rc != DBG_STATUS_OK)
+               return rc;
+
+       /* Format output */
+       rc = format_feature(p_hwfn, feature_idx);
+       return rc;
+}
+
+int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
+{
+       return qed_dbg_feature(cdev, buffer, DBG_FEATURE_GRC, num_dumped_bytes);
+}
+
+int qed_dbg_grc_size(struct qed_dev *cdev)
+{
+       return qed_dbg_feature_size(cdev, DBG_FEATURE_GRC);
+}
+
+int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
+{
+       return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IDLE_CHK,
+                              num_dumped_bytes);
+}
+
+int qed_dbg_idle_chk_size(struct qed_dev *cdev)
+{
+       return qed_dbg_feature_size(cdev, DBG_FEATURE_IDLE_CHK);
+}
+
+int qed_dbg_reg_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
+{
+       return qed_dbg_feature(cdev, buffer, DBG_FEATURE_REG_FIFO,
+                              num_dumped_bytes);
+}
+
+int qed_dbg_reg_fifo_size(struct qed_dev *cdev)
+{
+       return qed_dbg_feature_size(cdev, DBG_FEATURE_REG_FIFO);
+}
+
+int qed_dbg_igu_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
+{
+       return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IGU_FIFO,
+                              num_dumped_bytes);
+}
+
+int qed_dbg_igu_fifo_size(struct qed_dev *cdev)
+{
+       return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO);
+}
+
+int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer,
+                               u32 *num_dumped_bytes)
+{
+       return qed_dbg_feature(cdev, buffer, DBG_FEATURE_PROTECTION_OVERRIDE,
+                              num_dumped_bytes);
+}
+
+int qed_dbg_protection_override_size(struct qed_dev *cdev)
+{
+       return qed_dbg_feature_size(cdev, DBG_FEATURE_PROTECTION_OVERRIDE);
+}
+
+int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
+                      u32 *num_dumped_bytes)
+{
+       return qed_dbg_feature(cdev, buffer, DBG_FEATURE_FW_ASSERTS,
+                              num_dumped_bytes);
+}
+
+int qed_dbg_fw_asserts_size(struct qed_dev *cdev)
+{
+       return qed_dbg_feature_size(cdev, DBG_FEATURE_FW_ASSERTS);
+}
+
+int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
+                     u32 *num_dumped_bytes)
+{
+       return qed_dbg_feature(cdev, buffer, DBG_FEATURE_MCP_TRACE,
+                              num_dumped_bytes);
+}
+
+int qed_dbg_mcp_trace_size(struct qed_dev *cdev)
+{
+       return qed_dbg_feature_size(cdev, DBG_FEATURE_MCP_TRACE);
+}
+
+/* Defines the number of bytes allocated for recording the length of a
+ * debugfs feature buffer.
+ */
+#define REGDUMP_HEADER_SIZE                    sizeof(u32)
+#define REGDUMP_HEADER_FEATURE_SHIFT           24
+#define REGDUMP_HEADER_ENGINE_SHIFT            31
+#define REGDUMP_HEADER_OMIT_ENGINE_SHIFT       30
+enum debug_print_features {
+       OLD_MODE = 0,
+       IDLE_CHK = 1,
+       GRC_DUMP = 2,
+       MCP_TRACE = 3,
+       REG_FIFO = 4,
+       PROTECTION_OVERRIDE = 5,
+       IGU_FIFO = 6,
+       PHY = 7,
+       FW_ASSERTS = 8,
+};
+
+static u32 qed_calc_regdump_header(enum debug_print_features feature,
+                                  int engine, u32 feature_size, u8 omit_engine)
+{
+       /* Insert the engine, feature and mode inside the header and combine it
+        * with feature size.
+        */
+       return feature_size | (feature << REGDUMP_HEADER_FEATURE_SHIFT) |
+              (omit_engine << REGDUMP_HEADER_OMIT_ENGINE_SHIFT) |
+              (engine << REGDUMP_HEADER_ENGINE_SHIFT);
+}
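+
+/* For example, IDLE_CHK (1) on engine 0 with omit_engine set and a feature
+ * size of 0x100 bytes yields:
+ *
+ *     0x100 | (1 << 24) | (1 << 30) | (0 << 31) = 0x41000100
+ */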
+
+int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
+{
+       u8 cur_engine, omit_engine = 0, org_engine;
+       u32 offset = 0, feature_size;
+       int rc;
+
+       if (cdev->num_hwfns == 1)
+               omit_engine = 1;
+
+       org_engine = qed_get_debug_engine(cdev);
+       for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
+               /* Collect idle_chks and grcDump for each hw function */
+               DP_VERBOSE(cdev, QED_MSG_DEBUG,
+                          "obtaining idle_chk and grcdump for current engine\n");
+               qed_set_debug_engine(cdev, cur_engine);
+
+               /* First idle_chk */
+               rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
+                                     REGDUMP_HEADER_SIZE, &feature_size);
+               if (!rc) {
+                       *(u32 *)((u8 *)buffer + offset) =
+                           qed_calc_regdump_header(IDLE_CHK, cur_engine,
+                                                   feature_size, omit_engine);
+                       offset += (feature_size + REGDUMP_HEADER_SIZE);
+               } else {
+                       DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
+               }
+
+               /* Second idle_chk */
+               rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
+                                     REGDUMP_HEADER_SIZE, &feature_size);
+               if (!rc) {
+                       *(u32 *)((u8 *)buffer + offset) =
+                           qed_calc_regdump_header(IDLE_CHK, cur_engine,
+                                                   feature_size, omit_engine);
+                       offset += (feature_size + REGDUMP_HEADER_SIZE);
+               } else {
+                       DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
+               }
+
+               /* reg_fifo dump */
+               rc = qed_dbg_reg_fifo(cdev, (u8 *)buffer + offset +
+                                     REGDUMP_HEADER_SIZE, &feature_size);
+               if (!rc) {
+                       *(u32 *)((u8 *)buffer + offset) =
+                           qed_calc_regdump_header(REG_FIFO, cur_engine,
+                                                   feature_size, omit_engine);
+                       offset += (feature_size + REGDUMP_HEADER_SIZE);
+               } else {
+                       DP_ERR(cdev, "qed_dbg_reg_fifo failed. rc = %d\n", rc);
+               }
+
+               /* igu_fifo dump */
+               rc = qed_dbg_igu_fifo(cdev, (u8 *)buffer + offset +
+                                     REGDUMP_HEADER_SIZE, &feature_size);
+               if (!rc) {
+                       *(u32 *)((u8 *)buffer + offset) =
+                           qed_calc_regdump_header(IGU_FIFO, cur_engine,
+                                                   feature_size, omit_engine);
+                       offset += (feature_size + REGDUMP_HEADER_SIZE);
+               } else {
+                       DP_ERR(cdev, "qed_dbg_igu_fifo failed. rc = %d\n", rc);
+               }
+
+               /* protection_override dump */
+               rc = qed_dbg_protection_override(cdev, (u8 *)buffer + offset +
+                                                REGDUMP_HEADER_SIZE,
+                                                &feature_size);
+               if (!rc) {
+                       *(u32 *)((u8 *)buffer + offset) =
+                           qed_calc_regdump_header(PROTECTION_OVERRIDE,
+                                                   cur_engine,
+                                                   feature_size, omit_engine);
+                       offset += (feature_size + REGDUMP_HEADER_SIZE);
+               } else {
+                       DP_ERR(cdev,
+                              "qed_dbg_protection_override failed. rc = %d\n",
+                              rc);
+               }
+
+               /* fw_asserts dump */
+               rc = qed_dbg_fw_asserts(cdev, (u8 *)buffer + offset +
+                                       REGDUMP_HEADER_SIZE, &feature_size);
+               if (!rc) {
+                       *(u32 *)((u8 *)buffer + offset) =
+                           qed_calc_regdump_header(FW_ASSERTS, cur_engine,
+                                                   feature_size, omit_engine);
+                       offset += (feature_size + REGDUMP_HEADER_SIZE);
+               } else {
+                       DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n",
+                              rc);
+               }
+
+               /* GRC dump - must be last because when the MCP is stuck it
+                * will clutter idle_chk, reg_fifo, ...
+                */
+               rc = qed_dbg_grc(cdev, (u8 *)buffer + offset +
+                                REGDUMP_HEADER_SIZE, &feature_size);
+               if (!rc) {
+                       *(u32 *)((u8 *)buffer + offset) =
+                           qed_calc_regdump_header(GRC_DUMP, cur_engine,
+                                                   feature_size, omit_engine);
+                       offset += (feature_size + REGDUMP_HEADER_SIZE);
+               } else {
+                       DP_ERR(cdev, "qed_dbg_grc failed. rc = %d\n", rc);
+               }
+       }
+
+       /* mcp_trace */
+       rc = qed_dbg_mcp_trace(cdev, (u8 *)buffer + offset +
+                              REGDUMP_HEADER_SIZE, &feature_size);
+       if (!rc) {
+               *(u32 *)((u8 *)buffer + offset) =
+                   qed_calc_regdump_header(MCP_TRACE, cur_engine,
+                                           feature_size, omit_engine);
+               offset += (feature_size + REGDUMP_HEADER_SIZE);
+       } else {
+               DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
+       }
+
+       qed_set_debug_engine(cdev, org_engine);
+
+       return 0;
+}
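+
+/* A sketch of how a consumer might walk the buffer built above (the reader
+ * side, "total_size" and "feature" are illustrative; the masks follow the
+ * REGDUMP_HEADER_* shifts):
+ *
+ *     u32 offset = 0, hdr, size;
+ *
+ *     while (offset < total_size) {
+ *             hdr = *(u32 *)((u8 *)buffer + offset);
+ *             size = hdr & ((1 << REGDUMP_HEADER_FEATURE_SHIFT) - 1);
+ *             feature = (hdr >> REGDUMP_HEADER_FEATURE_SHIFT) & 0x3f;
+ *             offset += REGDUMP_HEADER_SIZE + size;
+ *     }
+ */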
+
+int qed_dbg_all_data_size(struct qed_dev *cdev)
+{
+       u8 cur_engine, org_engine;
+       u32 regs_len = 0;
+
+       org_engine = qed_get_debug_engine(cdev);
+       for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
+               /* Engine specific */
+               DP_VERBOSE(cdev, QED_MSG_DEBUG,
+                          "calculating idle_chk and grcdump register length for current engine\n");
+               qed_set_debug_engine(cdev, cur_engine);
+               regs_len += REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
+                           REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
+                           REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) +
+                           REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) +
+                           REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) +
+                           REGDUMP_HEADER_SIZE +
+                           qed_dbg_protection_override_size(cdev) +
+                           REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);
+       }
+
+       /* Engine common */
+       regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev);
+       qed_set_debug_engine(cdev, org_engine);
+
+       return regs_len;
+}
+
+int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
+                   enum qed_dbg_features feature, u32 *num_dumped_bytes)
+{
+       struct qed_hwfn *p_hwfn =
+               &cdev->hwfns[cdev->dbg_params.engine_for_debug];
+       struct qed_dbg_feature *qed_feature =
+               &cdev->dbg_params.features[feature];
+       enum dbg_status dbg_rc;
+       struct qed_ptt *p_ptt;
+       int rc = 0;
+
+       /* Acquire ptt */
+       p_ptt = qed_ptt_acquire(p_hwfn);
+       if (!p_ptt)
+               return -EINVAL;
+
+       /* Get dump */
+       dbg_rc = qed_dbg_dump(p_hwfn, p_ptt, feature);
+       if (dbg_rc != DBG_STATUS_OK) {
+               DP_VERBOSE(cdev, QED_MSG_DEBUG, "%s\n",
+                          qed_dbg_get_status_str(dbg_rc));
+               *num_dumped_bytes = 0;
+               rc = -EINVAL;
+               goto out;
+       }
+
+       DP_VERBOSE(cdev, QED_MSG_DEBUG,
+                  "copying debugfs feature to external buffer\n");
+       memcpy(buffer, qed_feature->dump_buf, qed_feature->buf_size);
+       *num_dumped_bytes = cdev->dbg_params.features[feature].dumped_dwords *
+                           4;
+
+out:
+       qed_ptt_release(p_hwfn, p_ptt);
+       return rc;
+}
+
+int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
+{
+       struct qed_hwfn *p_hwfn =
+               &cdev->hwfns[cdev->dbg_params.engine_for_debug];
+       struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+       struct qed_dbg_feature *qed_feature =
+               &cdev->dbg_params.features[feature];
+       u32 buf_size_dwords;
+       enum dbg_status rc;
+
+       if (!p_ptt)
+               return -EINVAL;
+
+       rc = qed_features_lookup[feature].get_size(p_hwfn, p_ptt,
+                                                  &buf_size_dwords);
+       if (rc != DBG_STATUS_OK)
+               buf_size_dwords = 0;
+
+       qed_ptt_release(p_hwfn, p_ptt);
+       qed_feature->buf_size = buf_size_dwords * sizeof(u32);
+       return qed_feature->buf_size;
+}
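+
+/* Typical usage of the two exported helpers above (sketch; the allocation,
+ * error handling and the consume() stand-in for the caller's copy-out are
+ * illustrative):
+ *
+ *     int size = qed_dbg_feature_size(cdev, DBG_FEATURE_GRC);
+ *     void *buf = size > 0 ? vzalloc(size) : NULL;
+ *     u32 dumped;
+ *
+ *     if (buf && !qed_dbg_feature(cdev, buf, DBG_FEATURE_GRC, &dumped))
+ *             consume(buf, dumped);
+ *     vfree(buf);
+ */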
+
+u8 qed_get_debug_engine(struct qed_dev *cdev)
+{
+       return cdev->dbg_params.engine_for_debug;
+}
+
+void qed_set_debug_engine(struct qed_dev *cdev, int engine_number)
+{
+       DP_VERBOSE(cdev, QED_MSG_DEBUG, "set debug engine to %d\n",
+                  engine_number);
+       cdev->dbg_params.engine_for_debug = engine_number;
+}
+
+void qed_dbg_pf_init(struct qed_dev *cdev)
+{
+       const u8 *dbg_values;
+
+       /* Debug values are after init values.
+        * The offset is the first dword of the file.
+        */
+       dbg_values = cdev->firmware->data + *(u32 *)cdev->firmware->data;
+       qed_dbg_set_bin_ptr((u8 *)dbg_values);
+       qed_dbg_user_set_bin_ptr((u8 *)dbg_values);
+}
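+
+/* Firmware file layout assumed by the pointer math above (illustrative):
+ *
+ *     dword 0:      byte offset N of the debug values within the file
+ *     bytes 4..N-1: init values
+ *     bytes N.. :   debug values handed to the bin_ptr setters
+ */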
+
+void qed_dbg_pf_exit(struct qed_dev *cdev)
+{
+       struct qed_dbg_feature *feature = NULL;
+       enum qed_dbg_features feature_idx;
+
+       /* A debug feature's buffer may still be allocated if the feature was
+        * used but its dump was never read (so the buffer was never freed).
+        */
+       for (feature_idx = 0; feature_idx < DBG_FEATURE_NUM; feature_idx++) {
+               feature = &cdev->dbg_params.features[feature_idx];
+               if (feature->dump_buf) {
+                       vfree(feature->dump_buf);
+                       feature->dump_buf = NULL;
+               }
+       }
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.h b/drivers/net/ethernet/qlogic/qed/qed_debug.h
new file mode 100644 (file)
index 0000000..f872d73
--- /dev/null
@@ -0,0 +1,54 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_DEBUGFS_H
+#define _QED_DEBUGFS_H
+
+enum qed_dbg_features {
+       DBG_FEATURE_GRC,
+       DBG_FEATURE_IDLE_CHK,
+       DBG_FEATURE_MCP_TRACE,
+       DBG_FEATURE_REG_FIFO,
+       DBG_FEATURE_IGU_FIFO,
+       DBG_FEATURE_PROTECTION_OVERRIDE,
+       DBG_FEATURE_FW_ASSERTS,
+       DBG_FEATURE_NUM
+};
+
+int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes);
+int qed_dbg_grc_size(struct qed_dev *cdev);
+int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer,
+                    u32 *num_dumped_bytes);
+int qed_dbg_idle_chk_size(struct qed_dev *cdev);
+int qed_dbg_reg_fifo(struct qed_dev *cdev, void *buffer,
+                    u32 *num_dumped_bytes);
+int qed_dbg_reg_fifo_size(struct qed_dev *cdev);
+int qed_dbg_igu_fifo(struct qed_dev *cdev, void *buffer,
+                    u32 *num_dumped_bytes);
+int qed_dbg_igu_fifo_size(struct qed_dev *cdev);
+int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer,
+                               u32 *num_dumped_bytes);
+int qed_dbg_protection_override_size(struct qed_dev *cdev);
+int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
+                      u32 *num_dumped_bytes);
+int qed_dbg_fw_asserts_size(struct qed_dev *cdev);
+int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
+                     u32 *num_dumped_bytes);
+int qed_dbg_mcp_trace_size(struct qed_dev *cdev);
+int qed_dbg_all_data(struct qed_dev *cdev, void *buffer);
+int qed_dbg_all_data_size(struct qed_dev *cdev);
+u8 qed_get_debug_engine(struct qed_dev *cdev);
+void qed_set_debug_engine(struct qed_dev *cdev, int engine_number);
+int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
+                   enum qed_dbg_features feature, u32 *num_dumped_bytes);
+int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature);
+
+void qed_dbg_pf_init(struct qed_dev *cdev);
+void qed_dbg_pf_exit(struct qed_dev *cdev);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 0e4f4a9306b55ac64173a7a1ce73ad0626e5a143..fad73195010d01b4d50ad80ff5c7e399787daa94 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #include <linux/types.h>
 #include "qed_cxt.h"
 #include "qed_dcbx.h"
 #include "qed_dev_api.h"
+#include "qed_fcoe.h"
 #include "qed_hsi.h"
 #include "qed_hw.h"
 #include "qed_init_ops.h"
 #include "qed_int.h"
+#include "qed_iscsi.h"
+#include "qed_ll2.h"
 #include "qed_mcp.h"
+#include "qed_ooo.h"
 #include "qed_reg_addr.h"
 #include "qed_sp.h"
 #include "qed_sriov.h"
 #include "qed_vf.h"
+#include "qed_roce.h"
+
+static DEFINE_SPINLOCK(qm_lock);
 
-static spinlock_t qm_lock;
-static bool qm_lock_init = false;
+#define QED_MIN_DPIS            (4)
+#define QED_MIN_PWM_REGION      (QED_WID_SIZE * QED_MIN_DPIS)
 
 /* API common to all protocols */
 enum BAR_ID {
@@ -44,8 +75,8 @@ enum BAR_ID {
        BAR_ID_1        /* Used for doorbells */
 };
 
-static u32 qed_hw_bar_size(struct qed_hwfn     *p_hwfn,
-                          enum BAR_ID          bar_id)
+static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
+                          struct qed_ptt *p_ptt, enum BAR_ID bar_id)
 {
        u32 bar_reg = (bar_id == BAR_ID_0 ?
                       PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
@@ -54,7 +85,7 @@ static u32 qed_hw_bar_size(struct qed_hwfn    *p_hwfn,
        if (IS_VF(p_hwfn->cdev))
                return 1 << 17;
 
-       val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
+       val = qed_rd(p_hwfn, p_ptt, bar_reg);
        if (val)
                return 1 << (val + 15);
 
@@ -70,8 +101,7 @@ static u32 qed_hw_bar_size(struct qed_hwfn   *p_hwfn,
        }
 }
 
-void qed_init_dp(struct qed_dev *cdev,
-                u32 dp_module, u8 dp_level)
+void qed_init_dp(struct qed_dev *cdev, u32 dp_module, u8 dp_level)
 {
        u32 i;
 
@@ -132,15 +162,6 @@ void qed_resc_free(struct qed_dev *cdev)
 
        kfree(cdev->reset_stats);
 
-       for_each_hwfn(cdev, i) {
-               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
-
-               kfree(p_hwfn->p_tx_cids);
-               p_hwfn->p_tx_cids = NULL;
-               kfree(p_hwfn->p_rx_cids);
-               p_hwfn->p_rx_cids = NULL;
-       }
-
        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 
@@ -150,202 +171,585 @@ void qed_resc_free(struct qed_dev *cdev)
                qed_eq_free(p_hwfn, p_hwfn->p_eq);
                qed_consq_free(p_hwfn, p_hwfn->p_consq);
                qed_int_free(p_hwfn);
+#ifdef CONFIG_QED_LL2
+               qed_ll2_free(p_hwfn, p_hwfn->p_ll2_info);
+#endif
+               if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
+                       qed_fcoe_free(p_hwfn, p_hwfn->p_fcoe_info);
+
+               if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
+                       qed_iscsi_free(p_hwfn, p_hwfn->p_iscsi_info);
+                       qed_ooo_free(p_hwfn, p_hwfn->p_ooo_info);
+               }
                qed_iov_free(p_hwfn);
                qed_dmae_info_free(p_hwfn);
                qed_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
        }
 }
 
-static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
+/******************** QM initialization *******************/
+#define ACTIVE_TCS_BMAP 0x9f
+#define ACTIVE_TCS_BMAP_4PORT_K2 0xf
+
+/* determines the physical queue flags for a given PF. */
+static u32 qed_get_pq_flags(struct qed_hwfn *p_hwfn)
 {
-       u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0;
-       struct qed_qm_info *qm_info = &p_hwfn->qm_info;
-       struct init_qm_port_params *p_qm_port;
-       bool init_rdma_offload_pq = false;
-       bool init_pure_ack_pq = false;
-       bool init_ooo_pq = false;
-       u16 num_pqs, multi_cos_tcs = 1;
-       u8 pf_wfq = qm_info->pf_wfq;
-       u32 pf_rl = qm_info->pf_rl;
-       u16 num_pf_rls = 0;
-       u16 num_vfs = 0;
-
-#ifdef CONFIG_QED_SRIOV
-       if (p_hwfn->cdev->p_iov_info)
-               num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
-#endif
-       memset(qm_info, 0, sizeof(*qm_info));
+       u32 flags;
 
-       num_pqs = multi_cos_tcs + num_vfs + 1;  /* The '1' is for pure-LB */
-       num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);
+       /* common flags */
+       flags = PQ_FLAGS_LB;
 
-       if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
-               num_pqs++;      /* for RoCE queue */
-               init_rdma_offload_pq = true;
-               /* we subtract num_vfs because each require a rate limiter,
-                * and one default rate limiter
-                */
-               if (p_hwfn->pf_params.rdma_pf_params.enable_dcqcn)
-                       num_pf_rls = RESC_NUM(p_hwfn, QED_RL) - num_vfs - 1;
+       /* feature flags */
+       if (IS_QED_SRIOV(p_hwfn->cdev))
+               flags |= PQ_FLAGS_VFS;
 
-               num_pqs += num_pf_rls;
-               qm_info->num_pf_rls = (u8) num_pf_rls;
+       /* protocol flags */
+       switch (p_hwfn->hw_info.personality) {
+       case QED_PCI_ETH:
+               flags |= PQ_FLAGS_MCOS;
+               break;
+       case QED_PCI_FCOE:
+               flags |= PQ_FLAGS_OFLD;
+               break;
+       case QED_PCI_ISCSI:
+               flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
+               break;
+       case QED_PCI_ETH_ROCE:
+               flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD | PQ_FLAGS_LLT;
+               break;
+       default:
+               DP_ERR(p_hwfn,
+                      "unknown personality %d\n", p_hwfn->hw_info.personality);
+               return 0;
        }
 
-       if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
-               num_pqs += 2;   /* for iSCSI pure-ACK / OOO queue */
-               init_pure_ack_pq = true;
-               init_ooo_pq = true;
-       }
+       return flags;
+}
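+
+/* For example, an iSCSI PF on an SR-IOV enabled device gets
+ * PQ_FLAGS_LB | PQ_FLAGS_VFS | PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD,
+ * while a plain L2 PF without VFs gets only PQ_FLAGS_LB | PQ_FLAGS_MCOS.
+ */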
 
-       /* Sanity checking that setup requires legal number of resources */
-       if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) {
-               DP_ERR(p_hwfn,
-                      "Need too many Physical queues - 0x%04x when only %04x are available\n",
-                      num_pqs, RESC_NUM(p_hwfn, QED_PQ));
-               return -EINVAL;
-       }
+/* Getters for resource amounts necessary for qm initialization */
+u8 qed_init_qm_get_num_tcs(struct qed_hwfn *p_hwfn)
+{
+       return p_hwfn->hw_info.num_hw_tc;
+}
 
-       /* PQs will be arranged as follows: First per-TC PQ then pure-LB quete.
-        */
-       qm_info->qm_pq_params = kcalloc(num_pqs,
-                                       sizeof(struct init_qm_pq_params),
-                                       b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
-       if (!qm_info->qm_pq_params)
-               goto alloc_err;
+u16 qed_init_qm_get_num_vfs(struct qed_hwfn *p_hwfn)
+{
+       return IS_QED_SRIOV(p_hwfn->cdev) ?
+              p_hwfn->cdev->p_iov_info->total_vfs : 0;
+}
 
-       qm_info->qm_vport_params = kcalloc(num_vports,
-                                          sizeof(struct init_qm_vport_params),
-                                          b_sleepable ? GFP_KERNEL
-                                                      : GFP_ATOMIC);
-       if (!qm_info->qm_vport_params)
-               goto alloc_err;
+#define NUM_DEFAULT_RLS 1
 
-       qm_info->qm_port_params = kcalloc(MAX_NUM_PORTS,
-                                         sizeof(struct init_qm_port_params),
-                                         b_sleepable ? GFP_KERNEL
-                                                     : GFP_ATOMIC);
-       if (!qm_info->qm_port_params)
-               goto alloc_err;
+u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn)
+{
+       u16 num_pf_rls, num_vfs = qed_init_qm_get_num_vfs(p_hwfn);
 
-       qm_info->wfq_data = kcalloc(num_vports, sizeof(struct qed_wfq_data),
-                                   b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
-       if (!qm_info->wfq_data)
-               goto alloc_err;
+       /* The number of RLs can't exceed the amount of RL or vport resources */
+       num_pf_rls = (u16) min_t(u32, RESC_NUM(p_hwfn, QED_RL),
+                                RESC_NUM(p_hwfn, QED_VPORT));
 
-       vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
+       /* Make sure after we reserve there's something left */
+       if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS)
+               return 0;
 
-       /* First init rate limited queues */
-       for (curr_queue = 0; curr_queue < num_pf_rls; curr_queue++) {
-               qm_info->qm_pq_params[curr_queue].vport_id = vport_id++;
-               qm_info->qm_pq_params[curr_queue].tc_id =
-                   p_hwfn->hw_info.non_offload_tc;
-               qm_info->qm_pq_params[curr_queue].wrr_group = 1;
-               qm_info->qm_pq_params[curr_queue].rl_valid = 1;
-       }
+       /* subtract rls necessary for VFs and one default one for the PF */
+       num_pf_rls -= num_vfs + NUM_DEFAULT_RLS;
 
-       /* First init per-TC PQs */
-       for (i = 0; i < multi_cos_tcs; i++) {
-               struct init_qm_pq_params *params =
-                   &qm_info->qm_pq_params[curr_queue++];
+       return num_pf_rls;
+}
 
-               if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE ||
-                   p_hwfn->hw_info.personality == QED_PCI_ETH) {
-                       params->vport_id = vport_id;
-                       params->tc_id = p_hwfn->hw_info.non_offload_tc;
-                       params->wrr_group = 1;
-               } else {
-                       params->vport_id = vport_id;
-                       params->tc_id = p_hwfn->hw_info.offload_tc;
-                       params->wrr_group = 1;
-               }
-       }
+u16 qed_init_qm_get_num_vports(struct qed_hwfn *p_hwfn)
+{
+       u32 pq_flags = qed_get_pq_flags(p_hwfn);
 
-       /* Then init pure-LB PQ */
-       qm_info->pure_lb_pq = curr_queue;
-       qm_info->qm_pq_params[curr_queue].vport_id =
-           (u8) RESC_START(p_hwfn, QED_VPORT);
-       qm_info->qm_pq_params[curr_queue].tc_id = PURE_LB_TC;
-       qm_info->qm_pq_params[curr_queue].wrr_group = 1;
-       curr_queue++;
-
-       qm_info->offload_pq = 0;
-       if (init_rdma_offload_pq) {
-               qm_info->offload_pq = curr_queue;
-               qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
-               qm_info->qm_pq_params[curr_queue].tc_id =
-                   p_hwfn->hw_info.offload_tc;
-               qm_info->qm_pq_params[curr_queue].wrr_group = 1;
-               curr_queue++;
-       }
-
-       if (init_pure_ack_pq) {
-               qm_info->pure_ack_pq = curr_queue;
-               qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
-               qm_info->qm_pq_params[curr_queue].tc_id =
-                   p_hwfn->hw_info.offload_tc;
-               qm_info->qm_pq_params[curr_queue].wrr_group = 1;
-               curr_queue++;
-       }
-
-       if (init_ooo_pq) {
-               qm_info->ooo_pq = curr_queue;
-               qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
-               qm_info->qm_pq_params[curr_queue].tc_id = DCBX_ISCSI_OOO_TC;
-               qm_info->qm_pq_params[curr_queue].wrr_group = 1;
-               curr_queue++;
-       }
-
-       /* Then init per-VF PQs */
-       vf_offset = curr_queue;
-       for (i = 0; i < num_vfs; i++) {
-               /* First vport is used by the PF */
-               qm_info->qm_pq_params[curr_queue].vport_id = vport_id + i + 1;
-               qm_info->qm_pq_params[curr_queue].tc_id =
-                   p_hwfn->hw_info.non_offload_tc;
-               qm_info->qm_pq_params[curr_queue].wrr_group = 1;
-               qm_info->qm_pq_params[curr_queue].rl_valid = 1;
-               curr_queue++;
-       }
-
-       qm_info->vf_queues_offset = vf_offset;
-       qm_info->num_pqs = num_pqs;
-       qm_info->num_vports = num_vports;
+       /* all pqs share the same vport, except for vfs and pf_rl pqs */
+       return (!!(PQ_FLAGS_RLS & pq_flags)) *
+              qed_init_qm_get_num_pf_rls(p_hwfn) +
+              (!!(PQ_FLAGS_VFS & pq_flags)) *
+              qed_init_qm_get_num_vfs(p_hwfn) + 1;
+}
+
+/* Calculate the number of PQs according to the requested flags */
+u16 qed_init_qm_get_num_pqs(struct qed_hwfn *p_hwfn)
+{
+       u32 pq_flags = qed_get_pq_flags(p_hwfn);
+
+       return (!!(PQ_FLAGS_RLS & pq_flags)) *
+              qed_init_qm_get_num_pf_rls(p_hwfn) +
+              (!!(PQ_FLAGS_MCOS & pq_flags)) *
+              qed_init_qm_get_num_tcs(p_hwfn) +
+              (!!(PQ_FLAGS_LB & pq_flags)) + (!!(PQ_FLAGS_OOO & pq_flags)) +
+              (!!(PQ_FLAGS_ACK & pq_flags)) + (!!(PQ_FLAGS_OFLD & pq_flags)) +
+              (!!(PQ_FLAGS_LLT & pq_flags)) +
+              (!!(PQ_FLAGS_VFS & pq_flags)) * qed_init_qm_get_num_vfs(p_hwfn);
+}
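+
+/* Worked example for the sum above: an ETH_ROCE PF with 4 TCs, no VFs and
+ * no PF rate limiters has flags LB | MCOS | OFLD | LLT, giving
+ * 4 (mcos) + 1 (lb) + 1 (ofld) + 1 (llt) = 7 PQs.
+ */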
+
+/* initialize the top level QM params */
+static void qed_init_qm_params(struct qed_hwfn *p_hwfn)
+{
+       struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+       bool four_port;
+
+       /* pq and vport bases for this PF */
+       qm_info->start_pq = (u16) RESC_START(p_hwfn, QED_PQ);
+       qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT);
+
+       /* rate limiting and weighted fair queueing are always enabled */
+       qm_info->vport_rl_en = 1;
+       qm_info->vport_wfq_en = 1;
+
+       /* TC config is different for AH 4 port */
+       four_port = p_hwfn->cdev->num_ports_in_engines == MAX_NUM_PORTS_K2;
+
+       /* in AH 4 port we have fewer TCs per port */
+       qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 :
+                                                    NUM_OF_PHYS_TCS;
+
+       /* unless MFW indicated otherwise, ooo_tc == 3 for
+        * AH 4-port and 4 otherwise.
+        */
+       if (!qm_info->ooo_tc)
+               qm_info->ooo_tc = four_port ? DCBX_TCP_OOO_K2_4PORT_TC :
+                                             DCBX_TCP_OOO_TC;
+}
+
+/* initialize qm vport params */
+static void qed_init_qm_vport_params(struct qed_hwfn *p_hwfn)
+{
+       struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+       u8 i;
+
+       /* all vports participate in weighted fair queueing */
+       for (i = 0; i < qed_init_qm_get_num_vports(p_hwfn); i++)
+               qm_info->qm_vport_params[i].vport_wfq = 1;
+}
 
+/* initialize qm port params */
+static void qed_init_qm_port_params(struct qed_hwfn *p_hwfn)
+{
        /* Initialize qm port parameters */
-       num_ports = p_hwfn->cdev->num_ports_in_engines;
+       u8 i, active_phys_tcs, num_ports = p_hwfn->cdev->num_ports_in_engines;
+
+       /* indicate how ooo and high pri traffic is dealt with */
+       active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ?
+                         ACTIVE_TCS_BMAP_4PORT_K2 :
+                         ACTIVE_TCS_BMAP;
+
        for (i = 0; i < num_ports; i++) {
-               p_qm_port = &qm_info->qm_port_params[i];
+               struct init_qm_port_params *p_qm_port =
+                   &p_hwfn->qm_info.qm_port_params[i];
+
                p_qm_port->active = 1;
-               if (num_ports == 4)
-                       p_qm_port->active_phys_tcs = 0x7;
-               else
-                       p_qm_port->active_phys_tcs = 0x9f;
+               p_qm_port->active_phys_tcs = active_phys_tcs;
                p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
                p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
        }
+}
+
+/* Reset the params which must be reset for qm init. QM init may be called as
+ * a result of flows other than driver load (e.g. dcbx renegotiation). Other
+ * params may be affected by the init but would simply recalculate to the same
+ * values. The allocations made for QM init, ports, vports, pqs and vfqs are not
+ * affected as these amounts stay the same.
+ */
+static void qed_init_qm_reset_params(struct qed_hwfn *p_hwfn)
+{
+       struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+       qm_info->num_pqs = 0;
+       qm_info->num_vports = 0;
+       qm_info->num_pf_rls = 0;
+       qm_info->num_vf_pqs = 0;
+       qm_info->first_vf_pq = 0;
+       qm_info->first_mcos_pq = 0;
+       qm_info->first_rl_pq = 0;
+}
+
+static void qed_init_qm_advance_vport(struct qed_hwfn *p_hwfn)
+{
+       struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+       qm_info->num_vports++;
+
+       if (qm_info->num_vports > qed_init_qm_get_num_vports(p_hwfn))
+               DP_ERR(p_hwfn,
+                      "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n",
+                      qm_info->num_vports, qed_init_qm_get_num_vports(p_hwfn));
+}
+
+/* initialize a single pq and manage qm_info resources accounting.
+ * The pq_init_flags param determines whether the PQ is rate limited
+ * (for VF or PF) and whether a new vport is allocated to the pq or not
+ * (i.e. vport will be shared).
+ */
+
+/* flags for pq init */
+#define PQ_INIT_SHARE_VPORT     (1 << 0)
+#define PQ_INIT_PF_RL           (1 << 1)
+#define PQ_INIT_VF_RL           (1 << 2)
 
-       qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;
+/* defines for pq init */
+#define PQ_INIT_DEFAULT_WRR_GROUP       1
+#define PQ_INIT_DEFAULT_TC              0
+#define PQ_INIT_OFLD_TC                 (p_hwfn->hw_info.offload_tc)
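
For orientation, the three pq_init_flags patterns correspond to calls that
appear later in this file (a comment-only sketch of the real invocations):

    /*
     * PQ_INIT_SHARE_VPORT - PQ rides the current vport, no rate limiter:
     *     qed_init_qm_pq(p_hwfn, qm_info, PURE_LB_TC, PQ_INIT_SHARE_VPORT);
     * PQ_INIT_VF_RL - one rate-limited PQ per VF:
     *     qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_DEFAULT_TC, PQ_INIT_VF_RL);
     * PQ_INIT_PF_RL - a PF rate-limited PQ:
     *     qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_PF_RL);
     */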
 
-       qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);
+static void qed_init_qm_pq(struct qed_hwfn *p_hwfn,
+                          struct qed_qm_info *qm_info,
+                          u8 tc, u32 pq_init_flags)
+{
+       u16 pq_idx = qm_info->num_pqs, max_pq = qed_init_qm_get_num_pqs(p_hwfn);
 
+       if (pq_idx > max_pq)
+               DP_ERR(p_hwfn,
+                      "pq overflow! pq %d, max pq %d\n", pq_idx, max_pq);
+
+       /* init pq params */
+       qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport +
+           qm_info->num_vports;
+       qm_info->qm_pq_params[pq_idx].tc_id = tc;
+       qm_info->qm_pq_params[pq_idx].wrr_group = PQ_INIT_DEFAULT_WRR_GROUP;
+       qm_info->qm_pq_params[pq_idx].rl_valid =
+           (pq_init_flags & PQ_INIT_PF_RL || pq_init_flags & PQ_INIT_VF_RL);
+
+       /* qm params accounting */
+       qm_info->num_pqs++;
+       if (!(pq_init_flags & PQ_INIT_SHARE_VPORT))
+               qm_info->num_vports++;
+
+       if (pq_init_flags & PQ_INIT_PF_RL)
+               qm_info->num_pf_rls++;
+
+       if (qm_info->num_vports > qed_init_qm_get_num_vports(p_hwfn))
+               DP_ERR(p_hwfn,
+                      "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n",
+                      qm_info->num_vports, qed_init_qm_get_num_vports(p_hwfn));
+
+       if (qm_info->num_pf_rls > qed_init_qm_get_num_pf_rls(p_hwfn))
+               DP_ERR(p_hwfn,
+                      "rl overflow! qm_info->num_pf_rls %d, qm_init_get_num_pf_rls() %d\n",
+                      qm_info->num_pf_rls, qed_init_qm_get_num_pf_rls(p_hwfn));
+}
+
+/* get pq index according to PQ_FLAGS */
+static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
+                                          u32 pq_flags)
+{
+       struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+       /* Can't have multiple flags set here */
+       if (bitmap_weight((unsigned long *)&pq_flags,
+                         sizeof(pq_flags) * BITS_PER_BYTE) > 1)
+               goto err;
+
+       switch (pq_flags) {
+       case PQ_FLAGS_RLS:
+               return &qm_info->first_rl_pq;
+       case PQ_FLAGS_MCOS:
+               return &qm_info->first_mcos_pq;
+       case PQ_FLAGS_LB:
+               return &qm_info->pure_lb_pq;
+       case PQ_FLAGS_OOO:
+               return &qm_info->ooo_pq;
+       case PQ_FLAGS_ACK:
+               return &qm_info->pure_ack_pq;
+       case PQ_FLAGS_OFLD:
+               return &qm_info->offload_pq;
+       case PQ_FLAGS_LLT:
+               return &qm_info->low_latency_pq;
+       case PQ_FLAGS_VFS:
+               return &qm_info->first_vf_pq;
+       default:
+               goto err;
+       }
+
+err:
+       DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags);
+       return NULL;
+}
+
+/* save pq index in qm info */
+static void qed_init_qm_set_idx(struct qed_hwfn *p_hwfn,
+                               u32 pq_flags, u16 pq_val)
+{
+       u16 *base_pq_idx = qed_init_qm_get_idx_from_flags(p_hwfn, pq_flags);
+
+       *base_pq_idx = p_hwfn->qm_info.start_pq + pq_val;
+}
+
+/* get tx pq index, with the PQ TX base already set (ready for context init) */
+u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags)
+{
+       u16 *base_pq_idx = qed_init_qm_get_idx_from_flags(p_hwfn, pq_flags);
+
+       return *base_pq_idx + CM_TX_PQ_BASE;
+}
+
+u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc)
+{
+       u8 max_tc = qed_init_qm_get_num_tcs(p_hwfn);
+
+       if (tc > max_tc)
+               DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc);
+
+       return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc;
+}
+
+u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf)
+{
+       u16 max_vf = qed_init_qm_get_num_vfs(p_hwfn);
+
+       if (vf > max_vf)
+               DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf);
+
+       return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf;
+}
+
+u16 qed_get_cm_pq_idx_rl(struct qed_hwfn *p_hwfn, u8 rl)
+{
+       u16 max_rl = qed_init_qm_get_num_pf_rls(p_hwfn);
+
+       if (rl > max_rl)
+               DP_ERR(p_hwfn, "rl %d must be smaller than %d\n", rl, max_rl);
+
+       return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl;
+}
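
As a hypothetical caller sketch (ex_choose_tx_pq and its parameters are
invented for illustration, not part of the patch), the getters above compose
as follows:

    static u16 ex_choose_tx_pq(struct qed_hwfn *p_hwfn,
                               bool is_vf, u8 tc, u16 rel_vf_id)
    {
            /* VF queues index off the VFS base PQ; PF L2 queues off MCOS */
            return is_vf ? qed_get_cm_pq_idx_vf(p_hwfn, rel_vf_id)
                         : qed_get_cm_pq_idx_mcos(p_hwfn, tc);
    }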
+
+/* Functions for creating specific types of pqs */
+static void qed_init_qm_lb_pq(struct qed_hwfn *p_hwfn)
+{
+       struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+       if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_LB))
+               return;
+
+       qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_LB, qm_info->num_pqs);
+       qed_init_qm_pq(p_hwfn, qm_info, PURE_LB_TC, PQ_INIT_SHARE_VPORT);
+}
+
+static void qed_init_qm_ooo_pq(struct qed_hwfn *p_hwfn)
+{
+       struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+       if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_OOO))
+               return;
+
+       qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_OOO, qm_info->num_pqs);
+       qed_init_qm_pq(p_hwfn, qm_info, qm_info->ooo_tc, PQ_INIT_SHARE_VPORT);
+}
+
+static void qed_init_qm_pure_ack_pq(struct qed_hwfn *p_hwfn)
+{
+       struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+       if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_ACK))
+               return;
+
+       qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs);
+       qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
+}
+
+static void qed_init_qm_offload_pq(struct qed_hwfn *p_hwfn)
+{
+       struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+       if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_OFLD))
+               return;
+
+       qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs);
+       qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
+}
+
+static void qed_init_qm_low_latency_pq(struct qed_hwfn *p_hwfn)
+{
+       struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+       if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_LLT))
+               return;
+
+       qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_LLT, qm_info->num_pqs);
+       qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
+}
+
+static void qed_init_qm_mcos_pqs(struct qed_hwfn *p_hwfn)
+{
+       struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+       u8 tc_idx;
+
+       if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_MCOS))
+               return;
+
+       qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_MCOS, qm_info->num_pqs);
+       for (tc_idx = 0; tc_idx < qed_init_qm_get_num_tcs(p_hwfn); tc_idx++)
+               qed_init_qm_pq(p_hwfn, qm_info, tc_idx, PQ_INIT_SHARE_VPORT);
+}
+
+static void qed_init_qm_vf_pqs(struct qed_hwfn *p_hwfn)
+{
+       struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+       u16 vf_idx, num_vfs = qed_init_qm_get_num_vfs(p_hwfn);
+
+       if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_VFS))
+               return;
+
+       qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_VFS, qm_info->num_pqs);
        qm_info->num_vf_pqs = num_vfs;
-       qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT);
+       for (vf_idx = 0; vf_idx < num_vfs; vf_idx++)
+               qed_init_qm_pq(p_hwfn,
+                              qm_info, PQ_INIT_DEFAULT_TC, PQ_INIT_VF_RL);
+}
 
-       for (i = 0; i < qm_info->num_vports; i++)
-               qm_info->qm_vport_params[i].vport_wfq = 1;
+static void qed_init_qm_rl_pqs(struct qed_hwfn *p_hwfn)
+{
+       u16 pf_rls_idx, num_pf_rls = qed_init_qm_get_num_pf_rls(p_hwfn);
+       struct qed_qm_info *qm_info = &p_hwfn->qm_info;
 
-       qm_info->vport_rl_en = 1;
-       qm_info->vport_wfq_en = 1;
-       qm_info->pf_rl = pf_rl;
-       qm_info->pf_wfq = pf_wfq;
+       if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_RLS))
+               return;
+
+       qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs);
+       for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++)
+               qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_PF_RL);
+}
+
+static void qed_init_qm_pq_params(struct qed_hwfn *p_hwfn)
+{
+       /* rate limited pqs, must come first (FW assumption) */
+       qed_init_qm_rl_pqs(p_hwfn);
+
+       /* pqs for multi cos */
+       qed_init_qm_mcos_pqs(p_hwfn);
+
+       /* pure loopback pq */
+       qed_init_qm_lb_pq(p_hwfn);
+
+       /* out of order pq */
+       qed_init_qm_ooo_pq(p_hwfn);
+
+       /* pure ack pq */
+       qed_init_qm_pure_ack_pq(p_hwfn);
+
+       /* pq for offloaded protocol */
+       qed_init_qm_offload_pq(p_hwfn);
+
+       /* low latency pq */
+       qed_init_qm_low_latency_pq(p_hwfn);
+
+       /* done sharing vports */
+       qed_init_qm_advance_vport(p_hwfn);
+
+       /* pqs for vfs */
+       qed_init_qm_vf_pqs(p_hwfn);
+}
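
The call order above fixes the PQ layout within the PF's range; relative to
start_pq it can be pictured as follows (every segment is optional, depending
on pq_flags):

    /*
     * [ PF RLs ][ MCOS TCs ][ LB ][ OOO ][ ACK ][ OFLD ][ LLT ][ VF PQs ]
     *
     * Rate-limited PQs must come first (FW assumption noted above); VF PQs
     * come last, so first_vf_pq marks the PF/VF boundary.
     */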
+
+/* compare values of getters against resources amounts */
+static int qed_init_qm_sanity(struct qed_hwfn *p_hwfn)
+{
+       if (qed_init_qm_get_num_vports(p_hwfn) > RESC_NUM(p_hwfn, QED_VPORT)) {
+               DP_ERR(p_hwfn, "requested amount of vports exceeds resource\n");
+               return -EINVAL;
+       }
+
+       if (qed_init_qm_get_num_pqs(p_hwfn) > RESC_NUM(p_hwfn, QED_PQ)) {
+               DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n");
+               return -EINVAL;
+       }
 
        return 0;
+}
 
-alloc_err:
-       DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
-       qed_qm_info_free(p_hwfn);
-       return -ENOMEM;
+static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn)
+{
+       struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+       struct init_qm_vport_params *vport;
+       struct init_qm_port_params *port;
+       struct init_qm_pq_params *pq;
+       int i, tc;
+
+       /* top level params */
+       DP_VERBOSE(p_hwfn,
+                  NETIF_MSG_HW,
+                  "qm init top level params: start_pq %d, start_vport %d, pure_lb_pq %d, offload_pq %d, pure_ack_pq %d\n",
+                  qm_info->start_pq,
+                  qm_info->start_vport,
+                  qm_info->pure_lb_pq,
+                  qm_info->offload_pq, qm_info->pure_ack_pq);
+       DP_VERBOSE(p_hwfn,
+                  NETIF_MSG_HW,
+                  "ooo_pq %d, first_vf_pq %d, num_pqs %d, num_vf_pqs %d, num_vports %d, max_phys_tcs_per_port %d\n",
+                  qm_info->ooo_pq,
+                  qm_info->first_vf_pq,
+                  qm_info->num_pqs,
+                  qm_info->num_vf_pqs,
+                  qm_info->num_vports, qm_info->max_phys_tcs_per_port);
+       DP_VERBOSE(p_hwfn,
+                  NETIF_MSG_HW,
+                  "pf_rl_en %d, pf_wfq_en %d, vport_rl_en %d, vport_wfq_en %d, pf_wfq %d, pf_rl %d, num_pf_rls %d, pq_flags %x\n",
+                  qm_info->pf_rl_en,
+                  qm_info->pf_wfq_en,
+                  qm_info->vport_rl_en,
+                  qm_info->vport_wfq_en,
+                  qm_info->pf_wfq,
+                  qm_info->pf_rl,
+                  qm_info->num_pf_rls, qed_get_pq_flags(p_hwfn));
+
+       /* port table */
+       for (i = 0; i < p_hwfn->cdev->num_ports_in_engines; i++) {
+               port = &(qm_info->qm_port_params[i]);
+               DP_VERBOSE(p_hwfn,
+                          NETIF_MSG_HW,
+                          "port idx %d, active %d, active_phys_tcs %d, num_pbf_cmd_lines %d, num_btb_blocks %d, reserved %d\n",
+                          i,
+                          port->active,
+                          port->active_phys_tcs,
+                          port->num_pbf_cmd_lines,
+                          port->num_btb_blocks, port->reserved);
+       }
+
+       /* vport table */
+       for (i = 0; i < qm_info->num_vports; i++) {
+               vport = &(qm_info->qm_vport_params[i]);
+               DP_VERBOSE(p_hwfn,
+                          NETIF_MSG_HW,
+                          "vport idx %d, vport_rl %d, wfq %d, first_tx_pq_id [ ",
+                          qm_info->start_vport + i,
+                          vport->vport_rl, vport->vport_wfq);
+               for (tc = 0; tc < NUM_OF_TCS; tc++)
+                       DP_VERBOSE(p_hwfn,
+                                  NETIF_MSG_HW,
+                                  "%d ", vport->first_tx_pq_id[tc]);
+               DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "]\n");
+       }
+
+       /* pq table */
+       for (i = 0; i < qm_info->num_pqs; i++) {
+               pq = &(qm_info->qm_pq_params[i]);
+               DP_VERBOSE(p_hwfn,
+                          NETIF_MSG_HW,
+                          "pq idx %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n",
+                          qm_info->start_pq + i,
+                          pq->vport_id,
+                          pq->tc_id, pq->wrr_group, pq->rl_valid);
+       }
+}
+
+static void qed_init_qm_info(struct qed_hwfn *p_hwfn)
+{
+       /* reset params required for init run */
+       qed_init_qm_reset_params(p_hwfn);
+
+       /* init QM top level params */
+       qed_init_qm_params(p_hwfn);
+
+       /* init QM port params */
+       qed_init_qm_port_params(p_hwfn);
+
+       /* init QM vport params */
+       qed_init_qm_vport_params(p_hwfn);
+
+       /* init QM physical queue params */
+       qed_init_qm_pq_params(p_hwfn);
+
+       /* display everything that was just initialized */
+       qed_dp_init_qm_params(p_hwfn);
 }
 
 /* This function reconfigures the QM pf on the fly.
@@ -362,17 +766,8 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
        bool b_rc;
        int rc;
 
-       /* qm_info is allocated in qed_init_qm_info() which is already called
-        * from qed_resc_alloc() or previous call of qed_qm_reconf().
-        * The allocated size may change each init, so we free it before next
-        * allocation.
-        */
-       qed_qm_info_free(p_hwfn);
-
        /* initialize qed's qm data structure */
-       rc = qed_init_qm_info(p_hwfn, false);
-       if (rc)
-               return rc;
+       qed_init_qm_info(p_hwfn);
 
        /* stop PF's qm queues */
        spin_lock_bh(&qm_lock);
@@ -386,7 +781,7 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
        qed_init_clear_rt_data(p_hwfn);
 
        /* prepare QM portion of runtime array */
-       qed_qm_init_pf(p_hwfn);
+       qed_qm_init_pf(p_hwfn, p_ptt);
 
        /* activate init tool on runtime array */
        rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
@@ -405,10 +800,59 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
        return 0;
 }
 
+static int qed_alloc_qm_data(struct qed_hwfn *p_hwfn)
+{
+       struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+       int rc;
+
+       rc = qed_init_qm_sanity(p_hwfn);
+       if (rc)
+               goto alloc_err;
+
+       qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
+                                       qed_init_qm_get_num_pqs(p_hwfn),
+                                       GFP_KERNEL);
+       if (!qm_info->qm_pq_params)
+               goto alloc_err;
+
+       qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
+                                          qed_init_qm_get_num_vports(p_hwfn),
+                                          GFP_KERNEL);
+       if (!qm_info->qm_vport_params)
+               goto alloc_err;
+
+       qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
+                                         p_hwfn->cdev->num_ports_in_engines,
+                                         GFP_KERNEL);
+       if (!qm_info->qm_port_params)
+               goto alloc_err;
+
+       qm_info->wfq_data = kzalloc(sizeof(*qm_info->wfq_data) *
+                                   qed_init_qm_get_num_vports(p_hwfn),
+                                   GFP_KERNEL);
+       if (!qm_info->wfq_data)
+               goto alloc_err;
+
+       return 0;
+
+alloc_err:
+       DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
+       qed_qm_info_free(p_hwfn);
+       return -ENOMEM;
+}
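
As a side note, an overflow-checked equivalent of these allocations using
kcalloc() would look like the sketch below (the patch keeps the open-coded
multiplication inside kzalloc()):

    qm_info->qm_pq_params = kcalloc(qed_init_qm_get_num_pqs(p_hwfn),
                                    sizeof(*qm_info->qm_pq_params),
                                    GFP_KERNEL);
    if (!qm_info->qm_pq_params)
            goto alloc_err;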
+
 int qed_resc_alloc(struct qed_dev *cdev)
 {
+       struct qed_iscsi_info *p_iscsi_info;
+       struct qed_fcoe_info *p_fcoe_info;
+       struct qed_ooo_info *p_ooo_info;
+#ifdef CONFIG_QED_LL2
+       struct qed_ll2_info *p_ll2_info;
+#endif
+       u32 rdma_tasks, excess_tasks;
        struct qed_consq *p_consq;
        struct qed_eq *p_eq;
+       u32 line_count;
        int i, rc = 0;
 
        if (IS_VF(cdev))
@@ -418,29 +862,6 @@ int qed_resc_alloc(struct qed_dev *cdev)
        if (!cdev->fw_data)
                return -ENOMEM;
 
-       /* Allocate Memory for the Queue->CID mapping */
-       for_each_hwfn(cdev, i) {
-               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
-               int tx_size = sizeof(struct qed_hw_cid_data) *
-                                    RESC_NUM(p_hwfn, QED_L2_QUEUE);
-               int rx_size = sizeof(struct qed_hw_cid_data) *
-                                    RESC_NUM(p_hwfn, QED_L2_QUEUE);
-
-               p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL);
-               if (!p_hwfn->p_tx_cids) {
-                       DP_NOTICE(p_hwfn,
-                                 "Failed to allocate memory for Tx Cids\n");
-                       goto alloc_no_mem;
-               }
-
-               p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
-               if (!p_hwfn->p_rx_cids) {
-                       DP_NOTICE(p_hwfn,
-                                 "Failed to allocate memory for Rx Cids\n");
-                       goto alloc_no_mem;
-               }
-       }
-
        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
                u32 n_eqes, num_cons;
@@ -453,19 +874,44 @@ int qed_resc_alloc(struct qed_dev *cdev)
                /* Set the HW cid/tid numbers (in the context manager)
                 * Must be done prior to any further computations.
                 */
-               rc = qed_cxt_set_pf_params(p_hwfn);
+               rc = qed_cxt_set_pf_params(p_hwfn, RDMA_MAX_TIDS);
                if (rc)
                        goto alloc_err;
 
-               /* Prepare and process QM requirements */
-               rc = qed_init_qm_info(p_hwfn, true);
+               rc = qed_alloc_qm_data(p_hwfn);
                if (rc)
                        goto alloc_err;
 
+               /* init qm info */
+               qed_init_qm_info(p_hwfn);
+
                /* Compute the ILT client partition */
-               rc = qed_cxt_cfg_ilt_compute(p_hwfn);
-               if (rc)
-                       goto alloc_err;
+               rc = qed_cxt_cfg_ilt_compute(p_hwfn, &line_count);
+               if (rc) {
+                       DP_NOTICE(p_hwfn,
+                                 "too many ILT lines; re-computing with less lines\n");
+                       /* In case there are not enough ILT lines we reduce the
+                        * number of RDMA tasks and re-compute.
+                        */
+                       excess_tasks =
+                           qed_cxt_cfg_ilt_compute_excess(p_hwfn, line_count);
+                       if (!excess_tasks)
+                               goto alloc_err;
+
+                       rdma_tasks = RDMA_MAX_TIDS - excess_tasks;
+                       rc = qed_cxt_set_pf_params(p_hwfn, rdma_tasks);
+                       if (rc)
+                               goto alloc_err;
+
+                       rc = qed_cxt_cfg_ilt_compute(p_hwfn, &line_count);
+                       if (rc) {
+                               DP_ERR(p_hwfn,
+                                      "failed ILT compute. Requested too many lines: %u\n",
+                                      line_count);
+
+                               goto alloc_err;
+                       }
+               }
 
                /* CID map / ILT shadow table / T2
                 * The table sizes are determined by the computations above
@@ -496,12 +942,13 @@ int qed_resc_alloc(struct qed_dev *cdev)
                if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
                        num_cons = qed_cxt_get_proto_cid_count(p_hwfn,
                                                               PROTOCOLID_ROCE,
-                                                              0) * 2;
+                                                              NULL) * 2;
                        n_eqes += num_cons + 2 * MAX_NUM_VFS_BB;
                } else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
                        num_cons =
                            qed_cxt_get_proto_cid_count(p_hwfn,
-                                                       PROTOCOLID_ISCSI, 0);
+                                                       PROTOCOLID_ISCSI,
+                                                       NULL);
                        n_eqes += 2 * num_cons;
                }
 
@@ -523,29 +970,47 @@ int qed_resc_alloc(struct qed_dev *cdev)
                        goto alloc_no_mem;
                p_hwfn->p_consq = p_consq;
 
+#ifdef CONFIG_QED_LL2
+               if (p_hwfn->using_ll2) {
+                       p_ll2_info = qed_ll2_alloc(p_hwfn);
+                       if (!p_ll2_info)
+                               goto alloc_no_mem;
+                       p_hwfn->p_ll2_info = p_ll2_info;
+               }
+#endif
+
+               if (p_hwfn->hw_info.personality == QED_PCI_FCOE) {
+                       p_fcoe_info = qed_fcoe_alloc(p_hwfn);
+                       if (!p_fcoe_info)
+                               goto alloc_no_mem;
+                       p_hwfn->p_fcoe_info = p_fcoe_info;
+               }
+
+               if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
+                       p_iscsi_info = qed_iscsi_alloc(p_hwfn);
+                       if (!p_iscsi_info)
+                               goto alloc_no_mem;
+                       p_hwfn->p_iscsi_info = p_iscsi_info;
+                       p_ooo_info = qed_ooo_alloc(p_hwfn);
+                       if (!p_ooo_info)
+                               goto alloc_no_mem;
+                       p_hwfn->p_ooo_info = p_ooo_info;
+               }
+
                /* DMA info initialization */
                rc = qed_dmae_info_alloc(p_hwfn);
-               if (rc) {
-                       DP_NOTICE(p_hwfn,
-                                 "Failed to allocate memory for dmae_info structure\n");
+               if (rc)
                        goto alloc_err;
-               }
 
                /* DCBX initialization */
                rc = qed_dcbx_info_alloc(p_hwfn);
-               if (rc) {
-                       DP_NOTICE(p_hwfn,
-                                 "Failed to allocate memory for dcbx structure\n");
+               if (rc)
                        goto alloc_err;
-               }
        }
 
        cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
-       if (!cdev->reset_stats) {
-               DP_NOTICE(cdev, "Failed to allocate reset statistics\n");
-               rc = -ENOMEM;
-               goto alloc_err;
-       }
+       if (!cdev->reset_stats)
+               goto alloc_no_mem;
 
        return 0;
 
@@ -580,6 +1045,17 @@ void qed_resc_setup(struct qed_dev *cdev)
                qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
 
                qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
+#ifdef CONFIG_QED_LL2
+               if (p_hwfn->using_ll2)
+                       qed_ll2_setup(p_hwfn, p_hwfn->p_ll2_info);
+#endif
+               if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
+                       qed_fcoe_setup(p_hwfn, p_hwfn->p_fcoe_info);
+
+               if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
+                       qed_iscsi_setup(p_hwfn, p_hwfn->p_iscsi_info);
+                       qed_ooo_setup(p_hwfn, p_hwfn->p_ooo_info);
+               }
        }
 }
 
@@ -605,9 +1081,8 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn,
 
        /* Make sure notification is not set before initiating final cleanup */
        if (REG_RD(p_hwfn, addr)) {
-               DP_NOTICE(
-                       p_hwfn,
-                       "Unexpected; Found final cleanup notification before initiating final cleanup\n");
+               DP_NOTICE(p_hwfn,
+                         "Unexpected; Found final cleanup notification before initiating final cleanup\n");
                REG_WR(p_hwfn, addr, 0);
        }
 
@@ -633,11 +1108,19 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn,
        return rc;
 }
 
-static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
+static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
 {
        int hw_mode = 0;
 
-       hw_mode = (1 << MODE_BB_B0);
+       if (QED_IS_BB_B0(p_hwfn->cdev)) {
+               hw_mode |= 1 << MODE_BB;
+       } else if (QED_IS_AH(p_hwfn->cdev)) {
+               hw_mode |= 1 << MODE_K2;
+       } else {
+               DP_NOTICE(p_hwfn, "Unknown chip type %#x\n",
+                         p_hwfn->cdev->type);
+               return -EINVAL;
+       }
 
        switch (p_hwfn->cdev->num_ports_in_engines) {
        case 1:
@@ -652,7 +1135,7 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
        default:
                DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
                          p_hwfn->cdev->num_ports_in_engines);
-               return;
+               return -EINVAL;
        }
 
        switch (p_hwfn->cdev->mf_mode) {
@@ -678,6 +1161,8 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
        DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP),
                   "Configuring function for hw_mode: 0x%08x\n",
                   p_hwfn->hw_info.hw_mode);
+
+       return 0;
 }
 
 /* Init run time data for all PFs on an engine. */
@@ -701,25 +1186,73 @@ static void qed_init_cau_rt_data(struct qed_dev *cdev)
                                continue;
 
                        qed_init_cau_sb_entry(p_hwfn, &sb_entry,
-                                             p_block->function_id,
-                                             0, 0);
-                       STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2,
-                                        sb_entry);
+                                             p_block->function_id, 0, 0);
+                       STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2, sb_entry);
                }
        }
 }
 
+static void qed_init_cache_line_size(struct qed_hwfn *p_hwfn,
+                                    struct qed_ptt *p_ptt)
+{
+       u32 val, wr_mbs, cache_line_size;
+
+       val = qed_rd(p_hwfn, p_ptt, PSWRQ2_REG_WR_MBS0);
+       switch (val) {
+       case 0:
+               wr_mbs = 128;
+               break;
+       case 1:
+               wr_mbs = 256;
+               break;
+       case 2:
+               wr_mbs = 512;
+               break;
+       default:
+               DP_INFO(p_hwfn,
+                       "Unexpected value of PSWRQ2_REG_WR_MBS0 [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n",
+                       val);
+               return;
+       }
+
+       cache_line_size = min_t(u32, L1_CACHE_BYTES, wr_mbs);
+       switch (cache_line_size) {
+       case 32:
+               val = 0;
+               break;
+       case 64:
+               val = 1;
+               break;
+       case 128:
+               val = 2;
+               break;
+       case 256:
+               val = 3;
+               break;
+       default:
+               DP_INFO(p_hwfn,
+                       "Unexpected value of cache line size [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n",
+                       cache_line_size);
+       }
+
+       if (L1_CACHE_BYTES > wr_mbs)
+               DP_INFO(p_hwfn,
+                       "The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n",
+                       L1_CACHE_BYTES, wr_mbs);
+
+       STORE_RT_REG(p_hwfn, PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET, val);
+}
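
A worked example of the mapping above, assuming L1_CACHE_BYTES = 64:

    /*
     * PSWRQ2_REG_WR_MBS0 == 1  ->  wr_mbs = 256
     * cache_line_size = min(64, 256) = 64  ->  val = 1
     * 64 <= 256, so the suboptimal-padding notice does not fire.
     */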
+
 static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
-                             struct qed_ptt *p_ptt,
-                             int hw_mode)
+                             struct qed_ptt *p_ptt, int hw_mode)
 {
        struct qed_qm_info *qm_info = &p_hwfn->qm_info;
        struct qed_qm_common_rt_init_params params;
        struct qed_dev *cdev = p_hwfn->cdev;
+       u8 vf_id, max_num_vfs;
        u16 num_pfs, pf_id;
        u32 concrete_fid;
        int rc = 0;
-       u8 vf_id;
 
        qed_init_cau_rt_data(cdev);
 
@@ -746,20 +1279,10 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
 
        qed_cxt_hw_init_common(p_hwfn);
 
-       /* Close gate from NIG to BRB/Storm; By default they are open, but
-        * we close them to prevent NIG from passing data to reset blocks.
-        * Should have been done in the ENGINE phase, but init-tool lacks
-        * proper port-pretend capabilities.
-        */
-       qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
-       qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
-       qed_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
-       qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
-       qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
-       qed_port_unpretend(p_hwfn, p_ptt);
+       qed_init_cache_line_size(p_hwfn, p_ptt);
 
        rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
-       if (rc != 0)
+       if (rc)
                return rc;
 
        qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
@@ -776,49 +1299,156 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
                qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
        }
 
-       for (vf_id = 0; vf_id < MAX_NUM_VFS_BB; vf_id++) {
-               concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
-               qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid);
-               qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
+       max_num_vfs = QED_IS_AH(cdev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB;
+       for (vf_id = 0; vf_id < max_num_vfs; vf_id++) {
+               concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
+               qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid);
+               qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
+               qed_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0);
+               qed_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1);
+               qed_wr(p_hwfn, p_ptt, TCFC_REG_WEAK_ENABLE_VF, 0x0);
+       }
+       /* pretend to original PF */
+       qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+
+       return rc;
+}
+
+static int
+qed_hw_init_dpi_size(struct qed_hwfn *p_hwfn,
+                    struct qed_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus)
+{
+       u32 dpi_page_size_1, dpi_page_size_2, dpi_page_size;
+       u32 dpi_bit_shift, dpi_count;
+       u32 min_dpis;
+
+       /* Calculate DPI size */
+       dpi_page_size_1 = QED_WID_SIZE * n_cpus;
+       dpi_page_size_2 = max_t(u32, QED_WID_SIZE, PAGE_SIZE);
+       dpi_page_size = max_t(u32, dpi_page_size_1, dpi_page_size_2);
+       dpi_page_size = roundup_pow_of_two(dpi_page_size);
+       dpi_bit_shift = ilog2(dpi_page_size / 4096);
+
+       dpi_count = pwm_region_size / dpi_page_size;
+
+       min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis;
+       min_dpis = max_t(u32, QED_MIN_DPIS, min_dpis);
+
+       p_hwfn->dpi_size = dpi_page_size;
+       p_hwfn->dpi_count = dpi_count;
+
+       qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPI_BIT_SHIFT, dpi_bit_shift);
+
+       if (dpi_count < min_dpis)
+               return -EINVAL;
+
+       return 0;
+}
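
A worked example (assumed values: QED_WID_SIZE = 1024, 8 present CPUs,
PAGE_SIZE = 4096, pwm_region_size = 2MB):

    /*
     * dpi_page_size  = roundup_pow_of_two(max(1024 * 8, 4096)) = 8192
     * dpi_bit_shift  = ilog2(8192 / 4096) = 1
     * dpi_count      = 2MB / 8192 = 256 DPIs
     */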
+
+enum QED_ROCE_EDPM_MODE {
+       QED_ROCE_EDPM_MODE_ENABLE = 0,
+       QED_ROCE_EDPM_MODE_FORCE_ON = 1,
+       QED_ROCE_EDPM_MODE_DISABLE = 2,
+};
+
+static int
+qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+       u32 pwm_regsize, norm_regsize;
+       u32 non_pwm_conn, min_addr_reg1;
+       u32 db_bar_size, n_cpus;
+       u32 roce_edpm_mode;
+       u32 pf_dems_shift;
+       int rc = 0;
+       u8 cond;
+
+       db_bar_size = qed_hw_bar_size(p_hwfn, p_ptt, BAR_ID_1);
+       if (p_hwfn->cdev->num_hwfns > 1)
+               db_bar_size /= 2;
+
+       /* Calculate doorbell regions */
+       non_pwm_conn = qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) +
+                      qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE,
+                                                  NULL) +
+                      qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
+                                                  NULL);
+       norm_regsize = roundup(QED_PF_DEMS_SIZE * non_pwm_conn, 4096);
+       min_addr_reg1 = norm_regsize / 4096;
+       pwm_regsize = db_bar_size - norm_regsize;
+
+       /* Check that the normal and PWM sizes are valid */
+       if (db_bar_size < norm_regsize) {
+               DP_ERR(p_hwfn->cdev,
+                      "Doorbell BAR size 0x%x is too small (normal region is 0x%0x )\n",
+                      db_bar_size, norm_regsize);
+               return -EINVAL;
+       }
+
+       if (pwm_regsize < QED_MIN_PWM_REGION) {
+               DP_ERR(p_hwfn->cdev,
+                      "PWM region size 0x%0x is too small. Should be at least 0x%0x (Doorbell BAR size is 0x%x and normal region size is 0x%0x)\n",
+                      pwm_regsize,
+                      QED_MIN_PWM_REGION, db_bar_size, norm_regsize);
+               return -EINVAL;
+       }
+
+       /* Calculate number of DPIs */
+       roce_edpm_mode = p_hwfn->pf_params.rdma_pf_params.roce_edpm_mode;
+       if ((roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE) ||
+           (roce_edpm_mode == QED_ROCE_EDPM_MODE_FORCE_ON)) {
+               /* Either EDPM is mandatory, or we are attempting to allocate a
+                * WID per CPU.
+                */
+               n_cpus = num_present_cpus();
+               rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
        }
-       /* pretend to original PF */
-       qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
 
-       return rc;
-}
+       cond = (rc && (roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE)) ||
+              (roce_edpm_mode == QED_ROCE_EDPM_MODE_DISABLE);
+       if (cond || p_hwfn->dcbx_no_edpm) {
+               /* Either EDPM is disabled from user configuration, or it is
+                * disabled via DCBx, or it is not mandatory and we failed to
+                * allocate a WID per CPU.
+                */
+               n_cpus = 1;
+               rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
 
-static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
-                           struct qed_ptt *p_ptt,
-                           int hw_mode)
-{
-       int rc = 0;
+               if (cond)
+                       qed_rdma_dpm_bar(p_hwfn, p_ptt);
+       }
 
-       rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, hw_mode);
-       if (rc != 0)
-               return rc;
+       DP_INFO(p_hwfn,
+               "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s\n",
+               norm_regsize,
+               pwm_regsize,
+               p_hwfn->dpi_size,
+               p_hwfn->dpi_count,
+               ((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ?
+               "disabled" : "enabled");
 
-       if (hw_mode & (1 << MODE_MF_SI)) {
-               u8 pf_id = 0;
+       if (rc) {
+               DP_ERR(p_hwfn,
+                      "Failed to allocate enough DPIs. Allocated %d but the current minimum is %d.\n",
+                      p_hwfn->dpi_count,
+                      p_hwfn->pf_params.rdma_pf_params.min_dpis);
+               return -EINVAL;
+       }
 
-               if (!qed_hw_init_first_eth(p_hwfn, p_ptt, &pf_id)) {
-                       DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
-                                  "PF[%08x] is first eth on engine\n", pf_id);
+       p_hwfn->dpi_start_offset = norm_regsize;
 
-                       /* We should have configured BIT for ppfid, i.e., the
-                        * relative function number in the port. But there's a
-                        * bug in LLH in BB where the ppfid is actually engine
-                        * based, so we need to take this into account.
-                        */
-                       qed_wr(p_hwfn, p_ptt,
-                              NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR, 1 << pf_id);
-               }
+       /* DEMS size is configured log2 of DWORDs, hence the division by 4 */
+       pf_dems_shift = ilog2(QED_PF_DEMS_SIZE / 4);
+       qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift);
+       qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1);
 
-               /* Take the protocol-based hit vector if there is a hit,
-                * otherwise take the other vector.
-                */
-               qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_CLS_TYPE_DUALMODE, 0x2);
-       }
-       return rc;
+       return 0;
+}
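
An illustrative split (values assumed, with QED_PF_DEMS_SIZE = 4): a 512K
doorbell BAR on a single-hwfn device with 192 non-PWM connections yields:

    /*
     * norm_regsize  = roundup(4 * 192, 4096) = 4096
     * pwm_regsize   = 512K - 4K = 508K
     * min_addr_reg1 = 4096 / 4096 = 1
     */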
+
+static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
+                           struct qed_ptt *p_ptt, int hw_mode)
+{
+       return qed_init_run(p_hwfn, p_ptt, PHASE_PORT,
+                           p_hwfn->port_id, hw_mode);
 }
 
 static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
@@ -843,12 +1473,12 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
                p_hwfn->qm_info.pf_rl = 100000;
        }
 
-       qed_cxt_hw_init_pf(p_hwfn);
+       qed_cxt_hw_init_pf(p_hwfn, p_ptt);
 
        qed_int_igu_init_rt(p_hwfn);
 
        /* Set VLAN in NIG if needed */
-       if (hw_mode & (1 << MODE_MF_SD)) {
+       if (hw_mode & BIT(MODE_MF_SD)) {
                DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
                STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
                STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
@@ -856,7 +1486,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
        }
 
        /* Enable classification by MAC if needed */
-       if (hw_mode & (1 << MODE_MF_SI)) {
+       if (hw_mode & BIT(MODE_MF_SI)) {
                DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
                           "Configuring TAGMAC_CLS_TYPE\n");
                STORE_RT_REG(p_hwfn,
@@ -866,12 +1496,13 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
        /* Protocol Configuration */
        STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET,
                     (p_hwfn->hw_info.personality == QED_PCI_ISCSI) ? 1 : 0);
-       STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0);
+       STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET,
+                    (p_hwfn->hw_info.personality == QED_PCI_FCOE) ? 1 : 0);
        STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
 
        /* Cleanup chip from previous driver if such remains exist */
        rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false);
-       if (rc != 0)
+       if (rc)
                return rc;
 
        /* PF Init sequence */
@@ -887,20 +1518,9 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
        /* Pure runtime initializations - directly to the HW  */
        qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
 
-       if (hw_mode & (1 << MODE_MF_SI)) {
-               u8 pf_id = 0;
-               u32 val = 0;
-
-               if (!qed_hw_init_first_eth(p_hwfn, p_ptt, &pf_id)) {
-                       if (p_hwfn->rel_pf_id == pf_id) {
-                               DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
-                                          "PF[%d] is first ETH on engine\n",
-                                          pf_id);
-                               val = 1;
-                       }
-                       qed_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, val);
-               }
-       }
+       rc = qed_hw_init_pf_doorbell_bar(p_hwfn, p_ptt);
+       if (rc)
+               return rc;
 
        if (b_hw_start) {
                /* enable interrupts */
@@ -909,8 +1529,16 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
                /* send function start command */
                rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode,
                                     allow_npar_tx_switch);
-               if (rc)
+               if (rc) {
                        DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
+                       return rc;
+               }
+               if (p_hwfn->hw_info.personality == QED_PCI_FCOE) {
+                       qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1, BIT(2));
+                       qed_wr(p_hwfn, p_ptt,
+                              PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST,
+                              0x100);
+               }
        }
        return rc;
 }
@@ -950,34 +1578,50 @@ static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
        /* Read shadow of current MFW mailbox */
        qed_mcp_read_mb(p_hwfn, p_main_ptt);
        memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
-              p_hwfn->mcp_info->mfw_mb_cur,
-              p_hwfn->mcp_info->mfw_mb_length);
+              p_hwfn->mcp_info->mfw_mb_cur, p_hwfn->mcp_info->mfw_mb_length);
+}
+
+static void
+qed_fill_load_req_params(struct qed_load_req_params *p_load_req,
+                        struct qed_drv_load_params *p_drv_load)
+{
+       memset(p_load_req, 0, sizeof(*p_load_req));
+
+       p_load_req->drv_role = p_drv_load->is_crash_kernel ?
+                              QED_DRV_ROLE_KDUMP : QED_DRV_ROLE_OS;
+       p_load_req->timeout_val = p_drv_load->mfw_timeout_val;
+       p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset;
+       p_load_req->override_force_load = p_drv_load->override_force_load;
 }
 
-int qed_hw_init(struct qed_dev *cdev,
-               struct qed_tunn_start_params *p_tunn,
-               bool b_hw_start,
-               enum qed_int_mode int_mode,
-               bool allow_npar_tx_switch,
-               const u8 *bin_fw_data)
+int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
 {
-       u32 load_code, param;
-       int rc, mfw_rc, i;
+       struct qed_load_req_params load_req_params;
+       u32 load_code, param, drv_mb_param;
+       bool b_default_mtu = true;
+       struct qed_hwfn *p_hwfn;
+       int rc = 0, mfw_rc, i;
 
-       if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
+       if ((p_params->int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
                DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
                return -EINVAL;
        }
 
        if (IS_PF(cdev)) {
-               rc = qed_init_fw_data(cdev, bin_fw_data);
-               if (rc != 0)
+               rc = qed_init_fw_data(cdev, p_params->bin_fw_data);
+               if (rc)
                        return rc;
        }
 
        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 
+               /* If management didn't provide a default, set one of our own */
+               if (!p_hwfn->hw_info.mtu) {
+                       p_hwfn->hw_info.mtu = 1500;
+                       b_default_mtu = false;
+               }
+
                if (IS_VF(cdev)) {
                        p_hwfn->b_int_enabled = 1;
                        continue;
@@ -986,29 +1630,29 @@ int qed_hw_init(struct qed_dev *cdev,
                /* Enable DMAE in PXP */
                rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
 
-               qed_calc_hw_mode(p_hwfn);
+               rc = qed_calc_hw_mode(p_hwfn);
+               if (rc)
+                       return rc;
 
+               qed_fill_load_req_params(&load_req_params,
+                                        p_params->p_drv_load_params);
                rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
-                                     &load_code);
+                                     &load_req_params);
                if (rc) {
-                       DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
+                       DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
                        return rc;
                }
 
-               qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
-
+               load_code = load_req_params.load_code;
                DP_VERBOSE(p_hwfn, QED_MSG_SP,
-                          "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
-                          rc, load_code);
+                          "Load request was sent. Load code: 0x%x\n",
+                          load_code);
+
+               qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
 
                p_hwfn->first_on_engine = (load_code ==
                                           FW_MSG_CODE_DRV_LOAD_ENGINE);
 
-               if (!qm_lock_init) {
-                       spin_lock_init(&qm_lock);
-                       qm_lock_init = true;
-               }
-
                switch (load_code) {
                case FW_MSG_CODE_DRV_LOAD_ENGINE:
                        rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
@@ -1025,11 +1669,15 @@ int qed_hw_init(struct qed_dev *cdev,
                /* Fall into */
                case FW_MSG_CODE_DRV_LOAD_FUNCTION:
                        rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
-                                           p_tunn, p_hwfn->hw_info.hw_mode,
-                                           b_hw_start, int_mode,
-                                           allow_npar_tx_switch);
+                                           p_params->p_tunn,
+                                           p_hwfn->hw_info.hw_mode,
+                                           p_params->b_hw_start,
+                                           p_params->int_mode,
+                                           p_params->allow_npar_tx_switch);
                        break;
                default:
+                       DP_NOTICE(p_hwfn,
+                                 "Unexpected load code [0x%08x]", load_code);
                        rc = -EINVAL;
                        break;
                }
@@ -1067,13 +1715,41 @@ int qed_hw_init(struct qed_dev *cdev,
                p_hwfn->hw_init_done = true;
        }
 
+       if (IS_PF(cdev)) {
+               p_hwfn = QED_LEADING_HWFN(cdev);
+               drv_mb_param = STORM_FW_VERSION;
+               rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+                                DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER,
+                                drv_mb_param, &load_code, &param);
+               if (rc)
+                       DP_INFO(p_hwfn, "Failed to update firmware version\n");
+
+               if (!b_default_mtu) {
+                       rc = qed_mcp_ov_update_mtu(p_hwfn, p_hwfn->p_main_ptt,
+                                                  p_hwfn->hw_info.mtu);
+                       if (rc)
+                               DP_INFO(p_hwfn,
+                                       "Failed to update default mtu\n");
+               }
+
+               rc = qed_mcp_ov_update_driver_state(p_hwfn,
+                                                   p_hwfn->p_main_ptt,
+                                                 QED_OV_DRIVER_STATE_DISABLED);
+               if (rc)
+                       DP_INFO(p_hwfn, "Failed to update driver state\n");
+
+               rc = qed_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt,
+                                              QED_OV_ESWITCH_VEB);
+               if (rc)
+                       DP_INFO(p_hwfn, "Failed to update eswitch mode\n");
+       }
+
        return 0;
 }
 
 #define QED_HW_STOP_RETRY_LIMIT (10)
-static inline void qed_hw_timers_stop(struct qed_dev *cdev,
-                                     struct qed_hwfn *p_hwfn,
-                                     struct qed_ptt *p_ptt)
+static void qed_hw_timers_stop(struct qed_dev *cdev,
+                              struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
        int i;
 
@@ -1084,8 +1760,7 @@ static inline void qed_hw_timers_stop(struct qed_dev *cdev,
        for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
                if ((!qed_rd(p_hwfn, p_ptt,
                             TM_REG_PF_SCAN_ACTIVE_CONN)) &&
-                   (!qed_rd(p_hwfn, p_ptt,
-                            TM_REG_PF_SCAN_ACTIVE_TASK)))
+                   (!qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)))
                        break;
 
                /* Dependent on number of connection/tasks, possibly
@@ -1117,27 +1792,53 @@ void qed_hw_timers_stop_all(struct qed_dev *cdev)
 
 int qed_hw_stop(struct qed_dev *cdev)
 {
-       int rc = 0, t_rc;
+       struct qed_hwfn *p_hwfn;
+       struct qed_ptt *p_ptt;
+       int rc, rc2 = 0;
        int j;
 
        for_each_hwfn(cdev, j) {
-               struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
-               struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+               p_hwfn = &cdev->hwfns[j];
+               p_ptt = p_hwfn->p_main_ptt;
 
                DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");
 
                if (IS_VF(cdev)) {
                        qed_vf_pf_int_cleanup(p_hwfn);
+                       rc = qed_vf_pf_reset(p_hwfn);
+                       if (rc) {
+                               DP_NOTICE(p_hwfn,
+                                         "qed_vf_pf_reset failed. rc = %d.\n",
+                                         rc);
+                               rc2 = -EINVAL;
+                       }
                        continue;
                }
 
                /* mark the hw as uninitialized... */
                p_hwfn->hw_init_done = false;
 
+               /* Send unload command to MCP */
+               rc = qed_mcp_unload_req(p_hwfn, p_ptt);
+               if (rc) {
+                       DP_NOTICE(p_hwfn,
+                                 "Failed sending a UNLOAD_REQ command. rc = %d.\n",
+                                 rc);
+                       rc2 = -EINVAL;
+               }
+
+               qed_slowpath_irq_sync(p_hwfn);
+
+               /* After this point no MFW attentions are expected, e.g. prevent
+                * race between pf stop and dcbx pf update.
+                */
                rc = qed_sp_pf_stop(p_hwfn);
-               if (rc)
+               if (rc) {
                        DP_NOTICE(p_hwfn,
-                                 "Failed to close PF against FW. Continue to stop HW to prevent illegal host access by the device\n");
+                                 "Failed to close PF against FW [rc = %d]. Continue to stop HW to prevent illegal host access by the device.\n",
+                                 rc);
+                       rc2 = -EINVAL;
+               }
 
                qed_wr(p_hwfn, p_ptt,
                       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
@@ -1160,38 +1861,57 @@ int qed_hw_stop(struct qed_dev *cdev)
 
                /* Need to wait 1ms to guarantee SBs are cleared */
                usleep_range(1000, 2000);
+
+               /* Disable PF in HW blocks */
+               qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0);
+               qed_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0);
+
+               rc = qed_mcp_unload_done(p_hwfn, p_ptt);
+               if (rc) {
+                       DP_NOTICE(p_hwfn,
+                                 "Failed sending a UNLOAD_DONE command. rc = %d.\n",
+                                 rc);
+                       rc2 = -EINVAL;
+               }
        }
 
        if (IS_PF(cdev)) {
+               p_hwfn = QED_LEADING_HWFN(cdev);
+               p_ptt = QED_LEADING_HWFN(cdev)->p_main_ptt;
+
                /* Disable DMAE in PXP - in CMT, this should only be done for
                 * first hw-function, and only after all transactions have
                 * stopped for all active hw-functions.
                 */
-               t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
-                                          cdev->hwfns[0].p_main_ptt, false);
-               if (t_rc != 0)
-                       rc = t_rc;
+               rc = qed_change_pci_hwfn(p_hwfn, p_ptt, false);
+               if (rc) {
+                       DP_NOTICE(p_hwfn,
+                                 "qed_change_pci_hwfn failed. rc = %d.\n", rc);
+                       rc2 = -EINVAL;
+               }
        }
 
-       return rc;
+       return rc2;
 }
 
-void qed_hw_stop_fastpath(struct qed_dev *cdev)
+int qed_hw_stop_fastpath(struct qed_dev *cdev)
 {
        int j;
 
        for_each_hwfn(cdev, j) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
-               struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+               struct qed_ptt *p_ptt;
 
                if (IS_VF(cdev)) {
                        qed_vf_pf_int_cleanup(p_hwfn);
                        continue;
                }
+               p_ptt = qed_ptt_acquire(p_hwfn);
+               if (!p_ptt)
+                       return -EAGAIN;
 
                DP_VERBOSE(p_hwfn,
-                          NETIF_MSG_IFDOWN,
-                          "Shutting down the fastpath\n");
+                          NETIF_MSG_IFDOWN, "Shutting down the fastpath\n");
 
                qed_wr(p_hwfn, p_ptt,
                       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
@@ -1206,179 +1926,482 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev)
 
                /* Need to wait 1ms to guarantee SBs are cleared */
                usleep_range(1000, 2000);
+               qed_ptt_release(p_hwfn, p_ptt);
        }
+
+       return 0;
 }
 
-void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
+int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
 {
+       struct qed_ptt *p_ptt;
+
        if (IS_VF(p_hwfn->cdev))
-               return;
+               return 0;
+
+       p_ptt = qed_ptt_acquire(p_hwfn);
+       if (!p_ptt)
+               return -EAGAIN;
 
        /* Re-open incoming traffic */
-       qed_wr(p_hwfn, p_hwfn->p_main_ptt,
-              NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
+       qed_wr(p_hwfn, p_ptt, NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
+       qed_ptt_release(p_hwfn, p_ptt);
+
+       return 0;
 }
 
-static int qed_reg_assert(struct qed_hwfn *hwfn,
-                         struct qed_ptt *ptt, u32 reg,
-                         bool expected)
+/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
+static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
 {
-       u32 assert_val = qed_rd(hwfn, ptt, reg);
+       qed_ptt_pool_free(p_hwfn);
+       kfree(p_hwfn->hw_info.p_igu_info);
+}
 
-       if (assert_val != expected) {
-               DP_NOTICE(hwfn, "Value at address 0x%x != 0x%08x\n",
-                         reg, expected);
-               return -EINVAL;
+/* Setup bar access */
+static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
+{
+       /* clear indirect access */
+       if (QED_IS_AH(p_hwfn->cdev)) {
+               qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+                      PGLUE_B_REG_PGL_ADDR_E8_F0_K2, 0);
+               qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+                      PGLUE_B_REG_PGL_ADDR_EC_F0_K2, 0);
+               qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+                      PGLUE_B_REG_PGL_ADDR_F0_F0_K2, 0);
+               qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+                      PGLUE_B_REG_PGL_ADDR_F4_F0_K2, 0);
+       } else {
+               qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+                      PGLUE_B_REG_PGL_ADDR_88_F0_BB, 0);
+               qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+                      PGLUE_B_REG_PGL_ADDR_8C_F0_BB, 0);
+               qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+                      PGLUE_B_REG_PGL_ADDR_90_F0_BB, 0);
+               qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+                      PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0);
        }
 
-       return 0;
+       /* Clean previous errors if any exist */
+       qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+              PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 1 << p_hwfn->abs_pf_id);
+
+       /* enable internal target-read */
+       qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+              PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
 }
 
-int qed_hw_reset(struct qed_dev *cdev)
+static void get_function_id(struct qed_hwfn *p_hwfn)
 {
-       int rc = 0;
-       u32 unload_resp, unload_param;
-       int i;
+       /* ME Register */
+       p_hwfn->hw_info.opaque_fid = (u16) REG_RD(p_hwfn,
+                                                 PXP_PF_ME_OPAQUE_ADDR);
 
-       for_each_hwfn(cdev, i) {
-               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+       p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
 
-               if (IS_VF(cdev)) {
-                       rc = qed_vf_pf_reset(p_hwfn);
-                       if (rc)
-                               return rc;
-                       continue;
-               }
+       p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
+       p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
+                                     PXP_CONCRETE_FID_PFID);
+       p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
+                                   PXP_CONCRETE_FID_PORT);
+
+       DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
+                  "Read ME register: Concrete 0x%08x Opaque 0x%04x\n",
+                  p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid);
+}
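
get_function_id() above decodes the concrete FID via the driver's GET_FIELD() mask/shift idiom. The standalone sketch below models that decoding; the field layout constants are invented for illustration and are not the authoritative PXP definitions:

    #include <stdio.h>
    #include <stdint.h>

    /* Same pattern as the kernel's GET_FIELD(): each field is described
     * by a _MASK/_SHIFT pair. Layout values here are hypothetical.
     */
    #define GET_FIELD(value, name) (((value) >> name##_SHIFT) & name##_MASK)

    #define FID_PFID_MASK  0xF
    #define FID_PFID_SHIFT 0
    #define FID_PORT_MASK  0x3
    #define FID_PORT_SHIFT 4

    int main(void)
    {
            uint32_t concrete_fid = 0x00050013;     /* sample register value */

            printf("pfid = %u, port = %u, abs_pf_id = %u\n",
                   (unsigned)GET_FIELD(concrete_fid, FID_PFID),
                   (unsigned)GET_FIELD(concrete_fid, FID_PORT),
                   (unsigned)((concrete_fid >> 16) & 0xf));
            return 0;
    }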
+
+static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
+{
+       u32 *feat_num = p_hwfn->hw_info.feat_num;
+       struct qed_sb_cnt_info sb_cnt_info;
+       u32 non_l2_sbs = 0;
 
-               DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");
+       if (IS_ENABLED(CONFIG_QED_RDMA) &&
+           p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
+               /* Each RoCE CNQ requires: 1 status block + 1 CNQ. We divide
+                * the status blocks equally between L2 and RoCE, taking into
+                * account how many L2 queues / CNQs we have.
+                */
+               feat_num[QED_RDMA_CNQ] =
+                       min_t(u32, RESC_NUM(p_hwfn, QED_SB) / 2,
+                             RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM));
+
+               non_l2_sbs = feat_num[QED_RDMA_CNQ];
+       }
+
+       if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE ||
+           p_hwfn->hw_info.personality == QED_PCI_ETH) {
+               /* Start by allocating VF queues, then PF's */
+               memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
+               qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
+               feat_num[QED_VF_L2_QUE] = min_t(u32,
+                                               RESC_NUM(p_hwfn, QED_L2_QUEUE),
+                                               sb_cnt_info.sb_iov_cnt);
+               feat_num[QED_PF_L2_QUE] = min_t(u32,
+                                               RESC_NUM(p_hwfn, QED_SB) -
+                                               non_l2_sbs,
+                                               RESC_NUM(p_hwfn,
+                                                        QED_L2_QUEUE) -
+                                               FEAT_NUM(p_hwfn,
+                                                        QED_VF_L2_QUE));
+       }
+
+       if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
+               feat_num[QED_ISCSI_CQ] = min_t(u32, RESC_NUM(p_hwfn, QED_SB),
+                                              RESC_NUM(p_hwfn,
+                                                       QED_CMDQS_CQS));
+       DP_VERBOSE(p_hwfn,
+                  NETIF_MSG_PROBE,
+                  "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d ISCSI_CQ=%d #SBS=%d\n",
+                  (int)FEAT_NUM(p_hwfn, QED_PF_L2_QUE),
+                  (int)FEAT_NUM(p_hwfn, QED_VF_L2_QUE),
+                  (int)FEAT_NUM(p_hwfn, QED_RDMA_CNQ),
+                  (int)FEAT_NUM(p_hwfn, QED_ISCSI_CQ),
+                  RESC_NUM(p_hwfn, QED_SB));
+}
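
qed_hw_set_feat() above budgets status blocks: RoCE CNQs get min(SBs / 2, CNQ RAM), VF L2 queues get min(L2 queues, IOV SBs), and the PF L2 queues take what remains of both budgets. A worked example with invented resource counts:

    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
            /* Hypothetical per-PF resources, not real chip numbers */
            unsigned int sbs = 64, cnq_ram = 16, l2_queues = 48, iov_sbs = 8;

            unsigned int rdma_cnq = MIN(sbs / 2, cnq_ram);    /* -> 16 */
            unsigned int vf_l2    = MIN(l2_queues, iov_sbs);  /* -> 8  */
            unsigned int pf_l2    = MIN(sbs - rdma_cnq,       /* -> 40 */
                                        l2_queues - vf_l2);

            printf("CNQ=%u VF_L2=%u PF_L2=%u\n", rdma_cnq, vf_l2, pf_l2);
            return 0;
    }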
+
+const char *qed_hw_get_resc_name(enum qed_resources res_id)
+{
+       switch (res_id) {
+       case QED_L2_QUEUE:
+               return "L2_QUEUE";
+       case QED_VPORT:
+               return "VPORT";
+       case QED_RSS_ENG:
+               return "RSS_ENG";
+       case QED_PQ:
+               return "PQ";
+       case QED_RL:
+               return "RL";
+       case QED_MAC:
+               return "MAC";
+       case QED_VLAN:
+               return "VLAN";
+       case QED_RDMA_CNQ_RAM:
+               return "RDMA_CNQ_RAM";
+       case QED_ILT:
+               return "ILT";
+       case QED_LL2_QUEUE:
+               return "LL2_QUEUE";
+       case QED_CMDQS_CQS:
+               return "CMDQS_CQS";
+       case QED_RDMA_STATS_QUEUE:
+               return "RDMA_STATS_QUEUE";
+       case QED_BDQ:
+               return "BDQ";
+       case QED_SB:
+               return "SB";
+       default:
+               return "UNKNOWN_RESOURCE";
+       }
+}
 
-               /* Check for incorrect states */
-               qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
-                              QM_REG_USG_CNT_PF_TX, 0);
-               qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
-                              QM_REG_USG_CNT_PF_OTHER, 0);
+static int
+__qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn,
+                           struct qed_ptt *p_ptt,
+                           enum qed_resources res_id,
+                           u32 resc_max_val, u32 *p_mcp_resp)
+{
+       int rc;
 
-               /* Disable PF in HW blocks */
-               qed_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
-               qed_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
-               qed_wr(p_hwfn, p_hwfn->p_main_ptt,
-                      TCFC_REG_STRONG_ENABLE_PF, 0);
-               qed_wr(p_hwfn, p_hwfn->p_main_ptt,
-                      CCFC_REG_STRONG_ENABLE_PF, 0);
+       rc = qed_mcp_set_resc_max_val(p_hwfn, p_ptt, res_id,
+                                     resc_max_val, p_mcp_resp);
+       if (rc) {
+               DP_NOTICE(p_hwfn,
+                         "MFW response failure for a max value setting of resource %d [%s]\n",
+                         res_id, qed_hw_get_resc_name(res_id));
+               return rc;
+       }
 
-               /* Send unload command to MCP */
-               rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
-                                DRV_MSG_CODE_UNLOAD_REQ,
-                                DRV_MB_PARAM_UNLOAD_WOL_MCP,
-                                &unload_resp, &unload_param);
-               if (rc) {
-                       DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n");
-                       unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
+       if (*p_mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK)
+               DP_INFO(p_hwfn,
+                       "Failed to set the max value of resource %d [%s]. mcp_resp = 0x%08x.\n",
+                       res_id, qed_hw_get_resc_name(res_id), *p_mcp_resp);
+
+       return 0;
+}
+
+static int
+qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+       bool b_ah = QED_IS_AH(p_hwfn->cdev);
+       u32 resc_max_val, mcp_resp;
+       u8 res_id;
+       int rc;
+
+       for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
+               switch (res_id) {
+               case QED_LL2_QUEUE:
+                       resc_max_val = MAX_NUM_LL2_RX_QUEUES;
+                       break;
+               case QED_RDMA_CNQ_RAM:
+                       /* No need for a case for QED_CMDQS_CQS since
+                        * CNQ/CMDQS are the same resource.
+                        */
+                       resc_max_val = NUM_OF_CMDQS_CQS;
+                       break;
+               case QED_RDMA_STATS_QUEUE:
+                       resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2
+                           : RDMA_NUM_STATISTIC_COUNTERS_BB;
+                       break;
+               case QED_BDQ:
+                       resc_max_val = BDQ_NUM_RESOURCES;
+                       break;
+               default:
+                       continue;
                }
 
-               rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
-                                DRV_MSG_CODE_UNLOAD_DONE,
-                                0, &unload_resp, &unload_param);
-               if (rc) {
-                       DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_DONE failed\n");
+               rc = __qed_hw_set_soft_resc_size(p_hwfn, p_ptt, res_id,
+                                                resc_max_val, &mcp_resp);
+               if (rc)
                        return rc;
-               }
+
+               /* There's no point in continuing to the next resource if
+                * the command is not supported by the MFW.
+                * We do continue if the command is supported but the
+                * resource is unknown to the MFW; such a resource will
+                * later be configured with the default allocation values.
+                */
+               if (mcp_resp == FW_MSG_CODE_UNSUPPORTED)
+                       return -EINVAL;
        }
 
-       return rc;
+       return 0;
 }
 
-/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
-static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
+static
+int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
+                        enum qed_resources res_id,
+                        u32 *p_resc_num, u32 *p_resc_start)
 {
-       qed_ptt_pool_free(p_hwfn);
-       kfree(p_hwfn->hw_info.p_igu_info);
+       u8 num_funcs = p_hwfn->num_funcs_on_engine;
+       bool b_ah = QED_IS_AH(p_hwfn->cdev);
+       struct qed_sb_cnt_info sb_cnt_info;
+
+       switch (res_id) {
+       case QED_L2_QUEUE:
+               *p_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 :
+                              MAX_NUM_L2_QUEUES_BB) / num_funcs;
+               break;
+       case QED_VPORT:
+               *p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
+                              MAX_NUM_VPORTS_BB) / num_funcs;
+               break;
+       case QED_RSS_ENG:
+               *p_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 :
+                              ETH_RSS_ENGINE_NUM_BB) / num_funcs;
+               break;
+       case QED_PQ:
+               *p_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 :
+                              MAX_QM_TX_QUEUES_BB) / num_funcs;
+               *p_resc_num &= ~0x7;    /* The granularity of the PQs is 8 */
+               break;
+       case QED_RL:
+               *p_resc_num = MAX_QM_GLOBAL_RLS / num_funcs;
+               break;
+       case QED_MAC:
+       case QED_VLAN:
+               /* Each VFC resource can accommodate both a MAC and a VLAN */
+               *p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
+               break;
+       case QED_ILT:
+               *p_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 :
+                              PXP_NUM_ILT_RECORDS_BB) / num_funcs;
+               break;
+       case QED_LL2_QUEUE:
+               *p_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
+               break;
+       case QED_RDMA_CNQ_RAM:
+       case QED_CMDQS_CQS:
+               /* CNQ/CMDQS are the same resource */
+               *p_resc_num = NUM_OF_CMDQS_CQS / num_funcs;
+               break;
+       case QED_RDMA_STATS_QUEUE:
+               *p_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 :
+                              RDMA_NUM_STATISTIC_COUNTERS_BB) / num_funcs;
+               break;
+       case QED_BDQ:
+               if (p_hwfn->hw_info.personality != QED_PCI_ISCSI &&
+                   p_hwfn->hw_info.personality != QED_PCI_FCOE)
+                       *p_resc_num = 0;
+               else
+                       *p_resc_num = 1;
+               break;
+       case QED_SB:
+               memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
+               qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
+               *p_resc_num = sb_cnt_info.sb_cnt;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       switch (res_id) {
+       case QED_BDQ:
+               if (!*p_resc_num)
+                       *p_resc_start = 0;
+               else if (p_hwfn->cdev->num_ports_in_engines == 4)
+                       *p_resc_start = p_hwfn->port_id;
+               else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
+                       *p_resc_start = p_hwfn->port_id;
+               else if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
+                       *p_resc_start = p_hwfn->port_id + 2;
+               break;
+       default:
+               *p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx;
+               break;
+       }
+
+       return 0;
 }
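
The default-resource path above splits each global count evenly across the functions on the engine, rounds PQs down to the hardware granularity of 8, and derives the start offset as num * enabled_func_idx. A small sketch of that arithmetic with made-up numbers:

    #include <stdio.h>

    int main(void)
    {
            /* Invented values, not real chip limits */
            unsigned int max_pqs = 448, num_funcs = 6, func_idx = 2;

            unsigned int num = max_pqs / num_funcs; /* 74 */
            num &= ~0x7u;                           /* align down to 8 -> 72 */
            unsigned int start = num * func_idx;    /* 144 */

            printf("PQ num=%u start=%u end=%u\n", num, start, start + num - 1);
            return 0;
    }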
 
-/* Setup bar access */
-static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
+static int __qed_hw_set_resc_info(struct qed_hwfn *p_hwfn,
+                                 enum qed_resources res_id)
 {
-       /* clear indirect access */
-       qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
-       qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
-       qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
-       qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);
+       u32 dflt_resc_num = 0, dflt_resc_start = 0;
+       u32 mcp_resp, *p_resc_num, *p_resc_start;
+       int rc;
+
+       p_resc_num = &RESC_NUM(p_hwfn, res_id);
+       p_resc_start = &RESC_START(p_hwfn, res_id);
+
+       rc = qed_hw_get_dflt_resc(p_hwfn, res_id, &dflt_resc_num,
+                                 &dflt_resc_start);
+       if (rc) {
+               DP_ERR(p_hwfn,
+                      "Failed to get default amount for resource %d [%s]\n",
+                      res_id, qed_hw_get_resc_name(res_id));
+               return rc;
+       }
+
+       rc = qed_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, res_id,
+                                  &mcp_resp, p_resc_num, p_resc_start);
+       if (rc) {
+               DP_NOTICE(p_hwfn,
+                         "MFW response failure for an allocation request for resource %d [%s]\n",
+                         res_id, qed_hw_get_resc_name(res_id));
+               return rc;
+       }
+
+       /* Default driver values are applied in the following cases:
+        * - The resource allocation MB command is not supported by the MFW
+        * - There is an internal error in the MFW while processing the request
+        * - The resource ID is unknown to the MFW
+        */
+       if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) {
+               DP_INFO(p_hwfn,
+                       "Failed to receive allocation info for resource %d [%s]. mcp_resp = 0x%x. Applying default values [%d,%d].\n",
+                       res_id,
+                       qed_hw_get_resc_name(res_id),
+                       mcp_resp, dflt_resc_num, dflt_resc_start);
+               *p_resc_num = dflt_resc_num;
+               *p_resc_start = dflt_resc_start;
+               goto out;
+       }
 
-       /* Clean Previous errors if such exist */
-       qed_wr(p_hwfn, p_hwfn->p_main_ptt,
-              PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
-              1 << p_hwfn->abs_pf_id);
+       /* Special handling for status blocks; will be revised in the future */
+       if (res_id == QED_SB) {
+               *p_resc_num -= 1;
+               *p_resc_start -= p_hwfn->enabled_func_idx;
+       }
+out:
+       /* PQs have to be aligned to 8 [that's the HW granularity].
+        * Reduce the number so it fits.
+        */
+       if ((res_id == QED_PQ) && ((*p_resc_num % 8) || (*p_resc_start % 8))) {
+               DP_INFO(p_hwfn,
+                       "PQs need to align by 8; Number %08x --> %08x, Start %08x --> %08x\n",
+                       *p_resc_num,
+                       (*p_resc_num) & ~0x7,
+                       *p_resc_start, (*p_resc_start) & ~0x7);
+               *p_resc_num &= ~0x7;
+               *p_resc_start &= ~0x7;
+       }
 
-       /* enable internal target-read */
-       qed_wr(p_hwfn, p_hwfn->p_main_ptt,
-              PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
+       return 0;
 }
 
-static void get_function_id(struct qed_hwfn *p_hwfn)
+static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn)
 {
-       /* ME Register */
-       p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR);
+       int rc;
+       u8 res_id;
 
-       p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
+       for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
+               rc = __qed_hw_set_resc_info(p_hwfn, res_id);
+               if (rc)
+                       return rc;
+       }
 
-       p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
-       p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
-                                     PXP_CONCRETE_FID_PFID);
-       p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
-                                   PXP_CONCRETE_FID_PORT);
+       return 0;
 }
 
-static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
-{
-       u32 *feat_num = p_hwfn->hw_info.feat_num;
-       int num_features = 1;
-
-       feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
-                                               num_features,
-                                       RESC_NUM(p_hwfn, QED_L2_QUEUE));
-       DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
-                  "#PF_L2_QUEUES=%d #SBS=%d num_features=%d\n",
-                  feat_num[QED_PF_L2_QUE], RESC_NUM(p_hwfn, QED_SB),
-                  num_features);
-}
+#define QED_RESC_ALLOC_LOCK_RETRY_CNT           10
+#define QED_RESC_ALLOC_LOCK_RETRY_INTVL_US      10000  /* 10 msec */
 
-static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
+static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-       u8 enabled_func_idx = p_hwfn->enabled_func_idx;
-       u32 *resc_start = p_hwfn->hw_info.resc_start;
-       u8 num_funcs = p_hwfn->num_funcs_on_engine;
-       u32 *resc_num = p_hwfn->hw_info.resc_num;
-       struct qed_sb_cnt_info sb_cnt_info;
-       int i, max_vf_vlan_filters;
-
-       memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
-
-#ifdef CONFIG_QED_SRIOV
-       max_vf_vlan_filters = QED_ETH_MAX_VF_NUM_VLAN_FILTERS;
-#else
-       max_vf_vlan_filters = 0;
-#endif
+       struct qed_resc_unlock_params resc_unlock_params;
+       struct qed_resc_lock_params resc_lock_params;
+       bool b_ah = QED_IS_AH(p_hwfn->cdev);
+       u8 res_id;
+       int rc;
 
-       qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
+       /* Setting the max values of the soft resources and the following
+        * resources allocation queries should be atomic. Since several PFs can
+        * run in parallel - a resource lock is needed.
+        * If either the resource lock or resource set value commands are not
+        * supported - skip the max values setting, release the lock if
+        * needed, and proceed to the queries. Other failures, including a
+        * failure to acquire the lock, will cause this function to fail.
+        */
+       memset(&resc_lock_params, 0, sizeof(resc_lock_params));
+       resc_lock_params.resource = QED_RESC_LOCK_RESC_ALLOC;
+       resc_lock_params.retry_num = QED_RESC_ALLOC_LOCK_RETRY_CNT;
+       resc_lock_params.retry_interval = QED_RESC_ALLOC_LOCK_RETRY_INTVL_US;
+       resc_lock_params.sleep_b4_retry = true;
+       memset(&resc_unlock_params, 0, sizeof(resc_unlock_params));
+       resc_unlock_params.resource = QED_RESC_LOCK_RESC_ALLOC;
+
+       rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params);
+       if (rc && rc != -EINVAL) {
+               return rc;
+       } else if (rc == -EINVAL) {
+               DP_INFO(p_hwfn,
+                       "Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n");
+       } else if (!rc && !resc_lock_params.b_granted) {
+               DP_NOTICE(p_hwfn,
+                         "Failed to acquire the resource lock for the resource allocation commands\n");
+               return -EBUSY;
+       } else {
+               rc = qed_hw_set_soft_resc_size(p_hwfn, p_ptt);
+               if (rc && rc != -EINVAL) {
+                       DP_NOTICE(p_hwfn,
+                                 "Failed to set the max values of the soft resources\n");
+                       goto unlock_and_exit;
+               } else if (rc == -EINVAL) {
+                       DP_INFO(p_hwfn,
+                               "Skip the max values setting of the soft resources since it is not supported by the MFW\n");
+                       rc = qed_mcp_resc_unlock(p_hwfn, p_ptt,
+                                                &resc_unlock_params);
+                       if (rc)
+                               DP_INFO(p_hwfn,
+                                       "Failed to release the resource lock for the resource allocation commands\n");
+               }
+       }
 
-       resc_num[QED_SB] = min_t(u32,
-                                (MAX_SB_PER_PATH_BB / num_funcs),
-                                sb_cnt_info.sb_cnt);
-       resc_num[QED_L2_QUEUE] = MAX_NUM_L2_QUEUES_BB / num_funcs;
-       resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs;
-       resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs;
-       resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs;
-       resc_num[QED_RL] = min_t(u32, 64, resc_num[QED_VPORT]);
-       resc_num[QED_MAC] = ETH_NUM_MAC_FILTERS / num_funcs;
-       resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) /
-                            num_funcs;
-       resc_num[QED_ILT] = PXP_NUM_ILT_RECORDS_BB / num_funcs;
+       rc = qed_hw_set_resc_info(p_hwfn);
+       if (rc)
+               goto unlock_and_exit;
 
-       for (i = 0; i < QED_MAX_RESC; i++)
-               resc_start[i] = resc_num[i] * enabled_func_idx;
+       if (resc_lock_params.b_granted && !resc_unlock_params.b_released) {
+               rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params);
+               if (rc)
+                       DP_INFO(p_hwfn,
+                               "Failed to release the resource lock for the resource allocation commands\n");
+       }
 
        /* Sanity for ILT */
-       if (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB) {
+       if ((b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_K2)) ||
+           (!b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB))) {
                DP_NOTICE(p_hwfn, "Can't assign ILT pages [%08x,...,%08x]\n",
                          RESC_START(p_hwfn, QED_ILT),
                          RESC_END(p_hwfn, QED_ILT) - 1);
@@ -1387,41 +2410,24 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
 
        qed_hw_set_feat(p_hwfn);
 
-       DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
-                  "The numbers for each resource are:\n"
-                  "SB = %d start = %d\n"
-                  "L2_QUEUE = %d start = %d\n"
-                  "VPORT = %d start = %d\n"
-                  "PQ = %d start = %d\n"
-                  "RL = %d start = %d\n"
-                  "MAC = %d start = %d\n"
-                  "VLAN = %d start = %d\n"
-                  "ILT = %d start = %d\n",
-                  p_hwfn->hw_info.resc_num[QED_SB],
-                  p_hwfn->hw_info.resc_start[QED_SB],
-                  p_hwfn->hw_info.resc_num[QED_L2_QUEUE],
-                  p_hwfn->hw_info.resc_start[QED_L2_QUEUE],
-                  p_hwfn->hw_info.resc_num[QED_VPORT],
-                  p_hwfn->hw_info.resc_start[QED_VPORT],
-                  p_hwfn->hw_info.resc_num[QED_PQ],
-                  p_hwfn->hw_info.resc_start[QED_PQ],
-                  p_hwfn->hw_info.resc_num[QED_RL],
-                  p_hwfn->hw_info.resc_start[QED_RL],
-                  p_hwfn->hw_info.resc_num[QED_MAC],
-                  p_hwfn->hw_info.resc_start[QED_MAC],
-                  p_hwfn->hw_info.resc_num[QED_VLAN],
-                  p_hwfn->hw_info.resc_start[QED_VLAN],
-                  p_hwfn->hw_info.resc_num[QED_ILT],
-                  p_hwfn->hw_info.resc_start[QED_ILT]);
+       for (res_id = 0; res_id < QED_MAX_RESC; res_id++)
+               DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, "%s = %d start = %d\n",
+                          qed_hw_get_resc_name(res_id),
+                          RESC_NUM(p_hwfn, res_id),
+                          RESC_START(p_hwfn, res_id));
 
        return 0;
+
+unlock_and_exit:
+       if (resc_lock_params.b_granted && !resc_unlock_params.b_released)
+               qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params);
+       return rc;
 }
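
qed_hw_get_resc() above brackets the MFW queries with a resource lock that retries up to QED_RESC_ALLOC_LOCK_RETRY_CNT times, sleeping QED_RESC_ALLOC_LOCK_RETRY_INTVL_US between attempts (sleep_b4_retry). A userspace model of that retry policy; try_lock() is a stand-in for the MFW mailbox command, not the real API:

    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    #define RETRY_CNT       10
    #define RETRY_INTVL_US  10000   /* 10 msec, as in the patch */

    /* Stand-in for the MFW lock command: succeeds on the 3rd attempt */
    static bool try_lock(int attempt) { return attempt >= 3; }

    static bool acquire_resc_lock(void)
    {
            for (int i = 1; i <= RETRY_CNT; i++) {
                    if (try_lock(i))
                            return true;            /* b_granted */
                    usleep(RETRY_INTVL_US);         /* sleep_b4_retry */
            }
            return false;
    }

    int main(void)
    {
            printf("lock %s\n", acquire_resc_lock() ? "granted" : "not granted");
            return 0;
    }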
 
-static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
-                              struct qed_ptt *p_ptt)
+static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-       u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
        u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
+       u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
        struct qed_mcp_link_params *link;
 
        /* Read global nvm_cfg address */
@@ -1468,12 +2474,17 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
                break;
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G:
+               p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X10G;
+               break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
                break;
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G:
+               p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X25G;
+               break;
        default:
-               DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n",
-                         core_cfg);
+               DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", core_cfg);
                break;
        }
 
@@ -1484,11 +2495,11 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
        link_temp = qed_rd(p_hwfn, p_ptt,
                           port_cfg_addr +
                           offsetof(struct nvm_cfg1_port, speed_cap_mask));
-       link->speed.advertised_speeds =
-               link_temp & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
+       link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
+       link->speed.advertised_speeds = link_temp;
 
-       p_hwfn->mcp_info->link_capabilities.speed_capabilities =
-                                               link->speed.advertised_speeds;
+       link_temp = link->speed.advertised_speeds;
+       p_hwfn->mcp_info->link_capabilities.speed_capabilities = link_temp;
 
        link_temp = qed_rd(p_hwfn, p_ptt,
                           port_cfg_addr +
@@ -1517,8 +2528,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
                link->speed.forced_speed = 100000;
                break;
        default:
-               DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n",
-                         link_temp);
+               DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n", link_temp);
        }
 
        link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
@@ -1569,6 +2579,9 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
        if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
                __set_bit(QED_DEV_CAP_ETH,
                          &p_hwfn->hw_info.device_capabilities);
+       if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE)
+               __set_bit(QED_DEV_CAP_FCOE,
+                         &p_hwfn->hw_info.device_capabilities);
        if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI)
                __set_bit(QED_DEV_CAP_ISCSI,
                          &p_hwfn->hw_info.device_capabilities);
@@ -1583,8 +2596,9 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
        u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id;
        u32 reg_function_hide, tmp, eng_mask, low_pfs_mask;
+       struct qed_dev *cdev = p_hwfn->cdev;
 
-       num_funcs = MAX_NUM_PFS_BB;
+       num_funcs = QED_IS_AH(cdev) ? MAX_NUM_PFS_K2 : MAX_NUM_PFS_BB;
 
        /* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
         * in the other bits are selected.
@@ -1597,12 +2611,17 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
        reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);
 
        if (reg_function_hide & 0x1) {
-               if (QED_PATH_ID(p_hwfn) && p_hwfn->cdev->num_hwfns == 1) {
-                       num_funcs = 0;
-                       eng_mask = 0xaaaa;
+               if (QED_IS_BB(cdev)) {
+                       if (QED_PATH_ID(p_hwfn) && cdev->num_hwfns == 1) {
+                               num_funcs = 0;
+                               eng_mask = 0xaaaa;
+                       } else {
+                               num_funcs = 1;
+                               eng_mask = 0x5554;
+                       }
                } else {
                        num_funcs = 1;
-                       eng_mask = 0x5554;
+                       eng_mask = 0xfffe;
                }
 
                /* Get the number of the enabled functions on the engine */
@@ -1628,30 +2647,18 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 
        DP_VERBOSE(p_hwfn,
                   NETIF_MSG_PROBE,
-                  "PF [rel_id %d, abs_id %d] within the %d enabled functions on the engine\n",
+                  "PF [rel_id %d, abs_id %d] occupies index %d within the %d enabled functions on the engine\n",
                   p_hwfn->rel_pf_id,
                   p_hwfn->abs_pf_id,
-                  p_hwfn->num_funcs_on_engine);
+                  p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine);
 }
 
-static int
-qed_get_hw_info(struct qed_hwfn *p_hwfn,
-               struct qed_ptt *p_ptt,
-               enum qed_pci_personality personality)
+static void qed_hw_info_port_num_bb(struct qed_hwfn *p_hwfn,
+                                   struct qed_ptt *p_ptt)
 {
        u32 port_mode;
-       int rc;
-
-       /* Since all information is common, only first hwfns should do this */
-       if (IS_LEAD_HWFN(p_hwfn)) {
-               rc = qed_iov_hw_info(p_hwfn);
-               if (rc)
-                       return rc;
-       }
 
-       /* Read the port mode */
-       port_mode = qed_rd(p_hwfn, p_ptt,
-                          CNIG_REG_NW_PORT_MODE_BB_B0);
+       port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB_B0);
 
        if (port_mode < 3) {
                p_hwfn->cdev->num_ports_in_engines = 1;
@@ -1664,6 +2671,54 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
                /* Default num_ports_in_engines to something */
                p_hwfn->cdev->num_ports_in_engines = 1;
        }
+}
+
+static void qed_hw_info_port_num_ah(struct qed_hwfn *p_hwfn,
+                                   struct qed_ptt *p_ptt)
+{
+       u32 port;
+       int i;
+
+       p_hwfn->cdev->num_ports_in_engines = 0;
+
+       for (i = 0; i < MAX_NUM_PORTS_K2; i++) {
+               port = qed_rd(p_hwfn, p_ptt,
+                             CNIG_REG_NIG_PORT0_CONF_K2 + (i * 4));
+               if (port & 1)
+                       p_hwfn->cdev->num_ports_in_engines++;
+       }
+
+       if (!p_hwfn->cdev->num_ports_in_engines) {
+               DP_NOTICE(p_hwfn, "All NIG ports are inactive\n");
+
+               /* Default num_ports_in_engines to something */
+               p_hwfn->cdev->num_ports_in_engines = 1;
+       }
+}
+
+static void qed_hw_info_port_num(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+       if (QED_IS_BB(p_hwfn->cdev))
+               qed_hw_info_port_num_bb(p_hwfn, p_ptt);
+       else
+               qed_hw_info_port_num_ah(p_hwfn, p_ptt);
+}
+
+static int
+qed_get_hw_info(struct qed_hwfn *p_hwfn,
+               struct qed_ptt *p_ptt,
+               enum qed_pci_personality personality)
+{
+       int rc;
+
+       /* Since all information is common, only first hwfns should do this */
+       if (IS_LEAD_HWFN(p_hwfn)) {
+               rc = qed_iov_hw_info(p_hwfn);
+               if (rc)
+                       return rc;
+       }
+
+       qed_hw_info_port_num(p_hwfn, p_ptt);
 
        qed_hw_get_nvm_info(p_hwfn, p_ptt);
 
@@ -1692,31 +2747,48 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
                p_hwfn->hw_info.personality = protocol;
        }
 
+       p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2;
+       p_hwfn->hw_info.num_active_tc = 1;
+
        qed_get_num_funcs(p_hwfn, p_ptt);
 
-       return qed_hw_get_resc(p_hwfn);
+       if (qed_mcp_is_init(p_hwfn))
+               p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu;
+
+       return qed_hw_get_resc(p_hwfn, p_ptt);
 }
 
-static int qed_get_dev_info(struct qed_dev *cdev)
+static int qed_get_dev_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-       struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_dev *cdev = p_hwfn->cdev;
+       u16 device_id_mask;
        u32 tmp;
 
        /* Read Vendor Id / Device Id */
-       pci_read_config_word(cdev->pdev, PCI_VENDOR_ID,
-                            &cdev->vendor_id);
-       pci_read_config_word(cdev->pdev, PCI_DEVICE_ID,
-                            &cdev->device_id);
-       cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
-                                    MISCS_REG_CHIP_NUM);
-       cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
-                                    MISCS_REG_CHIP_REV);
+       pci_read_config_word(cdev->pdev, PCI_VENDOR_ID, &cdev->vendor_id);
+       pci_read_config_word(cdev->pdev, PCI_DEVICE_ID, &cdev->device_id);
+
+       /* Determine type */
+       device_id_mask = cdev->device_id & QED_DEV_ID_MASK;
+       switch (device_id_mask) {
+       case QED_DEV_ID_MASK_BB:
+               cdev->type = QED_DEV_TYPE_BB;
+               break;
+       case QED_DEV_ID_MASK_AH:
+               cdev->type = QED_DEV_TYPE_AH;
+               break;
+       default:
+               DP_NOTICE(p_hwfn, "Unknown device id 0x%x\n", cdev->device_id);
+               return -EBUSY;
+       }
+
+       cdev->chip_num = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_NUM);
+       cdev->chip_rev = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV);
+
        MASK_FIELD(CHIP_REV, cdev->chip_rev);
 
-       cdev->type = QED_DEV_TYPE_BB;
        /* Learn number of HW-functions */
-       tmp = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
-                    MISCS_REG_CMT_ENABLED_FOR_PAIR);
+       tmp = qed_rd(p_hwfn, p_ptt, MISCS_REG_CMT_ENABLED_FOR_PAIR);
 
        if (tmp & (1 << p_hwfn->rel_pf_id)) {
                DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
@@ -1725,15 +2797,17 @@ static int qed_get_dev_info(struct qed_dev *cdev)
                cdev->num_hwfns = 1;
        }
 
-       cdev->chip_bond_id = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
+       cdev->chip_bond_id = qed_rd(p_hwfn, p_ptt,
                                    MISCS_REG_CHIP_TEST_REG) >> 4;
        MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
-       cdev->chip_metal = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
-                                      MISCS_REG_CHIP_METAL);
+       cdev->chip_metal = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL);
        MASK_FIELD(CHIP_METAL, cdev->chip_metal);
 
        DP_INFO(cdev->hwfns,
-               "Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
+               "Chip details - %s %c%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
+               QED_IS_BB(cdev) ? "BB" : "AH",
+               'A' + cdev->chip_rev,
+               (int)cdev->chip_metal,
                cdev->chip_num, cdev->chip_rev,
                cdev->chip_bond_id, cdev->chip_metal);
 
@@ -1771,18 +2845,16 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
 
        /* Allocate PTT pool */
        rc = qed_ptt_pool_alloc(p_hwfn);
-       if (rc) {
-               DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n");
+       if (rc)
                goto err0;
-       }
 
        /* Allocate the main PTT */
        p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
 
        /* First hwfn learns basic information, e.g., number of hwfns */
        if (!p_hwfn->my_id) {
-               rc = qed_get_dev_info(p_hwfn->cdev);
-               if (rc != 0)
+               rc = qed_get_dev_info(p_hwfn, p_hwfn->p_main_ptt);
+               if (rc)
                        goto err1;
        }
 
@@ -1802,12 +2874,19 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
                goto err2;
        }
 
+       /* Sending a mailbox to the MFW should be done after qed_get_hw_info()
+        * is called, as it sets the number of ports in an engine.
+        */
+       if (IS_LEAD_HWFN(p_hwfn)) {
+               rc = qed_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt);
+               if (rc)
+                       DP_NOTICE(p_hwfn, "Failed to initiate PF FLR\n");
+       }
+
        /* Allocate the init RT array and initialize the init-ops engine */
        rc = qed_init_alloc(p_hwfn);
-       if (rc) {
-               DP_NOTICE(p_hwfn, "Failed to allocate the init array\n");
+       if (rc)
                goto err2;
-       }
 
        return rc;
 err2:
@@ -1845,11 +2924,14 @@ int qed_hw_prepare(struct qed_dev *cdev,
                u8 __iomem *addr;
 
                /* adjust bar offset for second engine */
-               addr = cdev->regview + qed_hw_bar_size(p_hwfn, BAR_ID_0) / 2;
+               addr = cdev->regview +
+                      qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
+                                      BAR_ID_0) / 2;
                p_regview = addr;
 
-               /* adjust doorbell bar offset for second engine */
-               addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, BAR_ID_1) / 2;
+               addr = cdev->doorbells +
+                      qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
+                                      BAR_ID_1) / 2;
                p_doorbell = addr;
 
                /* prepare second hw function */
@@ -1873,8 +2955,13 @@ int qed_hw_prepare(struct qed_dev *cdev,
 
 void qed_hw_remove(struct qed_dev *cdev)
 {
+       struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
        int i;
 
+       if (IS_PF(cdev))
+               qed_mcp_ov_update_driver_state(p_hwfn, p_hwfn->p_main_ptt,
+                                              QED_OV_DRIVER_STATE_NOT_LOADED);
+
        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 
@@ -1935,12 +3022,12 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
 {
        void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
        u32 page_cnt = p_chain->page_cnt, i, pbl_size;
-       u8 *p_pbl_virt = p_chain->pbl.p_virt_table;
+       u8 *p_pbl_virt = p_chain->pbl_sp.p_virt_table;
 
        if (!pp_virt_addr_tbl)
                return;
 
-       if (!p_chain->pbl.p_virt_table)
+       if (!p_pbl_virt)
                goto out;
 
        for (i = 0; i < page_cnt; i++) {
@@ -1958,7 +3045,8 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
        pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
        dma_free_coherent(&cdev->pdev->dev,
                          pbl_size,
-                         p_chain->pbl.p_virt_table, p_chain->pbl.p_phys_table);
+                         p_chain->pbl_sp.p_virt_table,
+                         p_chain->pbl_sp.p_phys_table);
 out:
        vfree(p_chain->pbl.pp_virt_addr_tbl);
 }
@@ -1992,9 +3080,8 @@ qed_chain_alloc_sanity_check(struct qed_dev *cdev,
         * size/capacity fields are of a u32 type.
         */
        if ((cnt_type == QED_CHAIN_CNT_TYPE_U16 &&
-            chain_size > 0x10000) ||
-           (cnt_type == QED_CHAIN_CNT_TYPE_U32 &&
-            chain_size > 0x100000000ULL)) {
+            chain_size > ((u32)U16_MAX + 1)) ||
+           (cnt_type == QED_CHAIN_CNT_TYPE_U32 && chain_size > U32_MAX)) {
                DP_NOTICE(cdev,
                          "The actual chain size (0x%llx) is larger than the maximal possible value\n",
                          chain_size);
@@ -2015,10 +3102,8 @@ qed_chain_alloc_next_ptr(struct qed_dev *cdev, struct qed_chain *p_chain)
                p_virt = dma_alloc_coherent(&cdev->pdev->dev,
                                            QED_CHAIN_PAGE_SIZE,
                                            &p_phys, GFP_KERNEL);
-               if (!p_virt) {
-                       DP_NOTICE(cdev, "Failed to allocate chain memory\n");
+               if (!p_virt)
                        return -ENOMEM;
-               }
 
                if (i == 0) {
                        qed_chain_init_mem(p_chain, p_virt, p_phys);
@@ -2048,10 +3133,8 @@ qed_chain_alloc_single(struct qed_dev *cdev, struct qed_chain *p_chain)
 
        p_virt = dma_alloc_coherent(&cdev->pdev->dev,
                                    QED_CHAIN_PAGE_SIZE, &p_phys, GFP_KERNEL);
-       if (!p_virt) {
-               DP_NOTICE(cdev, "Failed to allocate chain memory\n");
+       if (!p_virt)
                return -ENOMEM;
-       }
 
        qed_chain_init_mem(p_chain, p_virt, p_phys);
        qed_chain_reset(p_chain);
@@ -2068,13 +3151,9 @@ static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
        void *p_virt = NULL;
 
        size = page_cnt * sizeof(*pp_virt_addr_tbl);
-       pp_virt_addr_tbl = vmalloc(size);
-       if (!pp_virt_addr_tbl) {
-               DP_NOTICE(cdev,
-                         "Failed to allocate memory for the chain virtual addresses table\n");
+       pp_virt_addr_tbl = vzalloc(size);
+       if (!pp_virt_addr_tbl)
                return -ENOMEM;
-       }
-       memset(pp_virt_addr_tbl, 0, size);
 
        /* The allocation of the PBL table is done with its full size, since it
         * is expected to be successive.
@@ -2087,19 +3166,15 @@ static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
                                        size, &p_pbl_phys, GFP_KERNEL);
        qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
                               pp_virt_addr_tbl);
-       if (!p_pbl_virt) {
-               DP_NOTICE(cdev, "Failed to allocate chain pbl memory\n");
+       if (!p_pbl_virt)
                return -ENOMEM;
-       }
 
        for (i = 0; i < page_cnt; i++) {
                p_virt = dma_alloc_coherent(&cdev->pdev->dev,
                                            QED_CHAIN_PAGE_SIZE,
                                            &p_phys, GFP_KERNEL);
-               if (!p_virt) {
-                       DP_NOTICE(cdev, "Failed to allocate chain memory\n");
+               if (!p_virt)
                        return -ENOMEM;
-               }
 
                if (i == 0) {
                        qed_chain_init_mem(p_chain, p_virt, p_phys);
@@ -2134,7 +3209,8 @@ int qed_chain_alloc(struct qed_dev *cdev,
        rc = qed_chain_alloc_sanity_check(cdev, cnt_type, elem_size, page_cnt);
        if (rc) {
                DP_NOTICE(cdev,
-                         "Cannot allocate a chain with the given arguments:\n"
+                         "Cannot allocate a chain with the given arguments:\n");
+               DP_NOTICE(cdev,
                          "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
                          intended_use, mode, cnt_type, num_elems, elem_size);
                return rc;
@@ -2183,8 +3259,7 @@ int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id)
        return 0;
 }
 
-int qed_fw_vport(struct qed_hwfn *p_hwfn,
-                u8 src_id, u8 *dst_id)
+int qed_fw_vport(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id)
 {
        if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) {
                u8 min, max;
@@ -2203,8 +3278,7 @@ int qed_fw_vport(struct qed_hwfn *p_hwfn,
        return 0;
 }
 
-int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
-                  u8 src_id, u8 *dst_id)
+int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id)
 {
        if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) {
                u8 min, max;
@@ -2223,6 +3297,269 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
        return 0;
 }
 
+static void qed_llh_mac_to_filter(u32 *p_high, u32 *p_low,
+                                 u8 *p_filter)
+{
+       *p_high = p_filter[1] | (p_filter[0] << 8);
+       *p_low = p_filter[5] | (p_filter[4] << 8) |
+                (p_filter[3] << 16) | (p_filter[2] << 24);
+}
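
qed_llh_mac_to_filter() packs a 6-byte MAC into the two 32-bit halves of an LLH filter register pair: bytes 0-1 into 'high', bytes 2-5 into 'low'. A self-contained sketch with a sample address:

    #include <stdio.h>
    #include <stdint.h>

    static void mac_to_filter(uint32_t *high, uint32_t *low, const uint8_t *m)
    {
            *high = m[1] | (m[0] << 8);
            *low  = m[5] | (m[4] << 8) | (m[3] << 16) | ((uint32_t)m[2] << 24);
    }

    int main(void)
    {
            const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
            uint32_t high, low;

            mac_to_filter(&high, &low, mac);
            printf("high=0x%04x low=0x%08x\n", high, low);
            /* prints high=0x0011 low=0x22334455 */
            return 0;
    }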
+
+int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn,
+                          struct qed_ptt *p_ptt, u8 *p_filter)
+{
+       u32 high = 0, low = 0, en;
+       int i;
+
+       if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+               return 0;
+
+       qed_llh_mac_to_filter(&high, &low, p_filter);
+
+       /* Find a free entry and utilize it */
+       for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+               en = qed_rd(p_hwfn, p_ptt,
+                           NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
+               if (en)
+                       continue;
+               qed_wr(p_hwfn, p_ptt,
+                      NIG_REG_LLH_FUNC_FILTER_VALUE +
+                      2 * i * sizeof(u32), low);
+               qed_wr(p_hwfn, p_ptt,
+                      NIG_REG_LLH_FUNC_FILTER_VALUE +
+                      (2 * i + 1) * sizeof(u32), high);
+               qed_wr(p_hwfn, p_ptt,
+                      NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0);
+               qed_wr(p_hwfn, p_ptt,
+                      NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
+                      i * sizeof(u32), 0);
+               qed_wr(p_hwfn, p_ptt,
+                      NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
+               break;
+       }
+       if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
+               DP_NOTICE(p_hwfn,
+                         "Failed to find an empty LLH filter to utilize\n");
+               return -EINVAL;
+       }
+
+       DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+                  "mac: %pM is added at %d\n",
+                  p_filter, i);
+
+       return 0;
+}
+
+void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn,
+                              struct qed_ptt *p_ptt, u8 *p_filter)
+{
+       u32 high = 0, low = 0;
+       int i;
+
+       if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+               return;
+
+       qed_llh_mac_to_filter(&high, &low, p_filter);
+
+       /* Find the entry and clean it */
+       for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+               if (qed_rd(p_hwfn, p_ptt,
+                          NIG_REG_LLH_FUNC_FILTER_VALUE +
+                          2 * i * sizeof(u32)) != low)
+                       continue;
+               if (qed_rd(p_hwfn, p_ptt,
+                          NIG_REG_LLH_FUNC_FILTER_VALUE +
+                          (2 * i + 1) * sizeof(u32)) != high)
+                       continue;
+
+               qed_wr(p_hwfn, p_ptt,
+                      NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
+               qed_wr(p_hwfn, p_ptt,
+                      NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * i * sizeof(u32), 0);
+               qed_wr(p_hwfn, p_ptt,
+                      NIG_REG_LLH_FUNC_FILTER_VALUE +
+                      (2 * i + 1) * sizeof(u32), 0);
+
+               DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+                          "mac: %pM is removed from %d\n",
+                          p_filter, i);
+               break;
+       }
+       if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
+               DP_NOTICE(p_hwfn, "Tried to remove a non-configured filter\n");
+}
+
+int
+qed_llh_add_protocol_filter(struct qed_hwfn *p_hwfn,
+                           struct qed_ptt *p_ptt,
+                           u16 source_port_or_eth_type,
+                           u16 dest_port, enum qed_llh_port_filter_type_t type)
+{
+       u32 high = 0, low = 0, en;
+       int i;
+
+       if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+               return 0;
+
+       switch (type) {
+       case QED_LLH_FILTER_ETHERTYPE:
+               high = source_port_or_eth_type;
+               break;
+       case QED_LLH_FILTER_TCP_SRC_PORT:
+       case QED_LLH_FILTER_UDP_SRC_PORT:
+               low = source_port_or_eth_type << 16;
+               break;
+       case QED_LLH_FILTER_TCP_DEST_PORT:
+       case QED_LLH_FILTER_UDP_DEST_PORT:
+               low = dest_port;
+               break;
+       case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
+       case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
+               low = (source_port_or_eth_type << 16) | dest_port;
+               break;
+       default:
+               DP_NOTICE(p_hwfn,
+                         "Invalid LLH protocol filter type %d\n", type);
+               return -EINVAL;
+       }
+       /* Find a free entry and utilize it */
+       for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+               en = qed_rd(p_hwfn, p_ptt,
+                           NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
+               if (en)
+                       continue;
+               qed_wr(p_hwfn, p_ptt,
+                      NIG_REG_LLH_FUNC_FILTER_VALUE +
+                      2 * i * sizeof(u32), low);
+               qed_wr(p_hwfn, p_ptt,
+                      NIG_REG_LLH_FUNC_FILTER_VALUE +
+                      (2 * i + 1) * sizeof(u32), high);
+               qed_wr(p_hwfn, p_ptt,
+                      NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 1);
+               qed_wr(p_hwfn, p_ptt,
+                      NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
+                      i * sizeof(u32), 1 << type);
+               qed_wr(p_hwfn, p_ptt,
+                      NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
+               break;
+       }
+       if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
+               DP_NOTICE(p_hwfn,
+                         "Failed to find an empty LLH filter to utilize\n");
+               return -EINVAL;
+       }
+       switch (type) {
+       case QED_LLH_FILTER_ETHERTYPE:
+               DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+                          "ETH type %x is added at %d\n",
+                          source_port_or_eth_type, i);
+               break;
+       case QED_LLH_FILTER_TCP_SRC_PORT:
+               DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+                          "TCP src port %x is added at %d\n",
+                          source_port_or_eth_type, i);
+               break;
+       case QED_LLH_FILTER_UDP_SRC_PORT:
+               DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+                          "UDP src port %x is added at %d\n",
+                          source_port_or_eth_type, i);
+               break;
+       case QED_LLH_FILTER_TCP_DEST_PORT:
+               DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+                          "TCP dst port %x is added at %d\n", dest_port, i);
+               break;
+       case QED_LLH_FILTER_UDP_DEST_PORT:
+               DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+                          "UDP dst port %x is added at %d\n", dest_port, i);
+               break;
+       case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
+               DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+                          "TCP src/dst ports %x/%x are added at %d\n",
+                          source_port_or_eth_type, dest_port, i);
+               break;
+       case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
+               DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+                          "UDP src/dst ports %x/%x are added at %d\n",
+                          source_port_or_eth_type, dest_port, i);
+               break;
+       }
+       return 0;
+}
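
The protocol filter encodes its match value by type: an ethertype lands in 'high', a source port in the upper half of 'low', a destination port in the lower half, and the combined variants pack both. A compact sketch of that encoding; the port numbers in main() are arbitrary samples:

    #include <stdio.h>
    #include <stdint.h>

    enum filter_type { ETHERTYPE, SRC_PORT, DEST_PORT, SRC_AND_DEST };

    static void encode(enum filter_type t, uint16_t src_or_eth, uint16_t dst,
                       uint32_t *high, uint32_t *low)
    {
            *high = 0;
            *low = 0;
            switch (t) {
            case ETHERTYPE:
                    *high = src_or_eth;
                    break;
            case SRC_PORT:
                    *low = (uint32_t)src_or_eth << 16;
                    break;
            case DEST_PORT:
                    *low = dst;
                    break;
            case SRC_AND_DEST:
                    *low = ((uint32_t)src_or_eth << 16) | dst;
                    break;
            }
    }

    int main(void)
    {
            uint32_t high, low;

            encode(SRC_AND_DEST, 4791, 3260, &high, &low);
            printf("high=0x%08x low=0x%08x\n", high, low);
            return 0;
    }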
+
+void
+qed_llh_remove_protocol_filter(struct qed_hwfn *p_hwfn,
+                              struct qed_ptt *p_ptt,
+                              u16 source_port_or_eth_type,
+                              u16 dest_port,
+                              enum qed_llh_port_filter_type_t type)
+{
+       u32 high = 0, low = 0;
+       int i;
+
+       if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+               return;
+
+       switch (type) {
+       case QED_LLH_FILTER_ETHERTYPE:
+               high = source_port_or_eth_type;
+               break;
+       case QED_LLH_FILTER_TCP_SRC_PORT:
+       case QED_LLH_FILTER_UDP_SRC_PORT:
+               low = source_port_or_eth_type << 16;
+               break;
+       case QED_LLH_FILTER_TCP_DEST_PORT:
+       case QED_LLH_FILTER_UDP_DEST_PORT:
+               low = dest_port;
+               break;
+       case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
+       case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
+               low = (source_port_or_eth_type << 16) | dest_port;
+               break;
+       default:
+               DP_NOTICE(p_hwfn,
+                         "Invalid LLH protocol filter type %d\n", type);
+               return;
+       }
+
+       for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+               if (!qed_rd(p_hwfn, p_ptt,
+                           NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32)))
+                       continue;
+               if (!qed_rd(p_hwfn, p_ptt,
+                           NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32)))
+                       continue;
+               if (!(qed_rd(p_hwfn, p_ptt,
+                            NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
+                            i * sizeof(u32)) & BIT(type)))
+                       continue;
+               if (qed_rd(p_hwfn, p_ptt,
+                          NIG_REG_LLH_FUNC_FILTER_VALUE +
+                          2 * i * sizeof(u32)) != low)
+                       continue;
+               if (qed_rd(p_hwfn, p_ptt,
+                          NIG_REG_LLH_FUNC_FILTER_VALUE +
+                          (2 * i + 1) * sizeof(u32)) != high)
+                       continue;
+
+               qed_wr(p_hwfn, p_ptt,
+                      NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
+               qed_wr(p_hwfn, p_ptt,
+                      NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0);
+               qed_wr(p_hwfn, p_ptt,
+                      NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
+                      i * sizeof(u32), 0);
+               qed_wr(p_hwfn, p_ptt,
+                      NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * i * sizeof(u32), 0);
+               qed_wr(p_hwfn, p_ptt,
+                      NIG_REG_LLH_FUNC_FILTER_VALUE +
+                      (2 * i + 1) * sizeof(u32), 0);
+               break;
+       }
+
+       if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
+               DP_NOTICE(p_hwfn, "Tried to remove a non-configured filter\n");
+}
+
 static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                            u32 hw_addr, void *p_eth_qzone,
                            size_t eth_qzone_size, u8 timeset)
@@ -2386,8 +3723,7 @@ static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
  * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate.
  */
 static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
-                             u16 vport_id, u32 req_rate,
-                             u32 min_pf_rate)
+                             u16 vport_id, u32 req_rate, u32 min_pf_rate)
 {
        u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
        int non_requested_count = 0, req_count = 0, i, num_vports;
@@ -2471,7 +3807,7 @@ static int __qed_configure_vport_wfq(struct qed_hwfn *p_hwfn,
 
        rc = qed_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);
 
-       if (rc == 0)
+       if (!rc)
                qed_configure_wfq_for_all_vports(p_hwfn, p_ptt,
                                                 p_link->min_pf_rate);
        else
@@ -2552,7 +3888,8 @@ int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate)
 }
 
 /* API to configure WFQ from mcp link change */
-void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
+void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
+                                        struct qed_ptt *p_ptt, u32 min_pf_rate)
 {
        int i;
 
@@ -2566,8 +3903,7 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 
-               __qed_configure_vp_wfq_on_link_change(p_hwfn,
-                                                     p_hwfn->p_dpc_ptt,
+               __qed_configure_vp_wfq_on_link_change(p_hwfn, p_ptt,
                                                      min_pf_rate);
        }
 }
@@ -2718,3 +4054,8 @@ void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
        memset(p_hwfn->qm_info.wfq_data, 0,
               sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports);
 }
+
+int qed_device_num_engines(struct qed_dev *cdev)
+{
+       return QED_IS_BB(cdev) ? 2 : 1;
+}
index 343bb0344f623c53ff8013473ac0470f1a2e81e2..341636da9964b2801009215bca8dba88433ef64a 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef _QED_DEV_API_H
@@ -58,26 +82,63 @@ int qed_resc_alloc(struct qed_dev *cdev);
  */
 void qed_resc_setup(struct qed_dev *cdev);
 
+enum qed_override_force_load {
+       QED_OVERRIDE_FORCE_LOAD_NONE,
+       QED_OVERRIDE_FORCE_LOAD_ALWAYS,
+       QED_OVERRIDE_FORCE_LOAD_NEVER,
+};
+
+struct qed_drv_load_params {
+       /* Indicates whether the driver is running over a crash kernel.
+        * As part of the load request, this will be used for providing the
+        * driver role to the MFW.
+        * In case of a crash kernel over PDA - this should be set to false.
+        */
+       bool is_crash_kernel;
+
+       /* The timeout value that the MFW should use when locking the engine for
+        * the driver load process.
+        * A value of '0' means the default value, and '255' means no timeout.
+        */
+       u8 mfw_timeout_val;
+#define QED_LOAD_REQ_LOCK_TO_DEFAULT    0
+#define QED_LOAD_REQ_LOCK_TO_NONE       255
+
+       /* Avoid engine reset when first PF loads on it */
+       bool avoid_eng_reset;
+
+       /* Allow overriding the default force load behavior */
+       enum qed_override_force_load override_force_load;
+};
+
+struct qed_hw_init_params {
+       /* Tunneling parameters */
+       struct qed_tunn_start_params *p_tunn;
+
+       bool b_hw_start;
+
+       /* Interrupt mode [msix, inta, etc.] to use */
+       enum qed_int_mode int_mode;
+
+       /* NPAR tx switching to be used for vports configured for tx-switching */
+       bool allow_npar_tx_switch;
+
+       /* Binary fw data pointer in binary fw file */
+       const u8 *bin_fw_data;
+
+       /* Driver load parameters */
+       struct qed_drv_load_params *p_drv_load_params;
+};
+
 /**
  * @brief qed_hw_init -
  *
  * @param cdev
- * @param p_tunn
- * @param b_hw_start
- * @param int_mode - interrupt mode [msix, inta, etc.] to use.
- * @param allow_npar_tx_switch - npar tx switching to be used
- *       for vports configured for tx-switching.
- * @param bin_fw_data - binary fw data pointer in binary fw file.
- *                     Pass NULL if not using binary fw file.
+ * @param p_params
  *
  * @return int
  */
-int qed_hw_init(struct qed_dev *cdev,
-               struct qed_tunn_start_params *p_tunn,
-               bool b_hw_start,
-               enum qed_int_mode int_mode,
-               bool allow_npar_tx_switch,
-               const u8 *bin_fw_data);
+int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params);
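For reference, a minimal caller-side sketch of the consolidated interface. This is illustrative only: the wrapper function, the use of is_kdump_kernel() (linux/crash_dump.h), and the QED_INT_MODE_MSIX choice are assumptions, not part of this patch.

/* Sketch: filling the new qed_hw_init() parameter structs instead of
 * passing six discrete arguments. Field names come from the
 * declarations above; all values are illustrative.
 */
static int example_qed_up(struct qed_dev *cdev, const u8 *fw_data)
{
	struct qed_drv_load_params drv_load_params;
	struct qed_hw_init_params hw_init_params;

	memset(&drv_load_params, 0, sizeof(drv_load_params));
	drv_load_params.is_crash_kernel = is_kdump_kernel();
	drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
	drv_load_params.avoid_eng_reset = false;
	drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;

	memset(&hw_init_params, 0, sizeof(hw_init_params));
	hw_init_params.p_tunn = NULL;		/* no tunneling config */
	hw_init_params.b_hw_start = true;
	hw_init_params.int_mode = QED_INT_MODE_MSIX;
	hw_init_params.allow_npar_tx_switch = true;
	hw_init_params.bin_fw_data = fw_data;	/* NULL if no binary fw file */
	hw_init_params.p_drv_load_params = &drv_load_params;

	return qed_hw_init(cdev, &hw_init_params);
}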
 
 /**
  * @brief qed_hw_timers_stop_all - stop the timers HW block
@@ -104,26 +165,20 @@ int qed_hw_stop(struct qed_dev *cdev);
  *
  * @param cdev
  *
+ * @return int
  */
-void qed_hw_stop_fastpath(struct qed_dev *cdev);
+int qed_hw_stop_fastpath(struct qed_dev *cdev);
 
 /**
  * @brief qed_hw_start_fastpath - restart fastpath traffic,
  *             only if hw_stop_fastpath was called
  *
- * @param cdev
- *
- */
-void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn);
-
-/**
- * @brief qed_hw_reset -
- *
- * @param cdev
+ * @param p_hwfn
  *
  * @return int
  */
-int qed_hw_reset(struct qed_dev *cdev);
+int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn);
 
 /**
  * @brief qed_hw_prepare -
@@ -309,6 +364,68 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
                   u8 src_id,
                   u8 *dst_id);
 
+/**
+ * @brief qed_llh_add_mac_filter - configures a MAC filter in llh
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_filter - MAC to add
+ */
+int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn,
+                          struct qed_ptt *p_ptt, u8 *p_filter);
+
+/**
+ * @brief qed_llh_remove_mac_filter - removes a MAC filter from llh
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_filter - MAC to remove
+ */
+void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn,
+                              struct qed_ptt *p_ptt, u8 *p_filter);
+
+enum qed_llh_port_filter_type_t {
+       QED_LLH_FILTER_ETHERTYPE,
+       QED_LLH_FILTER_TCP_SRC_PORT,
+       QED_LLH_FILTER_TCP_DEST_PORT,
+       QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT,
+       QED_LLH_FILTER_UDP_SRC_PORT,
+       QED_LLH_FILTER_UDP_DEST_PORT,
+       QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT
+};
+
+/**
+ * @brief qed_llh_add_protocol_filter - configures a protocol filter in llh
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param source_port_or_eth_type - source port or ethertype to add
+ * @param dest_port - destination port to add
+ * @param type - filter type and comparison mode
+ */
+int
+qed_llh_add_protocol_filter(struct qed_hwfn *p_hwfn,
+                           struct qed_ptt *p_ptt,
+                           u16 source_port_or_eth_type,
+                           u16 dest_port,
+                           enum qed_llh_port_filter_type_t type);
+
+/**
+ * @brief qed_llh_remove_protocol_filter - removes a protocol filter from llh
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param source_port_or_eth_type - source port or ethertype to remove
+ * @param dest_port - destination port to remove
+ * @param type - filter type and comparison mode
+ */
+void
+qed_llh_remove_protocol_filter(struct qed_hwfn *p_hwfn,
+                              struct qed_ptt *p_ptt,
+                              u16 source_port_or_eth_type,
+                              u16 dest_port,
+                              enum qed_llh_port_filter_type_t type);
+
 /**
  * @brief Cleanup of previous driver remains prior to load
  *
@@ -355,4 +472,6 @@ int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
  */
 int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                         u16 coalesce, u8 qid, u16 sb_id);
+
+const char *qed_hw_get_resc_name(enum qed_resources res_id);
 #endif
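A short usage sketch for the LLH filter API declared above. The PTT acquire/release pattern mirrors qed_fcoe_get_stats() later in this commit; the function name, port values, and error handling are illustrative assumptions:

/* Sketch: steer UDP traffic matching a source/destination port pair
 * through an LLH protocol filter, then remove the filter again.
 */
static int example_llh_udp_filter(struct qed_hwfn *p_hwfn, u16 src, u16 dst)
{
	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
	int rc;

	if (!p_ptt)
		return -EAGAIN;

	rc = qed_llh_add_protocol_filter(p_hwfn, p_ptt, src, dst,
					 QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT);
	if (!rc)
		/* ... traffic matching src/dst is now steered ... */
		qed_llh_remove_protocol_filter(p_hwfn, p_ptt, src, dst,
					       QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT);

	qed_ptt_release(p_hwfn, p_ptt);
	return rc;
}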
diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
new file mode 100644 (file)
index 0000000..21a58ff
--- /dev/null
@@ -0,0 +1,1032 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/param.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/version.h>
+#include <linux/workqueue.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#define __PREVENT_DUMP_MEM_ARR__
+#define __PREVENT_PXP_GLOBAL_WIN__
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include "qed_fcoe.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_int.h"
+#include "qed_ll2.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+#include "qed_sriov.h"
+#include <linux/qed/qed_fcoe_if.h>
+
+struct qed_fcoe_conn {
+       struct list_head list_entry;
+       bool free_on_delete;
+
+       u16 conn_id;
+       u32 icid;
+       u32 fw_cid;
+       u8 layer_code;
+
+       dma_addr_t sq_pbl_addr;
+       dma_addr_t sq_curr_page_addr;
+       dma_addr_t sq_next_page_addr;
+       dma_addr_t xferq_pbl_addr;
+       void *xferq_pbl_addr_virt_addr;
+       dma_addr_t xferq_addr[4];
+       void *xferq_addr_virt_addr[4];
+       dma_addr_t confq_pbl_addr;
+       void *confq_pbl_addr_virt_addr;
+       dma_addr_t confq_addr[2];
+       void *confq_addr_virt_addr[2];
+
+       dma_addr_t terminate_params;
+
+       u16 dst_mac_addr_lo;
+       u16 dst_mac_addr_mid;
+       u16 dst_mac_addr_hi;
+       u16 src_mac_addr_lo;
+       u16 src_mac_addr_mid;
+       u16 src_mac_addr_hi;
+
+       u16 tx_max_fc_pay_len;
+       u16 e_d_tov_timer_val;
+       u16 rec_tov_timer_val;
+       u16 rx_max_fc_pay_len;
+       u16 vlan_tag;
+       u16 physical_q0;
+
+       struct fc_addr_nw s_id;
+       u8 max_conc_seqs_c3;
+       struct fc_addr_nw d_id;
+       u8 flags;
+       u8 def_q_idx;
+};
+
+static int
+qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
+                      enum spq_mode comp_mode,
+                      struct qed_spq_comp_cb *p_comp_addr)
+{
+       struct qed_fcoe_pf_params *fcoe_pf_params = NULL;
+       struct fcoe_init_ramrod_params *p_ramrod = NULL;
+       struct fcoe_init_func_ramrod_data *p_data;
+       struct fcoe_conn_context *p_cxt = NULL;
+       struct qed_spq_entry *p_ent = NULL;
+       struct qed_sp_init_data init_data;
+       struct qed_cxt_info cxt_info;
+       u32 dummy_cid;
+       int rc = 0;
+       u16 tmp;
+       u8 i;
+
+       /* Get SPQ entry */
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.cid = qed_spq_get_cid(p_hwfn);
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = comp_mode;
+       init_data.p_comp_data = p_comp_addr;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                FCOE_RAMROD_CMD_ID_INIT_FUNC,
+                                PROTOCOLID_FCOE, &init_data);
+       if (rc)
+               return rc;
+
+       p_ramrod = &p_ent->ramrod.fcoe_init;
+       p_data = &p_ramrod->init_ramrod_data;
+       fcoe_pf_params = &p_hwfn->pf_params.fcoe_pf_params;
+
+       p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu);
+       tmp = cpu_to_le16(fcoe_pf_params->sq_num_pbl_pages);
+       p_data->sq_num_pages_in_pbl = tmp;
+
+       rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &dummy_cid);
+       if (rc)
+               return rc;
+
+       cxt_info.iid = dummy_cid;
+       rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
+       if (rc) {
+               DP_NOTICE(p_hwfn, "Cannot find context info for dummy cid=%d\n",
+                         dummy_cid);
+               return rc;
+       }
+       p_cxt = cxt_info.p_cxt;
+       SET_FIELD(p_cxt->tstorm_ag_context.flags3,
+                 TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);
+
+       fcoe_pf_params->dummy_icid = (u16)dummy_cid;
+
+       tmp = cpu_to_le16(fcoe_pf_params->num_tasks);
+       p_data->func_params.num_tasks = tmp;
+       p_data->func_params.log_page_size = fcoe_pf_params->log_page_size;
+       p_data->func_params.debug_mode = fcoe_pf_params->debug_mode;
+
+       DMA_REGPAIR_LE(p_data->q_params.glbl_q_params_addr,
+                      fcoe_pf_params->glbl_q_params_addr);
+
+       tmp = cpu_to_le16(fcoe_pf_params->cq_num_entries);
+       p_data->q_params.cq_num_entries = tmp;
+
+       tmp = cpu_to_le16(fcoe_pf_params->cmdq_num_entries);
+       p_data->q_params.cmdq_num_entries = tmp;
+
+       tmp = fcoe_pf_params->num_cqs;
+       p_data->q_params.num_queues = (u8)tmp;
+
+       tmp = (u16)p_hwfn->hw_info.resc_start[QED_CMDQS_CQS];
+       p_data->q_params.queue_relative_offset = (u8)tmp;
+
+       for (i = 0; i < fcoe_pf_params->num_cqs; i++) {
+               tmp = cpu_to_le16(p_hwfn->sbs_info[i]->igu_sb_id);
+               p_data->q_params.cq_cmdq_sb_num_arr[i] = tmp;
+       }
+
+       p_data->q_params.cq_sb_pi = fcoe_pf_params->gl_rq_pi;
+       p_data->q_params.cmdq_sb_pi = fcoe_pf_params->gl_cmd_pi;
+
+       p_data->q_params.bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ);
+
+       DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_RQ],
+                      fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_RQ]);
+       p_data->q_params.bdq_pbl_num_entries[BDQ_ID_RQ] =
+           fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_RQ];
+       tmp = fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_RQ];
+       p_data->q_params.bdq_xoff_threshold[BDQ_ID_RQ] = cpu_to_le16(tmp);
+       tmp = fcoe_pf_params->bdq_xon_threshold[BDQ_ID_RQ];
+       p_data->q_params.bdq_xon_threshold[BDQ_ID_RQ] = cpu_to_le16(tmp);
+
+       DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_IMM_DATA],
+                      fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_IMM_DATA]);
+       p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA] =
+           fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_IMM_DATA];
+       tmp = fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_IMM_DATA];
+       p_data->q_params.bdq_xoff_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(tmp);
+       tmp = fcoe_pf_params->bdq_xon_threshold[BDQ_ID_IMM_DATA];
+       p_data->q_params.bdq_xon_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(tmp);
+       tmp = fcoe_pf_params->rq_buffer_size;
+       p_data->q_params.rq_buffer_size = cpu_to_le16(tmp);
+
+       if (fcoe_pf_params->is_target) {
+               SET_FIELD(p_data->q_params.q_validity,
+                         SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1);
+               if (p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA])
+                       SET_FIELD(p_data->q_params.q_validity,
+                                 SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID, 1);
+               SET_FIELD(p_data->q_params.q_validity,
+                         SCSI_INIT_FUNC_QUEUES_CMD_VALID, 1);
+       } else {
+               SET_FIELD(p_data->q_params.q_validity,
+                         SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1);
+       }
+
+       rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+       return rc;
+}
+
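qed_sp_fcoe_func_start() above, and the three FCoE slow-path helpers that follow, all share one slow-path queue (SPQ) ramrod skeleton. A condensed sketch of that pattern (the function name and generic cmd_id parameter are illustrative; the real callers pass FCOE_RAMROD_CMD_ID_* values and fill a specific ramrod union member):

/* Sketch: the common SPQ ramrod flow - build init_data, request an SPQ
 * entry for the FCoE protocol, fill the ramrod payload, and post it.
 */
static int example_fcoe_ramrod(struct qed_hwfn *p_hwfn, u32 cid, u8 cmd_id,
			       enum spq_mode comp_mode,
			       struct qed_spq_comp_cb *p_comp_addr)
{
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent = NULL;
	int rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;

	rc = qed_sp_init_request(p_hwfn, &p_ent, cmd_id,
				 PROTOCOLID_FCOE, &init_data);
	if (rc)
		return rc;

	/* ... fill p_ent->ramrod.<specific member> here ... */

	return qed_spq_post(p_hwfn, p_ent, NULL);
}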
+static int
+qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn,
+                        struct qed_fcoe_conn *p_conn,
+                        enum spq_mode comp_mode,
+                        struct qed_spq_comp_cb *p_comp_addr)
+{
+       struct fcoe_conn_offload_ramrod_params *p_ramrod = NULL;
+       struct fcoe_conn_offload_ramrod_data *p_data;
+       struct qed_spq_entry *p_ent = NULL;
+       struct qed_sp_init_data init_data;
+       u16 physical_q0, tmp;
+       int rc;
+
+       /* Get SPQ entry */
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.cid = p_conn->icid;
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = comp_mode;
+       init_data.p_comp_data = p_comp_addr;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                FCOE_RAMROD_CMD_ID_OFFLOAD_CONN,
+                                PROTOCOLID_FCOE, &init_data);
+       if (rc)
+               return rc;
+
+       p_ramrod = &p_ent->ramrod.fcoe_conn_ofld;
+       p_data = &p_ramrod->offload_ramrod_data;
+
+       /* Transmission PQ is the first of the PF */
+       physical_q0 = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+       p_conn->physical_q0 = cpu_to_le16(physical_q0);
+       p_data->physical_q0 = cpu_to_le16(physical_q0);
+
+       p_data->conn_id = cpu_to_le16(p_conn->conn_id);
+       DMA_REGPAIR_LE(p_data->sq_pbl_addr, p_conn->sq_pbl_addr);
+       DMA_REGPAIR_LE(p_data->sq_curr_page_addr, p_conn->sq_curr_page_addr);
+       DMA_REGPAIR_LE(p_data->sq_next_page_addr, p_conn->sq_next_page_addr);
+       DMA_REGPAIR_LE(p_data->xferq_pbl_addr, p_conn->xferq_pbl_addr);
+       DMA_REGPAIR_LE(p_data->xferq_curr_page_addr, p_conn->xferq_addr[0]);
+       DMA_REGPAIR_LE(p_data->xferq_next_page_addr, p_conn->xferq_addr[1]);
+
+       DMA_REGPAIR_LE(p_data->respq_pbl_addr, p_conn->confq_pbl_addr);
+       DMA_REGPAIR_LE(p_data->respq_curr_page_addr, p_conn->confq_addr[0]);
+       DMA_REGPAIR_LE(p_data->respq_next_page_addr, p_conn->confq_addr[1]);
+
+       p_data->dst_mac_addr_lo = cpu_to_le16(p_conn->dst_mac_addr_lo);
+       p_data->dst_mac_addr_mid = cpu_to_le16(p_conn->dst_mac_addr_mid);
+       p_data->dst_mac_addr_hi = cpu_to_le16(p_conn->dst_mac_addr_hi);
+       p_data->src_mac_addr_lo = cpu_to_le16(p_conn->src_mac_addr_lo);
+       p_data->src_mac_addr_mid = cpu_to_le16(p_conn->src_mac_addr_mid);
+       p_data->src_mac_addr_hi = cpu_to_le16(p_conn->src_mac_addr_hi);
+
+       tmp = cpu_to_le16(p_conn->tx_max_fc_pay_len);
+       p_data->tx_max_fc_pay_len = tmp;
+       tmp = cpu_to_le16(p_conn->e_d_tov_timer_val);
+       p_data->e_d_tov_timer_val = tmp;
+       tmp = cpu_to_le16(p_conn->rec_tov_timer_val);
+       p_data->rec_rr_tov_timer_val = tmp;
+       tmp = cpu_to_le16(p_conn->rx_max_fc_pay_len);
+       p_data->rx_max_fc_pay_len = tmp;
+
+       p_data->vlan_tag = cpu_to_le16(p_conn->vlan_tag);
+       p_data->s_id.addr_hi = p_conn->s_id.addr_hi;
+       p_data->s_id.addr_mid = p_conn->s_id.addr_mid;
+       p_data->s_id.addr_lo = p_conn->s_id.addr_lo;
+       p_data->max_conc_seqs_c3 = p_conn->max_conc_seqs_c3;
+       p_data->d_id.addr_hi = p_conn->d_id.addr_hi;
+       p_data->d_id.addr_mid = p_conn->d_id.addr_mid;
+       p_data->d_id.addr_lo = p_conn->d_id.addr_lo;
+       p_data->flags = p_conn->flags;
+       p_data->def_q_idx = p_conn->def_q_idx;
+
+       return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int
+qed_sp_fcoe_conn_destroy(struct qed_hwfn *p_hwfn,
+                        struct qed_fcoe_conn *p_conn,
+                        enum spq_mode comp_mode,
+                        struct qed_spq_comp_cb *p_comp_addr)
+{
+       struct fcoe_conn_terminate_ramrod_params *p_ramrod = NULL;
+       struct qed_spq_entry *p_ent = NULL;
+       struct qed_sp_init_data init_data;
+       int rc = 0;
+
+       /* Get SPQ entry */
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.cid = p_conn->icid;
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = comp_mode;
+       init_data.p_comp_data = p_comp_addr;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                FCOE_RAMROD_CMD_ID_TERMINATE_CONN,
+                                PROTOCOLID_FCOE, &init_data);
+       if (rc)
+               return rc;
+
+       p_ramrod = &p_ent->ramrod.fcoe_conn_terminate;
+       DMA_REGPAIR_LE(p_ramrod->terminate_ramrod_data.terminate_params_addr,
+                      p_conn->terminate_params);
+
+       return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int
+qed_sp_fcoe_func_stop(struct qed_hwfn *p_hwfn,
+                     struct qed_ptt *p_ptt,
+                     enum spq_mode comp_mode,
+                     struct qed_spq_comp_cb *p_comp_addr)
+{
+       struct qed_spq_entry *p_ent = NULL;
+       struct qed_sp_init_data init_data;
+       u32 active_segs = 0;
+       int rc = 0;
+
+       /* Get SPQ entry */
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.cid = p_hwfn->pf_params.fcoe_pf_params.dummy_icid;
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = comp_mode;
+       init_data.p_comp_data = p_comp_addr;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                FCOE_RAMROD_CMD_ID_DESTROY_FUNC,
+                                PROTOCOLID_FCOE, &init_data);
+       if (rc)
+               return rc;
+
+       active_segs = qed_rd(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK);
+       active_segs &= ~BIT(QED_CXT_FCOE_TID_SEG);
+       qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, active_segs);
+
+       return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int
+qed_fcoe_allocate_connection(struct qed_hwfn *p_hwfn,
+                            struct qed_fcoe_conn **p_out_conn)
+{
+       struct qed_fcoe_conn *p_conn = NULL;
+       void *p_addr;
+       u32 i;
+
+       spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
+       if (!list_empty(&p_hwfn->p_fcoe_info->free_list))
+               p_conn =
+                   list_first_entry(&p_hwfn->p_fcoe_info->free_list,
+                                    struct qed_fcoe_conn, list_entry);
+       if (p_conn) {
+               list_del(&p_conn->list_entry);
+               spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
+               *p_out_conn = p_conn;
+               return 0;
+       }
+       spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
+
+       p_conn = kzalloc(sizeof(*p_conn), GFP_KERNEL);
+       if (!p_conn)
+               return -ENOMEM;
+
+       p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+                                   QED_CHAIN_PAGE_SIZE,
+                                   &p_conn->xferq_pbl_addr, GFP_KERNEL);
+       if (!p_addr)
+               goto nomem_pbl_xferq;
+       p_conn->xferq_pbl_addr_virt_addr = p_addr;
+
+       for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) {
+               p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+                                           QED_CHAIN_PAGE_SIZE,
+                                           &p_conn->xferq_addr[i], GFP_KERNEL);
+               if (!p_addr)
+                       goto nomem_xferq;
+               p_conn->xferq_addr_virt_addr[i] = p_addr;
+
+               p_addr = p_conn->xferq_pbl_addr_virt_addr;
+               ((dma_addr_t *)p_addr)[i] = p_conn->xferq_addr[i];
+       }
+
+       p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+                                   QED_CHAIN_PAGE_SIZE,
+                                   &p_conn->confq_pbl_addr, GFP_KERNEL);
+       if (!p_addr)
+               goto nomem_xferq;
+       p_conn->confq_pbl_addr_virt_addr = p_addr;
+
+       for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) {
+               p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+                                           QED_CHAIN_PAGE_SIZE,
+                                           &p_conn->confq_addr[i], GFP_KERNEL);
+               if (!p_addr)
+                       goto nomem_confq;
+               p_conn->confq_addr_virt_addr[i] = p_addr;
+
+               p_addr = p_conn->confq_pbl_addr_virt_addr;
+               ((dma_addr_t *)p_addr)[i] = p_conn->confq_addr[i];
+       }
+
+       p_conn->free_on_delete = true;
+       *p_out_conn = p_conn;
+       return 0;
+
+nomem_confq:
+       dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                         QED_CHAIN_PAGE_SIZE,
+                         p_conn->confq_pbl_addr_virt_addr,
+                         p_conn->confq_pbl_addr);
+       for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++)
+               if (p_conn->confq_addr_virt_addr[i])
+                       dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                                         QED_CHAIN_PAGE_SIZE,
+                                         p_conn->confq_addr_virt_addr[i],
+                                         p_conn->confq_addr[i]);
+nomem_xferq:
+       dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                         QED_CHAIN_PAGE_SIZE,
+                         p_conn->xferq_pbl_addr_virt_addr,
+                         p_conn->xferq_pbl_addr);
+       for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++)
+               if (p_conn->xferq_addr_virt_addr[i])
+                       dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                                         QED_CHAIN_PAGE_SIZE,
+                                         p_conn->xferq_addr_virt_addr[i],
+                                         p_conn->xferq_addr[i]);
+nomem_pbl_xferq:
+       kfree(p_conn);
+       return -ENOMEM;
+}
+
+static void qed_fcoe_free_connection(struct qed_hwfn *p_hwfn,
+                                    struct qed_fcoe_conn *p_conn)
+{
+       u32 i;
+
+       if (!p_conn)
+               return;
+
+       if (p_conn->confq_pbl_addr_virt_addr)
+               dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                                 QED_CHAIN_PAGE_SIZE,
+                                 p_conn->confq_pbl_addr_virt_addr,
+                                 p_conn->confq_pbl_addr);
+
+       for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) {
+               if (!p_conn->confq_addr_virt_addr[i])
+                       continue;
+               dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                                 QED_CHAIN_PAGE_SIZE,
+                                 p_conn->confq_addr_virt_addr[i],
+                                 p_conn->confq_addr[i]);
+       }
+
+       if (p_conn->xferq_pbl_addr_virt_addr)
+               dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                                 QED_CHAIN_PAGE_SIZE,
+                                 p_conn->xferq_pbl_addr_virt_addr,
+                                 p_conn->xferq_pbl_addr);
+
+       for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) {
+               if (!p_conn->xferq_addr_virt_addr[i])
+                       continue;
+               dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                                 QED_CHAIN_PAGE_SIZE,
+                                 p_conn->xferq_addr_virt_addr[i],
+                                 p_conn->xferq_addr[i]);
+       }
+       kfree(p_conn);
+}
+
+static void __iomem *qed_fcoe_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
+{
+       return (u8 __iomem *)p_hwfn->doorbells +
+              qed_db_addr(cid, DQ_DEMS_LEGACY);
+}
+
+static void __iomem *qed_fcoe_get_primary_bdq_prod(struct qed_hwfn *p_hwfn,
+                                                  u8 bdq_id)
+{
+       if (RESC_NUM(p_hwfn, QED_BDQ)) {
+               return (u8 __iomem *)p_hwfn->regview +
+                      GTT_BAR0_MAP_REG_MSDM_RAM +
+                      MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
+                                                                 QED_BDQ),
+                                                      bdq_id);
+       } else {
+               DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
+               return NULL;
+       }
+}
+
+static void __iomem *qed_fcoe_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
+                                                    u8 bdq_id)
+{
+       if (RESC_NUM(p_hwfn, QED_BDQ)) {
+               return (u8 __iomem *)p_hwfn->regview +
+                      GTT_BAR0_MAP_REG_TSDM_RAM +
+                      TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
+                                                                 QED_BDQ),
+                                                      bdq_id);
+       } else {
+               DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
+               return NULL;
+       }
+}
+
+struct qed_fcoe_info *qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
+{
+       struct qed_fcoe_info *p_fcoe_info;
+
+       /* Allocate the FCoE info struct */
+       p_fcoe_info = kzalloc(sizeof(*p_fcoe_info), GFP_KERNEL);
+       if (!p_fcoe_info) {
+               DP_NOTICE(p_hwfn, "Failed to allocate qed_fcoe_info\n");
+               return NULL;
+       }
+       INIT_LIST_HEAD(&p_fcoe_info->free_list);
+       return p_fcoe_info;
+}
+
+void qed_fcoe_setup(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info)
+{
+       struct fcoe_task_context *p_task_ctx = NULL;
+       int rc;
+       u32 i;
+
+       spin_lock_init(&p_fcoe_info->lock);
+       for (i = 0; i < p_hwfn->pf_params.fcoe_pf_params.num_tasks; i++) {
+               rc = qed_cxt_get_task_ctx(p_hwfn, i,
+                                         QED_CTX_WORKING_MEM,
+                                         (void **)&p_task_ctx);
+               if (rc)
+                       continue;
+
+               memset(p_task_ctx, 0, sizeof(struct fcoe_task_context));
+               SET_FIELD(p_task_ctx->timer_context.logical_client_0,
+                         TIMERS_CONTEXT_VALIDLC0, 1);
+               SET_FIELD(p_task_ctx->timer_context.logical_client_1,
+                         TIMERS_CONTEXT_VALIDLC1, 1);
+               SET_FIELD(p_task_ctx->tstorm_ag_context.flags0,
+                         TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
+       }
+}
+
+void qed_fcoe_free(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info)
+{
+       struct qed_fcoe_conn *p_conn = NULL;
+
+       if (!p_fcoe_info)
+               return;
+
+       while (!list_empty(&p_fcoe_info->free_list)) {
+               p_conn = list_first_entry(&p_fcoe_info->free_list,
+                                         struct qed_fcoe_conn, list_entry);
+               if (!p_conn)
+                       break;
+               list_del(&p_conn->list_entry);
+               qed_fcoe_free_connection(p_hwfn, p_conn);
+       }
+
+       kfree(p_fcoe_info);
+}
+
+static int
+qed_fcoe_acquire_connection(struct qed_hwfn *p_hwfn,
+                           struct qed_fcoe_conn *p_in_conn,
+                           struct qed_fcoe_conn **p_out_conn)
+{
+       struct qed_fcoe_conn *p_conn = NULL;
+       int rc = 0;
+       u32 icid;
+
+       spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
+       rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &icid);
+       spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
+       if (rc)
+               return rc;
+
+       /* Use input connection [if provided] or allocate a new one */
+       if (p_in_conn) {
+               p_conn = p_in_conn;
+       } else {
+               rc = qed_fcoe_allocate_connection(p_hwfn, &p_conn);
+               if (rc) {
+                       spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
+                       qed_cxt_release_cid(p_hwfn, icid);
+                       spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
+                       return rc;
+               }
+       }
+
+       p_conn->icid = icid;
+       p_conn->fw_cid = (p_hwfn->hw_info.opaque_fid << 16) | icid;
+       *p_out_conn = p_conn;
+
+       return rc;
+}
+
+static void qed_fcoe_release_connection(struct qed_hwfn *p_hwfn,
+                                       struct qed_fcoe_conn *p_conn)
+{
+       spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
+       list_add_tail(&p_conn->list_entry, &p_hwfn->p_fcoe_info->free_list);
+       qed_cxt_release_cid(p_hwfn, p_conn->icid);
+       spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
+}
+
+static void _qed_fcoe_get_tstats(struct qed_hwfn *p_hwfn,
+                                struct qed_ptt *p_ptt,
+                                struct qed_fcoe_stats *p_stats)
+{
+       struct fcoe_rx_stat tstats;
+       u32 tstats_addr;
+
+       memset(&tstats, 0, sizeof(tstats));
+       tstats_addr = BAR0_MAP_REG_TSDM_RAM +
+           TSTORM_FCOE_RX_STATS_OFFSET(p_hwfn->rel_pf_id);
+       qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));
+
+       p_stats->fcoe_rx_byte_cnt = HILO_64_REGPAIR(tstats.fcoe_rx_byte_cnt);
+       p_stats->fcoe_rx_data_pkt_cnt =
+           HILO_64_REGPAIR(tstats.fcoe_rx_data_pkt_cnt);
+       p_stats->fcoe_rx_xfer_pkt_cnt =
+           HILO_64_REGPAIR(tstats.fcoe_rx_xfer_pkt_cnt);
+       p_stats->fcoe_rx_other_pkt_cnt =
+           HILO_64_REGPAIR(tstats.fcoe_rx_other_pkt_cnt);
+
+       p_stats->fcoe_silent_drop_pkt_cmdq_full_cnt =
+           le32_to_cpu(tstats.fcoe_silent_drop_pkt_cmdq_full_cnt);
+       p_stats->fcoe_silent_drop_pkt_rq_full_cnt =
+           le32_to_cpu(tstats.fcoe_silent_drop_pkt_rq_full_cnt);
+       p_stats->fcoe_silent_drop_pkt_crc_error_cnt =
+           le32_to_cpu(tstats.fcoe_silent_drop_pkt_crc_error_cnt);
+       p_stats->fcoe_silent_drop_pkt_task_invalid_cnt =
+           le32_to_cpu(tstats.fcoe_silent_drop_pkt_task_invalid_cnt);
+       p_stats->fcoe_silent_drop_total_pkt_cnt =
+           le32_to_cpu(tstats.fcoe_silent_drop_total_pkt_cnt);
+}
+
+static void _qed_fcoe_get_pstats(struct qed_hwfn *p_hwfn,
+                                struct qed_ptt *p_ptt,
+                                struct qed_fcoe_stats *p_stats)
+{
+       struct fcoe_tx_stat pstats;
+       u32 pstats_addr;
+
+       memset(&pstats, 0, sizeof(pstats));
+       pstats_addr = BAR0_MAP_REG_PSDM_RAM +
+           PSTORM_FCOE_TX_STATS_OFFSET(p_hwfn->rel_pf_id);
+       qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));
+
+       p_stats->fcoe_tx_byte_cnt = HILO_64_REGPAIR(pstats.fcoe_tx_byte_cnt);
+       p_stats->fcoe_tx_data_pkt_cnt =
+           HILO_64_REGPAIR(pstats.fcoe_tx_data_pkt_cnt);
+       p_stats->fcoe_tx_xfer_pkt_cnt =
+           HILO_64_REGPAIR(pstats.fcoe_tx_xfer_pkt_cnt);
+       p_stats->fcoe_tx_other_pkt_cnt =
+           HILO_64_REGPAIR(pstats.fcoe_tx_other_pkt_cnt);
+}
+
+static int qed_fcoe_get_stats(struct qed_hwfn *p_hwfn,
+                             struct qed_fcoe_stats *p_stats)
+{
+       struct qed_ptt *p_ptt;
+
+       memset(p_stats, 0, sizeof(*p_stats));
+
+       p_ptt = qed_ptt_acquire(p_hwfn);
+
+       if (!p_ptt) {
+               DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+               return -EINVAL;
+       }
+
+       _qed_fcoe_get_tstats(p_hwfn, p_ptt, p_stats);
+       _qed_fcoe_get_pstats(p_hwfn, p_ptt, p_stats);
+
+       qed_ptt_release(p_hwfn, p_ptt);
+
+       return 0;
+}
+
+struct qed_hash_fcoe_con {
+       struct hlist_node node;
+       struct qed_fcoe_conn *con;
+};
+
+static int qed_fill_fcoe_dev_info(struct qed_dev *cdev,
+                                 struct qed_dev_fcoe_info *info)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       int rc;
+
+       memset(info, 0, sizeof(*info));
+       rc = qed_fill_dev_info(cdev, &info->common);
+
+       info->primary_dbq_rq_addr =
+           qed_fcoe_get_primary_bdq_prod(hwfn, BDQ_ID_RQ);
+       info->secondary_bdq_rq_addr =
+           qed_fcoe_get_secondary_bdq_prod(hwfn, BDQ_ID_RQ);
+
+       return rc;
+}
+
+static void qed_register_fcoe_ops(struct qed_dev *cdev,
+                                 struct qed_fcoe_cb_ops *ops, void *cookie)
+{
+       cdev->protocol_ops.fcoe = ops;
+       cdev->ops_cookie = cookie;
+}
+
+static struct qed_hash_fcoe_con *qed_fcoe_get_hash(struct qed_dev *cdev,
+                                                  u32 handle)
+{
+       struct qed_hash_fcoe_con *hash_con = NULL;
+
+       if (!(cdev->flags & QED_FLAG_STORAGE_STARTED))
+               return NULL;
+
+       hash_for_each_possible(cdev->connections, hash_con, node, handle) {
+               if (hash_con->con->icid == handle)
+                       break;
+       }
+
+       if (!hash_con || (hash_con->con->icid != handle))
+               return NULL;
+
+       return hash_con;
+}
+
+static int qed_fcoe_stop(struct qed_dev *cdev)
+{
+       struct qed_ptt *p_ptt;
+       int rc;
+
+       if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) {
+               DP_NOTICE(cdev, "fcoe already stopped\n");
+               return 0;
+       }
+
+       if (!hash_empty(cdev->connections)) {
+               DP_NOTICE(cdev,
+                         "Can't stop fcoe - not all connections were returned\n");
+               return -EINVAL;
+       }
+
+       p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+       if (!p_ptt)
+               return -EAGAIN;
+
+       /* Stop the fcoe */
+       rc = qed_sp_fcoe_func_stop(QED_LEADING_HWFN(cdev), p_ptt,
+                                  QED_SPQ_MODE_EBLOCK, NULL);
+       cdev->flags &= ~QED_FLAG_STORAGE_STARTED;
+       qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
+
+       return rc;
+}
+
+static int qed_fcoe_start(struct qed_dev *cdev, struct qed_fcoe_tid *tasks)
+{
+       int rc;
+
+       if (cdev->flags & QED_FLAG_STORAGE_STARTED) {
+               DP_NOTICE(cdev, "fcoe already started\n");
+               return 0;
+       }
+
+       rc = qed_sp_fcoe_func_start(QED_LEADING_HWFN(cdev),
+                                   QED_SPQ_MODE_EBLOCK, NULL);
+       if (rc) {
+               DP_NOTICE(cdev, "Failed to start fcoe\n");
+               return rc;
+       }
+
+       cdev->flags |= QED_FLAG_STORAGE_STARTED;
+       hash_init(cdev->connections);
+
+       if (tasks) {
+               struct qed_tid_mem *tid_info = kzalloc(sizeof(*tid_info),
+                                                      GFP_ATOMIC);
+
+               if (!tid_info) {
+                       DP_NOTICE(cdev,
+                                 "Failed to allocate task information\n");
+                       qed_fcoe_stop(cdev);
+                       return -ENOMEM;
+               }
+
+               rc = qed_cxt_get_tid_mem_info(QED_LEADING_HWFN(cdev), tid_info);
+               if (rc) {
+                       DP_NOTICE(cdev, "Failed to gather task information\n");
+                       qed_fcoe_stop(cdev);
+                       kfree(tid_info);
+                       return rc;
+               }
+
+               /* Fill task information */
+               tasks->size = tid_info->tid_size;
+               tasks->num_tids_per_block = tid_info->num_tids_per_block;
+               memcpy(tasks->blocks, tid_info->blocks,
+                      MAX_TID_BLOCKS_FCOE * sizeof(u8 *));
+
+               kfree(tid_info);
+       }
+
+       return 0;
+}
+
+static int qed_fcoe_acquire_conn(struct qed_dev *cdev,
+                                u32 *handle,
+                                u32 *fw_cid, void __iomem **p_doorbell)
+{
+       struct qed_hash_fcoe_con *hash_con;
+       int rc;
+
+       /* Allocate a hashed connection */
+       hash_con = kzalloc(sizeof(*hash_con), GFP_KERNEL);
+       if (!hash_con) {
+               DP_NOTICE(cdev, "Failed to allocate hashed connection\n");
+               return -ENOMEM;
+       }
+
+       /* Acquire the connection */
+       rc = qed_fcoe_acquire_connection(QED_LEADING_HWFN(cdev), NULL,
+                                        &hash_con->con);
+       if (rc) {
+               DP_NOTICE(cdev, "Failed to acquire connection\n");
+               kfree(hash_con);
+               return rc;
+       }
+
+       /* Add the connection to the hash table */
+       *handle = hash_con->con->icid;
+       *fw_cid = hash_con->con->fw_cid;
+       hash_add(cdev->connections, &hash_con->node, *handle);
+
+       if (p_doorbell)
+               *p_doorbell = qed_fcoe_get_db_addr(QED_LEADING_HWFN(cdev),
+                                                  *handle);
+
+       return 0;
+}
+
+static int qed_fcoe_release_conn(struct qed_dev *cdev, u32 handle)
+{
+       struct qed_hash_fcoe_con *hash_con;
+
+       hash_con = qed_fcoe_get_hash(cdev, handle);
+       if (!hash_con) {
+               DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+                         handle);
+               return -EINVAL;
+       }
+
+       hlist_del(&hash_con->node);
+       qed_fcoe_release_connection(QED_LEADING_HWFN(cdev), hash_con->con);
+       kfree(hash_con);
+
+       return 0;
+}
+
+static int qed_fcoe_offload_conn(struct qed_dev *cdev,
+                                u32 handle,
+                                struct qed_fcoe_params_offload *conn_info)
+{
+       struct qed_hash_fcoe_con *hash_con;
+       struct qed_fcoe_conn *con;
+
+       hash_con = qed_fcoe_get_hash(cdev, handle);
+       if (!hash_con) {
+               DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+                         handle);
+               return -EINVAL;
+       }
+
+       /* Update the connection with information from the params */
+       con = hash_con->con;
+
+       con->sq_pbl_addr = conn_info->sq_pbl_addr;
+       con->sq_curr_page_addr = conn_info->sq_curr_page_addr;
+       con->sq_next_page_addr = conn_info->sq_next_page_addr;
+       con->tx_max_fc_pay_len = conn_info->tx_max_fc_pay_len;
+       con->e_d_tov_timer_val = conn_info->e_d_tov_timer_val;
+       con->rec_tov_timer_val = conn_info->rec_tov_timer_val;
+       con->rx_max_fc_pay_len = conn_info->rx_max_fc_pay_len;
+       con->vlan_tag = conn_info->vlan_tag;
+       con->max_conc_seqs_c3 = conn_info->max_conc_seqs_c3;
+       con->flags = conn_info->flags;
+       con->def_q_idx = conn_info->def_q_idx;
+
+       con->src_mac_addr_hi = (conn_info->src_mac[5] << 8) |
+           conn_info->src_mac[4];
+       con->src_mac_addr_mid = (conn_info->src_mac[3] << 8) |
+           conn_info->src_mac[2];
+       con->src_mac_addr_lo = (conn_info->src_mac[1] << 8) |
+           conn_info->src_mac[0];
+       con->dst_mac_addr_hi = (conn_info->dst_mac[5] << 8) |
+           conn_info->dst_mac[4];
+       con->dst_mac_addr_mid = (conn_info->dst_mac[3] << 8) |
+           conn_info->dst_mac[2];
+       con->dst_mac_addr_lo = (conn_info->dst_mac[1] << 8) |
+           conn_info->dst_mac[0];
+
+       con->s_id.addr_hi = conn_info->s_id.addr_hi;
+       con->s_id.addr_mid = conn_info->s_id.addr_mid;
+       con->s_id.addr_lo = conn_info->s_id.addr_lo;
+       con->d_id.addr_hi = conn_info->d_id.addr_hi;
+       con->d_id.addr_mid = conn_info->d_id.addr_mid;
+       con->d_id.addr_lo = conn_info->d_id.addr_lo;
+
+       return qed_sp_fcoe_conn_offload(QED_LEADING_HWFN(cdev), con,
+                                       QED_SPQ_MODE_EBLOCK, NULL);
+}
+
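The MAC handling above splits each 6-byte address into three 16-bit words, least-significant byte first, to match the ramrod layout. As a standalone illustration (the helper name is hypothetical; the driver open-codes this packing):

/* Sketch: pack a wire-order MAC address into the lo/mid/hi word layout
 * used by qed_fcoe_conn and the offload ramrod. mac[0] is the first
 * byte on the wire; each byte pair becomes one 16-bit word, LSB first.
 */
static void example_fcoe_mac_to_words(const u8 mac[6],
				      u16 *lo, u16 *mid, u16 *hi)
{
	*lo  = (mac[1] << 8) | mac[0];
	*mid = (mac[3] << 8) | mac[2];
	*hi  = (mac[5] << 8) | mac[4];
}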
+static int qed_fcoe_destroy_conn(struct qed_dev *cdev,
+                                u32 handle, dma_addr_t terminate_params)
+{
+       struct qed_hash_fcoe_con *hash_con;
+       struct qed_fcoe_conn *con;
+
+       hash_con = qed_fcoe_get_hash(cdev, handle);
+       if (!hash_con) {
+               DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+                         handle);
+               return -EINVAL;
+       }
+
+       /* Update the connection with information from the params */
+       con = hash_con->con;
+       con->terminate_params = terminate_params;
+
+       return qed_sp_fcoe_conn_destroy(QED_LEADING_HWFN(cdev), con,
+                                       QED_SPQ_MODE_EBLOCK, NULL);
+}
+
+static int qed_fcoe_stats(struct qed_dev *cdev, struct qed_fcoe_stats *stats)
+{
+       return qed_fcoe_get_stats(QED_LEADING_HWFN(cdev), stats);
+}
+
+void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
+                                struct qed_mcp_fcoe_stats *stats)
+{
+       struct qed_fcoe_stats proto_stats;
+
+       /* Retrieve FW statistics */
+       memset(&proto_stats, 0, sizeof(proto_stats));
+       if (qed_fcoe_stats(cdev, &proto_stats)) {
+               DP_VERBOSE(cdev, QED_MSG_STORAGE,
+                          "Failed to collect FCoE statistics\n");
+               return;
+       }
+
+       /* Translate FW statistics into struct */
+       stats->rx_pkts = proto_stats.fcoe_rx_data_pkt_cnt +
+                        proto_stats.fcoe_rx_xfer_pkt_cnt +
+                        proto_stats.fcoe_rx_other_pkt_cnt;
+       stats->tx_pkts = proto_stats.fcoe_tx_data_pkt_cnt +
+                        proto_stats.fcoe_tx_xfer_pkt_cnt +
+                        proto_stats.fcoe_tx_other_pkt_cnt;
+       stats->fcs_err = proto_stats.fcoe_silent_drop_pkt_crc_error_cnt;
+
+       /* Request protocol driver to fill-in the rest */
+       if (cdev->protocol_ops.fcoe && cdev->ops_cookie) {
+               struct qed_fcoe_cb_ops *ops = cdev->protocol_ops.fcoe;
+               void *cookie = cdev->ops_cookie;
+
+               if (ops->get_login_failures)
+                       stats->login_failure = ops->get_login_failures(cookie);
+       }
+}
+
+static const struct qed_fcoe_ops qed_fcoe_ops_pass = {
+       .common = &qed_common_ops_pass,
+       .ll2 = &qed_ll2_ops_pass,
+       .fill_dev_info = &qed_fill_fcoe_dev_info,
+       .start = &qed_fcoe_start,
+       .stop = &qed_fcoe_stop,
+       .register_ops = &qed_register_fcoe_ops,
+       .acquire_conn = &qed_fcoe_acquire_conn,
+       .release_conn = &qed_fcoe_release_conn,
+       .offload_conn = &qed_fcoe_offload_conn,
+       .destroy_conn = &qed_fcoe_destroy_conn,
+       .get_stats = &qed_fcoe_stats,
+};
+
+const struct qed_fcoe_ops *qed_get_fcoe_ops(void)
+{
+       return &qed_fcoe_ops_pass;
+}
+EXPORT_SYMBOL(qed_get_fcoe_ops);
+
+void qed_put_fcoe_ops(void)
+{
+}
+EXPORT_SYMBOL(qed_put_fcoe_ops);
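A sketch of how a storage driver consumes this exported ops table (the probe function and callback table contents are illustrative assumptions; the in-tree consumer of this interface is the qedf driver):

/* Sketch: fetch the FCoE ops, query device info, register callbacks,
 * and start the FCoE function.
 */
static struct qed_fcoe_cb_ops example_cb_ops; /* e.g. .get_login_failures */

static int example_fcoe_probe(struct qed_dev *cdev, void *cookie,
			      struct qed_fcoe_tid *tasks)
{
	const struct qed_fcoe_ops *ops = qed_get_fcoe_ops();
	struct qed_dev_fcoe_info info;
	int rc;

	if (!ops)
		return -ENODEV;

	rc = ops->fill_dev_info(cdev, &info);
	if (rc)
		return rc;

	ops->register_ops(cdev, &example_cb_ops, cookie);
	return ops->start(cdev, tasks);
}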
diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.h b/drivers/net/ethernet/qlogic/qed/qed_fcoe.h
new file mode 100644 (file)
index 0000000..472af34
--- /dev/null
@@ -0,0 +1,87 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _QED_FCOE_H
+#define _QED_FCOE_H
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/qed/qed_fcoe_if.h>
+#include <linux/qed/qed_chain.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_mcp.h"
+#include "qed_sp.h"
+
+struct qed_fcoe_info {
+       spinlock_t lock; /* Connection resources. */
+       struct list_head free_list;
+};
+
+#if IS_ENABLED(CONFIG_QED_FCOE)
+struct qed_fcoe_info *qed_fcoe_alloc(struct qed_hwfn *p_hwfn);
+
+void qed_fcoe_setup(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info);
+
+void qed_fcoe_free(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info);
+void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
+                                struct qed_mcp_fcoe_stats *stats);
+#else /* CONFIG_QED_FCOE */
+static inline struct qed_fcoe_info *
+qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
+{
+       return NULL;
+}
+
+static inline void qed_fcoe_setup(struct qed_hwfn *p_hwfn,
+                                 struct qed_fcoe_info *p_fcoe_info)
+{
+}
+
+static inline void qed_fcoe_free(struct qed_hwfn *p_hwfn,
+                                struct qed_fcoe_info *p_fcoe_info)
+{
+}
+
+static inline void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
+                                              struct qed_mcp_fcoe_stats *stats)
+{
+}
+#endif /* CONFIG_QED_FCOE */
+
+#ifdef CONFIG_QED_LL2
+extern const struct qed_common_ops qed_common_ops_pass;
+extern const struct qed_ll2_ops qed_ll2_ops_pass;
+#endif
+
+#endif /* _QED_FCOE_H */
index 6f9d3b831a2a0d545ef44b3dc6247ae5f141f3d0..858a57a735894d9f7788f30cf8fab386ca965808 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef _QED_HSI_H
 #include <linux/qed/common_hsi.h>
 #include <linux/qed/storage_common.h>
 #include <linux/qed/tcp_common.h>
+#include <linux/qed/fcoe_common.h>
 #include <linux/qed/eth_common.h>
 #include <linux/qed/iscsi_common.h>
 #include <linux/qed/rdma_common.h>
 #include <linux/qed/roce_common.h>
+#include <linux/qed/qed_fcoe_if.h>
 
 struct qed_hwfn;
 struct qed_ptt;
@@ -536,6 +562,256 @@ struct core_conn_context {
        struct regpair ustorm_st_padding[2];
 };
 
+enum core_error_handle {
+       LL2_DROP_PACKET,
+       LL2_DO_NOTHING,
+       LL2_ASSERT,
+       MAX_CORE_ERROR_HANDLE
+};
+
+enum core_event_opcode {
+       CORE_EVENT_TX_QUEUE_START,
+       CORE_EVENT_TX_QUEUE_STOP,
+       CORE_EVENT_RX_QUEUE_START,
+       CORE_EVENT_RX_QUEUE_STOP,
+       CORE_EVENT_RX_QUEUE_FLUSH,
+       MAX_CORE_EVENT_OPCODE
+};
+
+enum core_l4_pseudo_checksum_mode {
+       CORE_L4_PSEUDO_CSUM_CORRECT_LENGTH,
+       CORE_L4_PSEUDO_CSUM_ZERO_LENGTH,
+       MAX_CORE_L4_PSEUDO_CHECKSUM_MODE
+};
+
+struct core_ll2_port_stats {
+       struct regpair gsi_invalid_hdr;
+       struct regpair gsi_invalid_pkt_length;
+       struct regpair gsi_unsupported_pkt_typ;
+       struct regpair gsi_crcchksm_error;
+};
+
+struct core_ll2_pstorm_per_queue_stat {
+       struct regpair sent_ucast_bytes;
+       struct regpair sent_mcast_bytes;
+       struct regpair sent_bcast_bytes;
+       struct regpair sent_ucast_pkts;
+       struct regpair sent_mcast_pkts;
+       struct regpair sent_bcast_pkts;
+};
+
+struct core_ll2_rx_prod {
+       __le16 bd_prod;
+       __le16 cqe_prod;
+       __le32 reserved;
+};
+
+struct core_ll2_tstorm_per_queue_stat {
+       struct regpair packet_too_big_discard;
+       struct regpair no_buff_discard;
+};
+
+struct core_ll2_ustorm_per_queue_stat {
+       struct regpair rcv_ucast_bytes;
+       struct regpair rcv_mcast_bytes;
+       struct regpair rcv_bcast_bytes;
+       struct regpair rcv_ucast_pkts;
+       struct regpair rcv_mcast_pkts;
+       struct regpair rcv_bcast_pkts;
+};
+
+enum core_ramrod_cmd_id {
+       CORE_RAMROD_UNUSED,
+       CORE_RAMROD_RX_QUEUE_START,
+       CORE_RAMROD_TX_QUEUE_START,
+       CORE_RAMROD_RX_QUEUE_STOP,
+       CORE_RAMROD_TX_QUEUE_STOP,
+       CORE_RAMROD_RX_QUEUE_FLUSH,
+       MAX_CORE_RAMROD_CMD_ID
+};
+
+enum core_roce_flavor_type {
+       CORE_ROCE,
+       CORE_RROCE,
+       MAX_CORE_ROCE_FLAVOR_TYPE
+};
+
+struct core_rx_action_on_error {
+       u8 error_type;
+#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_MASK    0x3
+#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT 0
+#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK   0x3
+#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT  2
+#define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK  0xF
+#define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT 4
+};
+
+struct core_rx_bd {
+       struct regpair addr;
+       __le16 reserved[4];
+};
+
+struct core_rx_bd_with_buff_len {
+       struct regpair addr;
+       __le16 buff_length;
+       __le16 reserved[3];
+};
+
+union core_rx_bd_union {
+       struct core_rx_bd rx_bd;
+       struct core_rx_bd_with_buff_len rx_bd_with_len;
+};
+
+struct core_rx_cqe_opaque_data {
+       __le32 data[2];
+};
+
+enum core_rx_cqe_type {
+       CORE_RX_CQE_ILLIGAL_TYPE,
+       CORE_RX_CQE_TYPE_REGULAR,
+       CORE_RX_CQE_TYPE_GSI_OFFLOAD,
+       CORE_RX_CQE_TYPE_SLOW_PATH,
+       MAX_CORE_RX_CQE_TYPE
+};
+
+struct core_rx_fast_path_cqe {
+       u8 type;
+       u8 placement_offset;
+       struct parsing_and_err_flags parse_flags;
+       __le16 packet_length;
+       __le16 vlan;
+       struct core_rx_cqe_opaque_data opaque_data;
+       __le32 reserved[4];
+};
+
+struct core_rx_gsi_offload_cqe {
+       u8 type;
+       u8 data_length_error;
+       struct parsing_and_err_flags parse_flags;
+       __le16 data_length;
+       __le16 vlan;
+       __le32 src_mac_addrhi;
+       __le16 src_mac_addrlo;
+       u8 reserved1[2];
+       __le32 gid_dst[4];
+};
+
+struct core_rx_slow_path_cqe {
+       u8 type;
+       u8 ramrod_cmd_id;
+       __le16 echo;
+       struct core_rx_cqe_opaque_data opaque_data;
+       __le32 reserved1[5];
+};
+
+union core_rx_cqe_union {
+       struct core_rx_fast_path_cqe rx_cqe_fp;
+       struct core_rx_gsi_offload_cqe rx_cqe_gsi;
+       struct core_rx_slow_path_cqe rx_cqe_sp;
+};
+
+struct core_rx_start_ramrod_data {
+       struct regpair bd_base;
+       struct regpair cqe_pbl_addr;
+       __le16 mtu;
+       __le16 sb_id;
+       u8 sb_index;
+       u8 complete_cqe_flg;
+       u8 complete_event_flg;
+       u8 drop_ttl0_flg;
+       __le16 num_of_pbl_pages;
+       u8 inner_vlan_removal_en;
+       u8 queue_id;
+       u8 main_func_queue;
+       u8 mf_si_bcast_accept_all;
+       u8 mf_si_mcast_accept_all;
+       struct core_rx_action_on_error action_on_error;
+       u8 gsi_offload_flag;
+       u8 reserved[7];
+};
+
+struct core_rx_stop_ramrod_data {
+       u8 complete_cqe_flg;
+       u8 complete_event_flg;
+       u8 queue_id;
+       u8 reserved1;
+       __le16 reserved2[2];
+};
+
+struct core_tx_bd_data {
+       __le16 as_bitfield;
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK   0x1
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT     0
+#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK    0x1
+#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT      1
+#define CORE_TX_BD_DATA_START_BD_MASK  0x1
+#define CORE_TX_BD_DATA_START_BD_SHIFT            2
+#define CORE_TX_BD_DATA_IP_CSUM_MASK   0x1
+#define CORE_TX_BD_DATA_IP_CSUM_SHIFT             3
+#define CORE_TX_BD_DATA_L4_CSUM_MASK   0x1
+#define CORE_TX_BD_DATA_L4_CSUM_SHIFT             4
+#define CORE_TX_BD_DATA_IPV6_EXT_MASK  0x1
+#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT            5
+#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK       0x1
+#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT         6
+#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK       0x1
+#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7
+#define CORE_TX_BD_DATA_NBDS_MASK      0xF
+#define CORE_TX_BD_DATA_NBDS_SHIFT                8
+#define CORE_TX_BD_DATA_ROCE_FLAV_MASK 0x1
+#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT           12
+#define CORE_TX_BD_DATA_IP_LEN_MASK    0x1
+#define CORE_TX_BD_DATA_IP_LEN_SHIFT              13
+#define CORE_TX_BD_DATA_RESERVED0_MASK            0x3
+#define CORE_TX_BD_DATA_RESERVED0_SHIFT           14
+};
+
+struct core_tx_bd {
+       struct regpair addr;
+       __le16 nbytes;
+       __le16 nw_vlan_or_lb_echo;
+       struct core_tx_bd_data bd_data;
+       __le16 bitfield1;
+#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK        0x3FFF
+#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
+#define CORE_TX_BD_TX_DST_MASK 0x1
+#define CORE_TX_BD_TX_DST_SHIFT        14
+#define CORE_TX_BD_RESERVED_MASK         0x1
+#define CORE_TX_BD_RESERVED_SHIFT        15
+};
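+
+/* Illustrative sketch, not part of the original patch: the MASK/SHIFT pairs
+ * above (and throughout this file) describe packed little-endian bitfields.
+ * The driver accesses them through generic get/set helpers; EX_SET_FIELD
+ * below is a local stand-in for this example only:
+ *
+ *     #define EX_SET_FIELD(value, name, val)                            \
+ *             do {                                                      \
+ *                     (value) &= ~((name##_MASK) << (name##_SHIFT));    \
+ *                     (value) |= ((val) & (name##_MASK)) <<             \
+ *                                (name##_SHIFT);                        \
+ *             } while (0)
+ *
+ * Marking a Tx BD as the first of three BDs of a packet:
+ *
+ *     u16 flags = 0;
+ *
+ *     EX_SET_FIELD(flags, CORE_TX_BD_DATA_START_BD, 1);
+ *     EX_SET_FIELD(flags, CORE_TX_BD_DATA_NBDS, 3);
+ *     bd->bd_data.as_bitfield = cpu_to_le16(flags);
+ */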
+
+enum core_tx_dest {
+       CORE_TX_DEST_NW,
+       CORE_TX_DEST_LB,
+       MAX_CORE_TX_DEST
+};
+
+struct core_tx_start_ramrod_data {
+       struct regpair pbl_base_addr;
+       __le16 mtu;
+       __le16 sb_id;
+       u8 sb_index;
+       u8 stats_en;
+       u8 stats_id;
+       u8 conn_type;
+       __le16 pbl_size;
+       __le16 qm_pq_id;
+       u8 gsi_offload_flag;
+       u8 reserved[3];
+};
+
+struct core_tx_stop_ramrod_data {
+       __le32 reserved0[2];
+};
+
+enum dcb_dhcp_update_flag {
+       DONT_UPDATE_DCB_DHCP,
+       UPDATE_DCB,
+       UPDATE_DSCP,
+       UPDATE_DCB_DSCP,
+       MAX_DCB_DHCP_UPDATE_FLAG
+};
+
 struct eth_mstorm_per_pf_stat {
        struct regpair gre_discard_pkts;
        struct regpair vxlan_discard_pkts;
@@ -629,6 +905,12 @@ union event_ring_element {
        struct event_ring_next_addr next_addr;
 };
 
+enum fw_flow_ctrl_mode {
+       flow_ctrl_pause,
+       flow_ctrl_pfc,
+       MAX_FW_FLOW_CTRL_MODE
+};
+
 /* Major and Minor hsi Versions */
 struct hsi_fp_ver_struct {
        u8 minor_ver_arr[2];
@@ -636,9 +918,34 @@ struct hsi_fp_ver_struct {
 };
 
 /* Mstorm non-triggering VF zone */
+enum malicious_vf_error_id {
+       MALICIOUS_VF_NO_ERROR,
+       VF_PF_CHANNEL_NOT_READY,
+       VF_ZONE_MSG_NOT_VALID,
+       VF_ZONE_FUNC_NOT_ENABLED,
+       ETH_PACKET_TOO_SMALL,
+       ETH_ILLEGAL_VLAN_MODE,
+       ETH_MTU_VIOLATION,
+       ETH_ILLEGAL_INBAND_TAGS,
+       ETH_VLAN_INSERT_AND_INBAND_VLAN,
+       ETH_ILLEGAL_NBDS,
+       ETH_FIRST_BD_WO_SOP,
+       ETH_INSUFFICIENT_BDS,
+       ETH_ILLEGAL_LSO_HDR_NBDS,
+       ETH_ILLEGAL_LSO_MSS,
+       ETH_ZERO_SIZE_BD,
+       ETH_ILLEGAL_LSO_HDR_LEN,
+       ETH_INSUFFICIENT_PAYLOAD,
+       ETH_EDPM_OUT_OF_SYNC,
+       ETH_TUNN_IPV6_EXT_NBD_ERR,
+       ETH_CONTROL_PACKET_VIOLATION,
+       ETH_ANTI_SPOOFING_ERR,
+       MAX_MALICIOUS_VF_ERROR_ID
+};
+
 struct mstorm_non_trigger_vf_zone {
        struct eth_mstorm_per_queue_stat eth_queue_stat;
-       struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF];
+       struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD];
 };
 
 /* Mstorm VF zone */
@@ -651,7 +958,7 @@ struct mstorm_vf_zone {
 enum personality_type {
        BAD_PERSONALITY_TYP,
        PERSONALITY_ISCSI,
-       PERSONALITY_RESERVED2,
+       PERSONALITY_FCOE,
        PERSONALITY_RDMA_AND_ETH,
        PERSONALITY_RESERVED3,
        PERSONALITY_CORE,
@@ -705,13 +1012,17 @@ struct pf_start_ramrod_data {
 
 struct protocol_dcb_data {
        u8 dcb_enable_flag;
+       u8 reserved_a;
        u8 dcb_priority;
        u8 dcb_tc;
-       u8 reserved;
+       u8 reserved_b;
+       u8 reserved0;
 };
 
 struct pf_update_tunnel_config {
        u8 update_rx_pf_clss;
+       u8 update_rx_def_ucast_clss;
+       u8 update_rx_def_non_ucast_clss;
        u8 update_tx_pf_clss;
        u8 set_vxlan_udp_port_flg;
        u8 set_geneve_udp_port_flg;
@@ -727,7 +1038,7 @@ struct pf_update_tunnel_config {
        u8 tunnel_clss_ipgre;
        __le16 vxlan_udp_port;
        __le16 geneve_udp_port;
-       __le16 reserved[3];
+       __le16 reserved[2];
 };
 
 struct pf_update_ramrod_data {
@@ -736,16 +1047,17 @@ struct pf_update_ramrod_data {
        u8 update_fcoe_dcb_data_flag;
        u8 update_iscsi_dcb_data_flag;
        u8 update_roce_dcb_data_flag;
+       u8 update_rroce_dcb_data_flag;
        u8 update_iwarp_dcb_data_flag;
        u8 update_mf_vlan_flag;
-       u8 reserved;
        struct protocol_dcb_data eth_dcb_data;
        struct protocol_dcb_data fcoe_dcb_data;
        struct protocol_dcb_data iscsi_dcb_data;
        struct protocol_dcb_data roce_dcb_data;
+       struct protocol_dcb_data rroce_dcb_data;
        struct protocol_dcb_data iwarp_dcb_data;
        __le16 mf_vlan;
-       __le16 reserved2;
+       __le16 reserved;
        struct pf_update_tunnel_config tunnel_config;
 };
 
@@ -766,10 +1078,14 @@ enum protocol_version_array_key {
        MAX_PROTOCOL_VERSION_ARRAY_KEY
 };
 
-/* Pstorm non-triggering VF zone */
+struct rdma_sent_stats {
+       struct regpair sent_bytes;
+       struct regpair sent_pkts;
+};
+
 struct pstorm_non_trigger_vf_zone {
        struct eth_pstorm_per_queue_stat eth_queue_stat;
-       struct regpair reserved[2];
+       struct rdma_sent_stats rdma_stats;
 };
 
 /* Pstorm VF zone */
@@ -786,7 +1102,11 @@ struct ramrod_header {
        __le16 echo;
 };
 
-/* Slowpath Element (SPQE) */
+struct rdma_rcv_stats {
+       struct regpair rcv_bytes;
+       struct regpair rcv_pkts;
+};
+
 struct slow_path_element {
        struct ramrod_header hdr;
        struct regpair data_ptr;
@@ -794,7 +1114,7 @@ struct slow_path_element {
 
 /* Tstorm non-triggering VF zone */
 struct tstorm_non_trigger_vf_zone {
-       struct regpair reserved[2];
+       struct rdma_rcv_stats rdma_stats;
 };
 
 struct tstorm_per_port_stat {
@@ -802,9 +1122,15 @@ struct tstorm_per_port_stat {
        struct regpair mac_error_discard;
        struct regpair mftag_filter_discard;
        struct regpair eth_mac_filter_discard;
-       struct regpair reserved[5];
+       struct regpair ll2_mac_filter_discard;
+       struct regpair ll2_conn_disabled_discard;
+       struct regpair iscsi_irregular_pkt;
+       struct regpair fcoe_irregular_pkt;
+       struct regpair roce_irregular_pkt;
+       struct regpair reserved;
        struct regpair eth_irregular_pkt;
-       struct regpair reserved1[2];
+       struct regpair reserved1;
+       struct regpair preroce_irregular_pkt;
        struct regpair eth_gre_tunn_filter_discard;
        struct regpair eth_vxlan_tunn_filter_discard;
        struct regpair eth_geneve_tunn_filter_discard;
@@ -870,7 +1196,13 @@ struct vf_stop_ramrod_data {
        __le32 reserved2;
 };
 
-/* Attentions status block */
+enum vf_zone_size_mode {
+       VF_ZONE_SIZE_MODE_DEFAULT,
+       VF_ZONE_SIZE_MODE_DOUBLE,
+       VF_ZONE_SIZE_MODE_QUAD,
+       MAX_VF_ZONE_SIZE_MODE
+};
+
 struct atten_status_block {
        __le32 atten_bits;
        __le32 atten_ack;
@@ -1336,6 +1668,11 @@ enum block_addr {
        GRCBASE_MS = 0x6a0000,
        GRCBASE_PHY_PCIE = 0x620000,
        GRCBASE_LED = 0x6b8000,
+       GRCBASE_AVS_WRAP = 0x6b0000,
+       GRCBASE_RGFS = 0x19d0000,
+       GRCBASE_TGFS = 0x19e0000,
+       GRCBASE_PTLD = 0x19f0000,
+       GRCBASE_YPLD = 0x1a10000,
        GRCBASE_MISC_AEU = 0x8000,
        GRCBASE_BAR0_MAP = 0x1c00000,
        MAX_BLOCK_ADDR
@@ -1420,6 +1757,11 @@ enum block_id {
        BLOCK_MS,
        BLOCK_PHY_PCIE,
        BLOCK_LED,
+       BLOCK_AVS_WRAP,
+       BLOCK_RGFS,
+       BLOCK_TGFS,
+       BLOCK_PTLD,
+       BLOCK_YPLD,
        BLOCK_MISC_AEU,
        BLOCK_BAR0_MAP,
        MAX_BLOCK_ID
@@ -1442,13 +1784,6 @@ enum bin_dbg_buffer_type {
        MAX_BIN_DBG_BUFFER_TYPE
 };
 
-/* Chip IDs */
-enum chip_ids {
-       CHIP_RESERVED,
-       CHIP_BB_B0,
-       CHIP_RESERVED2,
-       MAX_CHIP_IDS
-};
 
 /* Attention bit mapping */
 struct dbg_attn_bit_mapping {
@@ -1478,9 +1813,9 @@ struct dbg_attn_reg_result {
        __le32 data;
 #define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK   0xFFFFFF
 #define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT  0
-#define DBG_ATTN_REG_RESULT_NUM_ATTN_IDX_MASK  0xFF
-#define DBG_ATTN_REG_RESULT_NUM_ATTN_IDX_SHIFT 24
-       __le16 attn_idx_offset;
+#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_MASK  0xFF
+#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_SHIFT 24
+       __le16 block_attn_offset;
        __le16 reserved;
        __le32 sts_val;
        __le32 mask_val;
@@ -1510,12 +1845,12 @@ struct dbg_mode_hdr {
 /* Attention register */
 struct dbg_attn_reg {
        struct dbg_mode_hdr mode;
-       __le16 attn_idx_offset;
+       __le16 block_attn_offset;
        __le32 data;
 #define DBG_ATTN_REG_STS_ADDRESS_MASK  0xFFFFFF
 #define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0
-#define DBG_ATTN_REG_NUM_ATTN_IDX_MASK 0xFF
-#define DBG_ATTN_REG_NUM_ATTN_IDX_SHIFT        24
+#define DBG_ATTN_REG_NUM_REG_ATTN_MASK 0xFF
+#define DBG_ATTN_REG_NUM_REG_ATTN_SHIFT 24
        __le32 sts_clr_address;
        __le32 mask_address;
 };
@@ -1527,6 +1862,439 @@ enum dbg_attn_type {
        MAX_DBG_ATTN_TYPE
 };
 
+/* condition header for registers dump */
+struct dbg_dump_cond_hdr {
+       struct dbg_mode_hdr mode; /* Mode header */
+       u8 block_id; /* block ID */
+       u8 data_size; /* size in dwords of the data following this header */
+};
+
+/* memory data for registers dump */
+struct dbg_dump_mem {
+       __le32 dword0;
+#define DBG_DUMP_MEM_ADDRESS_MASK       0xFFFFFF
+#define DBG_DUMP_MEM_ADDRESS_SHIFT      0
+#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK  0xFF
+#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24
+       __le32 dword1;
+#define DBG_DUMP_MEM_LENGTH_MASK        0xFFFFFF
+#define DBG_DUMP_MEM_LENGTH_SHIFT       0
+#define DBG_DUMP_MEM_RESERVED_MASK      0xFF
+#define DBG_DUMP_MEM_RESERVED_SHIFT     24
+};
+
+/* register data for registers dump */
+struct dbg_dump_reg {
+       __le32 data;
+#define DBG_DUMP_REG_ADDRESS_MASK  0xFFFFFF /* register address (in dwords) */
+#define DBG_DUMP_REG_ADDRESS_SHIFT 0
+#define DBG_DUMP_REG_LENGTH_MASK   0xFF /* register size (in dwords) */
+#define DBG_DUMP_REG_LENGTH_SHIFT  24
+};
+
+/* split header for registers dump */
+struct dbg_dump_split_hdr {
+       __le32 hdr;
+#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK      0xFFFFFF
+#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT     0
+#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK  0xFF
+#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT 24
+};
+
+/* condition header for idle check */
+struct dbg_idle_chk_cond_hdr {
+       struct dbg_mode_hdr mode; /* Mode header */
+       __le16 data_size; /* size in dwords of the data following this header */
+};
+
+/* Idle Check condition register */
+struct dbg_idle_chk_cond_reg {
+       __le32 data;
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK   0xFFFFFF
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT  0
+#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK  0xFF
+#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24
+       __le16 num_entries; /* number of registers entries to check */
+       u8 entry_size; /* size of registers entry (in dwords) */
+       u8 start_entry; /* index of the first entry to check */
+};
+
+/* Idle Check info register */
+struct dbg_idle_chk_info_reg {
+       __le32 data;
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK   0xFFFFFF
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT  0
+#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK  0xFF
+#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24
+       __le16 size; /* register size in dwords */
+       struct dbg_mode_hdr mode; /* Mode header */
+};
+
+/* Idle Check register */
+union dbg_idle_chk_reg {
+       struct dbg_idle_chk_cond_reg cond_reg; /* condition register */
+       struct dbg_idle_chk_info_reg info_reg; /* info register */
+};
+
+/* Idle Check result header */
+struct dbg_idle_chk_result_hdr {
+       __le16 rule_id; /* Failing rule index */
+       __le16 mem_entry_id; /* Failing memory entry index */
+       u8 num_dumped_cond_regs; /* number of dumped condition registers */
+       u8 num_dumped_info_regs; /* number of dumped info registers */
+       u8 severity; /* from dbg_idle_chk_severity_types enum */
+       u8 reserved;
+};
+
+/* Idle Check result register header */
+struct dbg_idle_chk_result_reg_hdr {
+       u8 data;
+#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_MASK  0x1
+#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_SHIFT 0
+#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_MASK  0x7F
+#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_SHIFT 1
+       u8 start_entry; /* index of the first checked entry */
+       __le16 size; /* register size in dwords */
+};
+
+/* Idle Check rule */
+struct dbg_idle_chk_rule {
+       __le16 rule_id; /* Idle Check rule ID */
+       u8 severity; /* value from dbg_idle_chk_severity_types enum */
+       u8 cond_id; /* Condition ID */
+       u8 num_cond_regs; /* number of condition registers */
+       u8 num_info_regs; /* number of info registers */
+       u8 num_imms; /* number of immediates in the condition */
+       u8 reserved1;
+       __le16 reg_offset; /* offset of this rule's registers in the idle check
+                           * register array (in dbg_idle_chk_reg units).
+                           */
+       __le16 imm_offset; /* offset of this rule's immediate values in the
+                           * immediate values array (in dwords).
+                           */
+};
+
+/* Idle Check rule parsing data */
+struct dbg_idle_chk_rule_parsing_data {
+       __le32 data;
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK  0x1
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK  0x7FFFFFFF
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT 1
+};
+
+/* idle check severity types */
+enum dbg_idle_chk_severity_types {
+       /* idle check failure should cause an error */
+       IDLE_CHK_SEVERITY_ERROR,
+       /* idle check failure should cause an error only if there is no traffic */
+       IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC,
+       /* idle check failure should cause a warning */
+       IDLE_CHK_SEVERITY_WARNING,
+       MAX_DBG_IDLE_CHK_SEVERITY_TYPES
+};
+
+/* Debug Bus block data */
+struct dbg_bus_block_data {
+       u8 enabled; /* Indicates if the block is enabled for recording (0/1) */
+       u8 hw_id; /* HW ID associated with the block */
+       u8 line_num; /* Debug line number to select */
+       u8 right_shift; /* Number of units to right-shift the debug data (0-3) */
+       u8 cycle_en; /* 4-bit value: bit i set -> unit i is enabled. */
+       u8 force_valid; /* 4-bit value: bit i set -> unit i is forced valid. */
+       u8 force_frame; /* 4-bit value: bit i set -> unit i frame bit is forced.
+                        */
+       u8 reserved;
+};
+
+/* Debug Bus Clients */
+enum dbg_bus_clients {
+       DBG_BUS_CLIENT_RBCN,
+       DBG_BUS_CLIENT_RBCP,
+       DBG_BUS_CLIENT_RBCR,
+       DBG_BUS_CLIENT_RBCT,
+       DBG_BUS_CLIENT_RBCU,
+       DBG_BUS_CLIENT_RBCF,
+       DBG_BUS_CLIENT_RBCX,
+       DBG_BUS_CLIENT_RBCS,
+       DBG_BUS_CLIENT_RBCH,
+       DBG_BUS_CLIENT_RBCZ,
+       DBG_BUS_CLIENT_OTHER_ENGINE,
+       DBG_BUS_CLIENT_TIMESTAMP,
+       DBG_BUS_CLIENT_CPU,
+       DBG_BUS_CLIENT_RBCY,
+       DBG_BUS_CLIENT_RBCQ,
+       DBG_BUS_CLIENT_RBCM,
+       DBG_BUS_CLIENT_RBCB,
+       DBG_BUS_CLIENT_RBCW,
+       DBG_BUS_CLIENT_RBCV,
+       MAX_DBG_BUS_CLIENTS
+};
+
+enum dbg_bus_constraint_ops {
+       DBG_BUS_CONSTRAINT_OP_EQ,
+       DBG_BUS_CONSTRAINT_OP_NE,
+       DBG_BUS_CONSTRAINT_OP_LT,
+       DBG_BUS_CONSTRAINT_OP_LTC,
+       DBG_BUS_CONSTRAINT_OP_LE,
+       DBG_BUS_CONSTRAINT_OP_LEC,
+       DBG_BUS_CONSTRAINT_OP_GT,
+       DBG_BUS_CONSTRAINT_OP_GTC,
+       DBG_BUS_CONSTRAINT_OP_GE,
+       DBG_BUS_CONSTRAINT_OP_GEC,
+       MAX_DBG_BUS_CONSTRAINT_OPS
+};
+
+/* Debug Bus memory address */
+struct dbg_bus_mem_addr {
+       __le32 lo;
+       __le32 hi;
+};
+
+/* Debug Bus PCI buffer data */
+struct dbg_bus_pci_buf_data {
+       struct dbg_bus_mem_addr phys_addr; /* PCI buffer physical address */
+       struct dbg_bus_mem_addr virt_addr; /* PCI buffer virtual address */
+       __le32 size; /* PCI buffer size in bytes */
+};
+
+/* Debug Bus Storm EID range filter params */
+struct dbg_bus_storm_eid_range_params {
+       u8 min; /* Minimal event ID to filter on */
+       u8 max; /* Maximal event ID to filter on */
+};
+
+/* Debug Bus Storm EID mask filter params */
+struct dbg_bus_storm_eid_mask_params {
+       u8 val; /* Event ID value */
+       u8 mask; /* Event ID mask. 1s in the mask = don't-care bits. */
+};
+
+/* Debug Bus Storm EID filter params */
+union dbg_bus_storm_eid_params {
+       struct dbg_bus_storm_eid_range_params range;
+       struct dbg_bus_storm_eid_mask_params mask;
+};
+
+/* Debug Bus Storm data */
+struct dbg_bus_storm_data {
+       u8 fast_enabled;
+       u8 fast_mode;
+       u8 slow_enabled;
+       u8 slow_mode;
+       u8 hw_id;
+       u8 eid_filter_en;
+       u8 eid_range_not_mask;
+       u8 cid_filter_en;
+       union dbg_bus_storm_eid_params eid_filter_params;
+       __le16 reserved;
+       __le32 cid;
+};
+
+/* Debug Bus data */
+struct dbg_bus_data {
+       __le32 app_version; /* The tools version number of the application */
+       u8 state; /* The current debug bus state */
+       u8 hw_dwords; /* HW dwords per cycle */
+       u8 next_hw_id; /* Next HW ID to be associated with an input */
+       u8 num_enabled_blocks; /* Number of blocks enabled for recording */
+       u8 num_enabled_storms; /* Number of Storms enabled for recording */
+       u8 target; /* Output target */
+       u8 next_trigger_state; /* ID of next trigger state to be added */
+       u8 next_constraint_id; /* ID of next filter/trigger constraint to be
+                               * added.
+                               */
+       u8 one_shot_en; /* Indicates if one-shot mode is enabled (0/1) */
+       u8 grc_input_en; /* Indicates if GRC recording is enabled (0/1) */
+       u8 timestamp_input_en; /* Indicates if timestamp recording is enabled
+                               * (0/1).
+                               */
+       u8 filter_en; /* Indicates if the recording filter is enabled (0/1) */
+       u8 trigger_en; /* Indicates if the recording trigger is enabled (0/1) */
+       u8 adding_filter; /* If true, the next added constraint belongs to the
+                          * filter. Otherwise, it belongs to the last added
+                          * trigger state. Valid only if either filter or
+                          * triggers are enabled.
+                          */
+       u8 filter_pre_trigger; /* Indicates if the recording filter should be
+                               * applied before the trigger. Valid only if both
+                               * filter and trigger are enabled (0/1).
+                               */
+       u8 filter_post_trigger; /* Indicates if the recording filter should be
+                                * applied after the trigger. Valid only if both
+                                * filter and trigger are enabled (0/1).
+                                */
+       u8 unify_inputs; /* If true, all inputs are associated with HW ID 0.
+                         * Otherwise, each input is assigned a different HW ID
+                         * (0/1).
+                         */
+       u8 rcv_from_other_engine; /* Indicates if the other engine sends its NW
+                                  * recording to this engine (0/1).
+                                  */
+       struct dbg_bus_pci_buf_data pci_buf; /* Debug Bus PCI buffer data. Valid
+                                             * only when the target is
+                                             * DBG_BUS_TARGET_ID_PCI.
+                                             */
+       __le16 reserved;
+       struct dbg_bus_block_data blocks[88];/* Debug Bus data for each block */
+       struct dbg_bus_storm_data storms[6]; /* Debug Bus data for each Storm */
+};
+
+enum dbg_bus_filter_types {
+       DBG_BUS_FILTER_TYPE_OFF,
+       DBG_BUS_FILTER_TYPE_PRE,
+       DBG_BUS_FILTER_TYPE_POST,
+       DBG_BUS_FILTER_TYPE_ON,
+       MAX_DBG_BUS_FILTER_TYPES
+};
+
+/* Debug bus frame modes */
+enum dbg_bus_frame_modes {
+       DBG_BUS_FRAME_MODE_0HW_4ST = 0, /* 0 HW dwords, 4 Storm dwords */
+       DBG_BUS_FRAME_MODE_4HW_0ST = 3, /* 4 HW dwords, 0 Storm dwords */
+       DBG_BUS_FRAME_MODE_8HW_0ST = 4, /* 8 HW dwords, 0 Storm dwords */
+       MAX_DBG_BUS_FRAME_MODES
+};
+
+enum dbg_bus_input_types {
+       DBG_BUS_INPUT_TYPE_STORM,
+       DBG_BUS_INPUT_TYPE_BLOCK,
+       MAX_DBG_BUS_INPUT_TYPES
+};
+
+enum dbg_bus_other_engine_modes {
+       DBG_BUS_OTHER_ENGINE_MODE_NONE,
+       DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX,
+       DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_RX,
+       DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX,
+       DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX,
+       MAX_DBG_BUS_OTHER_ENGINE_MODES
+};
+
+enum dbg_bus_post_trigger_types {
+       DBG_BUS_POST_TRIGGER_RECORD,
+       DBG_BUS_POST_TRIGGER_DROP,
+       MAX_DBG_BUS_POST_TRIGGER_TYPES
+};
+
+enum dbg_bus_pre_trigger_types {
+       DBG_BUS_PRE_TRIGGER_START_FROM_ZERO,
+       DBG_BUS_PRE_TRIGGER_NUM_CHUNKS,
+       DBG_BUS_PRE_TRIGGER_DROP,
+       MAX_DBG_BUS_PRE_TRIGGER_TYPES
+};
+
+enum dbg_bus_semi_frame_modes {
+       DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST = 0,
+       DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST = 3,
+       MAX_DBG_BUS_SEMI_FRAME_MODES
+};
+
+/* Debug bus states */
+enum dbg_bus_states {
+       DBG_BUS_STATE_IDLE, /* debug bus idle state (not recording) */
+       DBG_BUS_STATE_READY, /* debug bus is ready for configuration and
+                             * recording.
+                             */
+       DBG_BUS_STATE_RECORDING, /* debug bus is currently recording */
+       DBG_BUS_STATE_STOPPED, /* debug bus recording has stopped */
+       MAX_DBG_BUS_STATES
+};
+
+enum dbg_bus_storm_modes {
+       DBG_BUS_STORM_MODE_PRINTF,
+       DBG_BUS_STORM_MODE_PRAM_ADDR,
+       DBG_BUS_STORM_MODE_DRA_RW,
+       DBG_BUS_STORM_MODE_DRA_W,
+       DBG_BUS_STORM_MODE_LD_ST_ADDR,
+       DBG_BUS_STORM_MODE_DRA_FSM,
+       DBG_BUS_STORM_MODE_RH,
+       DBG_BUS_STORM_MODE_FOC,
+       DBG_BUS_STORM_MODE_EXT_STORE,
+       MAX_DBG_BUS_STORM_MODES
+};
+
+/* Debug bus target IDs */
+enum dbg_bus_targets {
+       /* records debug bus to DBG block internal buffer */
+       DBG_BUS_TARGET_ID_INT_BUF,
+       /* records debug bus to the NW */
+       DBG_BUS_TARGET_ID_NIG,
+       /* records debug bus to a PCI buffer */
+       DBG_BUS_TARGET_ID_PCI,
+       MAX_DBG_BUS_TARGETS
+};
+
+/* GRC Dump data */
+struct dbg_grc_data {
+       u8 params_initialized;
+       u8 reserved1;
+       __le16 reserved2;
+       __le32 param_val[48];
+};
+
+/* Debug GRC params */
+enum dbg_grc_params {
+       DBG_GRC_PARAM_DUMP_TSTORM, /* dump Tstorm memories (0/1) */
+       DBG_GRC_PARAM_DUMP_MSTORM, /* dump Mstorm memories (0/1) */
+       DBG_GRC_PARAM_DUMP_USTORM, /* dump Ustorm memories (0/1) */
+       DBG_GRC_PARAM_DUMP_XSTORM, /* dump Xstorm memories (0/1) */
+       DBG_GRC_PARAM_DUMP_YSTORM, /* dump Ystorm memories (0/1) */
+       DBG_GRC_PARAM_DUMP_PSTORM, /* dump Pstorm memories (0/1) */
+       DBG_GRC_PARAM_DUMP_REGS, /* dump non-memory registers (0/1) */
+       DBG_GRC_PARAM_DUMP_RAM, /* dump Storm internal RAMs (0/1) */
+       DBG_GRC_PARAM_DUMP_PBUF, /* dump Storm passive buffer (0/1) */
+       DBG_GRC_PARAM_DUMP_IOR, /* dump Storm IORs (0/1) */
+       DBG_GRC_PARAM_DUMP_VFC, /* dump VFC memories (0/1) */
+       DBG_GRC_PARAM_DUMP_CM_CTX, /* dump CM contexts (0/1) */
+       DBG_GRC_PARAM_DUMP_PXP, /* dump PXP memories (0/1) */
+       DBG_GRC_PARAM_DUMP_RSS, /* dump RSS memories (0/1) */
+       DBG_GRC_PARAM_DUMP_CAU, /* dump CAU memories (0/1) */
+       DBG_GRC_PARAM_DUMP_QM, /* dump QM memories (0/1) */
+       DBG_GRC_PARAM_DUMP_MCP, /* dump MCP memories (0/1) */
+       DBG_GRC_PARAM_RESERVED, /* reserved */
+       DBG_GRC_PARAM_DUMP_CFC, /* dump CFC memories (0/1) */
+       DBG_GRC_PARAM_DUMP_IGU, /* dump IGU memories (0/1) */
+       DBG_GRC_PARAM_DUMP_BRB, /* dump BRB memories (0/1) */
+       DBG_GRC_PARAM_DUMP_BTB, /* dump BTB memories (0/1) */
+       DBG_GRC_PARAM_DUMP_BMB, /* dump BMB memories (0/1) */
+       DBG_GRC_PARAM_DUMP_NIG, /* dump NIG memories (0/1) */
+       DBG_GRC_PARAM_DUMP_MULD, /* dump MULD memories (0/1) */
+       DBG_GRC_PARAM_DUMP_PRS, /* dump PRS memories (0/1) */
+       DBG_GRC_PARAM_DUMP_DMAE, /* dump DMAE memories (0/1) */
+       DBG_GRC_PARAM_DUMP_TM, /* dump TM (timers) memories (0/1) */
+       DBG_GRC_PARAM_DUMP_SDM, /* dump SDM memories (0/1) */
+       DBG_GRC_PARAM_DUMP_DIF, /* dump DIF memories (0/1) */
+       DBG_GRC_PARAM_DUMP_STATIC, /* dump static debug data (0/1) */
+       DBG_GRC_PARAM_UNSTALL, /* un-stall Storms after dump (0/1) */
+       DBG_GRC_PARAM_NUM_LCIDS, /* number of LCIDs (0..320) */
+       DBG_GRC_PARAM_NUM_LTIDS, /* number of LTIDs (0..320) */
+       /* preset: exclude all memories from dump (1 only) */
+       DBG_GRC_PARAM_EXCLUDE_ALL,
+       /* preset: include memories for crash dump (1 only) */
+       DBG_GRC_PARAM_CRASH,
+       /* perform dump only if MFW is responding (0/1) */
+       DBG_GRC_PARAM_PARITY_SAFE,
+       DBG_GRC_PARAM_DUMP_CM, /* dump CM memories (0/1) */
+       DBG_GRC_PARAM_DUMP_PHY, /* dump PHY memories (0/1) */
+       DBG_GRC_PARAM_NO_MCP,
+       DBG_GRC_PARAM_NO_FW_VER,
+       MAX_DBG_GRC_PARAMS
+};
+
+/* Debug reset registers */
+enum dbg_reset_regs {
+       DBG_RESET_REG_MISCS_PL_UA,
+       DBG_RESET_REG_MISCS_PL_HV,
+       DBG_RESET_REG_MISCS_PL_HV_2,
+       DBG_RESET_REG_MISC_PL_UA,
+       DBG_RESET_REG_MISC_PL_HV,
+       DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
+       DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
+       DBG_RESET_REG_MISC_PL_PDA_VAUX,
+       MAX_DBG_RESET_REGS
+};
+
 /* Debug status codes */
 enum dbg_status {
        DBG_STATUS_OK,
@@ -1579,9 +2347,45 @@ enum dbg_status {
        DBG_STATUS_REG_FIFO_BAD_DATA,
        DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA,
        DBG_STATUS_DBG_ARRAY_NOT_SET,
+       DBG_STATUS_MULTI_BLOCKS_WITH_FILTER,
        MAX_DBG_STATUS
 };
 
+/* Debug Storms IDs */
+enum dbg_storms {
+       DBG_TSTORM_ID,
+       DBG_MSTORM_ID,
+       DBG_USTORM_ID,
+       DBG_XSTORM_ID,
+       DBG_YSTORM_ID,
+       DBG_PSTORM_ID,
+       MAX_DBG_STORMS
+};
+
+/* Idle Check data */
+struct idle_chk_data {
+       __le32 buf_size; /* Idle check buffer size in dwords */
+       u8 buf_size_set; /* Indicates if the idle check buffer size was set
+                         * (0/1).
+                         */
+       u8 reserved1;
+       __le16 reserved2;
+};
+
+/* Debug Tools data (per HW function) */
+struct dbg_tools_data {
+       struct dbg_grc_data grc; /* GRC Dump data */
+       struct dbg_bus_data bus; /* Debug Bus data */
+       struct idle_chk_data idle_chk; /* Idle Check data */
+       u8 mode_enable[40]; /* Indicates if a mode is enabled (0/1) */
+       u8 block_in_reset[88]; /* Indicates if a block is in reset state (0/1).
+                               */
+       u8 chip_id; /* Chip ID (from enum chip_ids) */
+       u8 platform_id; /* Platform ID (from enum platform_ids) */
+       u8 initialized; /* Indicates if the data was initialized */
+       u8 reserved;
+};
+
 /********************************/
 /* HSI Init Functions constants */
 /********************************/
@@ -1589,7 +2393,41 @@ enum dbg_status {
 /* Number of VLAN priorities */
 #define NUM_OF_VLAN_PRIORITIES 8
 
-/* QM per-port init parameters */
+struct init_brb_ram_req {
+       __le32 guranteed_per_tc;
+       __le32 headroom_per_tc;
+       __le32 min_pkt_size;
+       __le32 max_ports_per_engine;
+       u8 num_active_tcs[MAX_NUM_PORTS];
+};
+
+struct init_ets_tc_req {
+       u8 use_sp;
+       u8 use_wfq;
+       __le16 weight;
+};
+
+struct init_ets_req {
+       __le32 mtu;
+       struct init_ets_tc_req tc_req[NUM_OF_TCS];
+};
+
+struct init_nig_lb_rl_req {
+       __le16 lb_mac_rate;
+       __le16 lb_rate;
+       __le32 mtu;
+       __le16 tc_rate[NUM_OF_PHYS_TCS];
+};
+
+struct init_nig_pri_tc_map_entry {
+       u8 tc_id;
+       u8 valid;
+};
+
+struct init_nig_pri_tc_map_req {
+       struct init_nig_pri_tc_map_entry pri[NUM_OF_VLAN_PRIORITIES];
+};
+
 struct init_qm_port_params {
        u8 active;
        u8 active_phys_tcs;
@@ -1619,7 +2457,7 @@ struct init_qm_vport_params {
 
 /* Width of GRC address in bits (addresses are specified in dwords) */
 #define GRC_ADDR_BITS  23
-#define MAX_GRC_ADDR   ((1 << GRC_ADDR_BITS) - 1)
+#define MAX_GRC_ADDR   (BIT(GRC_ADDR_BITS) - 1)
 
 /* indicates an init that should be applied to any phase ID */
 #define ANY_PHASE_ID   0xffff
@@ -1627,15 +2465,50 @@ struct init_qm_vport_params {
 /* Max size in dwords of a zipped array */
 #define MAX_ZIPPED_SIZE        8192
 
+struct fw_asserts_ram_section {
+       __le16 section_ram_line_offset;
+       __le16 section_ram_line_size;
+       u8 list_dword_offset;
+       u8 list_element_dword_size;
+       u8 list_num_elements;
+       u8 list_next_index_dword_offset;
+};
+
+struct fw_ver_num {
+       u8 major; /* Firmware major version number */
+       u8 minor; /* Firmware minor version number */
+       u8 rev; /* Firmware revision version number */
+       u8 eng; /* Firmware engineering version number (for bootleg versions) */
+};
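+
+/* Illustrative sketch, not part of the original patch: given a
+ * struct fw_ver_num num (e.g. taken from struct fw_info below), the usual
+ * "major.minor.rev.eng" version string is built as:
+ *
+ *     char buf[16];
+ *
+ *     snprintf(buf, sizeof(buf), "%d.%d.%d.%d",
+ *              num.major, num.minor, num.rev, num.eng);
+ */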
+
+struct fw_ver_info {
+       __le16 tools_ver; /* Tools version number */
+       u8 image_id; /* FW image ID (e.g. main) */
+       u8 reserved1;
+       struct fw_ver_num num; /* FW version number */
+       __le32 timestamp; /* FW Timestamp in unix time (sec. since 1970) */
+       __le32 reserved2;
+};
+
+struct fw_info {
+       struct fw_ver_info ver;
+       struct fw_asserts_ram_section fw_asserts_section;
+};
+
+struct fw_info_location {
+       __le32 grc_addr;
+       __le32 size;
+};
+
 enum init_modes {
        MODE_RESERVED,
-       MODE_BB_B0,
-       MODE_RESERVED2,
+       MODE_BB,
+       MODE_K2,
        MODE_ASIC,
+       MODE_RESERVED2,
        MODE_RESERVED3,
        MODE_RESERVED4,
        MODE_RESERVED5,
-       MODE_RESERVED6,
        MODE_SF,
        MODE_MF_SD,
        MODE_MF_SI,
@@ -1643,8 +2516,7 @@ enum init_modes {
        MODE_PORTS_PER_ENG_2,
        MODE_PORTS_PER_ENG_4,
        MODE_100G,
-       MODE_40G,
-       MODE_RESERVED7,
+       MODE_RESERVED6,
        MAX_INIT_MODES
 };
 
@@ -1674,11 +2546,11 @@ struct bin_buffer_hdr {
 
 /* binary init buffer types */
 enum bin_init_buffer_type {
-       BIN_BUF_FW_VER_INFO,
+       BIN_BUF_INIT_FW_VER_INFO,
        BIN_BUF_INIT_CMD,
        BIN_BUF_INIT_VAL,
        BIN_BUF_INIT_MODE_TREE,
-       BIN_BUF_IRO,
+       BIN_BUF_INIT_IRO,
        MAX_BIN_INIT_BUFFER_TYPE
 };
 
@@ -1902,8 +2774,283 @@ struct iro {
        __le16 size;
 };
 
+/***************************** Public Functions *******************************/
+/**
+ * @brief qed_dbg_set_bin_ptr - Sets a pointer to the binary data with debug
+ *     arrays.
+ *
+ * @param bin_ptr - a pointer to the binary data with debug arrays.
+ */
+enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr);
+/**
+ * @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their
+ *     default value.
+ *
+ * @param p_hwfn               - HW device data
+ */
+void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn);
+/**
+ * @brief qed_dbg_grc_get_dump_buf_size - Returns the required buffer size for
+ *     GRC Dump.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param buf_size - OUT: required buffer size (in dwords) for the GRC Dump
+ *     data.
+ *
+ * @return error if one of the following holds:
+ *     - the version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+                                             struct qed_ptt *p_ptt,
+                                             u32 *buf_size);
+/**
+ * @brief qed_dbg_grc_dump - Dumps GRC data into the specified buffer.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param dump_buf - Pointer to write the collected GRC data into.
+ * @param buf_size_in_dwords - Size of the specified buffer in dwords.
+ * @param num_dumped_dwords - OUT: number of dumped dwords.
+ *
+ * @return error if one of the following holds:
+ *     - the version wasn't set
+ *     - the specified dump buffer is too small
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
+                                struct qed_ptt *p_ptt,
+                                u32 *dump_buf,
+                                u32 buf_size_in_dwords,
+                                u32 *num_dumped_dwords);
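+
+/* Illustrative sketch, not part of the original patch: the dump entry points
+ * in this file all follow the same two-step pattern - query the required
+ * size, allocate, then dump. For GRC, with p_hwfn/p_ptt already acquired and
+ * use_grc_data() a hypothetical consumer:
+ *
+ *     u32 size, dumped;
+ *     u32 *buf;
+ *
+ *     if (qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, &size) !=
+ *         DBG_STATUS_OK)
+ *             return;
+ *     buf = vzalloc(size * sizeof(u32));
+ *     if (buf && qed_dbg_grc_dump(p_hwfn, p_ptt, buf, size, &dumped) ==
+ *         DBG_STATUS_OK)
+ *             use_grc_data(buf, dumped);
+ *     vfree(buf);
+ */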
+/**
+ * @brief qed_dbg_idle_chk_get_dump_buf_size - Returns the required buffer size
+ *     for idle check results.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param buf_size - OUT: required buffer size (in dwords) for the idle check
+ *     data.
+ *
+ * @return error if one of the following holds:
+ *     - the version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+                                                  struct qed_ptt *p_ptt,
+                                                  u32 *buf_size);
+/**
+ * @brief qed_dbg_idle_chk_dump - Performs idle check and writes the results
+ *     into the specified buffer.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param dump_buf - Pointer to write the idle check data into.
+ * @param buf_size_in_dwords - Size of the specified buffer in dwords.
+ * @param num_dumped_dwords - OUT: number of dumped dwords.
+ *
+ * @return error if one of the following holds:
+ *     - the version wasn't set
+ *     - the specified buffer is too small
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
+                                     struct qed_ptt *p_ptt,
+                                     u32 *dump_buf,
+                                     u32 buf_size_in_dwords,
+                                     u32 *num_dumped_dwords);
+/**
+ * @brief qed_dbg_mcp_trace_get_dump_buf_size - Returns the required buffer size
+ *     for mcp trace results.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param buf_size - OUT: required buffer size (in dwords) for mcp trace data.
+ *
+ * @return error if one of the following holds:
+ *     - the version wasn't set
+ *     - the trace data in MCP scratchpad contains an invalid signature
+ *     - the bundle ID in NVRAM is invalid
+ *     - the trace meta data cannot be found (in NVRAM or image file)
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+                                                   struct qed_ptt *p_ptt,
+                                                   u32 *buf_size);
+/**
+ * @brief qed_dbg_mcp_trace_dump - Performs mcp trace and writes the results
+ *     into the specified buffer.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param dump_buf - Pointer to write the mcp trace data into.
+ * @param buf_size_in_dwords - Size of the specified buffer in dwords.
+ * @param num_dumped_dwords - OUT: number of dumped dwords.
+ *
+ * @return error if one of the following holds:
+ *     - the version wasn't set
+ *     - the specified buffer is too small
+ *     - the trace data in MCP scratchpad contains an invalid signature
+ *     - the bundle ID in NVRAM is invalid
+ *     - the trace meta data cannot be found (in NVRAM or image file)
+ *     - the trace meta data cannot be read (from NVRAM or image file)
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
+                                      struct qed_ptt *p_ptt,
+                                      u32 *dump_buf,
+                                      u32 buf_size_in_dwords,
+                                      u32 *num_dumped_dwords);
+/**
+ * @brief qed_dbg_reg_fifo_get_dump_buf_size - Returns the required buffer size
+ *     for reg fifo results.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param buf_size - OUT: required buffer size (in dwords) for reg fifo data.
+ *
+ * @return error if one of the following holds:
+ *     - the version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+                                                  struct qed_ptt *p_ptt,
+                                                  u32 *buf_size);
+/**
+ * @brief qed_dbg_reg_fifo_dump - Reads the reg fifo and writes the results into
+ *     the specified buffer.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param dump_buf - Pointer to write the reg fifo data into.
+ * @param buf_size_in_dwords - Size of the specified buffer in dwords.
+ * @param num_dumped_dwords - OUT: number of dumped dwords.
+ *
+ * @return error if one of the following holds:
+ *     - the version wasn't set
+ *     - the specified buffer is too small
+ *     - DMAE transaction failed
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
+                                     struct qed_ptt *p_ptt,
+                                     u32 *dump_buf,
+                                     u32 buf_size_in_dwords,
+                                     u32 *num_dumped_dwords);
+/**
+ * @brief qed_dbg_igu_fifo_get_dump_buf_size - Returns the required buffer size
+ *     for the IGU fifo results.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param buf_size - OUT: required buffer size (in dwords) for the IGU fifo
+ *     data.
+ *
+ * @return error if one of the following holds:
+ *     - the version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+                                                  struct qed_ptt *p_ptt,
+                                                  u32 *buf_size);
+/**
+ * @brief qed_dbg_igu_fifo_dump - Reads the IGU fifo and writes the results into
+ *     the specified buffer.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param dump_buf - Pointer to write the IGU fifo data into.
+ * @param buf_size_in_dwords - Size of the specified buffer in dwords.
+ * @param num_dumped_dwords - OUT: number of dumped dwords.
+ *
+ * @return error if one of the following holds:
+ *     - the version wasn't set
+ *     - the specified buffer is too small
+ *     - DMAE transaction failed
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
+                                     struct qed_ptt *p_ptt,
+                                     u32 *dump_buf,
+                                     u32 buf_size_in_dwords,
+                                     u32 *num_dumped_dwords);
+/**
+ * @brief qed_dbg_protection_override_get_dump_buf_size - Returns the required
+ *     buffer size for protection override window results.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param buf_size - OUT: required buffer size (in dwords) for protection
+ *     override data.
+ *
+ * @return error if one of the following holds:
+ *     - the version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status
+qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+                                             struct qed_ptt *p_ptt,
+                                             u32 *buf_size);
+/**
+ * @brief qed_dbg_protection_override_dump - Reads protection override window
+ *     entries and writes the results into the specified buffer.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param dump_buf - Pointer to write the protection override data into.
+ * @param buf_size_in_dwords - Size of the specified buffer in dwords.
+ * @param num_dumped_dwords - OUT: number of dumped dwords.
+ *
+ * @return error if one of the following holds:
+ *     - the version wasn't set
+ *     - the specified buffer is too small
+ *     - DMAE transaction failed
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
+                                                struct qed_ptt *p_ptt,
+                                                u32 *dump_buf,
+                                                u32 buf_size_in_dwords,
+                                                u32 *num_dumped_dwords);
+/**
+ * @brief qed_dbg_fw_asserts_get_dump_buf_size - Returns the required buffer
+ *     size for FW Asserts results.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param buf_size - OUT: required buffer size (in dwords) for FW Asserts data.
+ *
+ * @return error if one of the following holds:
+ *     - the version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+                                                    struct qed_ptt *p_ptt,
+                                                    u32 *buf_size);
 /**
- * @brief qed_dbg_print_attn - Prints attention registers values in the specified results struct.
+ * @brief qed_dbg_fw_asserts_dump - Reads the FW Asserts and writes the results
+ *     into the specified buffer.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param dump_buf - Pointer to write the FW Asserts data into.
+ * @param buf_size_in_dwords - Size of the specified buffer in dwords.
+ * @param num_dumped_dwords - OUT: number of dumped dwords.
+ *
+ * @return error if one of the following holds:
+ *     - the version wasn't set
+ *     - the specified buffer is too small
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
+                                       struct qed_ptt *p_ptt,
+                                       u32 *dump_buf,
+                                       u32 buf_size_in_dwords,
+                                       u32 *num_dumped_dwords);
+/**
+ * @brief qed_dbg_print_attn - Prints attention registers values in the
+ *     specified results struct.
  *
  * @param p_hwfn
  * @param results - Pointer to the attention read results
@@ -1915,47 +3062,241 @@ struct iro {
 enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn,
                                   struct dbg_attn_block_result *results);
 
+/******************************** Constants **********************************/
+
 #define MAX_NAME_LEN   16
 
+/***************************** Public Functions *******************************/
+/**
+ * @brief qed_dbg_user_set_bin_ptr - Sets a pointer to the binary data with
+ *     debug arrays.
+ *
+ * @param bin_ptr - a pointer to the binary data with debug arrays.
+ */
+enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr);
+/**
+ * @brief qed_dbg_get_status_str - Returns a string for the specified status.
+ *
+ * @param status - a debug status code.
+ *
+ * @return a string for the specified status
+ */
+const char *qed_dbg_get_status_str(enum dbg_status status);
+/**
+ * @brief qed_get_idle_chk_results_buf_size - Returns the required buffer size
+ *     for idle check results (in bytes).
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - idle check dump buffer.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
+ *     results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
+                                                 u32 *dump_buf,
+                                                 u32  num_dumped_dwords,
+                                                 u32 *results_buf_size);
+/**
+ * @brief qed_print_idle_chk_results - Prints idle check results
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - idle check dump buffer.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf - buffer for printing the idle check results.
+ * @param num_errors - OUT: number of errors found in idle check.
+ * @param num_warnings - OUT: number of warnings found in idle check.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
+                                          u32 *dump_buf,
+                                          u32 num_dumped_dwords,
+                                          char *results_buf,
+                                          u32 *num_errors,
+                                          u32 *num_warnings);
+/**
+ * @brief qed_get_mcp_trace_results_buf_size - Returns the required buffer size
+ *     for MCP Trace results (in bytes).
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - MCP Trace dump buffer.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
+ *     results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
+                                                  u32 *dump_buf,
+                                                  u32 num_dumped_dwords,
+                                                  u32 *results_buf_size);
+/**
+ * @brief qed_print_mcp_trace_results - Prints MCP Trace results
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - mcp trace dump buffer, starting from the header.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf - buffer for printing the mcp trace results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
+                                           u32 *dump_buf,
+                                           u32 num_dumped_dwords,
+                                           char *results_buf);
+/**
+ * @brief qed_get_reg_fifo_results_buf_size - Returns the required buffer size
+ *     for reg_fifo results (in bytes).
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - reg fifo dump buffer.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
+ *     results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
+                                                 u32 *dump_buf,
+                                                 u32 num_dumped_dwords,
+                                                 u32 *results_buf_size);
+/**
+ * @brief qed_print_reg_fifo_results - Prints reg fifo results
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - reg fifo dump buffer, starting from the header.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf - buffer for printing the reg fifo results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
+                                          u32 *dump_buf,
+                                          u32 num_dumped_dwords,
+                                          char *results_buf);
+/**
+ * @brief qed_get_igu_fifo_results_buf_size - Returns the required buffer size
+ *     for igu_fifo results (in bytes).
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - IGU fifo dump buffer.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
+ *     results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
+                                                 u32 *dump_buf,
+                                                 u32 num_dumped_dwords,
+                                                 u32 *results_buf_size);
+/**
+ * @brief qed_print_igu_fifo_results - Prints IGU fifo results
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - IGU fifo dump buffer, starting from the header.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf - buffer for printing the IGU fifo results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
+                                          u32 *dump_buf,
+                                          u32 num_dumped_dwords,
+                                          char *results_buf);
+/**
+ * @brief qed_get_protection_override_results_buf_size - Returns the required
+ *     buffer size for protection override results (in bytes).
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - protection override dump buffer.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
+ *     results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status
+qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
+                                            u32 *dump_buf,
+                                            u32 num_dumped_dwords,
+                                            u32 *results_buf_size);
+/**
+ * @brief qed_print_protection_override_results - Prints protection override
+ *     results.
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - protection override dump buffer, starting from the header.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf - buffer for printing the protection override results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
+                                                     u32 *dump_buf,
+                                                     u32 num_dumped_dwords,
+                                                     char *results_buf);
+/**
+ * @brief qed_get_fw_asserts_results_buf_size - Returns the required buffer size
+ *     for FW Asserts results (in bytes).
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - FW Asserts dump buffer.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
+ *     results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
+                                                   u32 *dump_buf,
+                                                   u32 num_dumped_dwords,
+                                                   u32 *results_buf_size);
+/**
+ * @brief qed_print_fw_asserts_results - Prints FW Asserts results
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - FW Asserts dump buffer, starting from the header.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf - buffer for printing the FW Asserts results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
+                                            u32 *dump_buf,
+                                            u32 num_dumped_dwords,
+                                            char *results_buf);
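+
+/* Illustrative sketch, not part of the original patch: each
+ * qed_get_*_results_buf_size()/qed_print_*_results() pair above turns a raw
+ * dump into printable text. For idle check results (error handling elided):
+ *
+ *     u32 text_size, errors, warnings;
+ *     char *text;
+ *
+ *     qed_get_idle_chk_results_buf_size(p_hwfn, dump_buf,
+ *                                       num_dumped_dwords, &text_size);
+ *     text = vzalloc(text_size);
+ *     if (text)
+ *             qed_print_idle_chk_results(p_hwfn, dump_buf,
+ *                                        num_dumped_dwords, text,
+ *                                        &errors, &warnings);
+ */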
 /* Win 2 */
-#define GTT_BAR0_MAP_REG_IGU_CMD \
-       0x00f000UL
+#define GTT_BAR0_MAP_REG_IGU_CMD       0x00f000UL
 
 /* Win 3 */
-#define GTT_BAR0_MAP_REG_TSDM_RAM \
-       0x010000UL
+#define GTT_BAR0_MAP_REG_TSDM_RAM      0x010000UL
 
 /* Win 4 */
-#define GTT_BAR0_MAP_REG_MSDM_RAM \
-       0x011000UL
+#define GTT_BAR0_MAP_REG_MSDM_RAM      0x011000UL
 
 /* Win 5 */
-#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 \
-       0x012000UL
+#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 0x012000UL
 
 /* Win 6 */
-#define GTT_BAR0_MAP_REG_USDM_RAM \
-       0x013000UL
+#define GTT_BAR0_MAP_REG_USDM_RAM      0x013000UL
 
 /* Win 7 */
-#define GTT_BAR0_MAP_REG_USDM_RAM_1024 \
-       0x014000UL
+#define GTT_BAR0_MAP_REG_USDM_RAM_1024 0x014000UL
 
 /* Win 8 */
-#define GTT_BAR0_MAP_REG_USDM_RAM_2048 \
-       0x015000UL
+#define GTT_BAR0_MAP_REG_USDM_RAM_2048 0x015000UL
 
 /* Win 9 */
-#define GTT_BAR0_MAP_REG_XSDM_RAM \
-       0x016000UL
+#define GTT_BAR0_MAP_REG_XSDM_RAM      0x016000UL
 
 /* Win 10 */
-#define GTT_BAR0_MAP_REG_YSDM_RAM \
-       0x017000UL
+#define GTT_BAR0_MAP_REG_YSDM_RAM      0x017000UL
 
 /* Win 11 */
-#define GTT_BAR0_MAP_REG_PSDM_RAM \
-       0x018000UL
+#define GTT_BAR0_MAP_REG_PSDM_RAM      0x018000UL
 
 /**
  * @brief qed_qm_pf_mem_size - prepare QM ILT sizes
@@ -2003,7 +3344,7 @@ struct qed_qm_pf_rt_init_params {
        u16 num_vf_pqs;
        u8 start_vport;
        u8 num_vports;
-       u8 pf_wfq;
+       u16 pf_wfq;
        u32 pf_rl;
        struct init_qm_pq_params *pq_params;
        struct init_qm_vport_params *vport_params;
@@ -2132,12 +3473,20 @@ void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
 void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
                           struct qed_ptt *p_ptt,
                           bool eth_geneve_enable, bool ip_geneve_enable);
+void qed_set_rfs_mode_disable(struct qed_hwfn *p_hwfn,
+                             struct qed_ptt *p_ptt, u16 pf_id);
+void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+                            u16 pf_id, bool tcp, bool udp,
+                            bool ipv4, bool ipv6);
 
 #define        YSTORM_FLOW_CONTROL_MODE_OFFSET                 (IRO[0].base)
 #define        YSTORM_FLOW_CONTROL_MODE_SIZE                   (IRO[0].size)
 #define        TSTORM_PORT_STAT_OFFSET(port_id) \
        (IRO[1].base + ((port_id) * IRO[1].m1))
 #define        TSTORM_PORT_STAT_SIZE                           (IRO[1].size)
+#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \
+       (IRO[2].base + ((port_id) * IRO[2].m1))
+#define TSTORM_LL2_PORT_STAT_SIZE                      (IRO[2].size)
 #define        USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \
        (IRO[3].base + ((vf_id) * IRO[3].m1))
 #define        USTORM_VF_PF_CHANNEL_READY_SIZE                 (IRO[3].size)
@@ -2153,44 +3502,96 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
 #define        USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) \
        (IRO[7].base + ((queue_zone_id) * IRO[7].m1))
 #define        USTORM_COMMON_QUEUE_CONS_SIZE                   (IRO[7].size)
+#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) \
+       (IRO[14].base + ((core_rx_queue_id) * IRO[14].m1))
+#define TSTORM_LL2_RX_PRODS_SIZE                       (IRO[14].size)
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
+       (IRO[15].base + ((core_rx_queue_id) * IRO[15].m1))
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE            (IRO[15].size)
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
+       (IRO[16].base + ((core_rx_queue_id) * IRO[16].m1))
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE            (IRO[16].size)
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \
+       (IRO[17].base + ((core_tx_stats_id) * IRO[17].m1))
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE            (IRO[17].size)
 #define        MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
        (IRO[18].base + ((stat_counter_id) * IRO[18].m1))
 #define        MSTORM_QUEUE_STAT_SIZE                          (IRO[18].size)
 #define        MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \
        (IRO[19].base + ((queue_id) * IRO[19].m1))
 #define        MSTORM_ETH_PF_PRODS_SIZE                        (IRO[19].size)
-#define        MSTORM_TPA_TIMEOUT_US_OFFSET                    (IRO[20].base)
-#define        MSTORM_TPA_TIMEOUT_US_SIZE                      (IRO[20].size)
+#define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) \
+       (IRO[20].base + ((vf_id) * IRO[20].m1) + ((vf_queue_id) * IRO[20].m2))
+#define MSTORM_ETH_VF_PRODS_SIZE                       (IRO[20].size)
+#define        MSTORM_TPA_TIMEOUT_US_OFFSET                    (IRO[21].base)
+#define        MSTORM_TPA_TIMEOUT_US_SIZE                      (IRO[21].size)
 #define        MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
-       (IRO[21].base + ((pf_id) * IRO[21].m1))
-#define        MSTORM_ETH_PF_STAT_SIZE                         (IRO[21].size)
+       (IRO[22].base + ((pf_id) * IRO[22].m1))
+#define        MSTORM_ETH_PF_STAT_SIZE                         (IRO[22].size)
 #define        USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
-       (IRO[22].base + ((stat_counter_id) * IRO[22].m1))
-#define        USTORM_QUEUE_STAT_SIZE                          (IRO[22].size)
+       (IRO[23].base + ((stat_counter_id) * IRO[23].m1))
+#define        USTORM_QUEUE_STAT_SIZE                          (IRO[23].size)
 #define        USTORM_ETH_PF_STAT_OFFSET(pf_id) \
-       (IRO[23].base + ((pf_id) * IRO[23].m1))
-#define        USTORM_ETH_PF_STAT_SIZE                         (IRO[23].size)
+       (IRO[24].base + ((pf_id) * IRO[24].m1))
+#define        USTORM_ETH_PF_STAT_SIZE                         (IRO[24].size)
 #define        PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
-       (IRO[24].base + ((stat_counter_id) * IRO[24].m1))
-#define        PSTORM_QUEUE_STAT_SIZE                          (IRO[24].size)
+       (IRO[25].base + ((stat_counter_id) * IRO[25].m1))
+#define        PSTORM_QUEUE_STAT_SIZE                          (IRO[25].size)
 #define        PSTORM_ETH_PF_STAT_OFFSET(pf_id) \
-       (IRO[25].base + ((pf_id) * IRO[25].m1))
-#define        PSTORM_ETH_PF_STAT_SIZE                         (IRO[25].size)
+       (IRO[26].base + ((pf_id) * IRO[26].m1))
+#define        PSTORM_ETH_PF_STAT_SIZE                         (IRO[26].size)
 #define        PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethtype) \
-       (IRO[26].base + ((ethtype) * IRO[26].m1))
-#define        PSTORM_CTL_FRAME_ETHTYPE_SIZE                   (IRO[26].size)
-#define        TSTORM_ETH_PRS_INPUT_OFFSET                     (IRO[27].base)
-#define        TSTORM_ETH_PRS_INPUT_SIZE                       (IRO[27].size)
+       (IRO[27].base + ((ethtype) * IRO[27].m1))
+#define        PSTORM_CTL_FRAME_ETHTYPE_SIZE                   (IRO[27].size)
+#define        TSTORM_ETH_PRS_INPUT_OFFSET                     (IRO[28].base)
+#define        TSTORM_ETH_PRS_INPUT_SIZE                       (IRO[28].size)
 #define        ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
-       (IRO[28].base + ((pf_id) * IRO[28].m1))
-#define        ETH_RX_RATE_LIMIT_SIZE                          (IRO[28].size)
+       (IRO[29].base + ((pf_id) * IRO[29].m1))
+#define        ETH_RX_RATE_LIMIT_SIZE                          (IRO[29].size)
 #define        XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
-       (IRO[29].base + ((queue_id) * IRO[29].m1))
-#define        XSTORM_ETH_QUEUE_ZONE_SIZE                      (IRO[29].size)
-
-static const struct iro iro_arr[46] = {
+       (IRO[30].base + ((queue_id) * IRO[30].m1))
+#define        XSTORM_ETH_QUEUE_ZONE_SIZE                      (IRO[30].size)
+#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \
+       (IRO[34].base + ((cmdq_queue_id) * IRO[34].m1))
+#define TSTORM_SCSI_CMDQ_CONS_SIZE                             (IRO[34].size)
+#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
+       (IRO[35].base + ((func_id) * IRO[35].m1) + ((bdq_id) * IRO[35].m2))
+#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE                          (IRO[35].size)
+#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
+       (IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2))
+#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE                          (IRO[36].size)
+#define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
+       (IRO[37].base + ((pf_id) * IRO[37].m1))
+#define TSTORM_ISCSI_RX_STATS_SIZE                             (IRO[37].size)
+#define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
+       (IRO[38].base + ((pf_id) * IRO[38].m1))
+#define MSTORM_ISCSI_RX_STATS_SIZE                             (IRO[38].size)
+#define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
+       (IRO[39].base + ((pf_id) * IRO[39].m1))
+#define USTORM_ISCSI_RX_STATS_SIZE                             (IRO[39].size)
+#define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
+       (IRO[40].base + ((pf_id) * IRO[40].m1))
+#define XSTORM_ISCSI_TX_STATS_SIZE                             (IRO[40].size)
+#define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
+       (IRO[41].base + ((pf_id) * IRO[41].m1))
+#define YSTORM_ISCSI_TX_STATS_SIZE                             (IRO[41].size)
+#define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
+       (IRO[42].base + ((pf_id) * IRO[42].m1))
+#define PSTORM_ISCSI_TX_STATS_SIZE                             (IRO[42].size)
+#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
+       (IRO[45].base + ((rdma_stat_counter_id) * IRO[45].m1))
+#define PSTORM_RDMA_QUEUE_STAT_SIZE                            (IRO[45].size)
+#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
+       (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1))
+#define TSTORM_RDMA_QUEUE_STAT_SIZE                            (IRO[46].size)
+#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \
+       (IRO[43].base + ((pf_id) * IRO[43].m1))
+#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \
+       (IRO[44].base + ((pf_id) * IRO[44].m1))
+
+static const struct iro iro_arr[47] = {
        {0x0, 0x0, 0x0, 0x0, 0x8},
-       {0x4cb0, 0x78, 0x0, 0x0, 0x78},
+       {0x4cb0, 0x80, 0x0, 0x0, 0x80},
        {0x6318, 0x20, 0x0, 0x0, 0x20},
        {0xb00, 0x8, 0x0, 0x0, 0x4},
        {0xa80, 0x8, 0x0, 0x0, 0x4},
@@ -2201,20 +3602,21 @@ static const struct iro iro_arr[46] = {
        {0x3df0, 0x0, 0x0, 0x0, 0x78},
        {0x29b0, 0x0, 0x0, 0x0, 0x78},
        {0x4c38, 0x0, 0x0, 0x0, 0x78},
-       {0x4a48, 0x0, 0x0, 0x0, 0x78},
+       {0x4990, 0x0, 0x0, 0x0, 0x78},
        {0x7e48, 0x0, 0x0, 0x0, 0x78},
        {0xa28, 0x8, 0x0, 0x0, 0x8},
        {0x60f8, 0x10, 0x0, 0x0, 0x10},
        {0xb820, 0x30, 0x0, 0x0, 0x30},
        {0x95b8, 0x30, 0x0, 0x0, 0x30},
-       {0x4c18, 0x80, 0x0, 0x0, 0x40},
+       {0x4b60, 0x80, 0x0, 0x0, 0x40},
        {0x1f8, 0x4, 0x0, 0x0, 0x4},
-       {0xc9a8, 0x0, 0x0, 0x0, 0x4},
-       {0x4c58, 0x80, 0x0, 0x0, 0x20},
+       {0x53a0, 0x80, 0x4, 0x0, 0x4},
+       {0xc8f0, 0x0, 0x0, 0x0, 0x4},
+       {0x4ba0, 0x80, 0x0, 0x0, 0x20},
        {0x8050, 0x40, 0x0, 0x0, 0x30},
        {0xe770, 0x60, 0x0, 0x0, 0x60},
        {0x2b48, 0x80, 0x0, 0x0, 0x38},
-       {0xdf88, 0x78, 0x0, 0x0, 0x78},
+       {0xf188, 0x78, 0x0, 0x0, 0x78},
        {0x1f8, 0x4, 0x0, 0x0, 0x4},
        {0xacf0, 0x0, 0x0, 0x0, 0xf0},
        {0xade0, 0x8, 0x0, 0x0, 0x8},
@@ -2226,455 +3628,457 @@ static const struct iro iro_arr[46] = {
        {0x200, 0x10, 0x8, 0x0, 0x8},
        {0xb78, 0x10, 0x8, 0x0, 0x2},
        {0xd888, 0x38, 0x0, 0x0, 0x24},
-       {0x12120, 0x10, 0x0, 0x0, 0x8},
-       {0x11b20, 0x38, 0x0, 0x0, 0x18},
-       {0xa8c0, 0x30, 0x0, 0x0, 0x10},
-       {0x86f8, 0x28, 0x0, 0x0, 0x18},
-       {0xeff8, 0x10, 0x0, 0x0, 0x10},
+       {0x12c38, 0x10, 0x0, 0x0, 0x8},
+       {0x11aa0, 0x38, 0x0, 0x0, 0x18},
+       {0xa8c0, 0x38, 0x0, 0x0, 0x10},
+       {0x86f8, 0x30, 0x0, 0x0, 0x18},
+       {0x101f8, 0x10, 0x0, 0x0, 0x10},
        {0xdd08, 0x48, 0x0, 0x0, 0x38},
-       {0xf460, 0x20, 0x0, 0x0, 0x20},
+       {0x10660, 0x20, 0x0, 0x0, 0x20},
        {0x2b80, 0x80, 0x0, 0x0, 0x10},
-       {0x5000, 0x10, 0x0, 0x0, 0x10},
+       {0x5020, 0x10, 0x0, 0x0, 0x10},
 };
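
Each iro_arr entry holds {base, m1, m2, m3, size}, and the *_OFFSET/*_SIZE macros above do nothing more than index this table. Taking the IRO[1] entry visible in this hunk, now {0x4cb0, 0x80, 0x0, 0x0, 0x80}, the port-stat macro works out as:

	/* TSTORM_PORT_STAT_OFFSET(port_id) = IRO[1].base + port_id * IRO[1].m1,
	 * so for port 2: 0x4cb0 + 2 * 0x80 = 0x4db0 within Tstorm RAM, with
	 * TSTORM_PORT_STAT_SIZE = IRO[1].size = 0x80 bytes per port.
	 */
	u32 stat_addr = TSTORM_PORT_STAT_OFFSET(2);	/* 0x4db0 */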
 
 /* Runtime array offsets */
-#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
-#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1
-#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2
-#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3
-#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4
-#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5
-#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6
-#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7
-#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8
-#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9
-#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10
-#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11
-#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12
-#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13
-#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
-#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
-#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
-#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17
-#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18
-#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19
-#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20
-#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21
-#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22
-#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23
-#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24
-#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
-#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
-#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497
-#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
-#define CAU_REG_PI_MEMORY_RT_OFFSET 2233
-#define CAU_REG_PI_MEMORY_RT_SIZE 4416
-#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649
-#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650
-#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651
-#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652
-#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653
-#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654
-#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6655
-#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6656
-#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6657
-#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6658
-#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659
-#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660
-#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661
-#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662
-#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663
-#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664
-#define SRC_REG_FIRSTFREE_RT_OFFSET 6665
-#define SRC_REG_FIRSTFREE_RT_SIZE 2
-#define SRC_REG_LASTFREE_RT_OFFSET 6667
-#define SRC_REG_LASTFREE_RT_SIZE 2
-#define SRC_REG_COUNTFREE_RT_OFFSET 6669
-#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670
-#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671
-#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672
-#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673
-#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674
-#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675
-#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6676
-#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6677
-#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6678
-#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6679
-#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6680
-#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6681
-#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6682
-#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6683
-#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6684
-#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6685
-#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6686
-#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6687
-#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6688
-#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689
-#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690
-#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6691
-#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6692
-#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6693
-#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6694
-#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6695
-#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6696
-#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6697
-#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6698
-#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6699
-#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6700
-#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6701
-#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6702
-#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6703
-#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6704
-#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
-#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28704
-#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28705
-#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28706
-#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28707
-#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28708
-#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28709
-#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28710
-#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28711
-#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28712
-#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28713
-#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28714
-#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
-#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29130
-#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512
-#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29642
-#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29643
-#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29644
-#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29645
-#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29646
-#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29647
-#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29648
-#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29649
-#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29650
-#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29651
-#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29652
-#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29653
-#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29654
-#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29655
-#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29656
-#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29657
-#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29658
-#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29659
-#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29660
-#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29661
-#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29662
-#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29663
-#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29664
-#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29665
-#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29666
-#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29667
-#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29668
-#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29669
-#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29670
-#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29671
-#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29672
-#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29673
-#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29674
-#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29675
-#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29676
-#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29677
-#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29678
-#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29679
-#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29680
-#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29681
-#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29682
-#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29683
-#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29684
-#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29685
-#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29686
-#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29687
-#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29688
-#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29689
-#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29690
-#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29691
-#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29692
-#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29693
-#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29694
-#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29695
-#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29696
-#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29697
-#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29698
-#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29699
-#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29700
-#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29701
-#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29702
-#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29703
-#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29704
-#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29705
-#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29706
-#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29707
-#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29708
-#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29709
-#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
-#define QM_REG_VOQCRDLINE_RT_OFFSET 29837
-#define QM_REG_VOQCRDLINE_RT_SIZE 20
-#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29857
-#define QM_REG_VOQINITCRDLINE_RT_SIZE 20
-#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29877
-#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29878
-#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29879
-#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29880
-#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29881
-#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29882
-#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29883
-#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29884
-#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29885
-#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29886
-#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29887
-#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29888
-#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29889
-#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29890
-#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29891
-#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29892
-#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29893
-#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29894
-#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29895
-#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29896
-#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29897
-#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29898
-#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29899
-#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29900
-#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29901
-#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29902
-#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29903
-#define QM_REG_PQTX2PF_0_RT_OFFSET 29904
-#define QM_REG_PQTX2PF_1_RT_OFFSET 29905
-#define QM_REG_PQTX2PF_2_RT_OFFSET 29906
-#define QM_REG_PQTX2PF_3_RT_OFFSET 29907
-#define QM_REG_PQTX2PF_4_RT_OFFSET 29908
-#define QM_REG_PQTX2PF_5_RT_OFFSET 29909
-#define QM_REG_PQTX2PF_6_RT_OFFSET 29910
-#define QM_REG_PQTX2PF_7_RT_OFFSET 29911
-#define QM_REG_PQTX2PF_8_RT_OFFSET 29912
-#define QM_REG_PQTX2PF_9_RT_OFFSET 29913
-#define QM_REG_PQTX2PF_10_RT_OFFSET 29914
-#define QM_REG_PQTX2PF_11_RT_OFFSET 29915
-#define QM_REG_PQTX2PF_12_RT_OFFSET 29916
-#define QM_REG_PQTX2PF_13_RT_OFFSET 29917
-#define QM_REG_PQTX2PF_14_RT_OFFSET 29918
-#define QM_REG_PQTX2PF_15_RT_OFFSET 29919
-#define QM_REG_PQTX2PF_16_RT_OFFSET 29920
-#define QM_REG_PQTX2PF_17_RT_OFFSET 29921
-#define QM_REG_PQTX2PF_18_RT_OFFSET 29922
-#define QM_REG_PQTX2PF_19_RT_OFFSET 29923
-#define QM_REG_PQTX2PF_20_RT_OFFSET 29924
-#define QM_REG_PQTX2PF_21_RT_OFFSET 29925
-#define QM_REG_PQTX2PF_22_RT_OFFSET 29926
-#define QM_REG_PQTX2PF_23_RT_OFFSET 29927
-#define QM_REG_PQTX2PF_24_RT_OFFSET 29928
-#define QM_REG_PQTX2PF_25_RT_OFFSET 29929
-#define QM_REG_PQTX2PF_26_RT_OFFSET 29930
-#define QM_REG_PQTX2PF_27_RT_OFFSET 29931
-#define QM_REG_PQTX2PF_28_RT_OFFSET 29932
-#define QM_REG_PQTX2PF_29_RT_OFFSET 29933
-#define QM_REG_PQTX2PF_30_RT_OFFSET 29934
-#define QM_REG_PQTX2PF_31_RT_OFFSET 29935
-#define QM_REG_PQTX2PF_32_RT_OFFSET 29936
-#define QM_REG_PQTX2PF_33_RT_OFFSET 29937
-#define QM_REG_PQTX2PF_34_RT_OFFSET 29938
-#define QM_REG_PQTX2PF_35_RT_OFFSET 29939
-#define QM_REG_PQTX2PF_36_RT_OFFSET 29940
-#define QM_REG_PQTX2PF_37_RT_OFFSET 29941
-#define QM_REG_PQTX2PF_38_RT_OFFSET 29942
-#define QM_REG_PQTX2PF_39_RT_OFFSET 29943
-#define QM_REG_PQTX2PF_40_RT_OFFSET 29944
-#define QM_REG_PQTX2PF_41_RT_OFFSET 29945
-#define QM_REG_PQTX2PF_42_RT_OFFSET 29946
-#define QM_REG_PQTX2PF_43_RT_OFFSET 29947
-#define QM_REG_PQTX2PF_44_RT_OFFSET 29948
-#define QM_REG_PQTX2PF_45_RT_OFFSET 29949
-#define QM_REG_PQTX2PF_46_RT_OFFSET 29950
-#define QM_REG_PQTX2PF_47_RT_OFFSET 29951
-#define QM_REG_PQTX2PF_48_RT_OFFSET 29952
-#define QM_REG_PQTX2PF_49_RT_OFFSET 29953
-#define QM_REG_PQTX2PF_50_RT_OFFSET 29954
-#define QM_REG_PQTX2PF_51_RT_OFFSET 29955
-#define QM_REG_PQTX2PF_52_RT_OFFSET 29956
-#define QM_REG_PQTX2PF_53_RT_OFFSET 29957
-#define QM_REG_PQTX2PF_54_RT_OFFSET 29958
-#define QM_REG_PQTX2PF_55_RT_OFFSET 29959
-#define QM_REG_PQTX2PF_56_RT_OFFSET 29960
-#define QM_REG_PQTX2PF_57_RT_OFFSET 29961
-#define QM_REG_PQTX2PF_58_RT_OFFSET 29962
-#define QM_REG_PQTX2PF_59_RT_OFFSET 29963
-#define QM_REG_PQTX2PF_60_RT_OFFSET 29964
-#define QM_REG_PQTX2PF_61_RT_OFFSET 29965
-#define QM_REG_PQTX2PF_62_RT_OFFSET 29966
-#define QM_REG_PQTX2PF_63_RT_OFFSET 29967
-#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29968
-#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29969
-#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29970
-#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29971
-#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29972
-#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29973
-#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29974
-#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29975
-#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29976
-#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29977
-#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29978
-#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29979
-#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29980
-#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29981
-#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29982
-#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29983
-#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29984
-#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29985
-#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29986
-#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29987
-#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29988
-#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29989
-#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29990
-#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29991
-#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29992
-#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29993
-#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29994
-#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29995
-#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29996
-#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
-#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30252
-#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
-#define QM_REG_RLGLBLCRD_RT_OFFSET 30508
-#define QM_REG_RLGLBLCRD_RT_SIZE 256
-#define QM_REG_RLGLBLENABLE_RT_OFFSET 30764
-#define QM_REG_RLPFPERIOD_RT_OFFSET 30765
-#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30766
-#define QM_REG_RLPFINCVAL_RT_OFFSET 30767
-#define QM_REG_RLPFINCVAL_RT_SIZE 16
-#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30783
-#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_RLPFCRD_RT_OFFSET 30799
-#define QM_REG_RLPFCRD_RT_SIZE 16
-#define QM_REG_RLPFENABLE_RT_OFFSET 30815
-#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30816
-#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30817
-#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
-#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30833
-#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_WFQPFCRD_RT_OFFSET 30849
-#define QM_REG_WFQPFCRD_RT_SIZE 160
-#define QM_REG_WFQPFENABLE_RT_OFFSET 31009
-#define QM_REG_WFQVPENABLE_RT_OFFSET 31010
-#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31011
-#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
-#define QM_REG_TXPQMAP_RT_OFFSET 31523
-#define QM_REG_TXPQMAP_RT_SIZE 512
-#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32035
-#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
-#define QM_REG_WFQVPCRD_RT_OFFSET 32547
-#define QM_REG_WFQVPCRD_RT_SIZE 512
-#define QM_REG_WFQVPMAP_RT_OFFSET 33059
-#define QM_REG_WFQVPMAP_RT_SIZE 512
-#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33571
-#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
-#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33731
-#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33732
-#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33733
-#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33734
-#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33735
-#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33736
-#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33737
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33738
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33742
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33746
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33750
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33751
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33783
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33799
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33815
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33831
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
-#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33847
-#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 33848
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33849
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33850
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33851
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33852
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33853
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33854
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33855
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33856
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33857
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33858
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33859
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33860
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33861
-#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 33862
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33863
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33864
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33865
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33866
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33867
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33868
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33869
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33870
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33871
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33872
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33873
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33874
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33875
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33876
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33877
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33878
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33879
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33880
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33881
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33882
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33883
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33884
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33885
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33886
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33887
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33888
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33889
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33890
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33891
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33892
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33893
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33894
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33895
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33896
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33897
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33898
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33899
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33900
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33901
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33902
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33903
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33904
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33905
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33906
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33907
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33908
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33909
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33910
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33911
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33912
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33913
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33914
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33915
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33916
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33917
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33918
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33919
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33920
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33921
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33922
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33923
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33924
-
-#define RUNTIME_ARRAY_SIZE 33925
+#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET       0
+#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET       1
+#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET       2
+#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET       3
+#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET       4
+#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET       5
+#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET       6
+#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET       7
+#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET       8
+#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET       9
+#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET       10
+#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET       11
+#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET       12
+#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET       13
+#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET       14
+#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET       15
+#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
+#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET      17
+#define IGU_REG_PF_CONFIGURATION_RT_OFFSET     18
+#define IGU_REG_VF_CONFIGURATION_RT_OFFSET     19
+#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET      20
+#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET      21
+#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET   22
+#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET  23
+#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET    24
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET        761
+#define CAU_REG_SB_VAR_MEMORY_RT_SIZE  736
+#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET       1497
+#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
+#define CAU_REG_PI_MEMORY_RT_OFFSET    2233
+#define CAU_REG_PI_MEMORY_RT_SIZE      4416
+#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET   6649
+#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET     6650
+#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET     6651
+#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET        6652
+#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET        6653
+#define PRS_REG_SEARCH_TCP_RT_OFFSET   6654
+#define PRS_REG_SEARCH_FCOE_RT_OFFSET  6655
+#define PRS_REG_SEARCH_ROCE_RT_OFFSET  6656
+#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET  6657
+#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET  6658
+#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET      6659
+#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET    6660
+#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET  6661
+#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET     6662
+#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET      6663
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET        6664
+#define SRC_REG_FIRSTFREE_RT_OFFSET    6665
+#define SRC_REG_FIRSTFREE_RT_SIZE      2
+#define SRC_REG_LASTFREE_RT_OFFSET     6667
+#define SRC_REG_LASTFREE_RT_SIZE       2
+#define SRC_REG_COUNTFREE_RT_OFFSET    6669
+#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET     6670
+#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET       6671
+#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET       6672
+#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673
+#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674
+#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET        6675
+#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET       6676
+#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET      6677
+#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET       6678
+#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET      6679
+#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET       6680
+#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET     6681
+#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET      6682
+#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET    6683
+#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET     6684
+#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET    6685
+#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET     6686
+#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET    6687
+#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET     6688
+#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET    6689
+#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET  6690
+#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET  6691
+#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET      6692
+#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET    6693
+#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET    6694
+#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET  6695
+#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET        6696
+#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET        6697
+#define PSWRQ2_REG_VF_BASE_RT_OFFSET   6698
+#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET       6699
+#define PSWRQ2_REG_WR_MBS0_RT_OFFSET   6700
+#define PSWRQ2_REG_RD_MBS0_RT_OFFSET   6701
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET     6702
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET     6703
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET        6704
+#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE  22000
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET  28704
+#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET       28705
+#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET  28706
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET  28707
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET     28708
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET     28709
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET     28710
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET        28711
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET        28712
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET        28713
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET    28714
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET    28715
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET       28716
+#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET       29132
+#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET   29644
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET   29645
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET   29646
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET      29647
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET      29648
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET      29649
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET      29650
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET      29651
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET      29652
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET      29653
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET      29654
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET      29655
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET      29656
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET     29657
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET     29658
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET     29659
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET     29660
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET     29661
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET     29662
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET     29663
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET     29664
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET     29665
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET     29666
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET     29667
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET     29668
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET     29669
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET     29670
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET     29671
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET     29672
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET     29673
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET     29674
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET     29675
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET     29676
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET     29677
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET     29678
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET     29679
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET     29680
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET     29681
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET     29682
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET     29683
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET     29684
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET     29685
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET     29686
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET     29687
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET     29688
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET     29689
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET     29690
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET     29691
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET     29692
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET     29693
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET     29694
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET     29695
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET     29696
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET     29697
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET     29698
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET     29699
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET     29700
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET     29701
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET     29702
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET     29703
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET     29704
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET     29705
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET     29706
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET     29707
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET     29708
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET     29709
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET     29710
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET       29711
+#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
+#define QM_REG_VOQCRDLINE_RT_OFFSET    29839
+#define QM_REG_VOQCRDLINE_RT_SIZE      20
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET        29859
+#define QM_REG_VOQINITCRDLINE_RT_SIZE  20
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET    29879
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET    29880
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET     29881
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET   29882
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET  29883
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET       29884
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET       29885
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET       29886
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET       29887
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET       29888
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET       29889
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET       29890
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET       29891
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET       29892
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET       29893
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET      29894
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET      29895
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET      29896
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET      29897
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET      29898
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET      29899
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET   29900
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET   29901
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET   29902
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET   29903
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET      29904
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET      29905
+#define QM_REG_PQTX2PF_0_RT_OFFSET     29906
+#define QM_REG_PQTX2PF_1_RT_OFFSET     29907
+#define QM_REG_PQTX2PF_2_RT_OFFSET     29908
+#define QM_REG_PQTX2PF_3_RT_OFFSET     29909
+#define QM_REG_PQTX2PF_4_RT_OFFSET     29910
+#define QM_REG_PQTX2PF_5_RT_OFFSET     29911
+#define QM_REG_PQTX2PF_6_RT_OFFSET     29912
+#define QM_REG_PQTX2PF_7_RT_OFFSET     29913
+#define QM_REG_PQTX2PF_8_RT_OFFSET     29914
+#define QM_REG_PQTX2PF_9_RT_OFFSET     29915
+#define QM_REG_PQTX2PF_10_RT_OFFSET    29916
+#define QM_REG_PQTX2PF_11_RT_OFFSET    29917
+#define QM_REG_PQTX2PF_12_RT_OFFSET    29918
+#define QM_REG_PQTX2PF_13_RT_OFFSET    29919
+#define QM_REG_PQTX2PF_14_RT_OFFSET    29920
+#define QM_REG_PQTX2PF_15_RT_OFFSET    29921
+#define QM_REG_PQTX2PF_16_RT_OFFSET    29922
+#define QM_REG_PQTX2PF_17_RT_OFFSET    29923
+#define QM_REG_PQTX2PF_18_RT_OFFSET    29924
+#define QM_REG_PQTX2PF_19_RT_OFFSET    29925
+#define QM_REG_PQTX2PF_20_RT_OFFSET    29926
+#define QM_REG_PQTX2PF_21_RT_OFFSET    29927
+#define QM_REG_PQTX2PF_22_RT_OFFSET    29928
+#define QM_REG_PQTX2PF_23_RT_OFFSET    29929
+#define QM_REG_PQTX2PF_24_RT_OFFSET    29930
+#define QM_REG_PQTX2PF_25_RT_OFFSET    29931
+#define QM_REG_PQTX2PF_26_RT_OFFSET    29932
+#define QM_REG_PQTX2PF_27_RT_OFFSET    29933
+#define QM_REG_PQTX2PF_28_RT_OFFSET    29934
+#define QM_REG_PQTX2PF_29_RT_OFFSET    29935
+#define QM_REG_PQTX2PF_30_RT_OFFSET    29936
+#define QM_REG_PQTX2PF_31_RT_OFFSET    29937
+#define QM_REG_PQTX2PF_32_RT_OFFSET    29938
+#define QM_REG_PQTX2PF_33_RT_OFFSET    29939
+#define QM_REG_PQTX2PF_34_RT_OFFSET    29940
+#define QM_REG_PQTX2PF_35_RT_OFFSET    29941
+#define QM_REG_PQTX2PF_36_RT_OFFSET    29942
+#define QM_REG_PQTX2PF_37_RT_OFFSET    29943
+#define QM_REG_PQTX2PF_38_RT_OFFSET    29944
+#define QM_REG_PQTX2PF_39_RT_OFFSET    29945
+#define QM_REG_PQTX2PF_40_RT_OFFSET    29946
+#define QM_REG_PQTX2PF_41_RT_OFFSET    29947
+#define QM_REG_PQTX2PF_42_RT_OFFSET    29948
+#define QM_REG_PQTX2PF_43_RT_OFFSET    29949
+#define QM_REG_PQTX2PF_44_RT_OFFSET    29950
+#define QM_REG_PQTX2PF_45_RT_OFFSET    29951
+#define QM_REG_PQTX2PF_46_RT_OFFSET    29952
+#define QM_REG_PQTX2PF_47_RT_OFFSET    29953
+#define QM_REG_PQTX2PF_48_RT_OFFSET    29954
+#define QM_REG_PQTX2PF_49_RT_OFFSET    29955
+#define QM_REG_PQTX2PF_50_RT_OFFSET    29956
+#define QM_REG_PQTX2PF_51_RT_OFFSET    29957
+#define QM_REG_PQTX2PF_52_RT_OFFSET    29958
+#define QM_REG_PQTX2PF_53_RT_OFFSET    29959
+#define QM_REG_PQTX2PF_54_RT_OFFSET    29960
+#define QM_REG_PQTX2PF_55_RT_OFFSET    29961
+#define QM_REG_PQTX2PF_56_RT_OFFSET    29962
+#define QM_REG_PQTX2PF_57_RT_OFFSET    29963
+#define QM_REG_PQTX2PF_58_RT_OFFSET    29964
+#define QM_REG_PQTX2PF_59_RT_OFFSET    29965
+#define QM_REG_PQTX2PF_60_RT_OFFSET    29966
+#define QM_REG_PQTX2PF_61_RT_OFFSET    29967
+#define QM_REG_PQTX2PF_62_RT_OFFSET    29968
+#define QM_REG_PQTX2PF_63_RT_OFFSET    29969
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET  29970
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET  29971
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET  29972
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET  29973
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET  29974
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET  29975
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET  29976
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET  29977
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET  29978
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET  29979
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29980
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29981
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29982
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29983
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29984
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29985
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET        29986
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET        29987
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET   29988
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET   29989
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET     29990
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET     29991
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET     29992
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET     29993
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET     29994
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET     29995
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET     29996
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET     29997
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET  29998
+#define QM_REG_RLGLBLINCVAL_RT_SIZE    256
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET      30254
+#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE        256
+#define QM_REG_RLGLBLCRD_RT_OFFSET     30510
+#define QM_REG_RLGLBLCRD_RT_SIZE       256
+#define QM_REG_RLGLBLENABLE_RT_OFFSET  30766
+#define QM_REG_RLPFPERIOD_RT_OFFSET    30767
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET       30768
+#define QM_REG_RLPFINCVAL_RT_OFFSET    30769
+#define QM_REG_RLPFINCVAL_RT_SIZE      16
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET        30785
+#define QM_REG_RLPFUPPERBOUND_RT_SIZE  16
+#define QM_REG_RLPFCRD_RT_OFFSET       30801
+#define QM_REG_RLPFCRD_RT_SIZE 16
+#define QM_REG_RLPFENABLE_RT_OFFSET    30817
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30818
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET   30819
+#define QM_REG_WFQPFWEIGHT_RT_SIZE     16
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET       30835
+#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_WFQPFCRD_RT_OFFSET      30851
+#define QM_REG_WFQPFCRD_RT_SIZE        160
+#define QM_REG_WFQPFENABLE_RT_OFFSET   31011
+#define QM_REG_WFQVPENABLE_RT_OFFSET   31012
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET  31013
+#define QM_REG_BASEADDRTXPQ_RT_SIZE    512
+#define QM_REG_TXPQMAP_RT_OFFSET       31525
+#define QM_REG_TXPQMAP_RT_SIZE 512
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET   32037
+#define QM_REG_WFQVPWEIGHT_RT_SIZE     512
+#define QM_REG_WFQVPCRD_RT_OFFSET      32549
+#define QM_REG_WFQVPCRD_RT_SIZE        512
+#define QM_REG_WFQVPMAP_RT_OFFSET      33061
+#define QM_REG_WFQVPMAP_RT_SIZE        512
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET  33573
+#define QM_REG_WFQPFCRD_MSB_RT_SIZE    160
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET      33733
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET        33734
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET        33735
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET        33736
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET        33737
+#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33738
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET     33739
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET      33740
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE        4
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33744
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE   4
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET   33748
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE     4
+#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET      33752
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET        33753
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE  32
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET   33785
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE     16
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33801
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE   16
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET        33817
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE  16
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET      33833
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE        16
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33849
+#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET       33850
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET      33851
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET      33852
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET      33853
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET  33854
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET  33855
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET  33856
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET  33857
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET       33858
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET       33859
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET       33860
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET       33861
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET   33862
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET        33863
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET      33864
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33865
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET       33866
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET  33867
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET   33868
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET       33869
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET  33870
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET   33871
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET       33872
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET  33873
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET   33874
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET       33875
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET  33876
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET   33877
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET       33878
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET  33879
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET   33880
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET       33881
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET  33882
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET   33883
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET       33884
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET  33885
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET   33886
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET       33887
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET  33888
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET   33889
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET       33890
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET  33891
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET   33892
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET       33893
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET  33894
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET   33895
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET      33896
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33897
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET  33898
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET      33899
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33900
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET  33901
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET      33902
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33903
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET  33904
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET      33905
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33906
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET  33907
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET      33908
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33909
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET  33910
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET      33911
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33912
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET  33913
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET      33914
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33915
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET  33916
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET      33917
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33918
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET  33919
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET      33920
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33921
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET  33922
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET      33923
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33924
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET  33925
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET   33926
+
+#define RUNTIME_ARRAY_SIZE 33927
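
Every *_RT_OFFSET above indexes a host-side staging array of RUNTIME_ARRAY_SIZE dwords that the init code fills and later replays onto the chip. A minimal sketch of the staging step, modeled on (but not copied from) qed's STORE_RT_REG() helper:

	/* Hedged sketch: stage one runtime register value for a later
	 * download pass; field names mirror qed's struct qed_rt_data. */
	struct rt_data_sketch {
		u32 init_val[RUNTIME_ARRAY_SIZE];
		bool b_valid[RUNTIME_ARRAY_SIZE];
	};

	static void store_rt_reg(struct rt_data_sketch *rt, u32 rt_offset, u32 val)
	{
		rt->init_val[rt_offset] = val;
		rt->b_valid[rt_offset] = true;	/* only valid entries are written */
	}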
 
 /* The eth storm context for the Tstorm */
 struct tstorm_eth_conn_st_ctx {
@@ -3201,7 +4605,31 @@ struct eth_conn_context {
        struct mstorm_eth_conn_st_ctx mstorm_st_context;
 };
 
-/* opcodes for the event ring */
+enum eth_error_code {
+       ETH_OK = 0x00,
+       ETH_FILTERS_MAC_ADD_FAIL_FULL,
+       ETH_FILTERS_MAC_ADD_FAIL_FULL_MTT2,
+       ETH_FILTERS_MAC_ADD_FAIL_DUP_MTT2,
+       ETH_FILTERS_MAC_ADD_FAIL_DUP_STT2,
+       ETH_FILTERS_MAC_DEL_FAIL_NOF,
+       ETH_FILTERS_MAC_DEL_FAIL_NOF_MTT2,
+       ETH_FILTERS_MAC_DEL_FAIL_NOF_STT2,
+       ETH_FILTERS_MAC_ADD_FAIL_ZERO_MAC,
+       ETH_FILTERS_VLAN_ADD_FAIL_FULL,
+       ETH_FILTERS_VLAN_ADD_FAIL_DUP,
+       ETH_FILTERS_VLAN_DEL_FAIL_NOF,
+       ETH_FILTERS_VLAN_DEL_FAIL_NOF_TT1,
+       ETH_FILTERS_PAIR_ADD_FAIL_DUP,
+       ETH_FILTERS_PAIR_ADD_FAIL_FULL,
+       ETH_FILTERS_PAIR_ADD_FAIL_FULL_MAC,
+       ETH_FILTERS_PAIR_DEL_FAIL_NOF,
+       ETH_FILTERS_PAIR_DEL_FAIL_NOF_TT1,
+       ETH_FILTERS_PAIR_ADD_FAIL_ZERO_MAC,
+       ETH_FILTERS_VNI_ADD_FAIL_FULL,
+       ETH_FILTERS_VNI_ADD_FAIL_DUP,
+       MAX_ETH_ERROR_CODE
+};
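
Firmware reports one of these codes with the Ethernet ramrod completion. A minimal, illustrative check (the completion-callback plumbing is assumed):

	/* Illustrative: 'fw_return_code' as delivered in the completion. */
	if (fw_return_code != ETH_OK)
		DP_NOTICE(p_hwfn, "Eth ramrod failed, fw return code %d\n",
			  fw_return_code);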
+
 enum eth_event_opcode {
        ETH_EVENT_UNUSED,
        ETH_EVENT_VPORT_START,
@@ -3269,7 +4697,19 @@ enum eth_filter_type {
        MAX_ETH_FILTER_TYPE
 };
 
-/* Ethernet Ramrod Command IDs */
+enum eth_ipv4_frag_type {
+       ETH_IPV4_NOT_FRAG,
+       ETH_IPV4_FIRST_FRAG,
+       ETH_IPV4_NON_FIRST_FRAG,
+       MAX_ETH_IPV4_FRAG_TYPE
+};
+
+enum eth_ip_type {
+       ETH_IPV4,
+       ETH_IPV6,
+       MAX_ETH_IP_TYPE
+};
+
 enum eth_ramrod_cmd_id {
        ETH_RAMROD_UNUSED,
        ETH_RAMROD_VPORT_START,
@@ -3427,6 +4867,18 @@ struct eth_vport_tx_mode {
        __le16 reserved2[3];
 };
 
+enum gft_filter_update_action {
+       GFT_ADD_FILTER,
+       GFT_DELETE_FILTER,
+       MAX_GFT_FILTER_UPDATE_ACTION
+};
+
+enum gft_logic_filter_type {
+       GFT_FILTER_TYPE,
+       RFS_FILTER_TYPE,
+       MAX_GFT_LOGIC_FILTER_TYPE
+};
+
 /* Ramrod data for rx queue start ramrod */
 struct rx_queue_start_ramrod_data {
        __le16 rx_queue_id;
@@ -3451,8 +4903,8 @@ struct rx_queue_start_ramrod_data {
        u8 toggle_val;
 
        u8 vf_rx_prod_index;
-
-       u8 reserved[6];
+       u8 vf_rx_prod_use_zone_a;
+       u8 reserved[5];
        __le16 reserved1;
        struct regpair cqe_pbl_addr;
        struct regpair bd_base;
@@ -3497,6 +4949,16 @@ struct rx_udp_filter_data {
        __le32 tenant_id;
 };
 
+struct rx_update_gft_filter_data {
+       struct regpair pkt_hdr_addr;
+       __le16 pkt_hdr_length;
+       __le16 rx_qid_or_action_icid;
+       u8 vport_id;
+       u8 filter_type;
+       u8 filter_action;
+       u8 reserved;
+};
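
This structure is the payload of the GFT filter-update ramrod. A hedged sketch of filling it for an aRFS add (the DMA address, header length, and queue id are placeholders):

	/* Illustrative: bind a flow, described by a DMA'd packet-header
	 * template, to an Rx queue via an RFS filter add. */
	p_ramrod->pkt_hdr_addr.hi = cpu_to_le32(upper_32_bits(hdr_dma));
	p_ramrod->pkt_hdr_addr.lo = cpu_to_le32(lower_32_bits(hdr_dma));
	p_ramrod->pkt_hdr_length = cpu_to_le16(hdr_len);
	p_ramrod->rx_qid_or_action_icid = cpu_to_le16(rx_queue_id);
	p_ramrod->vport_id = vport_id;
	p_ramrod->filter_type = RFS_FILTER_TYPE;
	p_ramrod->filter_action = GFT_ADD_FILTER;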
+
 /* Ramrod data for tx queue start ramrod */
 struct tx_queue_start_ramrod_data {
        __le16 sb_id;
@@ -3526,10 +4988,11 @@ struct tx_queue_start_ramrod_data {
        __le16 pxp_st_index;
        __le16 comp_agg_size;
        __le16 queue_zone_id;
-       __le16 test_dup_count;
+       __le16 reserved2;
        __le16 pbl_size;
        __le16 tx_queue_id;
-
+       __le16 same_as_last_id;
+       __le16 reserved[3];
        struct regpair pbl_base_addr;
        struct regpair bd_cons_address;
 };
@@ -3618,7 +5081,10 @@ struct vport_update_ramrod_data_cmn {
        u8 update_mtu_flg;
 
        __le16 mtu;
-       u8 reserved[2];
+       u8 update_ctl_frame_checks_en_flg;
+       u8 ctl_frame_mac_check_en;
+       u8 ctl_frame_ethtype_check_en;
+       u8 reserved[15];
 };
 
 struct vport_update_ramrod_mcast {
@@ -3636,6 +5102,652 @@ struct vport_update_ramrod_data {
        struct eth_vport_rss_config rss_config;
 };
 
+struct gft_cam_line {
+       __le32 camline;
+#define GFT_CAM_LINE_VALID_MASK                0x1
+#define GFT_CAM_LINE_VALID_SHIFT       0
+#define GFT_CAM_LINE_DATA_MASK         0x3FFF
+#define GFT_CAM_LINE_DATA_SHIFT                1
+#define GFT_CAM_LINE_MASK_BITS_MASK    0x3FFF
+#define GFT_CAM_LINE_MASK_BITS_SHIFT   15
+#define GFT_CAM_LINE_RESERVED1_MASK    0x7
+#define GFT_CAM_LINE_RESERVED1_SHIFT   29
+};
+
+struct gft_cam_line_mapped {
+       __le32 camline;
+#define GFT_CAM_LINE_MAPPED_VALID_MASK                         0x1
+#define GFT_CAM_LINE_MAPPED_VALID_SHIFT                                0
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK                    0x1
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_SHIFT                   1
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK             0x1
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_SHIFT            2
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK           0xF
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_SHIFT          3
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK                   0xF
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_SHIFT                  7
+#define GFT_CAM_LINE_MAPPED_PF_ID_MASK                         0xF
+#define GFT_CAM_LINE_MAPPED_PF_ID_SHIFT                                11
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_MASK               0x1
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_SHIFT              15
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_MASK                0x1
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_SHIFT       16
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK      0xF
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_SHIFT     17
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_MASK              0xF
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_SHIFT             21
+#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK                    0xF
+#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_SHIFT                   25
+#define GFT_CAM_LINE_MAPPED_RESERVED1_MASK                     0x7
+#define GFT_CAM_LINE_MAPPED_RESERVED1_SHIFT                    29
+};
+
+union gft_cam_line_union {
+       struct gft_cam_line cam_line;
+       struct gft_cam_line_mapped cam_line_mapped;
+};
+
+enum gft_profile_ip_version {
+       GFT_PROFILE_IPV4 = 0,
+       GFT_PROFILE_IPV6 = 1,
+       MAX_GFT_PROFILE_IP_VERSION
+};
+
+enum gft_profile_upper_protocol_type {
+       GFT_PROFILE_ROCE_PROTOCOL = 0,
+       GFT_PROFILE_RROCE_PROTOCOL = 1,
+       GFT_PROFILE_FCOE_PROTOCOL = 2,
+       GFT_PROFILE_ICMP_PROTOCOL = 3,
+       GFT_PROFILE_ARP_PROTOCOL = 4,
+       GFT_PROFILE_USER_TCP_SRC_PORT_1_INNER = 5,
+       GFT_PROFILE_USER_TCP_DST_PORT_1_INNER = 6,
+       GFT_PROFILE_TCP_PROTOCOL = 7,
+       GFT_PROFILE_USER_UDP_DST_PORT_1_INNER = 8,
+       GFT_PROFILE_USER_UDP_DST_PORT_2_OUTER = 9,
+       GFT_PROFILE_UDP_PROTOCOL = 10,
+       GFT_PROFILE_USER_IP_1_INNER = 11,
+       GFT_PROFILE_USER_IP_2_OUTER = 12,
+       GFT_PROFILE_USER_ETH_1_INNER = 13,
+       GFT_PROFILE_USER_ETH_2_OUTER = 14,
+       GFT_PROFILE_RAW = 15,
+       MAX_GFT_PROFILE_UPPER_PROTOCOL_TYPE
+};
+
+struct gft_ram_line {
+       __le32 low32bits;
+#define GFT_RAM_LINE_VLAN_SELECT_MASK                  0x3
+#define GFT_RAM_LINE_VLAN_SELECT_SHIFT                 0
+#define GFT_RAM_LINE_TUNNEL_ENTROPHY_MASK              0x1
+#define GFT_RAM_LINE_TUNNEL_ENTROPHY_SHIFT             2
+#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_MASK         0x1
+#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_SHIFT                3
+#define GFT_RAM_LINE_TUNNEL_TTL_MASK                   0x1
+#define GFT_RAM_LINE_TUNNEL_TTL_SHIFT                  4
+#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_MASK             0x1
+#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_SHIFT            5
+#define GFT_RAM_LINE_TUNNEL_DST_PORT_MASK              0x1
+#define GFT_RAM_LINE_TUNNEL_DST_PORT_SHIFT             6
+#define GFT_RAM_LINE_TUNNEL_SRC_PORT_MASK              0x1
+#define GFT_RAM_LINE_TUNNEL_SRC_PORT_SHIFT             7
+#define GFT_RAM_LINE_TUNNEL_DSCP_MASK                  0x1
+#define GFT_RAM_LINE_TUNNEL_DSCP_SHIFT                 8
+#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_MASK      0x1
+#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_SHIFT     9
+#define GFT_RAM_LINE_TUNNEL_DST_IP_MASK                        0x1
+#define GFT_RAM_LINE_TUNNEL_DST_IP_SHIFT               10
+#define GFT_RAM_LINE_TUNNEL_SRC_IP_MASK                        0x1
+#define GFT_RAM_LINE_TUNNEL_SRC_IP_SHIFT               11
+#define GFT_RAM_LINE_TUNNEL_PRIORITY_MASK              0x1
+#define GFT_RAM_LINE_TUNNEL_PRIORITY_SHIFT             12
+#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_MASK         0x1
+#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_SHIFT                13
+#define GFT_RAM_LINE_TUNNEL_VLAN_MASK                  0x1
+#define GFT_RAM_LINE_TUNNEL_VLAN_SHIFT                 14
+#define GFT_RAM_LINE_TUNNEL_DST_MAC_MASK               0x1
+#define GFT_RAM_LINE_TUNNEL_DST_MAC_SHIFT              15
+#define GFT_RAM_LINE_TUNNEL_SRC_MAC_MASK               0x1
+#define GFT_RAM_LINE_TUNNEL_SRC_MAC_SHIFT              16
+#define GFT_RAM_LINE_TTL_EQUAL_ONE_MASK                        0x1
+#define GFT_RAM_LINE_TTL_EQUAL_ONE_SHIFT               17
+#define GFT_RAM_LINE_TTL_MASK                          0x1
+#define GFT_RAM_LINE_TTL_SHIFT                         18
+#define GFT_RAM_LINE_ETHERTYPE_MASK                    0x1
+#define GFT_RAM_LINE_ETHERTYPE_SHIFT                   19
+#define GFT_RAM_LINE_RESERVED0_MASK                    0x1
+#define GFT_RAM_LINE_RESERVED0_SHIFT                   20
+#define GFT_RAM_LINE_TCP_FLAG_FIN_MASK                 0x1
+#define GFT_RAM_LINE_TCP_FLAG_FIN_SHIFT                        21
+#define GFT_RAM_LINE_TCP_FLAG_SYN_MASK                 0x1
+#define GFT_RAM_LINE_TCP_FLAG_SYN_SHIFT                        22
+#define GFT_RAM_LINE_TCP_FLAG_RST_MASK                 0x1
+#define GFT_RAM_LINE_TCP_FLAG_RST_SHIFT                        23
+#define GFT_RAM_LINE_TCP_FLAG_PSH_MASK                 0x1
+#define GFT_RAM_LINE_TCP_FLAG_PSH_SHIFT                        24
+#define GFT_RAM_LINE_TCP_FLAG_ACK_MASK                 0x1
+#define GFT_RAM_LINE_TCP_FLAG_ACK_SHIFT                        25
+#define GFT_RAM_LINE_TCP_FLAG_URG_MASK                 0x1
+#define GFT_RAM_LINE_TCP_FLAG_URG_SHIFT                        26
+#define GFT_RAM_LINE_TCP_FLAG_ECE_MASK                 0x1
+#define GFT_RAM_LINE_TCP_FLAG_ECE_SHIFT                        27
+#define GFT_RAM_LINE_TCP_FLAG_CWR_MASK                 0x1
+#define GFT_RAM_LINE_TCP_FLAG_CWR_SHIFT                        28
+#define GFT_RAM_LINE_TCP_FLAG_NS_MASK                  0x1
+#define GFT_RAM_LINE_TCP_FLAG_NS_SHIFT                 29
+#define GFT_RAM_LINE_DST_PORT_MASK                     0x1
+#define GFT_RAM_LINE_DST_PORT_SHIFT                    30
+#define GFT_RAM_LINE_SRC_PORT_MASK                     0x1
+#define GFT_RAM_LINE_SRC_PORT_SHIFT                    31
+       __le32 high32bits;
+#define GFT_RAM_LINE_DSCP_MASK                         0x1
+#define GFT_RAM_LINE_DSCP_SHIFT                                0
+#define GFT_RAM_LINE_OVER_IP_PROTOCOL_MASK             0x1
+#define GFT_RAM_LINE_OVER_IP_PROTOCOL_SHIFT            1
+#define GFT_RAM_LINE_DST_IP_MASK                       0x1
+#define GFT_RAM_LINE_DST_IP_SHIFT                      2
+#define GFT_RAM_LINE_SRC_IP_MASK                       0x1
+#define GFT_RAM_LINE_SRC_IP_SHIFT                      3
+#define GFT_RAM_LINE_PRIORITY_MASK                     0x1
+#define GFT_RAM_LINE_PRIORITY_SHIFT                    4
+#define GFT_RAM_LINE_PROVIDER_VLAN_MASK                        0x1
+#define GFT_RAM_LINE_PROVIDER_VLAN_SHIFT               5
+#define GFT_RAM_LINE_VLAN_MASK                         0x1
+#define GFT_RAM_LINE_VLAN_SHIFT                                6
+#define GFT_RAM_LINE_DST_MAC_MASK                      0x1
+#define GFT_RAM_LINE_DST_MAC_SHIFT                     7
+#define GFT_RAM_LINE_SRC_MAC_MASK                      0x1
+#define GFT_RAM_LINE_SRC_MAC_SHIFT                     8
+#define GFT_RAM_LINE_TENANT_ID_MASK                    0x1
+#define GFT_RAM_LINE_TENANT_ID_SHIFT                   9
+#define GFT_RAM_LINE_RESERVED1_MASK                    0x3FFFFF
+#define GFT_RAM_LINE_RESERVED1_SHIFT                   10
+};
+
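/*
 * Usage sketch (not from this hunk): a RAM line selects which parsed
 * header fields participate in a PF's GFT profile. Enabling a classic
 * 4-tuple match (as an aRFS-style configuration would) means setting
 * the IP bits in the high word and the L4 port bits in the low word
 * before the two words are written into the parser's profile RAM.
 */
static void example_build_4tuple_ram_line(u32 *lo, u32 *hi)
{
	*lo = 0;
	*hi = 0;

	SET_FIELD(*lo, GFT_RAM_LINE_SRC_PORT, 1);
	SET_FIELD(*lo, GFT_RAM_LINE_DST_PORT, 1);
	SET_FIELD(*hi, GFT_RAM_LINE_SRC_IP, 1);
	SET_FIELD(*hi, GFT_RAM_LINE_DST_IP, 1);
}
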
+struct mstorm_eth_conn_ag_ctx {
+       u8 byte0;
+       u8 byte1;
+       u8 flags0;
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK       0x1
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK       0x1
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT         1
+#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK        0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT          2
+#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK        0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT          4
+#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK        0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT          6
+       u8 flags1;
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK      0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT        0
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK      0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT        1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK      0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT        2
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK    0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT      3
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK    0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT      4
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK    0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT      5
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK    0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT      6
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK    0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT      7
+       __le16 word0;
+       __le16 word1;
+       __le32 reg0;
+       __le32 reg1;
+};
+
+struct xstorm_eth_conn_agctxdq_ext_ldpart {
+       u8 reserved0;
+       u8 eth_state;
+       u8 flags0;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK        0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT           0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT              1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT              2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK        0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT           3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT              4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT              5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT              6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT              7
+       u8 flags1;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT              0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT              1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT              2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK       0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT                  3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_MASK       0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_SHIFT                  4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_MASK       0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_SHIFT                  5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK      0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT         6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK        0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT           7
+       u8 flags2;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT                    0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT                    2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT                    4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT                    6
+       u8 flags3;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT                    0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT                    2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT                    4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT                    6
+       u8 flags4;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT                    0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT                    2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK        0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT                   4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK        0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT                   6
+       u8 flags5;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK        0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_SHIFT                   0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK        0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT                   2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK        0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT                   4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK        0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT                   6
+       u8 flags6;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK    0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT       0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK    0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT       2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK       0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT                  4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK        0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT           6
+       u8 flags7;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK    0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT               0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK  0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT             2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK   0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT              4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK       0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT                  6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK       0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT                  7
+       u8 flags8;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK       0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT                  0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK       0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT                  1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK       0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT                  2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK       0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT                  3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK       0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT                  4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK       0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT                  5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK       0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT                  6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK       0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT                  7
+       u8 flags9;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK      0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT                 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK      0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT                 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK      0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_SHIFT                 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK      0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT                 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK      0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT                 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK      0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT                 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT    6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT    7
+       u8 flags10;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK    0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT               0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK     0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT        1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT            2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK  0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT             3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK        0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT           4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK      0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK  0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT             6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK  0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT             7
+       u8 flags11;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK  0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT             0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK  0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT             1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK      0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT         2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK     0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT                3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK     0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT                4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK     0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT                5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK        0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT           6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK     0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT                7
+       u8 flags12;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK    0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT               0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK    0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT               1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK        0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT           2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK        0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT           3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK    0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT               4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK    0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT               5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK    0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT               6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK    0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT               7
+       u8 flags13;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK    0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT               0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK    0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT               1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK        0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT           2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK        0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT           3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK        0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT           4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK        0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT           5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK        0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT           6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK        0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT           7
+       u8 flags14;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK    0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT       0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK  0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT     1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK        0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT   2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK        0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT   3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK      0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT         4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK    0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT       5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK  0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT             6
+       u8 edpm_event_id;
+       __le16 physical_q0;
+       __le16 quota;
+       __le16 edpm_num_bds;
+       __le16 tx_bd_cons;
+       __le16 tx_bd_prod;
+       __le16 tx_class;
+       __le16 conn_dpi;
+       u8 byte3;
+       u8 byte4;
+       u8 byte5;
+       u8 byte6;
+       __le32 reg0;
+       __le32 reg1;
+       __le32 reg2;
+       __le32 reg3;
+       __le32 reg4;
+};
+
+struct xstorm_eth_hw_conn_ag_ctx {
+       u8 reserved0;
+       u8 eth_state;
+       u8 flags0;
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK    0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT           0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK       0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT              1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK       0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT              2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK    0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT           3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK       0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT              4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK       0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT              5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK       0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT              6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK       0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT              7
+       u8 flags1;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK       0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT              0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK       0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT              1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK       0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT              2
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT                  3
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT                  4
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT                  5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT         6
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK    0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT           7
+       u8 flags2;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT                    0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT                    2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT                    4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT                    6
+       u8 flags3;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT                    0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT                    2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT                    4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT                    6
+       u8 flags4;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT                    0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT                    2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK    0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT                   4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK    0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT                   6
+       u8 flags5;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK    0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT                   0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK    0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT                   2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK    0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT                   4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK    0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT                   6
+       u8 flags6;
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK        0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT       0
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK        0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT       2
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK   0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT                  4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK    0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT           6
+       u8 flags7;
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK        0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT               0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK      0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT             2
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK       0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT              4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT                  6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT                  7
+       u8 flags8;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT                  0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT                  1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT                  2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT                  3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT                  4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT                  5
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT                  6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT                  7
+       u8 flags9;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT                 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT                 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT                 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT                 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT                 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT                 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK     0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT    6
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK     0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT    7
+       u8 flags10;
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK        0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT               0
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT        1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK     0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT            2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK      0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT             3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK    0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT           4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK      0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT             6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK      0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT             7
+       u8 flags11;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK      0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT             0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK      0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT             1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT         2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT                3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT                4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT                5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK    0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT           6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT                7
+       u8 flags12;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK        0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT               0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK        0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT               1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK    0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT           2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK    0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT           3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK        0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT               4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK        0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT               5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK        0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT               6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK        0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT               7
+       u8 flags13;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK        0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT               0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK        0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT               1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK    0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT           2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK    0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT           3
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK    0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT           4
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK    0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT           5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK    0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT           6
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK    0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT           7
+       u8 flags14;
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK        0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT       0
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK      0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT     1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK    0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT   2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK    0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT   3
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT         4
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK        0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT       5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK      0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT             6
+       u8 edpm_event_id;
+       __le16 physical_q0;
+       __le16 quota;
+       __le16 edpm_num_bds;
+       __le16 tx_bd_cons;
+       __le16 tx_bd_prod;
+       __le16 tx_class;
+       __le16 conn_dpi;
+};
+
 struct mstorm_rdma_task_st_ctx {
        struct regpair temp[4];
 };
@@ -4839,7 +6951,7 @@ struct ystorm_roce_conn_st_ctx {
 };
 
 struct xstorm_roce_conn_st_ctx {
-       struct regpair temp[22];
+       struct regpair temp[24];
 };
 
 struct tstorm_roce_conn_st_ctx {
@@ -4894,7 +7006,7 @@ struct roce_create_qp_req_ramrod_data {
        __le16 mtu;
        __le16 pd;
        __le16 sq_num_pages;
-       __le16 reseved2;
+       __le16 low_latency_phy_queue;
        struct regpair sq_pbl_addr;
        struct regpair orq_pbl_addr;
        __le16 local_mac_addr[3];
@@ -4908,7 +7020,7 @@ struct roce_create_qp_req_ramrod_data {
        u8 stats_counter_id;
        u8 reserved3[7];
        __le32 cq_cid;
-       __le16 physical_queue0;
+       __le16 regular_latency_phy_queue;
        __le16 dpi;
 };
 
@@ -4926,8 +7038,8 @@ struct roce_create_qp_resp_ramrod_data {
 #define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_SHIFT             5
 #define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_MASK  0x1
 #define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_SHIFT 6
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED0_MASK            0x1
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED0_SHIFT           7
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_MASK   0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_SHIFT  7
 #define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_MASK                  0x7
 #define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_SHIFT                 8
 #define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK    0x1F
@@ -4956,15 +7068,16 @@ struct roce_create_qp_resp_ramrod_data {
        __le32 dst_gid[4];
        struct regpair qp_handle_for_cqe;
        struct regpair qp_handle_for_async;
-       __le32 reserved2[2];
+       __le16 low_latency_phy_queue;
+       u8 reserved2[6];
        __le32 cq_cid;
-       __le16 physical_queue0;
+       __le16 regular_latency_phy_queue;
        __le16 dpi;
 };
 
 struct roce_destroy_qp_req_output_params {
        __le32 num_bound_mw;
-       __le32 reserved;
+       __le32 cq_prod;
 };
 
 struct roce_destroy_qp_req_ramrod_data {
@@ -4973,7 +7086,7 @@ struct roce_destroy_qp_req_ramrod_data {
 
 struct roce_destroy_qp_resp_output_params {
        __le32 num_invalidated_mw;
-       __le32 reserved;
+       __le32 cq_prod;
 };
 
 struct roce_destroy_qp_resp_ramrod_data {
@@ -4988,6 +7101,10 @@ enum roce_event_opcode {
        MAX_ROCE_EVENT_OPCODE
 };
 
+struct roce_init_func_ramrod_data {
+       struct rdma_init_func_ramrod_data rdma;
+};
+
 struct roce_modify_qp_req_ramrod_data {
        __le16 flags;
 #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK      0x1
@@ -6083,6 +8200,769 @@ struct ystorm_roce_resp_conn_ag_ctx {
        __le32 reg3;
 };
 
+struct ystorm_fcoe_conn_st_ctx {
+       u8 func_mode;
+       u8 cos;
+       u8 conf_version;
+       u8 eth_hdr_size;
+       __le16 stat_ram_addr;
+       __le16 mtu;
+       __le16 max_fc_payload_len;
+       __le16 tx_max_fc_pay_len;
+       u8 fcp_cmd_size;
+       u8 fcp_rsp_size;
+       __le16 mss;
+       struct regpair reserved;
+       __le16 min_frame_size;
+       u8 protection_info_flags;
+#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK  0x1
+#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 0
+#define YSTORM_FCOE_CONN_ST_CTX_VALID_MASK               0x1
+#define YSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT              1
+#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_MASK           0x3F
+#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_SHIFT          2
+       u8 dst_protection_per_mss;
+       u8 src_protection_per_mss;
+       u8 ptu_log_page_size;
+       u8 flags;
+#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK     0x1
+#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT    0
+#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK     0x1
+#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT    1
+#define YSTORM_FCOE_CONN_ST_CTX_RSRV_MASK                0x3F
+#define YSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT               2
+       u8 fcp_xfer_size;
+};
+
+struct fcoe_vlan_fields {
+       __le16 fields;
+#define FCOE_VLAN_FIELDS_VID_MASK  0xFFF
+#define FCOE_VLAN_FIELDS_VID_SHIFT 0
+#define FCOE_VLAN_FIELDS_CLI_MASK  0x1
+#define FCOE_VLAN_FIELDS_CLI_SHIFT 12
+#define FCOE_VLAN_FIELDS_PRI_MASK  0x7
+#define FCOE_VLAN_FIELDS_PRI_SHIFT 13
+};
+
+union fcoe_vlan_field_union {
+       struct fcoe_vlan_fields fields;
+       __le16 val;
+};
+
+union fcoe_vlan_vif_field_union {
+       union fcoe_vlan_field_union vlan;
+       __le16 vif;
+};
+
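/*
 * Usage sketch (not from this hunk): the unions above let the same
 * 16-bit slot carry either a VLAN TCI or a VIF, depending on whether
 * the fabric runs VN-Tags. Decoding a TCI with GET_FIELD():
 */
static void example_decode_fcoe_vlan(__le16 raw, u16 *vid, u8 *pri)
{
	u16 tci = le16_to_cpu(raw);

	*vid = GET_FIELD(tci, FCOE_VLAN_FIELDS_VID);	/* bits 0..11  */
	*pri = GET_FIELD(tci, FCOE_VLAN_FIELDS_PRI);	/* bits 13..15 */
}
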
+struct pstorm_fcoe_eth_context_section {
+       u8 remote_addr_3;
+       u8 remote_addr_2;
+       u8 remote_addr_1;
+       u8 remote_addr_0;
+       u8 local_addr_1;
+       u8 local_addr_0;
+       u8 remote_addr_5;
+       u8 remote_addr_4;
+       u8 local_addr_5;
+       u8 local_addr_4;
+       u8 local_addr_3;
+       u8 local_addr_2;
+       union fcoe_vlan_vif_field_union vif_outer_vlan;
+       __le16 vif_outer_eth_type;
+       union fcoe_vlan_vif_field_union inner_vlan;
+       __le16 inner_eth_type;
+};
+
+struct pstorm_fcoe_conn_st_ctx {
+       u8 func_mode;
+       u8 cos;
+       u8 conf_version;
+       u8 rsrv;
+       __le16 stat_ram_addr;
+       __le16 mss;
+       struct regpair abts_cleanup_addr;
+       struct pstorm_fcoe_eth_context_section eth;
+       u8 sid_2;
+       u8 sid_1;
+       u8 sid_0;
+       u8 flags;
+#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_MASK          0x1
+#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_SHIFT         0
+#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_MASK  0x1
+#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_SHIFT 1
+#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK     0x1
+#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT    2
+#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK     0x1
+#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT    3
+#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK            0xF
+#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT           4
+       u8 did_2;
+       u8 did_1;
+       u8 did_0;
+       u8 src_mac_index;
+       __le16 rec_rr_tov_val;
+       u8 q_relative_offset;
+       u8 reserved1;
+};
+
+struct xstorm_fcoe_conn_st_ctx {
+       u8 func_mode;
+       u8 src_mac_index;
+       u8 conf_version;
+       u8 cached_wqes_avail;
+       __le16 stat_ram_addr;
+       u8 flags;
+#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_MASK             0x1
+#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_SHIFT            0
+#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK         0x1
+#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT        1
+#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_MASK    0x1
+#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_SHIFT   2
+#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_MASK      0x3
+#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_SHIFT     3
+#define XSTORM_FCOE_CONN_ST_CTX_RSRV_MASK                    0x7
+#define XSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT                   5
+       u8 cached_wqes_offset;
+       u8 reserved2;
+       u8 eth_hdr_size;
+       u8 seq_id;
+       u8 max_conc_seqs;
+       __le16 num_pages_in_pbl;
+       __le16 reserved;
+       struct regpair sq_pbl_addr;
+       struct regpair sq_curr_page_addr;
+       struct regpair sq_next_page_addr;
+       struct regpair xferq_pbl_addr;
+       struct regpair xferq_curr_page_addr;
+       struct regpair xferq_next_page_addr;
+       struct regpair respq_pbl_addr;
+       struct regpair respq_curr_page_addr;
+       struct regpair respq_next_page_addr;
+       __le16 mtu;
+       __le16 tx_max_fc_pay_len;
+       __le16 max_fc_payload_len;
+       __le16 min_frame_size;
+       __le16 sq_pbl_next_index;
+       __le16 respq_pbl_next_index;
+       u8 fcp_cmd_byte_credit;
+       u8 fcp_rsp_byte_credit;
+       __le16 protection_info;
+#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_MASK         0x1
+#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_SHIFT        0
+#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK      0x1
+#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT     1
+#define XSTORM_FCOE_CONN_ST_CTX_VALID_MASK                   0x1
+#define XSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT                  2
+#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_MASK      0x1
+#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_SHIFT     3
+#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_MASK               0xF
+#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_SHIFT              4
+#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_MASK  0xFF
+#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_SHIFT 8
+       __le16 xferq_pbl_next_index;
+       __le16 page_size;
+       u8 mid_seq;
+       u8 fcp_xfer_byte_credit;
+       u8 reserved1[2];
+       struct fcoe_wqe cached_wqes[16];
+};
+
+struct xstorm_fcoe_conn_ag_ctx {
+       u8 reserved0;
+       u8 fcoe_state;
+       u8 flags0;
+#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK       0x1
+#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT      0
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_MASK          0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_SHIFT         1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_MASK          0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_SHIFT         2
+#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_MASK       0x1
+#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT      3
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_MASK          0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_SHIFT         4
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_MASK          0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_SHIFT         5
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED5_MASK          0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED5_SHIFT         6
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_MASK          0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_SHIFT         7
+       u8 flags1;
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_MASK          0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_SHIFT         0
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED8_MASK          0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED8_SHIFT         1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_MASK          0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_SHIFT         2
+#define XSTORM_FCOE_CONN_AG_CTX_BIT11_MASK              0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT11_SHIFT             3
+#define XSTORM_FCOE_CONN_AG_CTX_BIT12_MASK              0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT12_SHIFT             4
+#define XSTORM_FCOE_CONN_AG_CTX_BIT13_MASK              0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT13_SHIFT             5
+#define XSTORM_FCOE_CONN_AG_CTX_BIT14_MASK              0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT14_SHIFT             6
+#define XSTORM_FCOE_CONN_AG_CTX_BIT15_MASK              0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT15_SHIFT             7
+       u8 flags2;
+#define XSTORM_FCOE_CONN_AG_CTX_CF0_MASK                0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT               0
+#define XSTORM_FCOE_CONN_AG_CTX_CF1_MASK                0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT               2
+#define XSTORM_FCOE_CONN_AG_CTX_CF2_MASK                0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT               4
+#define XSTORM_FCOE_CONN_AG_CTX_CF3_MASK                0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF3_SHIFT               6
+       u8 flags3;
+#define XSTORM_FCOE_CONN_AG_CTX_CF4_MASK                0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT               0
+#define XSTORM_FCOE_CONN_AG_CTX_CF5_MASK                0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT               2
+#define XSTORM_FCOE_CONN_AG_CTX_CF6_MASK                0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT               4
+#define XSTORM_FCOE_CONN_AG_CTX_CF7_MASK                0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT               6
+       u8 flags4;
+#define XSTORM_FCOE_CONN_AG_CTX_CF8_MASK                0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT               0
+#define XSTORM_FCOE_CONN_AG_CTX_CF9_MASK                0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT               2
+#define XSTORM_FCOE_CONN_AG_CTX_CF10_MASK               0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT              4
+#define XSTORM_FCOE_CONN_AG_CTX_CF11_MASK               0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF11_SHIFT              6
+       u8 flags5;
+#define XSTORM_FCOE_CONN_AG_CTX_CF12_MASK               0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF12_SHIFT              0
+#define XSTORM_FCOE_CONN_AG_CTX_CF13_MASK               0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF13_SHIFT              2
+#define XSTORM_FCOE_CONN_AG_CTX_CF14_MASK               0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF14_SHIFT              4
+#define XSTORM_FCOE_CONN_AG_CTX_CF15_MASK               0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF15_SHIFT              6
+       u8 flags6;
+#define XSTORM_FCOE_CONN_AG_CTX_CF16_MASK               0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF16_SHIFT              0
+#define XSTORM_FCOE_CONN_AG_CTX_CF17_MASK               0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF17_SHIFT              2
+#define XSTORM_FCOE_CONN_AG_CTX_CF18_MASK               0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF18_SHIFT              4
+#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_MASK              0x3
+#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_SHIFT             6
+       u8 flags7;
+#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_MASK           0x3
+#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_SHIFT          0
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_MASK         0x3
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_SHIFT        2
+#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_MASK          0x3
+#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_SHIFT         4
+#define XSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK              0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT             6
+#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK              0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT             7
+       u8 flags8;
+#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK              0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT             0
+#define XSTORM_FCOE_CONN_AG_CTX_CF3EN_MASK              0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT             1
+#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK              0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT             2
+#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK              0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT             3
+#define XSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK              0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT             4
+#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK              0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT             5
+#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK              0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT             6
+#define XSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK              0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT             7
+       u8 flags9;
+#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK             0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT            0
+#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_MASK             0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_SHIFT            1
+#define XSTORM_FCOE_CONN_AG_CTX_CF12EN_MASK             0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF12EN_SHIFT            2
+#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_MASK             0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_SHIFT            3
+#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_MASK             0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_SHIFT            4
+#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_MASK             0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_SHIFT            5
+#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_MASK             0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_SHIFT            6
+#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_MASK             0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_SHIFT            7
+       u8 flags10;
+#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_MASK             0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_SHIFT            0
+#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_MASK           0x1
+#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_SHIFT          1
+#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK        0x1
+#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT       2
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_MASK         0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_SHIFT        3
+#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_MASK       0x1
+#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT      4
+#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_MASK             0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_SHIFT            5
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_MASK         0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_SHIFT        6
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED13_MASK         0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED13_SHIFT        7
+       u8 flags11;
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED14_MASK         0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED14_SHIFT        0
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_MASK         0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_SHIFT        1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_MASK         0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_SHIFT        2
+#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK            0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT           3
+#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK            0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT           4
+#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK            0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT           5
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_MASK       0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_SHIFT      6
+#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_MASK  0x1
+#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_SHIFT 7
+       u8 flags12;
+#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_MASK     0x1
+#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_SHIFT    0
+#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_MASK           0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_SHIFT          1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_MASK       0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_SHIFT      2
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_MASK       0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_SHIFT      3
+#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_MASK           0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_SHIFT          4
+#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_MASK           0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_SHIFT          5
+#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_MASK           0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_SHIFT          6
+#define XSTORM_FCOE_CONN_AG_CTX_RULE17EN_MASK           0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE17EN_SHIFT          7
+       u8 flags13;
+#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_MASK  0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_MASK           0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_SHIFT          1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_MASK       0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_SHIFT      2
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_MASK       0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_SHIFT      3
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_MASK       0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_SHIFT      4
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_MASK       0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_SHIFT      5
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_MASK       0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_SHIFT      6
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_MASK       0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_SHIFT      7
+       u8 flags14;
+#define XSTORM_FCOE_CONN_AG_CTX_BIT16_MASK              0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT16_SHIFT             0
+#define XSTORM_FCOE_CONN_AG_CTX_BIT17_MASK              0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT17_SHIFT             1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT18_MASK              0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT18_SHIFT             2
+#define XSTORM_FCOE_CONN_AG_CTX_BIT19_MASK              0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT19_SHIFT             3
+#define XSTORM_FCOE_CONN_AG_CTX_BIT20_MASK              0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT20_SHIFT             4
+#define XSTORM_FCOE_CONN_AG_CTX_BIT21_MASK              0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT21_SHIFT             5
+#define XSTORM_FCOE_CONN_AG_CTX_CF23_MASK               0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF23_SHIFT              6
+       u8 byte2;
+       __le16 physical_q0;
+       __le16 word1;
+       __le16 word2;
+       __le16 sq_cons;
+       __le16 sq_prod;
+       __le16 xferq_prod;
+       __le16 xferq_cons;
+       u8 byte3;
+       u8 byte4;
+       u8 byte5;
+       u8 byte6;
+       __le32 remain_io;
+       __le32 reg1;
+       __le32 reg2;
+       __le32 reg3;
+       __le32 reg4;
+       __le32 reg5;
+       __le32 reg6;
+       __le16 respq_prod;
+       __le16 respq_cons;
+       __le16 word9;
+       __le16 word10;
+       __le32 reg7;
+       __le32 reg8;
+};
+
+struct ustorm_fcoe_conn_st_ctx {
+       struct regpair respq_pbl_addr;
+       __le16 num_pages_in_pbl;
+       u8 ptu_log_page_size;
+       u8 log_page_size;
+       __le16 respq_prod;
+       u8 reserved[2];
+};
+
+struct tstorm_fcoe_conn_ag_ctx {
+       u8 reserved0;
+       u8 fcoe_state;
+       u8 flags0;
+#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK          0x1
+#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT         0
+#define TSTORM_FCOE_CONN_AG_CTX_BIT1_MASK                  0x1
+#define TSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT                 1
+#define TSTORM_FCOE_CONN_AG_CTX_BIT2_MASK                  0x1
+#define TSTORM_FCOE_CONN_AG_CTX_BIT2_SHIFT                 2
+#define TSTORM_FCOE_CONN_AG_CTX_BIT3_MASK                  0x1
+#define TSTORM_FCOE_CONN_AG_CTX_BIT3_SHIFT                 3
+#define TSTORM_FCOE_CONN_AG_CTX_BIT4_MASK                  0x1
+#define TSTORM_FCOE_CONN_AG_CTX_BIT4_SHIFT                 4
+#define TSTORM_FCOE_CONN_AG_CTX_BIT5_MASK                  0x1
+#define TSTORM_FCOE_CONN_AG_CTX_BIT5_SHIFT                 5
+#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_MASK        0x3
+#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_SHIFT       6
+       u8 flags1;
+#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_MASK           0x3
+#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT          0
+#define TSTORM_FCOE_CONN_AG_CTX_CF2_MASK                   0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT                  2
+#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK     0x3
+#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT    4
+#define TSTORM_FCOE_CONN_AG_CTX_CF4_MASK                   0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT                  6
+       u8 flags2;
+#define TSTORM_FCOE_CONN_AG_CTX_CF5_MASK                   0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT                  0
+#define TSTORM_FCOE_CONN_AG_CTX_CF6_MASK                   0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT                  2
+#define TSTORM_FCOE_CONN_AG_CTX_CF7_MASK                   0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT                  4
+#define TSTORM_FCOE_CONN_AG_CTX_CF8_MASK                   0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT                  6
+       u8 flags3;
+#define TSTORM_FCOE_CONN_AG_CTX_CF9_MASK                   0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT                  0
+#define TSTORM_FCOE_CONN_AG_CTX_CF10_MASK                  0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT                 2
+#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_MASK     0x1
+#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_SHIFT    4
+#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK        0x1
+#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT       5
+#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK                 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT                6
+#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK  0x1
+#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
+       u8 flags4;
+#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK                 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT                0
+#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK                 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT                1
+#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK                 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT                2
+#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK                 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT                3
+#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK                 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT                4
+#define TSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK                 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT                5
+#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK                0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT               6
+#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK               0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT              7
+       u8 flags5;
+#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK               0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT              0
+#define TSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK               0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT              1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK               0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT              2
+#define TSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK               0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT              3
+#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK               0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT              4
+#define TSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK               0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT              5
+#define TSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK               0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT              6
+#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK               0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT              7
+       __le32 reg0;
+       __le32 reg1;
+};
+
+struct ustorm_fcoe_conn_ag_ctx {
+       u8 byte0;
+       u8 byte1;
+       u8 flags0;
+#define USTORM_FCOE_CONN_AG_CTX_BIT0_MASK     0x1
+#define USTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT    0
+#define USTORM_FCOE_CONN_AG_CTX_BIT1_MASK     0x1
+#define USTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT    1
+#define USTORM_FCOE_CONN_AG_CTX_CF0_MASK      0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF0_SHIFT     2
+#define USTORM_FCOE_CONN_AG_CTX_CF1_MASK      0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT     4
+#define USTORM_FCOE_CONN_AG_CTX_CF2_MASK      0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF2_SHIFT     6
+       u8 flags1;
+#define USTORM_FCOE_CONN_AG_CTX_CF3_MASK      0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF3_SHIFT     0
+#define USTORM_FCOE_CONN_AG_CTX_CF4_MASK      0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF4_SHIFT     2
+#define USTORM_FCOE_CONN_AG_CTX_CF5_MASK      0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF5_SHIFT     4
+#define USTORM_FCOE_CONN_AG_CTX_CF6_MASK      0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF6_SHIFT     6
+       u8 flags2;
+#define USTORM_FCOE_CONN_AG_CTX_CF0EN_MASK    0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT   0
+#define USTORM_FCOE_CONN_AG_CTX_CF1EN_MASK    0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT   1
+#define USTORM_FCOE_CONN_AG_CTX_CF2EN_MASK    0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT   2
+#define USTORM_FCOE_CONN_AG_CTX_CF3EN_MASK    0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT   3
+#define USTORM_FCOE_CONN_AG_CTX_CF4EN_MASK    0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT   4
+#define USTORM_FCOE_CONN_AG_CTX_CF5EN_MASK    0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT   5
+#define USTORM_FCOE_CONN_AG_CTX_CF6EN_MASK    0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT   6
+#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK  0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7
+       u8 flags3;
+#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK  0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK  0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK  0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK  0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK  0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK  0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK  0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK  0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7
+       u8 byte2;
+       u8 byte3;
+       __le16 word0;
+       __le16 word1;
+       __le32 reg0;
+       __le32 reg1;
+       __le32 reg2;
+       __le32 reg3;
+       __le16 word2;
+       __le16 word3;
+};
+
+struct tstorm_fcoe_conn_st_ctx {
+       __le16 stat_ram_addr;
+       __le16 rx_max_fc_payload_len;
+       __le16 e_d_tov_val;
+       u8 flags;
+#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_MASK   0x1
+#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_SHIFT  0
+#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_MASK  0x1
+#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_SHIFT 1
+#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_MASK     0x3F
+#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_SHIFT    2
+       u8 timers_cleanup_invocation_cnt;
+       __le32 reserved1[2];
+       __le32 dst_mac_address_bytes0to3;
+       __le16 dst_mac_address_bytes4to5;
+       __le16 ramrod_echo;
+       u8 flags1;
+#define TSTORM_FCOE_CONN_ST_CTX_MODE_MASK          0x3
+#define TSTORM_FCOE_CONN_ST_CTX_MODE_SHIFT         0
+#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK      0x3F
+#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT     2
+       u8 q_relative_offset;
+       u8 bdq_resource_id;
+       u8 reserved0[5];
+};
+
+struct mstorm_fcoe_conn_ag_ctx {
+       u8 byte0;
+       u8 byte1;
+       u8 flags0;
+#define MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK     0x1
+#define MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT    0
+#define MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK     0x1
+#define MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT    1
+#define MSTORM_FCOE_CONN_AG_CTX_CF0_MASK      0x3
+#define MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT     2
+#define MSTORM_FCOE_CONN_AG_CTX_CF1_MASK      0x3
+#define MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT     4
+#define MSTORM_FCOE_CONN_AG_CTX_CF2_MASK      0x3
+#define MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT     6
+       u8 flags1;
+#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK    0x1
+#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT   0
+#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK    0x1
+#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT   1
+#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK    0x1
+#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT   2
+#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK  0x1
+#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK  0x1
+#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK  0x1
+#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK  0x1
+#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK  0x1
+#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
+       __le16 word0;
+       __le16 word1;
+       __le32 reg0;
+       __le32 reg1;
+};
+
+struct fcoe_mstorm_fcoe_conn_st_ctx_fp {
+       __le16 xfer_prod;
+       __le16 reserved1;
+       u8 protection_info;
+#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_SUPPORT_PROTECTION_MASK  0x1
+#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_SUPPORT_PROTECTION_SHIFT 0
+#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_VALID_MASK               0x1
+#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_VALID_SHIFT              1
+#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_RESERVED0_MASK           0x3F
+#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_RESERVED0_SHIFT          2
+       u8 q_relative_offset;
+       u8 reserved2[2];
+};
+
+struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp {
+       __le16 conn_id;
+       __le16 stat_ram_addr;
+       __le16 num_pages_in_pbl;
+       u8 ptu_log_page_size;
+       u8 log_page_size;
+       __le16 unsolicited_cq_count;
+       __le16 cmdq_count;
+       u8 bdq_resource_id;
+       u8 reserved0[3];
+       struct regpair xferq_pbl_addr;
+       struct regpair reserved1;
+       struct regpair reserved2[3];
+};
+
+struct mstorm_fcoe_conn_st_ctx {
+       struct fcoe_mstorm_fcoe_conn_st_ctx_fp fp;
+       struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp non_fp;
+};
+
+struct fcoe_conn_context {
+       struct ystorm_fcoe_conn_st_ctx ystorm_st_context;
+       struct pstorm_fcoe_conn_st_ctx pstorm_st_context;
+       struct regpair pstorm_st_padding[2];
+       struct xstorm_fcoe_conn_st_ctx xstorm_st_context;
+       struct xstorm_fcoe_conn_ag_ctx xstorm_ag_context;
+       struct regpair xstorm_ag_padding[6];
+       struct ustorm_fcoe_conn_st_ctx ustorm_st_context;
+       struct regpair ustorm_st_padding[2];
+       struct tstorm_fcoe_conn_ag_ctx tstorm_ag_context;
+       struct regpair tstorm_ag_padding[2];
+       struct timers_context timer_context;
+       struct ustorm_fcoe_conn_ag_ctx ustorm_ag_context;
+       struct tstorm_fcoe_conn_st_ctx tstorm_st_context;
+       struct mstorm_fcoe_conn_ag_ctx mstorm_ag_context;
+       struct mstorm_fcoe_conn_st_ctx mstorm_st_context;
+};
+
+struct fcoe_conn_offload_ramrod_params {
+       struct fcoe_conn_offload_ramrod_data offload_ramrod_data;
+};
+
+struct fcoe_conn_terminate_ramrod_params {
+       struct fcoe_conn_terminate_ramrod_data terminate_ramrod_data;
+};
+
+enum fcoe_event_type {
+       FCOE_EVENT_INIT_FUNC,
+       FCOE_EVENT_DESTROY_FUNC,
+       FCOE_EVENT_STAT_FUNC,
+       FCOE_EVENT_OFFLOAD_CONN,
+       FCOE_EVENT_TERMINATE_CONN,
+       FCOE_EVENT_ERROR,
+       MAX_FCOE_EVENT_TYPE
+};
+
+struct fcoe_init_ramrod_params {
+       struct fcoe_init_func_ramrod_data init_ramrod_data;
+};
+
+enum fcoe_ramrod_cmd_id {
+       FCOE_RAMROD_CMD_ID_INIT_FUNC,
+       FCOE_RAMROD_CMD_ID_DESTROY_FUNC,
+       FCOE_RAMROD_CMD_ID_STAT_FUNC,
+       FCOE_RAMROD_CMD_ID_OFFLOAD_CONN,
+       FCOE_RAMROD_CMD_ID_TERMINATE_CONN,
+       MAX_FCOE_RAMROD_CMD_ID
+};
+
+struct fcoe_stat_ramrod_params {
+       struct fcoe_stat_ramrod_data stat_ramrod_data;
+};
+
+struct ystorm_fcoe_conn_ag_ctx {
+       u8 byte0;
+       u8 byte1;
+       u8 flags0;
+#define YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK     0x1
+#define YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT    0
+#define YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK     0x1
+#define YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT    1
+#define YSTORM_FCOE_CONN_AG_CTX_CF0_MASK      0x3
+#define YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT     2
+#define YSTORM_FCOE_CONN_AG_CTX_CF1_MASK      0x3
+#define YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT     4
+#define YSTORM_FCOE_CONN_AG_CTX_CF2_MASK      0x3
+#define YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT     6
+       u8 flags1;
+#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK    0x1
+#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT   0
+#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK    0x1
+#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT   1
+#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK    0x1
+#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT   2
+#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK  0x1
+#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK  0x1
+#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK  0x1
+#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK  0x1
+#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK  0x1
+#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
+       u8 byte2;
+       u8 byte3;
+       __le16 word0;
+       __le32 reg0;
+       __le32 reg1;
+       __le16 word1;
+       __le16 word2;
+       __le16 word3;
+       __le16 word4;
+       __le32 reg2;
+       __le32 reg3;
+};
+
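
Every *_MASK/*_SHIFT pair in the context structures above follows the driver's generic bit-field convention: the mask describes the field width after shifting it down, not in place. A minimal sketch of the accessor pattern these definitions are built for, in the spirit of qed.h's GET_FIELD/SET_FIELD helpers (treat the exact definition as illustrative, and ctx as a hypothetical struct fcoe_conn_context pointer):

	#define GET_FIELD(value, name) \
		(((value) >> (name ## _SHIFT)) & name ## _MASK)

	#define SET_FIELD(value, name, flag)				   \
		do {							   \
			(value) &= ~((name ## _MASK) << (name ## _SHIFT)); \
			(value) |= (((u64)(flag)) << (name ## _SHIFT));	   \
		} while (0)

	/* Example: enable RESPQ decision handling in flags13 above. */
	SET_FIELD(ctx->xstorm_ag_context.flags13,
		  XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN, 1);
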
 struct ystorm_iscsi_conn_st_ctx {
        __le32 reserved[4];
 };
@@ -6180,10 +9060,10 @@ struct xstorm_iscsi_conn_ag_ctx {
 #define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK                    0x3
 #define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT                   6
        u8 flags7;
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK                    0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT                   0
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_MASK                    0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_SHIFT                   2
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_MASK      0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_SHIFT        0
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_MASK      0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_SHIFT        2
 #define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK                   0x3
 #define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT                  4
 #define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK                       0x1
@@ -6229,10 +9109,10 @@ struct xstorm_iscsi_conn_ag_ctx {
 #define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT                     0
 #define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK                 0x1
 #define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT                1
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK                 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT                2
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_EN_MASK                 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT                3
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_MASK   0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_SHIFT     2
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_MASK   0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_SHIFT     3
 #define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK                0x1
 #define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT               4
 #define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK        0x1
@@ -6242,8 +9122,8 @@ struct xstorm_iscsi_conn_ag_ctx {
 #define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK    0x1
 #define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT   7
        u8 flags11;
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK                     0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT                    0
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_MASK    0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT              0
 #define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK                     0x1
 #define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT                    1
 #define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK                   0x1
@@ -6347,7 +9227,7 @@ struct xstorm_iscsi_conn_ag_ctx {
        __le32 reg10;
        __le32 reg11;
        __le32 exp_stat_sn;
-       __le32 reg13;
+       __le32 ongoing_fast_rxmit_seq;
        __le32 reg14;
        __le32 reg15;
        __le32 reg16;
@@ -6373,10 +9253,10 @@ struct tstorm_iscsi_conn_ag_ctx {
 #define TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK                0x3
 #define TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT               6
        u8 flags1;
-#define TSTORM_ISCSI_CONN_AG_CTX_CF1_MASK                0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT               0
-#define TSTORM_ISCSI_CONN_AG_CTX_CF2_MASK                0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT               2
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK     0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT      0
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK     0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT      2
 #define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK     0x3
 #define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT    4
 #define TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK                0x3
@@ -6397,10 +9277,10 @@ struct tstorm_iscsi_conn_ag_ctx {
 #define TSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT              2
 #define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK              0x1
 #define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT             4
-#define TSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK              0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT             5
-#define TSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK              0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT             6
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK  0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT   5
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK  0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT   6
 #define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK  0x1
 #define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
        u8 flags4;
@@ -6446,7 +9326,7 @@ struct tstorm_iscsi_conn_ag_ctx {
        __le32 reg6;
        __le32 reg7;
        __le32 reg8;
-       u8 byte2;
+       u8 cid_offload_cnt;
        u8 byte3;
        __le16 word0;
 };
@@ -6639,6 +9519,35 @@ struct ystorm_iscsi_conn_ag_ctx {
        __le32 reg2;
        __le32 reg3;
 };
+
+#define MFW_TRACE_SIGNATURE     0x25071946
+
+/* The trace in the buffer */
+#define MFW_TRACE_EVENTID_MASK          0x00ffff
+#define MFW_TRACE_PRM_SIZE_MASK         0x0f0000
+#define MFW_TRACE_PRM_SIZE_SHIFT        16
+#define MFW_TRACE_ENTRY_SIZE            3
+
+struct mcp_trace {
+       u32 signature;          /* Helps to identify that the trace is valid */
+       u32 size;               /* The size of the trace buffer in bytes */
+       u32 curr_level;         /* 2 - all will be written to the buffer
+                                * 1 - debug trace will not be written
+                                * 0 - just errors will be written to the buffer
+                                */
+       u32 modules_mask[2];    /* a bit per module, 1 means write it, 0 means
+                                * mask it.
+                                */
+
+       /* Warning: the following pointers are assumed to be 32 bits as they
+        * are used only in the MFW.
+        */
+       u32 trace_prod; /* The next trace will be written to this offset */
+       u32 trace_oldest; /* The oldest valid trace starts at this offset
+                          * (usually very close after the current producer).
+                          */
+};
+
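
The buffer that mcp_trace describes is circular: trace_prod and trace_oldest are byte offsets that wrap modulo size, and each entry begins with an MFW_TRACE_ENTRY_SIZE-byte header packed per the MFW_TRACE_* masks above. A hedged sketch of decoding one header (the in-tree parser lives in qed_debug.c; buf, offs and size are assumptions here):

	u32 hdr = buf[offs % size] |
		  (buf[(offs + 1) % size] << 8) |
		  (buf[(offs + 2) % size] << 16);  /* MFW_TRACE_ENTRY_SIZE bytes */
	u32 event_id   = hdr & MFW_TRACE_EVENTID_MASK;
	u32 num_params = (hdr & MFW_TRACE_PRM_SIZE_MASK) >>
			 MFW_TRACE_PRM_SIZE_SHIFT;
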
 #define VF_MAX_STATIC 192
 
 #define MCP_GLOB_PATH_MAX      2
@@ -6646,6 +9555,7 @@ struct ystorm_iscsi_conn_ag_ctx {
 #define MCP_GLOB_PORT_MAX      4
 #define MCP_GLOB_FUNC_MAX      16
 
+typedef u32 offsize_t;         /* In DWORDS !!! */
 /* Offset from the beginning of the MCP scratchpad */
 #define OFFSIZE_OFFSET_SHIFT   0
 #define OFFSIZE_OFFSET_MASK    0x0000ffff
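
offsize_t packs a scratchpad offset into its low 16 bits, in dword units as the typedef's comment warns. A sketch of recovering a byte offset from it (the helper name is hypothetical):

	static inline u32 spad_offsize_to_bytes(offsize_t offsize)
	{
		return ((offsize & OFFSIZE_OFFSET_MASK) >>
			OFFSIZE_OFFSET_SHIFT) << 2;	/* dwords -> bytes */
	}
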
@@ -6708,11 +9618,24 @@ struct eth_stats {
        u64 r511;
        u64 r1023;
        u64 r1518;
-       u64 r1522;
-       u64 r2047;
-       u64 r4095;
-       u64 r9216;
-       u64 r16383;
+
+       union {
+               struct {
+                       u64 r1522;
+                       u64 r2047;
+                       u64 r4095;
+                       u64 r9216;
+                       u64 r16383;
+               } bb0;
+               struct {
+                       u64 unused1;
+                       u64 r1519_to_max;
+                       u64 unused2;
+                       u64 unused3;
+                       u64 unused4;
+               } ah0;
+       } u0;
+
        u64 rfcs;
        u64 rxcf;
        u64 rxpf;
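
The u0/u1/u2 unions introduced here let a single eth_stats layout serve both ASIC families: BB parts report the fine-grained r1522..r16383 buckets, while AH parts collapse them into a single r1519_to_max counter. A hedged sketch of reading the RX side (is_chip_bb() stands in for the driver's real chip-type check and is not defined in this hunk):

	u64 rx_gt_1518;

	if (is_chip_bb(cdev))		/* hypothetical predicate */
		rx_gt_1518 = stats->u0.bb0.r1522 + stats->u0.bb0.r2047 +
			     stats->u0.bb0.r4095 + stats->u0.bb0.r9216 +
			     stats->u0.bb0.r16383;
	else
		rx_gt_1518 = stats->u0.ah0.r1519_to_max;
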
@@ -6729,14 +9652,36 @@ struct eth_stats {
        u64 t511;
        u64 t1023;
        u64 t1518;
-       u64 t2047;
-       u64 t4095;
-       u64 t9216;
-       u64 t16383;
+
+       union {
+               struct {
+                       u64 t2047;
+                       u64 t4095;
+                       u64 t9216;
+                       u64 t16383;
+               } bb1;
+               struct {
+                       u64 t1519_to_max;
+                       u64 unused6;
+                       u64 unused7;
+                       u64 unused8;
+               } ah1;
+       } u1;
+
        u64 txpf;
        u64 txpp;
-       u64 tlpiec;
-       u64 tncl;
+
+       union {
+               struct {
+                       u64 tlpiec;
+                       u64 tncl;
+               } bb2;
+               struct {
+                       u64 unused9;
+                       u64 unused10;
+               } ah2;
+       } u2;
+
        u64 rbyte;
        u64 rxuca;
        u64 rxmca;
@@ -6820,12 +9765,12 @@ struct dcbx_ets_feature {
 #define DCBX_ETS_CBS_SHIFT     3
 #define DCBX_ETS_MAX_TCS_MASK  0x000000f0
 #define DCBX_ETS_MAX_TCS_SHIFT 4
-#define DCBX_ISCSI_OOO_TC_MASK 0x00000f00
-#define DCBX_ISCSI_OOO_TC_SHIFT        8
+#define DCBX_OOO_TC_MASK       0x00000f00
+#define DCBX_OOO_TC_SHIFT      8
        u32 pri_tc_tbl[1];
-#define DCBX_ISCSI_OOO_TC      (4)
+#define DCBX_TCP_OOO_TC                (4)
 
-#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET        (DCBX_ISCSI_OOO_TC + 1)
+#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET        (DCBX_TCP_OOO_TC + 1)
 #define DCBX_CEE_STRICT_PRIORITY       0xf
        u32 tc_bw_tbl[2];
        u32 tc_tsa_tbl[2];
@@ -6834,6 +9779,9 @@ struct dcbx_ets_feature {
 #define DCBX_ETS_TSA_ETS       2
 };
 
+#define DCBX_TCP_OOO_TC                        (4)
+#define DCBX_TCP_OOO_K2_4PORT_TC       (3)
+
 struct dcbx_app_priority_entry {
        u32 entry;
 #define DCBX_APP_PRI_MAP_MASK          0x000000ff
@@ -6944,6 +9892,10 @@ struct dcb_dscp_map {
 struct public_global {
        u32 max_path;
        u32 max_ports;
+#define MODE_1P 1
+#define MODE_2P 2
+#define MODE_3P 3
+#define MODE_4P 4
        u32 debug_mb_offset;
        u32 phymod_dbg_mb_offset;
        struct couple_mode_teaming cmt;
@@ -7081,6 +10033,7 @@ struct public_func {
 #define FUNC_MF_CFG_PROTOCOL_SHIFT     4
 #define FUNC_MF_CFG_PROTOCOL_ETHERNET  0x00000000
 #define FUNC_MF_CFG_PROTOCOL_ISCSI              0x00000010
+#define FUNC_MF_CFG_PROTOCOL_FCOE               0x00000020
 #define FUNC_MF_CFG_PROTOCOL_ROCE               0x00000030
 #define FUNC_MF_CFG_PROTOCOL_MAX       0x00000030
 
@@ -7124,9 +10077,11 @@ struct public_func {
 #define DRV_ID_PDA_COMP_VER_MASK       0x0000ffff
 #define DRV_ID_PDA_COMP_VER_SHIFT      0
 
+#define LOAD_REQ_HSI_VERSION           2
 #define DRV_ID_MCP_HSI_VER_MASK                0x00ff0000
 #define DRV_ID_MCP_HSI_VER_SHIFT       16
-#define DRV_ID_MCP_HSI_VER_CURRENT     (1 << DRV_ID_MCP_HSI_VER_SHIFT)
+#define DRV_ID_MCP_HSI_VER_CURRENT     (LOAD_REQ_HSI_VERSION << \
+                                        DRV_ID_MCP_HSI_VER_SHIFT)
 
 #define DRV_ID_DRV_TYPE_MASK           0x7f000000
 #define DRV_ID_DRV_TYPE_SHIFT          24
@@ -7175,6 +10130,13 @@ struct lan_stats_stc {
        u32 rserved;
 };
 
+struct fcoe_stats_stc {
+       u64 rx_pkts;
+       u64 tx_pkts;
+       u32 fcs_err;
+       u32 login_failure;
+};
+
 struct ocbb_data_stc {
        u32 ocbb_host_addr;
        u32 ocsd_host_addr;
@@ -7196,6 +10158,82 @@ struct mdump_config_stc {
        u32 valid_logs;
 };
 
+enum resource_id_enum {
+       RESOURCE_NUM_SB_E = 0,
+       RESOURCE_NUM_L2_QUEUE_E = 1,
+       RESOURCE_NUM_VPORT_E = 2,
+       RESOURCE_NUM_VMQ_E = 3,
+       RESOURCE_FACTOR_NUM_RSS_PF_E = 4,
+       RESOURCE_FACTOR_RSS_PER_VF_E = 5,
+       RESOURCE_NUM_RL_E = 6,
+       RESOURCE_NUM_PQ_E = 7,
+       RESOURCE_NUM_VF_E = 8,
+       RESOURCE_VFC_FILTER_E = 9,
+       RESOURCE_ILT_E = 10,
+       RESOURCE_CQS_E = 11,
+       RESOURCE_GFT_PROFILES_E = 12,
+       RESOURCE_NUM_TC_E = 13,
+       RESOURCE_NUM_RSS_ENGINES_E = 14,
+       RESOURCE_LL2_QUEUE_E = 15,
+       RESOURCE_RDMA_STATS_QUEUE_E = 16,
+       RESOURCE_BDQ_E = 17,
+       RESOURCE_MAX_NUM,
+       RESOURCE_NUM_INVALID = 0xFFFFFFFF
+};
+
+/* Resource ID is to be filled by the driver in the MB request
+ * Size, offset & flags to be filled by the MFW in the MB response
+ */
+struct resource_info {
+       enum resource_id_enum res_id;
+       u32 size;               /* number of allocated resources */
+       u32 offset;             /* Offset of the 1st resource */
+       u32 vf_size;
+       u32 vf_offset;
+       u32 flags;
+#define RESOURCE_ELEMENT_STRICT (1 << 0)
+};
+
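
Per the comment above, only res_id travels from driver to MFW; the MFW answers by filling size, offset, vf_size, vf_offset and flags. A sketch of that round trip (mcp_get_resc_info() is a hypothetical stand-in for the mailbox call):

	struct resource_info resc = { .res_id = RESOURCE_NUM_L2_QUEUE_E };

	if (!mcp_get_resc_info(p_hwfn, &resc))	/* MFW fills the response */
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "L2 queues: %u at offset %u, strict=%u\n",
			   resc.size, resc.offset,
			   !!(resc.flags & RESOURCE_ELEMENT_STRICT));
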
+#define DRV_ROLE_NONE           0
+#define DRV_ROLE_PREBOOT        1
+#define DRV_ROLE_OS             2
+#define DRV_ROLE_KDUMP          3
+
+struct load_req_stc {
+       u32 drv_ver_0;
+       u32 drv_ver_1;
+       u32 fw_ver;
+       u32 misc0;
+#define LOAD_REQ_ROLE_MASK              0x000000FF
+#define LOAD_REQ_ROLE_SHIFT             0
+#define LOAD_REQ_LOCK_TO_MASK           0x0000FF00
+#define LOAD_REQ_LOCK_TO_SHIFT          8
+#define LOAD_REQ_LOCK_TO_DEFAULT        0
+#define LOAD_REQ_LOCK_TO_NONE           255
+#define LOAD_REQ_FORCE_MASK             0x000F0000
+#define LOAD_REQ_FORCE_SHIFT            16
+#define LOAD_REQ_FORCE_NONE             0
+#define LOAD_REQ_FORCE_PF               1
+#define LOAD_REQ_FORCE_ALL              2
+#define LOAD_REQ_FLAGS0_MASK            0x00F00000
+#define LOAD_REQ_FLAGS0_SHIFT           20
+#define LOAD_REQ_FLAGS0_AVOID_RESET     (0x1 << 0)
+};
+
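
misc0 carries the role, lock timeout and force policy as packed fields. A sketch of composing it for a normal OS load under the masks above (driver/firmware version values are assumed to be filled elsewhere):

	struct load_req_stc req = { 0 };

	req.misc0 |= (DRV_ROLE_OS << LOAD_REQ_ROLE_SHIFT) &
		     LOAD_REQ_ROLE_MASK;
	req.misc0 |= (LOAD_REQ_LOCK_TO_DEFAULT << LOAD_REQ_LOCK_TO_SHIFT) &
		     LOAD_REQ_LOCK_TO_MASK;
	req.misc0 |= (LOAD_REQ_FORCE_NONE << LOAD_REQ_FORCE_SHIFT) &
		     LOAD_REQ_FORCE_MASK;
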
+struct load_rsp_stc {
+       u32 drv_ver_0;
+       u32 drv_ver_1;
+       u32 fw_ver;
+       u32 misc0;
+#define LOAD_RSP_ROLE_MASK              0x000000FF
+#define LOAD_RSP_ROLE_SHIFT             0
+#define LOAD_RSP_HSI_MASK               0x0000FF00
+#define LOAD_RSP_HSI_SHIFT              8
+#define LOAD_RSP_FLAGS0_MASK            0x000F0000
+#define LOAD_RSP_FLAGS0_SHIFT           16
+#define LOAD_RSP_FLAGS0_DRV_EXISTS      (0x1 << 0)
+};
+
 union drv_union_data {
        u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD];
        struct mcp_mac wol_mac;
@@ -7213,9 +10251,10 @@ union drv_union_data {
        struct drv_version_stc drv_version;
 
        struct lan_stats_stc lan_stats;
-       u64 reserved_stats[11];
+       struct fcoe_stats_stc fcoe_stats;
        struct ocbb_data_stc ocbb_info;
        struct temperature_status_stc temp_info;
+       struct resource_info resource;
        struct bist_nvm_image_att nvm_image_att;
        struct mdump_config_stc mdump_config;
 };
@@ -7226,28 +10265,94 @@ struct public_drv_mb {
 #define DRV_MSG_CODE_LOAD_REQ                  0x10000000
 #define DRV_MSG_CODE_LOAD_DONE                 0x11000000
 #define DRV_MSG_CODE_INIT_HW                   0x12000000
+#define DRV_MSG_CODE_CANCEL_LOAD_REQ            0x13000000
 #define DRV_MSG_CODE_UNLOAD_REQ                        0x20000000
 #define DRV_MSG_CODE_UNLOAD_DONE               0x21000000
 #define DRV_MSG_CODE_INIT_PHY                  0x22000000
 #define DRV_MSG_CODE_LINK_RESET                        0x23000000
 #define DRV_MSG_CODE_SET_DCBX                  0x25000000
+#define DRV_MSG_CODE_OV_UPDATE_CURR_CFG         0x26000000
+#define DRV_MSG_CODE_OV_UPDATE_BUS_NUM          0x27000000
+#define DRV_MSG_CODE_OV_UPDATE_BOOT_PROGRESS    0x28000000
+#define DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER     0x29000000
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE     0x31000000
+#define DRV_MSG_CODE_BW_UPDATE_ACK              0x32000000
+#define DRV_MSG_CODE_OV_UPDATE_MTU              0x33000000
+#define DRV_MSG_GET_RESOURCE_ALLOC_MSG         0x34000000
+#define DRV_MSG_SET_RESOURCE_VALUE_MSG         0x35000000
+#define DRV_MSG_CODE_OV_UPDATE_WOL              0x38000000
+#define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE     0x39000000
 
 #define DRV_MSG_CODE_BW_UPDATE_ACK             0x32000000
 #define DRV_MSG_CODE_NIG_DRAIN                 0x30000000
+#define DRV_MSG_CODE_INITIATE_PF_FLR            0x02010000
 #define DRV_MSG_CODE_VF_DISABLED_DONE          0xc0000000
 #define DRV_MSG_CODE_CFG_VF_MSIX               0xc0010000
+#define DRV_MSG_CODE_NVM_GET_FILE_ATT          0x00030000
+#define DRV_MSG_CODE_NVM_READ_NVRAM            0x00050000
 #define DRV_MSG_CODE_MCP_RESET                 0x00090000
 #define DRV_MSG_CODE_SET_VERSION               0x000f0000
+#define DRV_MSG_CODE_MCP_HALT                   0x00100000
+#define DRV_MSG_CODE_SET_VMAC                   0x00110000
+#define DRV_MSG_CODE_GET_VMAC                   0x00120000
+#define DRV_MSG_CODE_VMAC_TYPE_SHIFT            4
+#define DRV_MSG_CODE_VMAC_TYPE_MASK             0x30
+#define DRV_MSG_CODE_VMAC_TYPE_MAC              1
+#define DRV_MSG_CODE_VMAC_TYPE_WWNN             2
+#define DRV_MSG_CODE_VMAC_TYPE_WWPN             3
+
+#define DRV_MSG_CODE_GET_STATS                  0x00130000
+#define DRV_MSG_CODE_STATS_TYPE_LAN             1
+#define DRV_MSG_CODE_STATS_TYPE_FCOE            2
+#define DRV_MSG_CODE_STATS_TYPE_ISCSI           3
+#define DRV_MSG_CODE_STATS_TYPE_RDMA            4
+
+#define DRV_MSG_CODE_MASK_PARITIES              0x001a0000
 
 #define DRV_MSG_CODE_BIST_TEST                 0x001e0000
 #define DRV_MSG_CODE_SET_LED_MODE              0x00200000
+#define DRV_MSG_CODE_RESOURCE_CMD      0x00230000
+
+#define RESOURCE_CMD_REQ_RESC_MASK             0x0000001F
+#define RESOURCE_CMD_REQ_RESC_SHIFT            0
+#define RESOURCE_CMD_REQ_OPCODE_MASK           0x000000E0
+#define RESOURCE_CMD_REQ_OPCODE_SHIFT          5
+#define RESOURCE_OPCODE_REQ                    1
+#define RESOURCE_OPCODE_REQ_WO_AGING           2
+#define RESOURCE_OPCODE_REQ_W_AGING            3
+#define RESOURCE_OPCODE_RELEASE                        4
+#define RESOURCE_OPCODE_FORCE_RELEASE          5
+#define RESOURCE_CMD_REQ_AGE_MASK              0x0000FF00
+#define RESOURCE_CMD_REQ_AGE_SHIFT             8
+
+#define RESOURCE_CMD_RSP_OWNER_MASK            0x000000FF
+#define RESOURCE_CMD_RSP_OWNER_SHIFT           0
+#define RESOURCE_CMD_RSP_OPCODE_MASK           0x00000700
+#define RESOURCE_CMD_RSP_OPCODE_SHIFT          8
+#define RESOURCE_OPCODE_GNT                    1
+#define RESOURCE_OPCODE_BUSY                   2
+#define RESOURCE_OPCODE_RELEASED               3
+#define RESOURCE_OPCODE_RELEASED_PREVIOUS      4
+#define RESOURCE_OPCODE_WRONG_OWNER            5
+#define RESOURCE_OPCODE_UNKNOWN_CMD            255
+
+#define RESOURCE_DUMP                          0
+
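
The RESOURCE_CMD_REQ_* fields above are packed into drv_mb_param when issuing DRV_MSG_CODE_RESOURCE_CMD. A sketch of requesting the RESOURCE_DUMP lock with aging (the age value of 10 is purely illustrative):

	u32 param = 0;

	param |= (RESOURCE_DUMP << RESOURCE_CMD_REQ_RESC_SHIFT) &
		 RESOURCE_CMD_REQ_RESC_MASK;
	param |= (RESOURCE_OPCODE_REQ_W_AGING << RESOURCE_CMD_REQ_OPCODE_SHIFT) &
		 RESOURCE_CMD_REQ_OPCODE_MASK;
	param |= (10 << RESOURCE_CMD_REQ_AGE_SHIFT) &
		 RESOURCE_CMD_REQ_AGE_MASK;
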
+#define DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL      0x002b0000
+#define DRV_MSG_CODE_OS_WOL                    0x002e0000
 
 #define DRV_MSG_SEQ_NUMBER_MASK                        0x0000ffff
 
        u32 drv_mb_param;
-#define DRV_MB_PARAM_UNLOAD_WOL_MCP            0x00000001
+#define DRV_MB_PARAM_UNLOAD_WOL_UNKNOWN         0x00000000
+#define DRV_MB_PARAM_UNLOAD_WOL_MCP             0x00000001
+#define DRV_MB_PARAM_UNLOAD_WOL_DISABLED        0x00000002
+#define DRV_MB_PARAM_UNLOAD_WOL_ENABLED         0x00000003
 #define DRV_MB_PARAM_DCBX_NOTIFY_MASK          0x000000FF
 #define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT         3
+
+#define DRV_MB_PARAM_NVM_LEN_SHIFT             24
+
 #define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT   0
 #define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK    0x000000FF
 #define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT  8
@@ -7255,13 +10360,59 @@ struct public_drv_mb {
 #define DRV_MB_PARAM_LLDP_SEND_MASK            0x00000001
 #define DRV_MB_PARAM_LLDP_SEND_SHIFT           0
 
+#define DRV_MB_PARAM_OV_CURR_CFG_SHIFT         0
+#define DRV_MB_PARAM_OV_CURR_CFG_MASK          0x0000000F
+#define DRV_MB_PARAM_OV_CURR_CFG_NONE          0
+#define DRV_MB_PARAM_OV_CURR_CFG_OS            1
+#define DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC   2
+#define DRV_MB_PARAM_OV_CURR_CFG_OTHER         3
+
+#define DRV_MB_PARAM_OV_STORM_FW_VER_SHIFT     0
+#define DRV_MB_PARAM_OV_STORM_FW_VER_MASK      0xFFFFFFFF
+#define DRV_MB_PARAM_OV_STORM_FW_VER_MAJOR_MASK        0xFF000000
+#define DRV_MB_PARAM_OV_STORM_FW_VER_MINOR_MASK        0x00FF0000
+#define DRV_MB_PARAM_OV_STORM_FW_VER_BUILD_MASK        0x0000FF00
+#define DRV_MB_PARAM_OV_STORM_FW_VER_DROP_MASK 0x000000FF
+
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_SHIFT      0
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_MASK       0xF
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_UNKNOWN    0x1
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED 0x2
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_LOADING    0x3
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED   0x4
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE     0x5
+
+#define DRV_MB_PARAM_OV_MTU_SIZE_SHIFT 0
+#define DRV_MB_PARAM_OV_MTU_SIZE_MASK  0xFFFFFFFF
+
+#define DRV_MB_PARAM_WOL_MASK  (DRV_MB_PARAM_WOL_DEFAULT | \
+                                DRV_MB_PARAM_WOL_DISABLED | \
+                                DRV_MB_PARAM_WOL_ENABLED)
+#define DRV_MB_PARAM_WOL_DEFAULT       DRV_MB_PARAM_UNLOAD_WOL_MCP
+#define DRV_MB_PARAM_WOL_DISABLED      DRV_MB_PARAM_UNLOAD_WOL_DISABLED
+#define DRV_MB_PARAM_WOL_ENABLED       DRV_MB_PARAM_UNLOAD_WOL_ENABLED
+
+#define DRV_MB_PARAM_ESWITCH_MODE_MASK (DRV_MB_PARAM_ESWITCH_MODE_NONE | \
+                                        DRV_MB_PARAM_ESWITCH_MODE_VEB | \
+                                        DRV_MB_PARAM_ESWITCH_MODE_VEPA)
+#define DRV_MB_PARAM_ESWITCH_MODE_NONE 0x0
+#define DRV_MB_PARAM_ESWITCH_MODE_VEB  0x1
+#define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2
 
 #define DRV_MB_PARAM_SET_LED_MODE_OPER         0x0
 #define DRV_MB_PARAM_SET_LED_MODE_ON           0x1
 #define DRV_MB_PARAM_SET_LED_MODE_OFF          0x2
 
+       /* Resource Allocation params - Driver version support */
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT        16
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000FFFF
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT        0
+
 #define DRV_MB_PARAM_BIST_REGISTER_TEST                1
 #define DRV_MB_PARAM_BIST_CLOCK_TEST           2
+#define DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES  3
+#define DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX      4
 
 #define DRV_MB_PARAM_BIST_RC_UNKNOWN           0
 #define DRV_MB_PARAM_BIST_RC_PASSED            1
@@ -7270,26 +10421,50 @@ struct public_drv_mb {
 
 #define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT     0
 #define DRV_MB_PARAM_BIST_TEST_INDEX_MASK      0x000000FF
+#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT       8
+#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_MASK                0x0000FF00
 
        u32 fw_mb_header;
 #define FW_MSG_CODE_MASK                       0xffff0000
+#define FW_MSG_CODE_UNSUPPORTED                 0x00000000
 #define FW_MSG_CODE_DRV_LOAD_ENGINE            0x10100000
 #define FW_MSG_CODE_DRV_LOAD_PORT              0x10110000
 #define FW_MSG_CODE_DRV_LOAD_FUNCTION          0x10120000
 #define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA       0x10200000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI       0x10210000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1     0x10210000
 #define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG      0x10220000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI        0x10230000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE 0x10300000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT     0x10310000
 #define FW_MSG_CODE_DRV_LOAD_DONE              0x11100000
 #define FW_MSG_CODE_DRV_UNLOAD_ENGINE          0x20110000
 #define FW_MSG_CODE_DRV_UNLOAD_PORT            0x20120000
 #define FW_MSG_CODE_DRV_UNLOAD_FUNCTION                0x20130000
 #define FW_MSG_CODE_DRV_UNLOAD_DONE            0x21100000
+#define FW_MSG_CODE_RESOURCE_ALLOC_OK           0x34000000
+#define FW_MSG_CODE_RESOURCE_ALLOC_UNKNOWN      0x35000000
+#define FW_MSG_CODE_RESOURCE_ALLOC_DEPRECATED   0x36000000
 #define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE       0xb0010000
+
+#define FW_MSG_CODE_NVM_OK                     0x00010000
 #define FW_MSG_CODE_OK                         0x00160000
 
+#define FW_MSG_CODE_OS_WOL_SUPPORTED            0x00800000
+#define FW_MSG_CODE_OS_WOL_NOT_SUPPORTED        0x00810000
+
 #define FW_MSG_SEQ_NUMBER_MASK                 0x0000ffff
 
        u32 fw_mb_param;
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK  0xFFFF0000
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK  0x0000FFFF
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0
+
+       /* get pf rdma protocol command response */
+#define FW_MB_PARAM_GET_PF_RDMA_NONE           0x0
+#define FW_MB_PARAM_GET_PF_RDMA_ROCE           0x1
+#define FW_MB_PARAM_GET_PF_RDMA_IWARP          0x2
+#define FW_MB_PARAM_GET_PF_RDMA_BOTH           0x3
 
        u32 drv_pulse_mb;
 #define DRV_PULSE_SEQ_MASK                     0x00007fff
@@ -7315,10 +10490,10 @@ enum MFW_DRV_MSG_TYPE {
        MFW_DRV_MSG_RESERVED4,
        MFW_DRV_MSG_BW_UPDATE,
        MFW_DRV_MSG_BW_UPDATE5,
-       MFW_DRV_MSG_BW_UPDATE6,
-       MFW_DRV_MSG_BW_UPDATE7,
-       MFW_DRV_MSG_BW_UPDATE8,
-       MFW_DRV_MSG_BW_UPDATE9,
+       MFW_DRV_MSG_GET_LAN_STATS,
+       MFW_DRV_MSG_GET_FCOE_STATS,
+       MFW_DRV_MSG_GET_ISCSI_STATS,
+       MFW_DRV_MSG_GET_RDMA_STATS,
        MFW_DRV_MSG_BW_UPDATE10,
        MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
        MFW_DRV_MSG_BW_UPDATE11,
@@ -7394,6 +10569,8 @@ struct nvm_cfg1_glob {
 #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G          0xC
 #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G          0xD
 #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G          0xE
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G          0xF
+
        u32 e_lane_cfg1;
        u32 e_lane_cfg2;
        u32 f_lane_cfg1;
@@ -7418,6 +10595,7 @@ struct nvm_cfg1_glob {
        u32 misc_sig;
        u32 device_capabilities;
 #define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET     0x1
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE         0x2
 #define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI                0x4
 #define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE         0x8
        u32 power_dissipated;
@@ -7521,4 +10699,101 @@ struct nvm_cfg1 {
        struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX];
        struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX];
 };
+
+enum spad_sections {
+       SPAD_SECTION_TRACE,
+       SPAD_SECTION_NVM_CFG,
+       SPAD_SECTION_PUBLIC,
+       SPAD_SECTION_PRIVATE,
+       SPAD_SECTION_MAX
+};
+
+#define MCP_TRACE_SIZE          2048   /* 2 KB */
+
+/* This section is located at a fixed location in the beginning of the
+ * scratchpad, to ensure that the MCP trace is not run over during MFW upgrade.
+ * All the rest of the data has a floating location which differs from
+ * version to version, and is pointed to by the mcp_meta_data below.
+ * Moreover, the spad_layout section is part of the MFW firmware, and is loaded
+ * with it from nvram in order to clear this portion.
+ */
+struct static_init {
+       u32 num_sections;
+       offsize_t sections[SPAD_SECTION_MAX];
+#define SECTION(_sec_) (*((offsize_t *)(STRUCT_OFFSET(sections[_sec_]))))
+
+       struct mcp_trace trace;
+#define MCP_TRACE_P ((struct mcp_trace *)(STRUCT_OFFSET(trace)))
+       u8 trace_buffer[MCP_TRACE_SIZE];
+#define MCP_TRACE_BUF ((u8 *)(STRUCT_OFFSET(trace_buffer)))
+       /* running_mfw has the same definition as in nvm_map.h.
+        * This bit indicates both the running dir and the running bundle.
+        * It is set once when the LIM is loaded.
+        */
+       u32 running_mfw;
+#define RUNNING_MFW (*((u32 *)(STRUCT_OFFSET(running_mfw))))
+       u32 build_time;
+#define MFW_BUILD_TIME (*((u32 *)(STRUCT_OFFSET(build_time))))
+       u32 reset_type;
+#define RESET_TYPE (*((u32 *)(STRUCT_OFFSET(reset_type))))
+       u32 mfw_secure_mode;
+#define MFW_SECURE_MODE (*((u32 *)(STRUCT_OFFSET(mfw_secure_mode))))
+       u16 pme_status_pf_bitmap;
+#define PME_STATUS_PF_BITMAP (*((u16 *)(STRUCT_OFFSET(pme_status_pf_bitmap))))
+       u16 pme_enable_pf_bitmap;
+#define PME_ENABLE_PF_BITMAP (*((u16 *)(STRUCT_OFFSET(pme_enable_pf_bitmap))))
+       u32 mim_nvm_addr;
+       u32 mim_start_addr;
+       u32 ah_pcie_link_params;
+#define AH_PCIE_LINK_PARAMS_LINK_SPEED_MASK     (0x000000ff)
+#define AH_PCIE_LINK_PARAMS_LINK_SPEED_SHIFT    (0)
+#define AH_PCIE_LINK_PARAMS_LINK_WIDTH_MASK     (0x0000ff00)
+#define AH_PCIE_LINK_PARAMS_LINK_WIDTH_SHIFT    (8)
+#define AH_PCIE_LINK_PARAMS_ASPM_MODE_MASK      (0x00ff0000)
+#define AH_PCIE_LINK_PARAMS_ASPM_MODE_SHIFT     (16)
+#define AH_PCIE_LINK_PARAMS_ASPM_CAP_MASK       (0xff000000)
+#define AH_PCIE_LINK_PARAMS_ASPM_CAP_SHIFT      (24)
+#define AH_PCIE_LINK_PARAMS (*((u32 *)(STRUCT_OFFSET(ah_pcie_link_params))))
+
+       u32 rsrv_persist[5];    /* Persist reserved for MFW upgrades */
+};
+
+enum nvm_image_type {
+       NVM_TYPE_TIM1 = 0x01,
+       NVM_TYPE_TIM2 = 0x02,
+       NVM_TYPE_MIM1 = 0x03,
+       NVM_TYPE_MIM2 = 0x04,
+       NVM_TYPE_MBA = 0x05,
+       NVM_TYPE_MODULES_PN = 0x06,
+       NVM_TYPE_VPD = 0x07,
+       NVM_TYPE_MFW_TRACE1 = 0x08,
+       NVM_TYPE_MFW_TRACE2 = 0x09,
+       NVM_TYPE_NVM_CFG1 = 0x0a,
+       NVM_TYPE_L2B = 0x0b,
+       NVM_TYPE_DIR1 = 0x0c,
+       NVM_TYPE_EAGLE_FW1 = 0x0d,
+       NVM_TYPE_FALCON_FW1 = 0x0e,
+       NVM_TYPE_PCIE_FW1 = 0x0f,
+       NVM_TYPE_HW_SET = 0x10,
+       NVM_TYPE_LIM = 0x11,
+       NVM_TYPE_AVS_FW1 = 0x12,
+       NVM_TYPE_DIR2 = 0x13,
+       NVM_TYPE_CCM = 0x14,
+       NVM_TYPE_EAGLE_FW2 = 0x15,
+       NVM_TYPE_FALCON_FW2 = 0x16,
+       NVM_TYPE_PCIE_FW2 = 0x17,
+       NVM_TYPE_AVS_FW2 = 0x18,
+       NVM_TYPE_INIT_HW = 0x19,
+       NVM_TYPE_DEFAULT_CFG = 0x1a,
+       NVM_TYPE_MDUMP = 0x1b,
+       NVM_TYPE_META = 0x1c,
+       NVM_TYPE_ISCSI_CFG = 0x1d,
+       NVM_TYPE_FCOE_CFG = 0x1f,
+       NVM_TYPE_ETH_PHY_FW1 = 0x20,
+       NVM_TYPE_ETH_PHY_FW2 = 0x21,
+       NVM_TYPE_MAX,
+};
+
+#define DIR_ID_1    (0)
+
 #endif
index e17885321faf8e543f0dd8d07f7280cfa2d9d531..a05feb38c6eebc778f1471a950e4c59d1ddba092 100644
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #include <linux/types.h>
@@ -34,6 +58,7 @@ struct qed_ptt {
        struct list_head        list_entry;
        unsigned int            idx;
        struct pxp_ptt_entry    pxp;
+       u8                      hwfn_id;
 };
 
 struct qed_ptt_pool {
@@ -44,8 +69,7 @@ struct qed_ptt_pool {
 
 int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
 {
-       struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool),
-                                             GFP_KERNEL);
+       struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool), GFP_KERNEL);
        int i;
 
        if (!p_pool)
@@ -56,6 +80,7 @@ int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
                p_pool->ptts[i].idx = i;
                p_pool->ptts[i].pxp.offset = QED_BAR_INVALID_OFFSET;
                p_pool->ptts[i].pxp.pretend.control = 0;
+               p_pool->ptts[i].hwfn_id = p_hwfn->my_id;
                if (i >= RESERVED_PTT_MAX)
                        list_add(&p_pool->ptts[i].list_entry,
                                 &p_pool->free_list);
@@ -113,16 +138,14 @@ struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn)
        return NULL;
 }
 
-void qed_ptt_release(struct qed_hwfn *p_hwfn,
-                    struct qed_ptt *p_ptt)
+void qed_ptt_release(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
        spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
        list_add(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
        spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
 }
 
-u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn,
-                       struct qed_ptt *p_ptt)
+u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
        /* The HW is using DWORDS and we need to translate it to Bytes */
        return le32_to_cpu(p_ptt->pxp.offset) << 2;
@@ -141,8 +164,7 @@ u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt)
 }
 
 void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
-                    struct qed_ptt *p_ptt,
-                    u32 new_hw_addr)
+                    struct qed_ptt *p_ptt, u32 new_hw_addr)
 {
        u32 prev_hw_addr;
 
@@ -166,14 +188,18 @@ void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
 }
 
 static u32 qed_set_ptt(struct qed_hwfn *p_hwfn,
-                      struct qed_ptt *p_ptt,
-                      u32 hw_addr)
+                      struct qed_ptt *p_ptt, u32 hw_addr)
 {
        u32 win_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);
        u32 offset;
 
        offset = hw_addr - win_hw_addr;
 
+       if (p_ptt->hwfn_id != p_hwfn->my_id)
+               DP_NOTICE(p_hwfn,
+                         "ptt[%d] of hwfn[%02x] is used by hwfn[%02x]!\n",
+                         p_ptt->idx, p_ptt->hwfn_id, p_hwfn->my_id);
+
        /* Verify the address is within the window */
        if (hw_addr < win_hw_addr ||
            offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
@@ -224,10 +250,7 @@ u32 qed_rd(struct qed_hwfn *p_hwfn,
 
 static void qed_memcpy_hw(struct qed_hwfn *p_hwfn,
                          struct qed_ptt *p_ptt,
-                         void *addr,
-                         u32 hw_addr,
-                         size_t n,
-                         bool to_device)
+                         void *addr, u32 hw_addr, size_t n, bool to_device)
 {
        u32 dw_count, *host_addr, hw_offset;
        size_t quota, done = 0;
@@ -259,8 +282,7 @@ static void qed_memcpy_hw(struct qed_hwfn *p_hwfn,
 }
 
 void qed_memcpy_from(struct qed_hwfn *p_hwfn,
-                    struct qed_ptt *p_ptt,
-                    void *dest, u32 hw_addr, size_t n)
+                    struct qed_ptt *p_ptt, void *dest, u32 hw_addr, size_t n)
 {
        DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
                   "hw_addr 0x%x, dest %p hw_addr 0x%x, size %lu\n",
@@ -270,8 +292,7 @@ void qed_memcpy_from(struct qed_hwfn *p_hwfn,
 }
 
 void qed_memcpy_to(struct qed_hwfn *p_hwfn,
-                  struct qed_ptt *p_ptt,
-                  u32 hw_addr, void *src, size_t n)
+                  struct qed_ptt *p_ptt, u32 hw_addr, void *src, size_t n)
 {
        DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
                   "hw_addr 0x%x, hw_addr 0x%x, src %p size %lu\n",
@@ -280,9 +301,7 @@ void qed_memcpy_to(struct qed_hwfn *p_hwfn,
        qed_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
 }
 
-void qed_fid_pretend(struct qed_hwfn *p_hwfn,
-                    struct qed_ptt *p_ptt,
-                    u16 fid)
+void qed_fid_pretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 fid)
 {
        u16 control = 0;
 
@@ -309,8 +328,7 @@ void qed_fid_pretend(struct qed_hwfn *p_hwfn,
 }
 
 void qed_port_pretend(struct qed_hwfn *p_hwfn,
-                     struct qed_ptt *p_ptt,
-                     u8 port_id)
+                     struct qed_ptt *p_ptt, u8 port_id)
 {
        u16 control = 0;
 
@@ -326,8 +344,7 @@ void qed_port_pretend(struct qed_hwfn *p_hwfn,
               *(u32 *)&p_ptt->pxp.pretend);
 }
 
-void qed_port_unpretend(struct qed_hwfn *p_hwfn,
-                       struct qed_ptt *p_ptt)
+void qed_port_unpretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
        u16 control = 0;
 
@@ -429,28 +446,27 @@ u32 qed_dmae_idx_to_go_cmd(u8 idx)
        return DMAE_REG_GO_C0 + (idx << 2);
 }
 
-static int
-qed_dmae_post_command(struct qed_hwfn *p_hwfn,
-                     struct qed_ptt *p_ptt)
+static int qed_dmae_post_command(struct qed_hwfn *p_hwfn,
+                                struct qed_ptt *p_ptt)
 {
-       struct dmae_cmd *command = p_hwfn->dmae_info.p_dmae_cmd;
+       struct dmae_cmd *p_command = p_hwfn->dmae_info.p_dmae_cmd;
        u8 idx_cmd = p_hwfn->dmae_info.channel, i;
        int qed_status = 0;
 
        /* verify address is not NULL */
-       if ((((command->dst_addr_lo == 0) && (command->dst_addr_hi == 0)) ||
-            ((command->src_addr_lo == 0) && (command->src_addr_hi == 0)))) {
+       if ((((!p_command->dst_addr_lo) && (!p_command->dst_addr_hi)) ||
+            ((!p_command->src_addr_lo) && (!p_command->src_addr_hi)))) {
                DP_NOTICE(p_hwfn,
                          "source or destination address 0 idx_cmd=%d\n"
                          "opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
-                          idx_cmd,
-                          le32_to_cpu(command->opcode),
-                          le16_to_cpu(command->opcode_b),
-                          le16_to_cpu(command->length_dw),
-                          le32_to_cpu(command->src_addr_hi),
-                          le32_to_cpu(command->src_addr_lo),
-                          le32_to_cpu(command->dst_addr_hi),
-                          le32_to_cpu(command->dst_addr_lo));
+                         idx_cmd,
+                         le32_to_cpu(p_command->opcode),
+                         le16_to_cpu(p_command->opcode_b),
+                         le16_to_cpu(p_command->length_dw),
+                         le32_to_cpu(p_command->src_addr_hi),
+                         le32_to_cpu(p_command->src_addr_lo),
+                         le32_to_cpu(p_command->dst_addr_hi),
+                         le32_to_cpu(p_command->dst_addr_lo));
 
                return -EINVAL;
        }
@@ -459,13 +475,13 @@ qed_dmae_post_command(struct qed_hwfn *p_hwfn,
                   NETIF_MSG_HW,
                   "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
                   idx_cmd,
-                  le32_to_cpu(command->opcode),
-                  le16_to_cpu(command->opcode_b),
-                  le16_to_cpu(command->length_dw),
-                  le32_to_cpu(command->src_addr_hi),
-                  le32_to_cpu(command->src_addr_lo),
-                  le32_to_cpu(command->dst_addr_hi),
-                  le32_to_cpu(command->dst_addr_lo));
+                  le32_to_cpu(p_command->opcode),
+                  le16_to_cpu(p_command->opcode_b),
+                  le16_to_cpu(p_command->length_dw),
+                  le32_to_cpu(p_command->src_addr_hi),
+                  le32_to_cpu(p_command->src_addr_lo),
+                  le32_to_cpu(p_command->dst_addr_hi),
+                  le32_to_cpu(p_command->dst_addr_lo));
 
        /* Copy the command to DMAE - need to do it before every call
         * for source/dest address no reset.
@@ -475,7 +491,7 @@ qed_dmae_post_command(struct qed_hwfn *p_hwfn,
         */
        for (i = 0; i < DMAE_CMD_SIZE; i++) {
                u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
-                          *(((u32 *)command) + i) : 0;
+                          *(((u32 *)p_command) + i) : 0;
 
                qed_wr(p_hwfn, p_ptt,
                       DMAE_REG_CMD_MEM +
@@ -483,9 +499,7 @@ qed_dmae_post_command(struct qed_hwfn *p_hwfn,
                       (i * sizeof(u32)), data);
        }
 
-       qed_wr(p_hwfn, p_ptt,
-              qed_dmae_idx_to_go_cmd(idx_cmd),
-              DMAE_GO_VALUE);
+       qed_wr(p_hwfn, p_ptt, qed_dmae_idx_to_go_cmd(idx_cmd), DMAE_GO_VALUE);
 
        return qed_status;
 }
@@ -498,31 +512,23 @@ int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn)
        u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;
 
        *p_comp = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
-                                    sizeof(u32),
-                                    p_addr,
-                                    GFP_KERNEL);
-       if (!*p_comp) {
-               DP_NOTICE(p_hwfn, "Failed to allocate `p_completion_word'\n");
+                                    sizeof(u32), p_addr, GFP_KERNEL);
+       if (!*p_comp)
                goto err;
-       }
 
        p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
        *p_cmd = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                    sizeof(struct dmae_cmd),
                                    p_addr, GFP_KERNEL);
-       if (!*p_cmd) {
-               DP_NOTICE(p_hwfn, "Failed to allocate `struct dmae_cmd'\n");
+       if (!*p_cmd)
                goto err;
-       }
 
        p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
        *p_buff = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                     sizeof(u32) * DMAE_MAX_RW_SIZE,
                                     p_addr, GFP_KERNEL);
-       if (!*p_buff) {
-               DP_NOTICE(p_hwfn, "Failed to allocate `intermediate_buffer'\n");
+       if (!*p_buff)
                goto err;
-       }
 
        p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;
 
@@ -543,8 +549,7 @@ void qed_dmae_info_free(struct qed_hwfn *p_hwfn)
                p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  sizeof(u32),
-                                 p_hwfn->dmae_info.p_completion_word,
-                                 p_phys);
+                                 p_hwfn->dmae_info.p_completion_word, p_phys);
                p_hwfn->dmae_info.p_completion_word = NULL;
        }
 
@@ -552,8 +557,7 @@ void qed_dmae_info_free(struct qed_hwfn *p_hwfn)
                p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  sizeof(struct dmae_cmd),
-                                 p_hwfn->dmae_info.p_dmae_cmd,
-                                 p_phys);
+                                 p_hwfn->dmae_info.p_dmae_cmd, p_phys);
                p_hwfn->dmae_info.p_dmae_cmd = NULL;
        }
 
@@ -571,9 +575,7 @@ void qed_dmae_info_free(struct qed_hwfn *p_hwfn)
 
 static int qed_dmae_operation_wait(struct qed_hwfn *p_hwfn)
 {
-       u32 wait_cnt = 0;
-       u32 wait_cnt_limit = 10000;
-
+       u32 wait_cnt_limit = 10000, wait_cnt = 0;
        int qed_status = 0;
 
        barrier();
@@ -606,7 +608,7 @@ static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
                                          u64 dst_addr,
                                          u8 src_type,
                                          u8 dst_type,
-                                         u32 length)
+                                         u32 length_dw)
 {
        dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
        struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
@@ -624,7 +626,7 @@ static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
                cmd->src_addr_lo = cpu_to_le32(lower_32_bits(phys));
                memcpy(&p_hwfn->dmae_info.p_intermediate_buffer[0],
                       (void *)(uintptr_t)src_addr,
-                      length * sizeof(u32));
+                      length_dw * sizeof(u32));
                break;
        default:
                return -EINVAL;
@@ -645,7 +647,7 @@ static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
                return -EINVAL;
        }
 
-       cmd->length_dw = cpu_to_le16((u16)length);
+       cmd->length_dw = cpu_to_le16((u16)length_dw);
 
        qed_dmae_post_command(p_hwfn, p_ptt);
 
@@ -654,16 +656,14 @@ static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
        if (qed_status) {
                DP_NOTICE(p_hwfn,
                          "qed_dmae_host2grc: Wait Failed. source_addr 0x%llx, grc_addr 0x%llx, size_in_dwords 0x%x\n",
-                         src_addr,
-                         dst_addr,
-                         length);
+                         src_addr, dst_addr, length_dw);
                return qed_status;
        }
 
        if (dst_type == QED_DMAE_ADDRESS_HOST_VIRT)
                memcpy((void *)(uintptr_t)(dst_addr),
                       &p_hwfn->dmae_info.p_intermediate_buffer[0],
-                      length * sizeof(u32));
+                      length_dw * sizeof(u32));
 
        return 0;
 }
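
The host-virtual bounce is the subtle part of the routine above. Below is a hedged standalone sketch of just that flow, with engine_copy() as a do-nothing stand-in for the actual DMAE transfer and a static array standing in for the DMA-coherent intermediate buffer:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum addr_type { ADDR_GRC, ADDR_HOST_PHYS, ADDR_HOST_VIRT };

#define MAX_RW_DWORDS 64                /* stand-in for DMAE_MAX_RW_SIZE */

static uint32_t bounce[MAX_RW_DWORDS];  /* p_intermediate_buffer stand-in */

static int engine_copy(uint64_t src, uint64_t dst, uint32_t len_dw)
{
        (void)src; (void)dst; (void)len_dw;     /* do-nothing transfer stub */
        return 0;
}

static int sub_operation(uint64_t src, uint64_t dst,
                         int src_type, int dst_type, uint32_t len_dw)
{
        /* a host-virtual source is staged into the DMA-visible buffer */
        if (src_type == ADDR_HOST_VIRT) {
                memcpy(bounce, (void *)(uintptr_t)src,
                       len_dw * sizeof(uint32_t));
                src = (uint64_t)(uintptr_t)bounce;
        }

        if (engine_copy(src, dst, len_dw))
                return -1;

        /* a host-virtual destination is copied back out afterwards */
        if (dst_type == ADDR_HOST_VIRT)
                memcpy((void *)(uintptr_t)dst, bounce,
                       len_dw * sizeof(uint32_t));
        return 0;
}

int main(void)
{
        uint32_t in[4] = { 1, 2, 3, 4 }, out[4] = { 0 };

        sub_operation((uint64_t)(uintptr_t)in, (uint64_t)(uintptr_t)out,
                      ADDR_HOST_VIRT, ADDR_HOST_VIRT, 4);
        printf("%u\n", (unsigned)out[3]);       /* 4: in, then back out */
        return 0;
}
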
@@ -730,10 +730,7 @@ static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
                if (qed_status) {
                        DP_NOTICE(p_hwfn,
                                  "qed_dmae_execute_sub_operation Failed with error 0x%x. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n",
-                                 qed_status,
-                                 src_addr,
-                                 dst_addr,
-                                 length_cur);
+                                 qed_status, src_addr, dst_addr, length_cur);
                        break;
                }
        }
@@ -743,10 +740,7 @@ static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
 
 int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
                      struct qed_ptt *p_ptt,
-                     u64 source_addr,
-                     u32 grc_addr,
-                     u32 size_in_dwords,
-                     u32 flags)
+                 u64 source_addr, u32 grc_addr, u32 size_in_dwords, u32 flags)
 {
        u32 grc_addr_in_dw = grc_addr / sizeof(u32);
        struct qed_dmae_params params;
@@ -768,9 +762,10 @@ int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
        return rc;
 }
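
A hypothetical caller of this wrapper could look like the fragment below. The buffer, the 0x50000 GRC offset and the zero flags are invented for illustration, and as driver-context code it is not independently compilable; note the source is a host-virtual address, matching the callers elsewhere in this patch (e.g. qed_init_rt()):

        /* hedged usage sketch; 0x50000 is an arbitrary GRC byte offset */
        u32 buf[16] = { 0 };
        int rc;

        rc = qed_dmae_host2grc(p_hwfn, p_ptt,
                               (u64)(uintptr_t)buf,     /* host-virtual source */
                               0x50000,                 /* GRC dest, in bytes */
                               ARRAY_SIZE(buf),         /* size in dwords */
                               0);                      /* flags */
        if (rc)
                DP_NOTICE(p_hwfn, "example DMAE copy failed: %d\n", rc);
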
 
-int
-qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 grc_addr,
-                 dma_addr_t dest_addr, u32 size_in_dwords, u32 flags)
+int qed_dmae_grc2host(struct qed_hwfn *p_hwfn,
+                     struct qed_ptt *p_ptt,
+                     u32 grc_addr,
+                     dma_addr_t dest_addr, u32 size_in_dwords, u32 flags)
 {
        u32 grc_addr_in_dw = grc_addr / sizeof(u32);
        struct qed_dmae_params params;
@@ -791,12 +786,11 @@ qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 grc_addr,
        return rc;
 }
 
-int
-qed_dmae_host2host(struct qed_hwfn *p_hwfn,
-                  struct qed_ptt *p_ptt,
-                  dma_addr_t source_addr,
-                  dma_addr_t dest_addr,
-                  u32 size_in_dwords, struct qed_dmae_params *p_params)
+int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
+                      struct qed_ptt *p_ptt,
+                      dma_addr_t source_addr,
+                      dma_addr_t dest_addr,
+                      u32 size_in_dwords, struct qed_dmae_params *p_params)
 {
        int rc;
 
@@ -813,52 +807,3 @@ qed_dmae_host2host(struct qed_hwfn *p_hwfn,
        return rc;
 }
 
-u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
-                 enum protocol_type proto, union qed_qm_pq_params *p_params)
-{
-       u16 pq_id = 0;
-
-       if ((proto == PROTOCOLID_CORE ||
-            proto == PROTOCOLID_ETH ||
-            proto == PROTOCOLID_ISCSI ||
-            proto == PROTOCOLID_ROCE) && !p_params) {
-               DP_NOTICE(p_hwfn,
-                         "Protocol %d received NULL PQ params\n", proto);
-               return 0;
-       }
-
-       switch (proto) {
-       case PROTOCOLID_CORE:
-               if (p_params->core.tc == LB_TC)
-                       pq_id = p_hwfn->qm_info.pure_lb_pq;
-               else if (p_params->core.tc == OOO_LB_TC)
-                       pq_id = p_hwfn->qm_info.ooo_pq;
-               else
-                       pq_id = p_hwfn->qm_info.offload_pq;
-               break;
-       case PROTOCOLID_ETH:
-               pq_id = p_params->eth.tc;
-               if (p_params->eth.is_vf)
-                       pq_id += p_hwfn->qm_info.vf_queues_offset +
-                                p_params->eth.vf_id;
-               break;
-       case PROTOCOLID_ISCSI:
-               if (p_params->iscsi.q_idx == 1)
-                       pq_id = p_hwfn->qm_info.pure_ack_pq;
-               break;
-       case PROTOCOLID_ROCE:
-               if (p_params->roce.dcqcn)
-                       pq_id = p_params->roce.qpid;
-               else
-                       pq_id = p_hwfn->qm_info.offload_pq;
-               if (pq_id > p_hwfn->qm_info.num_pf_rls)
-                       pq_id = p_hwfn->qm_info.offload_pq;
-               break;
-       default:
-               pq_id = 0;
-       }
-
-       pq_id = CM_TX_PQ_BASE + pq_id + RESC_START(p_hwfn, QED_PQ);
-
-       return pq_id;
-}
index d01557092868ec34018b80a27f14377d586135bd..f2505c691c264198e73ce946cfa5933b63292e86 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef _QED_HW_H
@@ -273,9 +297,6 @@ union qed_qm_pq_params {
        } roce;
 };
 
-u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
-                 enum protocol_type proto, union qed_qm_pq_params *params);
-
 int qed_init_fw_data(struct qed_dev *cdev,
                     const u8 *fw_data);
 #endif
index 23e455f22adc54747953b574e91808c6b6924aa5..67200c5498ab01d3911735f25e6ccea05aed200e 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #include <linux/types.h>
@@ -191,13 +215,6 @@ static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
 {
        u32 qm_line_crd;
 
-       /* In A0 - Limit the size of pbf queue so that only 511 commands with
-        * the minimum size of 4 (FCoE minimum size)
-        */
-       bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
-
-       if (is_bb_a0)
-               cmdq_lines = min_t(u32, cmdq_lines, 1022);
        qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
        OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
                         (u32)cmdq_lines);
@@ -319,13 +336,11 @@ static void qed_tx_pq_map_rt_init(
        u16 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
        u16 last_pq_group = (p_params->start_pq + num_pqs - 1) /
                            QM_PF_QUEUE_GROUP_SIZE;
-       bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
        u16 i, pq_id, pq_group;
 
        /* a bit per Tx PQ indicating if the PQ is associated with a VF */
        u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
-       u32 tx_pq_vf_mask_width = is_bb_a0 ? 32 : QM_PF_QUEUE_GROUP_SIZE;
-       u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / tx_pq_vf_mask_width;
+       u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
        u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
        u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
        u32 mem_addr_4kb = base_mem_addr_4kb;
@@ -347,6 +362,10 @@ static void qed_tx_pq_map_rt_init(
                bool is_vf_pq = (i >= p_params->num_pf_pqs);
                struct qm_rf_pq_map tx_pq_map;
 
+               bool rl_valid = p_params->pq_params[i].rl_valid &&
+                               (p_params->pq_params[i].vport_id <
+                                MAX_QM_GLOBAL_RLS);
+
                /* update first Tx PQ of VPORT/TC */
                u8 vport_id_in_pf = p_params->pq_params[i].vport_id -
                                    p_params->start_vport;
@@ -365,14 +384,18 @@ static void qed_tx_pq_map_rt_init(
                                     (p_params->pf_id <<
                                      QM_WFQ_VP_PQ_PF_SHIFT));
                }
+
+               if (p_params->pq_params[i].rl_valid && !rl_valid)
+                       DP_NOTICE(p_hwfn,
+                                 "Invalid VPORT ID for rate limiter configuration");
                /* fill PQ map entry */
                memset(&tx_pq_map, 0, sizeof(tx_pq_map));
                SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
-               SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
-                         p_params->pq_params[i].rl_valid ? 1 : 0);
+               SET_FIELD(tx_pq_map.reg,
+                         QM_RF_PQ_MAP_RL_VALID, rl_valid ? 1 : 0);
                SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
                SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
-                         p_params->pq_params[i].rl_valid ?
+                         rl_valid ?
                          p_params->pq_params[i].vport_id : 0);
                SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
                SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
@@ -389,8 +412,9 @@ static void qed_tx_pq_map_rt_init(
                        /* if PQ is associated with a VF, add indication
                         * to PQ VF mask
                         */
-                       tx_pq_vf_mask[pq_id / tx_pq_vf_mask_width] |=
-                               (1 << (pq_id % tx_pq_vf_mask_width));
+                       tx_pq_vf_mask[pq_id /
+                                     QM_PF_QUEUE_GROUP_SIZE] |=
+                           BIT((pq_id % QM_PF_QUEUE_GROUP_SIZE));
                        mem_addr_4kb += vport_pq_mem_4kb;
                } else {
                        mem_addr_4kb += pq_mem_4kb;
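
The VF-mask update above is plain word/bit bitmap arithmetic; here is a standalone sketch with an assumed group size of 32 (the real QM_PF_QUEUE_GROUP_SIZE may differ):

#include <stdint.h>
#include <stdio.h>

#define GROUP_SIZE 32   /* assumed; stand-in for QM_PF_QUEUE_GROUP_SIZE */

static uint32_t vf_mask[512 / GROUP_SIZE];      /* one bit per Tx PQ */

/* pq_id selects the word and the bit exactly as in the hunk above */
static void mark_vf_pq(uint16_t pq_id)
{
        vf_mask[pq_id / GROUP_SIZE] |= 1u << (pq_id % GROUP_SIZE);
}

int main(void)
{
        mark_vf_pq(37);                         /* word 1, bit 5 */
        printf("0x%08x\n", vf_mask[1]);         /* 0x00000020 */
        return 0;
}
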
@@ -456,8 +480,8 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
        if (p_params->pf_id < MAX_NUM_PFS_BB)
                crd_reg_offset = QM_REG_WFQPFCRD_RT_OFFSET;
        else
-               crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET +
-                                (p_params->pf_id % MAX_NUM_PFS_BB);
+               crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET;
+       crd_reg_offset += p_params->pf_id % MAX_NUM_PFS_BB;
 
        inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
        if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
@@ -474,11 +498,11 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
                                 QM_WFQ_CRD_REG_SIGN_BIT);
        }
 
-       STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
-                    inc_val);
        STORE_RT_REG(p_hwfn,
                     QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
                     QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
+       STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
+                    inc_val);
        return 0;
 }
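
The credit-register offset selection above splits functions across two register arrays; a sketch of the same arithmetic with invented offsets and an assumed split of 8 standing in for MAX_NUM_PFS_BB:

#include <stdint.h>
#include <stdio.h>

#define MAX_PFS_BB   8          /* assumed split; MAX_NUM_PFS_BB stand-in */
#define CRD_BASE     0x100      /* invented runtime-array offsets */
#define CRD_MSB_BASE 0x200

/* the first MAX_PFS_BB functions use the base array, the rest the MSB
 * array; both are indexed pf_id modulo the split, as in the hunk above */
static uint32_t crd_offset(uint32_t pf_id)
{
        uint32_t off = pf_id < MAX_PFS_BB ? CRD_BASE : CRD_MSB_BASE;

        return off + pf_id % MAX_PFS_BB;
}

int main(void)
{
        printf("pf3  -> 0x%x\n", crd_offset(3));        /* 0x103 */
        printf("pf11 -> 0x%x\n", crd_offset(11));       /* 0x203 */
        return 0;
}
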
 
@@ -552,6 +576,12 @@ static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
 {
        u8 i, vport_id;
 
+       if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
+               DP_NOTICE(p_hwfn,
+                         "Invalid VPORT ID for rate limiter configuration");
+               return -1;
+       }
+
        /* go over all PF VPORTs */
        for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
                u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
@@ -761,6 +791,12 @@ int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
 {
        u32 inc_val = QM_RL_INC_VAL(vport_rl);
 
+       if (vport_id >= MAX_QM_GLOBAL_RLS) {
+               DP_NOTICE(p_hwfn,
+                         "Invalid VPORT ID for rate limiter configuration");
+               return -1;
+       }
+
        if (inc_val > QM_RL_MAX_INC_VAL) {
                DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration");
                return -1;
@@ -916,12 +952,6 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
               eth_geneve_enable ? 1 : 0);
        qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);
 
-       /* comp ver */
-       reg_val = (ip_geneve_enable || eth_geneve_enable) ? 1 : 0;
-       qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_COMP_VER, reg_val);
-       qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_COMP_VER, reg_val);
-       qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_COMP_VER, reg_val);
-
        /* EDPM with geneve tunnel not supported in BB_B0 */
        if (QED_IS_BB_B0(p_hwfn->cdev))
                return;
@@ -931,3 +961,132 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
        qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
               ip_geneve_enable ? 1 : 0);
 }
+
+#define T_ETH_PACKET_MATCH_RFS_EVENTID 25
+#define PARSER_ETH_CONN_CM_HDR (0x0)
+#define CAM_LINE_SIZE sizeof(u32)
+#define RAM_LINE_SIZE sizeof(u64)
+#define REG_SIZE sizeof(u32)
+
+void qed_set_rfs_mode_disable(struct qed_hwfn *p_hwfn,
+                             struct qed_ptt *p_ptt, u16 pf_id)
+{
+       union gft_cam_line_union camline;
+       struct gft_ram_line ramline;
+       u32 *p_ramline, i;
+
+       p_ramline = (u32 *)&ramline;
+
+       /* stop using gft logic */
+       qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
+       qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, 0x0);
+       memset(&camline, 0, sizeof(union gft_cam_line_union));
+       qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
+              camline.cam_line_mapped.camline);
+       memset(&ramline, 0, sizeof(ramline));
+
+       for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++) {
+               u32 hw_addr = PRS_REG_GFT_PROFILE_MASK_RAM;
+
+               hw_addr += (RAM_LINE_SIZE * pf_id + i * REG_SIZE);
+
+               qed_wr(p_hwfn, p_ptt, hw_addr, *(p_ramline + i));
+       }
+}
+
+void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+                            u16 pf_id, bool tcp, bool udp,
+                            bool ipv4, bool ipv6)
+{
+       u32 rfs_cm_hdr_event_id, *p_ramline;
+       union gft_cam_line_union camline;
+       struct gft_ram_line ramline;
+       int i;
+
+       rfs_cm_hdr_event_id = qed_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
+       p_ramline = (u32 *)&ramline;
+
+       if (!ipv6 && !ipv4)
+               DP_NOTICE(p_hwfn,
+                         "set_rfs_mode_enable: must accept at least one of ipv4 or ipv6");
+       if (!tcp && !udp)
+               DP_NOTICE(p_hwfn,
+                         "set_rfs_mode_enable: must accept at least one of udp or tcp");
+
+       rfs_cm_hdr_event_id |= T_ETH_PACKET_MATCH_RFS_EVENTID <<
+                                       PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
+       rfs_cm_hdr_event_id |= PARSER_ETH_CONN_CM_HDR <<
+                                       PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
+       qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
+
+       /* Configure Registers for RFS mode */
+       qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
+       qed_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);
+       camline.cam_line_mapped.camline = 0;
+
+       /* CAM line is now valid */
+       SET_FIELD(camline.cam_line_mapped.camline,
+                 GFT_CAM_LINE_MAPPED_VALID, 1);
+
+       /* filters are per-PF */
+       SET_FIELD(camline.cam_line_mapped.camline,
+                 GFT_CAM_LINE_MAPPED_PF_ID_MASK, 1);
+       SET_FIELD(camline.cam_line_mapped.camline,
+                 GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
+       if (!(tcp && udp)) {
+               SET_FIELD(camline.cam_line_mapped.camline,
+                         GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK, 1);
+               if (tcp)
+                       SET_FIELD(camline.cam_line_mapped.camline,
+                                 GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
+                                 GFT_PROFILE_TCP_PROTOCOL);
+               else
+                       SET_FIELD(camline.cam_line_mapped.camline,
+                                 GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
+                                 GFT_PROFILE_UDP_PROTOCOL);
+       }
+
+       if (!(ipv4 && ipv6)) {
+               SET_FIELD(camline.cam_line_mapped.camline,
+                         GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
+               if (ipv4)
+                       SET_FIELD(camline.cam_line_mapped.camline,
+                                 GFT_CAM_LINE_MAPPED_IP_VERSION,
+                                 GFT_PROFILE_IPV4);
+               else
+                       SET_FIELD(camline.cam_line_mapped.camline,
+                                 GFT_CAM_LINE_MAPPED_IP_VERSION,
+                                 GFT_PROFILE_IPV6);
+       }
+
+       /* write characteristics to cam */
+       qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
+              camline.cam_line_mapped.camline);
+       camline.cam_line_mapped.camline = qed_rd(p_hwfn, p_ptt,
+                                                PRS_REG_GFT_CAM +
+                                                CAM_LINE_SIZE * pf_id);
+
+       /* write line to RAM - compare to filter 4 tuple */
+       ramline.low32bits = 0;
+       ramline.high32bits = 0;
+       SET_FIELD(ramline.high32bits, GFT_RAM_LINE_DST_IP, 1);
+       SET_FIELD(ramline.high32bits, GFT_RAM_LINE_SRC_IP, 1);
+       SET_FIELD(ramline.low32bits, GFT_RAM_LINE_SRC_PORT, 1);
+       SET_FIELD(ramline.low32bits, GFT_RAM_LINE_DST_PORT, 1);
+
+       /* each iteration write to reg */
+       for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
+               qed_wr(p_hwfn, p_ptt,
+                      PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id +
+                      i * REG_SIZE, *(p_ramline + i));
+
+       /* set default profile so that no filter match will happen */
+       ramline.low32bits = 0xffff;
+       ramline.high32bits = 0xffff;
+
+       for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
+               qed_wr(p_hwfn, p_ptt,
+                      PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
+                      PRS_GFT_CAM_LINES_NO_MATCH + i * REG_SIZE,
+                      *(p_ramline + i));
+}
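
The CAM line above is built entirely with SET_FIELD() mask/shift packing. Below is a standalone sketch of that style of field packing with an invented layout; the real GFT_CAM_LINE_MAPPED_* masks and shifts come from the firmware HSI headers, and the kernel macro also clears the field before setting it, which this simplified OR-only version skips by starting from zero:

#include <stdint.h>
#include <stdio.h>

/* invented field layout, for illustration only */
#define F_VALID_SHIFT 0
#define F_VALID_MASK  0x1
#define F_PF_ID_SHIFT 1
#define F_PF_ID_MASK  0xF
#define F_IPVER_SHIFT 5
#define F_IPVER_MASK  0x1

/* OR-only packer; safe because the register starts from zero */
#define PUT_FIELD(reg, name, val) \
        ((reg) |= ((uint32_t)(val) & name##_MASK) << name##_SHIFT)

int main(void)
{
        uint32_t camline = 0;

        PUT_FIELD(camline, F_VALID, 1);         /* line is valid */
        PUT_FIELD(camline, F_PF_ID, 3);         /* filter owned by PF 3 */
        PUT_FIELD(camline, F_IPVER, 1);         /* match IPv6 only */
        printf("camline = 0x%08x\n", camline);  /* 0x00000027 */
        return 0;
}
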
index 9866a20d212812edc36155135a0b43db03acc951..4a2e7be5bf7210acc93f3ded8d20e1240e3aa6ef 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #include <linux/types.h>
@@ -59,17 +83,14 @@ void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn)
                p_hwfn->rt_data.b_valid[i] = false;
 }
 
-void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn,
-                          u32 rt_offset,
-                          u32 val)
+void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 val)
 {
        p_hwfn->rt_data.init_val[rt_offset] = val;
        p_hwfn->rt_data.b_valid[rt_offset] = true;
 }
 
 void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
-                          u32 rt_offset, u32 *p_val,
-                          size_t size)
+                          u32 rt_offset, u32 *p_val, size_t size)
 {
        size_t i;
 
@@ -81,10 +102,7 @@ void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
 
 static int qed_init_rt(struct qed_hwfn *p_hwfn,
                       struct qed_ptt *p_ptt,
-                      u32 addr,
-                      u16 rt_offset,
-                      u16 size,
-                      bool b_must_dmae)
+                      u32 addr, u16 rt_offset, u16 size, bool b_must_dmae)
 {
        u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
        bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
@@ -102,8 +120,7 @@ static int qed_init_rt(struct qed_hwfn      *p_hwfn,
                 * simply write the data instead of using dmae.
                 */
                if (!b_must_dmae) {
-                       qed_wr(p_hwfn, p_ptt, addr + (i << 2),
-                              p_init_val[i]);
+                       qed_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]);
                        continue;
                }
 
@@ -115,7 +132,7 @@ static int qed_init_rt(struct qed_hwfn      *p_hwfn,
                rc = qed_dmae_host2grc(p_hwfn, p_ptt,
                                       (uintptr_t)(p_init_val + i),
                                       addr + (i << 2), segment, 0);
-               if (rc != 0)
+               if (rc)
                        return rc;
 
                /* Jump over the entire segment, including invalid entry */
@@ -182,9 +199,7 @@ static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
 
 static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
                              struct qed_ptt *p_ptt,
-                             u32 addr,
-                             u32 fill,
-                             u32 fill_count)
+                             u32 addr, u32 fill, u32 fill_count)
 {
        static u32 zero_buffer[DMAE_MAX_RW_SIZE];
 
@@ -199,15 +214,12 @@ static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
 
        return qed_dmae_host2grc(p_hwfn, p_ptt,
                                 (uintptr_t)(&zero_buffer[0]),
-                                addr, fill_count,
-                                QED_DMAE_FLAG_RW_REPL_SRC);
+                                addr, fill_count, QED_DMAE_FLAG_RW_REPL_SRC);
 }
 
 static void qed_init_fill(struct qed_hwfn *p_hwfn,
                          struct qed_ptt *p_ptt,
-                         u32 addr,
-                         u32 fill,
-                         u32 fill_count)
+                         u32 addr, u32 fill, u32 fill_count)
 {
        u32 i;
 
@@ -218,12 +230,12 @@ static void qed_init_fill(struct qed_hwfn *p_hwfn,
 static int qed_init_cmd_array(struct qed_hwfn *p_hwfn,
                              struct qed_ptt *p_ptt,
                              struct init_write_op *cmd,
-                             bool b_must_dmae,
-                             bool b_can_dmae)
+                             bool b_must_dmae, bool b_can_dmae)
 {
+       u32 dmae_array_offset = le32_to_cpu(cmd->args.array_offset);
        u32 data = le32_to_cpu(cmd->data);
        u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
-       u32 dmae_array_offset = le32_to_cpu(cmd->args.array_offset);
+
        u32 offset, output_len, input_len, max_size;
        struct qed_dev *cdev = p_hwfn->cdev;
        union init_array_hdr *hdr;
@@ -233,8 +245,7 @@ static int qed_init_cmd_array(struct qed_hwfn *p_hwfn,
 
        array_data = cdev->fw_data->arr_data;
 
-       hdr = (union init_array_hdr *)(array_data +
-                                      dmae_array_offset);
+       hdr = (union init_array_hdr *)(array_data + dmae_array_offset);
        data = le32_to_cpu(hdr->raw.data);
        switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
        case INIT_ARR_ZIPPED:
@@ -290,13 +301,12 @@ static int qed_init_cmd_array(struct qed_hwfn *p_hwfn,
 /* init_ops write command */
 static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
                           struct qed_ptt *p_ptt,
-                          struct init_write_op *cmd,
-                          bool b_can_dmae)
+                          struct init_write_op *p_cmd, bool b_can_dmae)
 {
-       u32 data = le32_to_cpu(cmd->data);
-       u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
+       u32 data = le32_to_cpu(p_cmd->data);
        bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
-       union init_write_args *arg = &cmd->args;
+       u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
+       union init_write_args *arg = &p_cmd->args;
        int rc = 0;
 
        /* Sanitize */
@@ -309,20 +319,18 @@ static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
 
        switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
        case INIT_SRC_INLINE:
-               qed_wr(p_hwfn, p_ptt, addr,
-                      le32_to_cpu(arg->inline_val));
+               data = le32_to_cpu(p_cmd->args.inline_val);
+               qed_wr(p_hwfn, p_ptt, addr, data);
                break;
        case INIT_SRC_ZEROS:
-               if (b_must_dmae ||
-                   (b_can_dmae && (le32_to_cpu(arg->zeros_count) >= 64)))
-                       rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0,
-                                               le32_to_cpu(arg->zeros_count));
+               data = le32_to_cpu(p_cmd->args.zeros_count);
+               if (b_must_dmae || (b_can_dmae && (data >= 64)))
+                       rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0, data);
                else
-                       qed_init_fill(p_hwfn, p_ptt, addr, 0,
-                                     le32_to_cpu(arg->zeros_count));
+                       qed_init_fill(p_hwfn, p_ptt, addr, 0, data);
                break;
        case INIT_SRC_ARRAY:
-               rc = qed_init_cmd_array(p_hwfn, p_ptt, cmd,
+               rc = qed_init_cmd_array(p_hwfn, p_ptt, p_cmd,
                                        b_must_dmae, b_can_dmae);
                break;
        case INIT_SRC_RUNTIME:
@@ -353,8 +361,7 @@ static inline bool comp_or(u32 val, u32 expected_val)
 
 /* init_ops read/poll commands */
 static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
-                           struct qed_ptt *p_ptt,
-                           struct init_read_op *cmd)
+                           struct qed_ptt *p_ptt, struct init_read_op *cmd)
 {
        bool (*comp_check)(u32 val, u32 expected_val);
        u32 delay = QED_INIT_POLL_PERIOD_US, val;
@@ -412,35 +419,33 @@ static void qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
 }
 
 static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn,
-                                 u16 *offset,
-                                 int modes)
+                                 u16 *p_offset, int modes)
 {
        struct qed_dev *cdev = p_hwfn->cdev;
        const u8 *modes_tree_buf;
        u8 arg1, arg2, tree_val;
 
        modes_tree_buf = cdev->fw_data->modes_tree_buf;
-       tree_val = modes_tree_buf[(*offset)++];
+       tree_val = modes_tree_buf[(*p_offset)++];
        switch (tree_val) {
        case INIT_MODE_OP_NOT:
-               return qed_init_cmd_mode_match(p_hwfn, offset, modes) ^ 1;
+               return qed_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1;
        case INIT_MODE_OP_OR:
-               arg1    = qed_init_cmd_mode_match(p_hwfn, offset, modes);
-               arg2    = qed_init_cmd_mode_match(p_hwfn, offset, modes);
+               arg1 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
+               arg2 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
                return arg1 | arg2;
        case INIT_MODE_OP_AND:
-               arg1    = qed_init_cmd_mode_match(p_hwfn, offset, modes);
-               arg2    = qed_init_cmd_mode_match(p_hwfn, offset, modes);
+               arg1 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
+               arg2 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
                return arg1 & arg2;
        default:
                tree_val -= MAX_INIT_MODE_OPS;
-               return (modes & (1 << tree_val)) ? 1 : 0;
+               return (modes & BIT(tree_val)) ? 1 : 0;
        }
 }
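
qed_init_cmd_mode_match() above is a recursive evaluator over a prefix-encoded boolean tree: each byte is either an operator or a leaf encoding (MAX_INIT_MODE_OPS + mode-bit-index). A standalone sketch of the same scheme, with invented opcode values:

#include <stdint.h>
#include <stdio.h>

enum { OP_NOT = 0, OP_OR = 1, OP_AND = 2, MAX_OPS = 3 }; /* invented values */

static uint8_t tree_match(const uint8_t *buf, uint16_t *off, int modes)
{
        uint8_t a, b, v = buf[(*off)++];

        switch (v) {
        case OP_NOT:
                return tree_match(buf, off, modes) ^ 1;
        case OP_OR:
                a = tree_match(buf, off, modes);
                b = tree_match(buf, off, modes);
                return a | b;
        case OP_AND:
                a = tree_match(buf, off, modes);
                b = tree_match(buf, off, modes);
                return a & b;
        default:
                /* a leaf encodes MAX_OPS + mode-bit-index */
                return (modes & (1 << (v - MAX_OPS))) ? 1 : 0;
        }
}

int main(void)
{
        /* AND(bit0, NOT(bit2)) in prefix order */
        const uint8_t tree[] = { OP_AND, MAX_OPS + 0, OP_NOT, MAX_OPS + 2 };
        uint16_t off = 0;

        printf("%d\n", tree_match(tree, &off, 0x1)); /* 1: bit0 set, bit2 clear */
        return 0;
}
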
 
 static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn,
-                            struct init_if_mode_op *p_cmd,
-                            int modes)
+                            struct init_if_mode_op *p_cmd, int modes)
 {
        u16 offset = le16_to_cpu(p_cmd->modes_buf_offset);
 
@@ -453,8 +458,7 @@ static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn,
 
 static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
                              struct init_if_phase_op *p_cmd,
-                             u32 phase,
-                             u32 phase_id)
+                             u32 phase, u32 phase_id)
 {
        u32 data = le32_to_cpu(p_cmd->phase_data);
        u32 op_data = le32_to_cpu(p_cmd->op_data);
@@ -468,10 +472,7 @@ static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
 }
 
 int qed_init_run(struct qed_hwfn *p_hwfn,
-                struct qed_ptt *p_ptt,
-                int phase,
-                int phase_id,
-                int modes)
+                struct qed_ptt *p_ptt, int phase, int phase_id, int modes)
 {
        struct qed_dev *cdev = p_hwfn->cdev;
        u32 cmd_num, num_init_ops;
@@ -483,10 +484,8 @@ int qed_init_run(struct qed_hwfn *p_hwfn,
        init_ops = cdev->fw_data->init_ops;
 
        p_hwfn->unzip_buf = kzalloc(MAX_ZIPPED_SIZE * 4, GFP_ATOMIC);
-       if (!p_hwfn->unzip_buf) {
-               DP_NOTICE(p_hwfn, "Failed to allocate unzip buffer\n");
+       if (!p_hwfn->unzip_buf)
                return -ENOMEM;
-       }
 
        for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
                union init_op *cmd = &init_ops[cmd_num];
@@ -555,9 +554,9 @@ int qed_init_fw_data(struct qed_dev *cdev, const u8 *data)
        }
 
        /* First Dword contains metadata and should be skipped */
-       buf_hdr = (struct bin_buffer_hdr *)(data + sizeof(u32));
+       buf_hdr = (struct bin_buffer_hdr *)data;
 
-       offset = buf_hdr[BIN_BUF_FW_VER_INFO].offset;
+       offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
        fw->fw_ver_info = (struct fw_ver_info *)(data + offset);
 
        offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
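
The hunk above repositions buf_hdr at the very start of the blob and indexes per-buffer headers out of it. Here is a hedged sketch of that table-of-offsets parse, with a guessed two-field header layout (the real struct bin_buffer_hdr may carry more fields) and a little-endian host assumed:

#include <stdint.h>
#include <stdio.h>

struct bin_hdr { uint32_t offset; uint32_t length; };   /* guessed layout */

enum { BUF_FW_VER = 0, BUF_INIT_CMD = 1 };

static const uint8_t *get_buf(const uint8_t *data, int id)
{
        const struct bin_hdr *hdr = (const struct bin_hdr *)data;

        return data + hdr[id].offset;   /* offsets are from blob start */
}

int main(void)
{
        /* fabricated blob: two headers, then one payload dword per buffer */
        uint32_t blob32[6] = { 16, 4, 20, 4, 0xAB, 0xCD };

        printf("%02x\n", *get_buf((const uint8_t *)blob32, BUF_FW_VER)); /* ab */
        return 0;
}
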
index 1e832049983d4263705223c0a94c2a407720af37..555dd086796df0660b21f29408e0a923ca72d9c5 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef _QED_INIT_OPS_H
index 8fa50fa23c8d0f24cc841ad25cd9595a729112d6..23910fa12b2f3d2e98f598a6d6e4a557f76f83d9 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #include <linux/types.h>
@@ -1775,10 +1799,9 @@ struct qed_sb_attn_info {
 };
 
 static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
-                                     struct qed_sb_attn_info   *p_sb_desc)
+                                     struct qed_sb_attn_info *p_sb_desc)
 {
-       u16     rc = 0;
-       u16     index;
+       u16 rc = 0, index;
 
        /* Make certain the HW write took effect */
        mmiowb();
@@ -1802,15 +1825,13 @@ static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
  *  @param asserted_bits newly asserted bits
  *  @return int
  */
-static int qed_int_assertion(struct qed_hwfn *p_hwfn,
-                            u16 asserted_bits)
+static int qed_int_assertion(struct qed_hwfn *p_hwfn, u16 asserted_bits)
 {
        struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
        u32 igu_mask;
 
        /* Mask the source of the attention in the IGU */
-       igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
-                         IGU_REG_ATTENTION_ENABLE);
+       igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE);
        DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
                   igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
        igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
@@ -2041,7 +2062,7 @@ static int qed_int_deassertion(struct qed_hwfn  *p_hwfn,
                        struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];
 
                        if ((p_bit->flags & ATTENTION_PARITY) &&
-                           !!(parities & (1 << bit_idx)))
+                           !!(parities & BIT(bit_idx)))
                                qed_int_deassertion_parity(p_hwfn, p_bit,
                                                           bit_idx);
 
@@ -2114,8 +2135,7 @@ static int qed_int_deassertion(struct qed_hwfn  *p_hwfn,
                                    ~((u32)deasserted_bits));
 
        /* Unmask deasserted attentions in IGU */
-       aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
-                         IGU_REG_ATTENTION_ENABLE);
+       aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE);
        aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
        qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);
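
The assertion and deassertion paths above toggle the same maskable subset of IGU_REG_ATTENTION_ENABLE; a standalone sketch of that mask bookkeeping, with an invented maskable-bits constant:

#include <stdint.h>
#include <stdio.h>

#define ATTN_MASKABLE 0x0FFF    /* invented maskable subset of attention bits */

static uint32_t igu_enable = 0xFFFF;    /* IGU_REG_ATTENTION_ENABLE stand-in */

static void on_assert(uint16_t asserted)        /* mask what just fired */
{
        igu_enable &= ~((uint32_t)asserted & ATTN_MASKABLE);
}

static void on_deassert(uint16_t deasserted)    /* unmask it again */
{
        igu_enable |= (uint32_t)deasserted & ATTN_MASKABLE;
}

int main(void)
{
        on_assert(0x0104);
        printf("0x%08x\n", igu_enable);         /* 0x0000fefb */
        on_deassert(0x0104);
        printf("0x%08x\n", igu_enable);         /* 0x0000ffff */
        return 0;
}
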
 
@@ -2160,8 +2180,7 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn)
                        index, attn_bits, attn_acks, asserted_bits,
                        deasserted_bits, p_sb_attn_sw->known_attn);
        } else if (asserted_bits == 0x100) {
-               DP_INFO(p_hwfn,
-                       "MFW indication via attention\n");
+               DP_INFO(p_hwfn, "MFW indication via attention\n");
        } else {
                DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
                           "MFW indication [deassertion]\n");
@@ -2173,18 +2192,14 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn)
                        return rc;
        }
 
-       if (deasserted_bits) {
+       if (deasserted_bits)
                rc = qed_int_deassertion(p_hwfn, deasserted_bits);
-               if (rc)
-                       return rc;
-       }
 
        return rc;
 }
 
 static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn,
-                           void __iomem *igu_addr,
-                           u32 ack_cons)
+                           void __iomem *igu_addr, u32 ack_cons)
 {
        struct igu_prod_cons_update igu_ack = { 0 };
 
@@ -2242,9 +2257,8 @@ void qed_int_sp_dpc(unsigned long hwfn_cookie)
 
        /* Gather Interrupts/Attentions information */
        if (!sb_info->sb_virt) {
-               DP_ERR(
-                       p_hwfn->cdev,
-                       "Interrupt Status block is NULL - cannot check for new interrupts!\n");
+               DP_ERR(p_hwfn->cdev,
+                      "Interrupt Status block is NULL - cannot check for new interrupts!\n");
        } else {
                u32 tmp_index = sb_info->sb_ack;
 
@@ -2255,9 +2269,8 @@ void qed_int_sp_dpc(unsigned long hwfn_cookie)
        }
 
        if (!sb_attn || !sb_attn->sb_attn) {
-               DP_ERR(
-                       p_hwfn->cdev,
-                       "Attentions Status block is NULL - cannot check for new attentions!\n");
+               DP_ERR(p_hwfn->cdev,
+                      "Attentions Status block is NULL - cannot check for new attentions!\n");
        } else {
                u16 tmp_index = sb_attn->index;
 
@@ -2313,8 +2326,7 @@ static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn)
        if (p_sb->sb_attn)
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  SB_ATTN_ALIGNED_SIZE(p_hwfn),
-                                 p_sb->sb_attn,
-                                 p_sb->sb_phys);
+                                 p_sb->sb_attn, p_sb->sb_phys);
        kfree(p_sb);
 }
 
@@ -2337,8 +2349,7 @@ static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn,
 
 static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn,
                                 struct qed_ptt *p_ptt,
-                                void *sb_virt_addr,
-                                dma_addr_t sb_phy_addr)
+                                void *sb_virt_addr, dma_addr_t sb_phy_addr)
 {
        struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
        int i, j, k;
@@ -2378,15 +2389,13 @@ static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
 {
        struct qed_dev *cdev = p_hwfn->cdev;
        struct qed_sb_attn_info *p_sb;
-       void *p_virt;
        dma_addr_t p_phys = 0;
+       void *p_virt;
 
        /* SB struct */
        p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
-       if (!p_sb) {
-               DP_NOTICE(cdev, "Failed to allocate `struct qed_sb_attn_info'\n");
+       if (!p_sb)
                return -ENOMEM;
-       }
 
        /* SB ring  */
        p_virt = dma_alloc_coherent(&cdev->pdev->dev,
@@ -2394,7 +2403,6 @@ static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
                                    &p_phys, GFP_KERNEL);
 
        if (!p_virt) {
-               DP_NOTICE(cdev, "Failed to allocate status block (attentions)\n");
                kfree(p_sb);
                return -ENOMEM;
        }
@@ -2412,9 +2420,7 @@ static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
 
 void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
                           struct cau_sb_entry *p_sb_entry,
-                          u8 pf_id,
-                          u16 vf_number,
-                          u8 vf_valid)
+                          u8 pf_id, u16 vf_number, u8 vf_valid)
 {
        struct qed_dev *cdev = p_hwfn->cdev;
        u32 cau_state;
@@ -2428,12 +2434,6 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
        SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
        SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);
 
-       /* setting the time resultion to a fixed value ( = 1) */
-       SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0,
-                 QED_CAU_DEF_RX_TIMER_RES);
-       SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1,
-                 QED_CAU_DEF_TX_TIMER_RES);
-
        cau_state = CAU_HC_DISABLE_STATE;
 
        if (cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
@@ -2468,9 +2468,7 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
 void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
                         struct qed_ptt *p_ptt,
                         dma_addr_t sb_phys,
-                        u16 igu_sb_id,
-                        u16 vf_number,
-                        u8 vf_valid)
+                        u16 igu_sb_id, u16 vf_number, u8 vf_valid)
 {
        struct cau_sb_entry sb_entry;
 
@@ -2502,8 +2500,9 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
 
        /* Configure pi coalescing if set */
        if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
+               u8 num_tc = p_hwfn->hw_info.num_hw_tc;
                u8 timeset, timer_res;
-               u8 num_tc = 1, i;
+               u8 i;
 
                /* timeset = (coalesce >> timer-res), timeset is 7bit wide */
                if (p_hwfn->cdev->rx_coalesce_usecs <= 0x7F)
@@ -2514,8 +2513,7 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
                        timer_res = 2;
                timeset = (u8)(p_hwfn->cdev->rx_coalesce_usecs >> timer_res);
                qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
-                                   QED_COAL_RX_STATE_MACHINE,
-                                   timeset);
+                                   QED_COAL_RX_STATE_MACHINE, timeset);
 
                if (p_hwfn->cdev->tx_coalesce_usecs <= 0x7F)
                        timer_res = 0;
@@ -2541,8 +2539,7 @@ void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
                         u8 timeset)
 {
        struct cau_pi_entry pi_entry;
-       u32 sb_offset;
-       u32 pi_offset;
+       u32 sb_offset, pi_offset;
 
        if (IS_VF(p_hwfn->cdev))
                return;
@@ -2569,8 +2566,7 @@ void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
 }
 
 void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
-                     struct qed_ptt *p_ptt,
-                     struct qed_sb_info *sb_info)
+                     struct qed_ptt *p_ptt, struct qed_sb_info *sb_info)
 {
        /* zero status block and ack counter */
        sb_info->sb_ack = 0;
@@ -2590,8 +2586,7 @@ void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
  *
  * @return u16
  */
-static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn,
-                            u16 sb_id)
+static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
 {
        u16 igu_sb_id;
 
@@ -2603,8 +2598,12 @@ static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn,
        else
                igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id);
 
-       DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "SB [%s] index is 0x%04x\n",
-                  (sb_id == QED_SP_SB_ID) ? "DSB" : "non-DSB", igu_sb_id);
+       if (sb_id == QED_SP_SB_ID)
+               DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+                          "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
+       else
+               DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+                          "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);
 
        return igu_sb_id;
 }
@@ -2612,9 +2611,7 @@ static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn,
 int qed_int_sb_init(struct qed_hwfn *p_hwfn,
                    struct qed_ptt *p_ptt,
                    struct qed_sb_info *sb_info,
-                   void *sb_virt_addr,
-                   dma_addr_t sb_phy_addr,
-                   u16 sb_id)
+                   void *sb_virt_addr, dma_addr_t sb_phy_addr, u16 sb_id)
 {
        sb_info->sb_virt = sb_virt_addr;
        sb_info->sb_phys = sb_phy_addr;
@@ -2650,8 +2647,7 @@ int qed_int_sb_init(struct qed_hwfn *p_hwfn,
 }
 
 int qed_int_sb_release(struct qed_hwfn *p_hwfn,
-                      struct qed_sb_info *sb_info,
-                      u16 sb_id)
+                      struct qed_sb_info *sb_info, u16 sb_id)
 {
        if (sb_id == QED_SP_SB_ID) {
                DP_ERR(p_hwfn, "Do Not free sp sb using this function");
@@ -2685,8 +2681,7 @@ static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn)
        kfree(p_sb);
 }
 
-static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn,
-                              struct qed_ptt *p_ptt)
+static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
        struct qed_sb_sp_info *p_sb;
        dma_addr_t p_phys = 0;
@@ -2694,17 +2689,14 @@ static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn,
 
        /* SB struct */
        p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
-       if (!p_sb) {
-               DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sb_info'\n");
+       if (!p_sb)
                return -ENOMEM;
-       }
 
        /* SB ring  */
        p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                    SB_ALIGNED_SIZE(p_hwfn),
                                    &p_phys, GFP_KERNEL);
        if (!p_virt) {
-               DP_NOTICE(p_hwfn, "Failed to allocate status block\n");
                kfree(p_sb);
                return -ENOMEM;
        }
@@ -2721,9 +2713,7 @@ static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn,
 
 int qed_int_register_cb(struct qed_hwfn *p_hwfn,
                        qed_int_comp_cb_t comp_cb,
-                       void *cookie,
-                       u8 *sb_idx,
-                       __le16 **p_fw_cons)
+                       void *cookie, u8 *sb_idx, __le16 **p_fw_cons)
 {
        struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
        int rc = -ENOMEM;
@@ -2764,8 +2754,7 @@ u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn)
 }
 
 void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
-                           struct qed_ptt *p_ptt,
-                           enum qed_int_mode int_mode)
+                           struct qed_ptt *p_ptt, enum qed_int_mode int_mode)
 {
        u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;
 
@@ -2809,7 +2798,7 @@ int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
        qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
        if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
                rc = qed_slowpath_irq_req(p_hwfn);
-               if (rc != 0) {
+               if (rc) {
                        DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n");
                        return -EINVAL;
                }
@@ -2822,8 +2811,7 @@ int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
        return rc;
 }
 
-void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
-                            struct qed_ptt *p_ptt)
+void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
        p_hwfn->b_int_enabled = 0;
 
@@ -2950,13 +2938,11 @@ void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
                                        p_hwfn->hw_info.opaque_fid, b_set);
 }
 
-static u32 qed_int_igu_read_cam_block(struct qed_hwfn  *p_hwfn,
-                                     struct qed_ptt    *p_ptt,
-                                     u16               sb_id)
+static u32 qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn,
+                                     struct qed_ptt *p_ptt, u16 sb_id)
 {
        u32 val = qed_rd(p_hwfn, p_ptt,
-                        IGU_REG_MAPPING_MEMORY +
-                        sizeof(u32) * sb_id);
+                        IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id);
        struct qed_igu_block *p_block;
 
        p_block = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
@@ -2983,8 +2969,7 @@ out:
        return val;
 }
 
-int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
-                        struct qed_ptt *p_ptt)
+int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
        struct qed_igu_info *p_igu_info;
        u32 val, min_vf = 0, max_vf = 0;
@@ -2993,7 +2978,6 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
        u16 prev_sb_id = 0xFF;
 
        p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL);
-
        if (!p_hwfn->hw_info.p_igu_info)
                return -ENOMEM;
 
@@ -3071,6 +3055,31 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
                        }
                }
        }
+
+       /* There's a possibility the igu_sb_cnt_iov doesn't properly reflect
+        * the number of VF SBs [especially for first VF on engine, as we can't
+        * differentiate between empty entries and its own entries].
+        * Since we don't really support more SBs than VFs today, prevent any
+        * such configuration by sanitizing the number of SBs to equal the
+        * number of VFs.
+        */
+       if (IS_PF_SRIOV(p_hwfn)) {
+               u16 total_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
+
+               if (total_vfs < p_igu_info->free_blks) {
+                       DP_VERBOSE(p_hwfn,
+                                  (NETIF_MSG_INTR | QED_MSG_IOV),
+                                  "Limiting number of SBs for IOV - %04x --> %04x\n",
+                                  p_igu_info->free_blks,
+                                  p_hwfn->cdev->p_iov_info->total_vfs);
+                       p_igu_info->free_blks = total_vfs;
+               } else if (total_vfs > p_igu_info->free_blks) {
+                       DP_NOTICE(p_hwfn,
+                                 "IGU has only %04x SBs for VFs while the device has %04x VFs\n",
+                                 p_igu_info->free_blks, total_vfs);
+                       return -EINVAL;
+               }
+       }
        p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks;
 
        DP_VERBOSE(
@@ -3104,22 +3113,19 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
  */
 void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn)
 {
-       u32 igu_pf_conf = 0;
-
-       igu_pf_conf |= IGU_PF_CONF_FUNC_EN;
+       u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;
 
        STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
 }
 
 u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn)
 {
-       u64 intr_status = 0;
-       u32 intr_status_lo = 0;
-       u32 intr_status_hi = 0;
        u32 lsb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_LSB_UPPER -
                               IGU_CMD_INT_ACK_BASE;
        u32 msb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_MSB_UPPER -
                               IGU_CMD_INT_ACK_BASE;
+       u32 intr_status_hi = 0, intr_status_lo = 0;
+       u64 intr_status = 0;
 
        intr_status_lo = REG_RD(p_hwfn,
                                GTT_BAR0_MAP_REG_IGU_CMD +
@@ -3153,26 +3159,20 @@ static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn)
        kfree(p_hwfn->sp_dpc);
 }
 
-int qed_int_alloc(struct qed_hwfn *p_hwfn,
-                 struct qed_ptt *p_ptt)
+int qed_int_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
        int rc = 0;
 
        rc = qed_int_sp_dpc_alloc(p_hwfn);
-       if (rc) {
-               DP_ERR(p_hwfn->cdev, "Failed to allocate sp dpc mem\n");
+       if (rc)
                return rc;
-       }
+
        rc = qed_int_sp_sb_alloc(p_hwfn, p_ptt);
-       if (rc) {
-               DP_ERR(p_hwfn->cdev, "Failed to allocate sp sb mem\n");
+       if (rc)
                return rc;
-       }
+
        rc = qed_int_sb_attn_alloc(p_hwfn, p_ptt);
-       if (rc) {
-               DP_ERR(p_hwfn->cdev, "Failed to allocate sb attn mem\n");
-               return rc;
-       }
+
        return rc;
 }
 
@@ -3183,8 +3183,7 @@ void qed_int_free(struct qed_hwfn *p_hwfn)
        qed_int_sp_dpc_free(p_hwfn);
 }
 
-void qed_int_setup(struct qed_hwfn *p_hwfn,
-                  struct qed_ptt *p_ptt)
+void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
        qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
        qed_int_sb_attn_setup(p_hwfn, p_ptt);
@@ -3214,7 +3213,12 @@ u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
                return sb_id - p_info->igu_base_sb;
        } else if ((sb_id >= p_info->igu_base_sb_iov) &&
                   (sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) {
-               return sb_id - p_info->igu_base_sb_iov + p_info->igu_sb_cnt;
+               /* We want the first VF queue to be adjacent to the
+                * last PF queue. Since the PF's L2 queues may cover
+                * only a subset of its SBs, use the L2 feature count
+                * rather than the SB count as the base.
+                */
+               return sb_id - p_info->igu_base_sb_iov +
+                      FEAT_NUM(p_hwfn, QED_PF_L2_QUE);
        } else {
                DP_NOTICE(p_hwfn, "SB %d not in range for function\n", sb_id);
                return 0;
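
Editorial aside on the mapping above (identifiers from the patch, numbers hypothetical):

	/* PF SBs map to queue ids relative to igu_base_sb; VF SBs are
	 * remapped so that the first VF queue follows the PF's L2 queues.
	 * E.g. igu_base_sb_iov = 12, FEAT_NUM(p_hwfn, QED_PF_L2_QUE) = 6:
	 * sb_id 13 -> 13 - 12 + 6 = queue id 7.
	 */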
index 0948be64dc782f926f5a2c8317c22552e5bf87d3..0ae0bb4593effc45a12895a43c49c8a1fdcfb636 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef _QED_INT_H
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
new file mode 100644 (file)
index 0000000..339c91d
--- /dev/null
@@ -0,0 +1,1372 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/param.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/version.h>
+#include <linux/workqueue.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/qed/qed_iscsi_if.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_int.h"
+#include "qed_iscsi.h"
+#include "qed_ll2.h"
+#include "qed_mcp.h"
+#include "qed_sp.h"
+#include "qed_sriov.h"
+#include "qed_reg_addr.h"
+
+struct qed_iscsi_conn {
+       struct list_head list_entry;
+       bool free_on_delete;
+
+       u16 conn_id;
+       u32 icid;
+       u32 fw_cid;
+
+       u8 layer_code;
+       u8 offl_flags;
+       u8 connect_mode;
+       u32 initial_ack;
+       dma_addr_t sq_pbl_addr;
+       struct qed_chain r2tq;
+       struct qed_chain xhq;
+       struct qed_chain uhq;
+
+       struct tcp_upload_params *tcp_upload_params_virt_addr;
+       dma_addr_t tcp_upload_params_phys_addr;
+       struct scsi_terminate_extra_params *queue_cnts_virt_addr;
+       dma_addr_t queue_cnts_phys_addr;
+       dma_addr_t syn_phy_addr;
+
+       u16 syn_ip_payload_length;
+       u8 local_mac[6];
+       u8 remote_mac[6];
+       u16 vlan_id;
+       u8 tcp_flags;
+       u8 ip_version;
+       u32 remote_ip[4];
+       u32 local_ip[4];
+       u8 ka_max_probe_cnt;
+       u8 dup_ack_theshold;
+       u32 rcv_next;
+       u32 snd_una;
+       u32 snd_next;
+       u32 snd_max;
+       u32 snd_wnd;
+       u32 rcv_wnd;
+       u32 snd_wl1;
+       u32 cwnd;
+       u32 ss_thresh;
+       u16 srtt;
+       u16 rtt_var;
+       u32 ts_time;
+       u32 ts_recent;
+       u32 ts_recent_age;
+       u32 total_rt;
+       u32 ka_timeout_delta;
+       u32 rt_timeout_delta;
+       u8 dup_ack_cnt;
+       u8 snd_wnd_probe_cnt;
+       u8 ka_probe_cnt;
+       u8 rt_cnt;
+       u32 flow_label;
+       u32 ka_timeout;
+       u32 ka_interval;
+       u32 max_rt_time;
+       u32 initial_rcv_wnd;
+       u8 ttl;
+       u8 tos_or_tc;
+       u16 remote_port;
+       u16 local_port;
+       u16 mss;
+       u8 snd_wnd_scale;
+       u8 rcv_wnd_scale;
+       u32 ts_ticks_per_second;
+       u16 da_timeout_value;
+       u8 ack_frequency;
+
+       u8 update_flag;
+       u8 default_cq;
+       u32 max_seq_size;
+       u32 max_recv_pdu_length;
+       u32 max_send_pdu_length;
+       u32 first_seq_length;
+       u32 exp_stat_sn;
+       u32 stat_sn;
+       u16 physical_q0;
+       u16 physical_q1;
+       u8 abortive_dsconnect;
+};
+
+static int
+qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
+                       enum spq_mode comp_mode,
+                       struct qed_spq_comp_cb *p_comp_addr,
+                       void *event_context, iscsi_event_cb_t async_event_cb)
+{
+       struct iscsi_init_ramrod_params *p_ramrod = NULL;
+       struct scsi_init_func_queues *p_queue = NULL;
+       struct qed_iscsi_pf_params *p_params = NULL;
+       struct iscsi_spe_func_init *p_init = NULL;
+       struct qed_spq_entry *p_ent = NULL;
+       struct qed_sp_init_data init_data;
+       int rc = 0;
+       u32 dval;
+       u16 val;
+       u8 i;
+
+       /* Get SPQ entry */
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.cid = qed_spq_get_cid(p_hwfn);
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = comp_mode;
+       init_data.p_comp_data = p_comp_addr;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                ISCSI_RAMROD_CMD_ID_INIT_FUNC,
+                                PROTOCOLID_ISCSI, &init_data);
+       if (rc)
+               return rc;
+
+       p_ramrod = &p_ent->ramrod.iscsi_init;
+       p_init = &p_ramrod->iscsi_init_spe;
+       p_params = &p_hwfn->pf_params.iscsi_pf_params;
+       p_queue = &p_init->q_params;
+
+       /* Sanity */
+       if (p_params->num_queues > p_hwfn->hw_info.feat_num[QED_ISCSI_CQ]) {
+               DP_ERR(p_hwfn,
+                      "Cannot satisfy CQ amount. Queues requested %d, CQs available %d. Aborting function start\n",
+                      p_params->num_queues,
+                      p_hwfn->hw_info.feat_num[QED_ISCSI_CQ]);
+               return -EINVAL;
+       }
+
+       SET_FIELD(p_init->hdr.flags,
+                 ISCSI_SLOW_PATH_HDR_LAYER_CODE, ISCSI_SLOW_PATH_LAYER_CODE);
+       p_init->hdr.op_code = ISCSI_RAMROD_CMD_ID_INIT_FUNC;
+
+       val = p_params->half_way_close_timeout;
+       p_init->half_way_close_timeout = cpu_to_le16(val);
+       p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring;
+       p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring;
+       p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring;
+       p_init->ooo_enable = p_params->ooo_enable;
+       p_init->ll2_rx_queue_id = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] +
+                                 p_params->ll2_ooo_queue_id;
+       p_init->func_params.log_page_size = p_params->log_page_size;
+       val = p_params->num_tasks;
+       p_init->func_params.num_tasks = cpu_to_le16(val);
+       p_init->debug_mode.flags = p_params->debug_mode;
+
+       DMA_REGPAIR_LE(p_queue->glbl_q_params_addr,
+                      p_params->glbl_q_params_addr);
+
+       val = p_params->cq_num_entries;
+       p_queue->cq_num_entries = cpu_to_le16(val);
+       val = p_params->cmdq_num_entries;
+       p_queue->cmdq_num_entries = cpu_to_le16(val);
+       p_queue->num_queues = p_params->num_queues;
+       dval = (u8)p_hwfn->hw_info.resc_start[QED_CMDQS_CQS];
+       p_queue->queue_relative_offset = (u8)dval;
+       p_queue->cq_sb_pi = p_params->gl_rq_pi;
+       p_queue->cmdq_sb_pi = p_params->gl_cmd_pi;
+
+       for (i = 0; i < p_params->num_queues; i++) {
+               val = p_hwfn->sbs_info[i]->igu_sb_id;
+               p_queue->cq_cmdq_sb_num_arr[i] = cpu_to_le16(val);
+       }
+
+       p_queue->bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ);
+
+       DMA_REGPAIR_LE(p_queue->bdq_pbl_base_address[BDQ_ID_RQ],
+                      p_params->bdq_pbl_base_addr[BDQ_ID_RQ]);
+       p_queue->bdq_pbl_num_entries[BDQ_ID_RQ] =
+           p_params->bdq_pbl_num_entries[BDQ_ID_RQ];
+       val = p_params->bdq_xoff_threshold[BDQ_ID_RQ];
+       p_queue->bdq_xoff_threshold[BDQ_ID_RQ] = cpu_to_le16(val);
+       val = p_params->bdq_xon_threshold[BDQ_ID_RQ];
+       p_queue->bdq_xon_threshold[BDQ_ID_RQ] = cpu_to_le16(val);
+
+       DMA_REGPAIR_LE(p_queue->bdq_pbl_base_address[BDQ_ID_IMM_DATA],
+                      p_params->bdq_pbl_base_addr[BDQ_ID_IMM_DATA]);
+       p_queue->bdq_pbl_num_entries[BDQ_ID_IMM_DATA] =
+           p_params->bdq_pbl_num_entries[BDQ_ID_IMM_DATA];
+       val = p_params->bdq_xoff_threshold[BDQ_ID_IMM_DATA];
+       p_queue->bdq_xoff_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(val);
+       val = p_params->bdq_xon_threshold[BDQ_ID_IMM_DATA];
+       p_queue->bdq_xon_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(val);
+       val = p_params->rq_buffer_size;
+       p_queue->rq_buffer_size = cpu_to_le16(val);
+       if (p_params->is_target) {
+               SET_FIELD(p_queue->q_validity,
+                         SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1);
+               if (p_queue->bdq_pbl_num_entries[BDQ_ID_IMM_DATA])
+                       SET_FIELD(p_queue->q_validity,
+                                 SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID, 1);
+               SET_FIELD(p_queue->q_validity,
+                         SCSI_INIT_FUNC_QUEUES_CMD_VALID, 1);
+       } else {
+               SET_FIELD(p_queue->q_validity,
+                         SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1);
+       }
+       p_ramrod->tcp_init.two_msl_timer = cpu_to_le32(p_params->two_msl_timer);
+       val = p_params->tx_sws_timer;
+       p_ramrod->tcp_init.tx_sws_timer = cpu_to_le16(val);
+       p_ramrod->tcp_init.maxfinrt = p_params->max_fin_rt;
+
+       p_hwfn->p_iscsi_info->event_context = event_context;
+       p_hwfn->p_iscsi_info->event_cb = async_event_cb;
+
+       return qed_spq_post(p_hwfn, p_ent, NULL);
+}
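+/* Editorial note: the slow-path helpers in this file all follow the pattern
+ * above -- qed_sp_init_request() acquires an SPQ entry, the protocol-specific
+ * ramrod inside it is filled, and qed_spq_post() hands it to the firmware.
+ */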
+
+static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
+                                    struct qed_iscsi_conn *p_conn,
+                                    enum spq_mode comp_mode,
+                                    struct qed_spq_comp_cb *p_comp_addr)
+{
+       struct iscsi_spe_conn_offload *p_ramrod = NULL;
+       struct tcp_offload_params_opt2 *p_tcp2 = NULL;
+       struct tcp_offload_params *p_tcp = NULL;
+       struct qed_spq_entry *p_ent = NULL;
+       struct qed_sp_init_data init_data;
+       dma_addr_t r2tq_pbl_addr;
+       dma_addr_t xhq_pbl_addr;
+       dma_addr_t uhq_pbl_addr;
+       u16 physical_q;
+       int rc = 0;
+       u32 dval;
+       u16 wval;
+       u8 i;
+       u16 *p;
+
+       /* Get SPQ entry */
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.cid = p_conn->icid;
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = comp_mode;
+       init_data.p_comp_data = p_comp_addr;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN,
+                                PROTOCOLID_ISCSI, &init_data);
+       if (rc)
+               return rc;
+
+       p_ramrod = &p_ent->ramrod.iscsi_conn_offload;
+
+       /* The transmission PQ is the first physical queue of the PF */
+       physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+       p_conn->physical_q0 = cpu_to_le16(physical_q);
+       p_ramrod->iscsi.physical_q0 = cpu_to_le16(physical_q);
+
+       /* iSCSI Pure-ACK PQ */
+       physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
+       p_conn->physical_q1 = cpu_to_le16(physical_q);
+       p_ramrod->iscsi.physical_q1 = cpu_to_le16(physical_q);
+
+       p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN;
+       SET_FIELD(p_ramrod->hdr.flags, ISCSI_SLOW_PATH_HDR_LAYER_CODE,
+                 p_conn->layer_code);
+
+       p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
+       p_ramrod->fw_cid = cpu_to_le32(p_conn->icid);
+
+       DMA_REGPAIR_LE(p_ramrod->iscsi.sq_pbl_addr, p_conn->sq_pbl_addr);
+
+       r2tq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->r2tq);
+       DMA_REGPAIR_LE(p_ramrod->iscsi.r2tq_pbl_addr, r2tq_pbl_addr);
+
+       xhq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->xhq);
+       DMA_REGPAIR_LE(p_ramrod->iscsi.xhq_pbl_addr, xhq_pbl_addr);
+
+       uhq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->uhq);
+       DMA_REGPAIR_LE(p_ramrod->iscsi.uhq_pbl_addr, uhq_pbl_addr);
+
+       p_ramrod->iscsi.initial_ack = cpu_to_le32(p_conn->initial_ack);
+       p_ramrod->iscsi.flags = p_conn->offl_flags;
+       p_ramrod->iscsi.default_cq = p_conn->default_cq;
+       p_ramrod->iscsi.stat_sn = cpu_to_le32(p_conn->stat_sn);
+
+       if (!GET_FIELD(p_ramrod->iscsi.flags,
+                      ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B)) {
+               p_tcp = &p_ramrod->tcp;
+
+               p = (u16 *)p_conn->local_mac;
+               p_tcp->local_mac_addr_hi = swab16(get_unaligned(p));
+               p_tcp->local_mac_addr_mid = swab16(get_unaligned(p + 1));
+               p_tcp->local_mac_addr_lo = swab16(get_unaligned(p + 2));
+
+               p = (u16 *)p_conn->remote_mac;
+               p_tcp->remote_mac_addr_hi = swab16(get_unaligned(p));
+               p_tcp->remote_mac_addr_mid = swab16(get_unaligned(p + 1));
+               p_tcp->remote_mac_addr_lo = swab16(get_unaligned(p + 2));
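+
+               /* (Editorial) each 6-byte MAC is handed to the firmware as
+                * three byte-swapped 16-bit words: hi/mid/lo.
+                */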
+
+               p_tcp->vlan_id = cpu_to_le16(p_conn->vlan_id);
+
+               p_tcp->flags = p_conn->tcp_flags;
+               p_tcp->ip_version = p_conn->ip_version;
+               for (i = 0; i < 4; i++) {
+                       dval = p_conn->remote_ip[i];
+                       p_tcp->remote_ip[i] = cpu_to_le32(dval);
+                       dval = p_conn->local_ip[i];
+                       p_tcp->local_ip[i] = cpu_to_le32(dval);
+               }
+               p_tcp->ka_max_probe_cnt = p_conn->ka_max_probe_cnt;
+               p_tcp->dup_ack_theshold = p_conn->dup_ack_theshold;
+
+               p_tcp->rcv_next = cpu_to_le32(p_conn->rcv_next);
+               p_tcp->snd_una = cpu_to_le32(p_conn->snd_una);
+               p_tcp->snd_next = cpu_to_le32(p_conn->snd_next);
+               p_tcp->snd_max = cpu_to_le32(p_conn->snd_max);
+               p_tcp->snd_wnd = cpu_to_le32(p_conn->snd_wnd);
+               p_tcp->rcv_wnd = cpu_to_le32(p_conn->rcv_wnd);
+               p_tcp->snd_wl1 = cpu_to_le32(p_conn->snd_wl1);
+               p_tcp->cwnd = cpu_to_le32(p_conn->cwnd);
+               p_tcp->ss_thresh = cpu_to_le32(p_conn->ss_thresh);
+               p_tcp->srtt = cpu_to_le16(p_conn->srtt);
+               p_tcp->rtt_var = cpu_to_le16(p_conn->rtt_var);
+               p_tcp->ts_time = cpu_to_le32(p_conn->ts_time);
+               p_tcp->ts_recent = cpu_to_le32(p_conn->ts_recent);
+               p_tcp->ts_recent_age = cpu_to_le32(p_conn->ts_recent_age);
+               p_tcp->total_rt = cpu_to_le32(p_conn->total_rt);
+               dval = p_conn->ka_timeout_delta;
+               p_tcp->ka_timeout_delta = cpu_to_le32(dval);
+               dval = p_conn->rt_timeout_delta;
+               p_tcp->rt_timeout_delta = cpu_to_le32(dval);
+               p_tcp->dup_ack_cnt = p_conn->dup_ack_cnt;
+               p_tcp->snd_wnd_probe_cnt = p_conn->snd_wnd_probe_cnt;
+               p_tcp->ka_probe_cnt = p_conn->ka_probe_cnt;
+               p_tcp->rt_cnt = p_conn->rt_cnt;
+               p_tcp->flow_label = cpu_to_le32(p_conn->flow_label);
+               p_tcp->ka_timeout = cpu_to_le32(p_conn->ka_timeout);
+               p_tcp->ka_interval = cpu_to_le32(p_conn->ka_interval);
+               p_tcp->max_rt_time = cpu_to_le32(p_conn->max_rt_time);
+               dval = p_conn->initial_rcv_wnd;
+               p_tcp->initial_rcv_wnd = cpu_to_le32(dval);
+               p_tcp->ttl = p_conn->ttl;
+               p_tcp->tos_or_tc = p_conn->tos_or_tc;
+               p_tcp->remote_port = cpu_to_le16(p_conn->remote_port);
+               p_tcp->local_port = cpu_to_le16(p_conn->local_port);
+               p_tcp->mss = cpu_to_le16(p_conn->mss);
+               p_tcp->snd_wnd_scale = p_conn->snd_wnd_scale;
+               p_tcp->rcv_wnd_scale = p_conn->rcv_wnd_scale;
+               dval = p_conn->ts_ticks_per_second;
+               p_tcp->ts_ticks_per_second = cpu_to_le32(dval);
+               wval = p_conn->da_timeout_value;
+               p_tcp->da_timeout_value = cpu_to_le16(wval);
+               p_tcp->ack_frequency = p_conn->ack_frequency;
+               p_tcp->connect_mode = p_conn->connect_mode;
+       } else {
+               p_tcp2 =
+                   &((struct iscsi_spe_conn_offload_option2 *)p_ramrod)->tcp;
+
+               p = (u16 *)p_conn->local_mac;
+               p_tcp2->local_mac_addr_hi = swab16(get_unaligned(p));
+               p_tcp2->local_mac_addr_mid = swab16(get_unaligned(p + 1));
+               p_tcp2->local_mac_addr_lo = swab16(get_unaligned(p + 2));
+
+               p = (u16 *)p_conn->remote_mac;
+               p_tcp2->remote_mac_addr_hi = swab16(get_unaligned(p));
+               p_tcp2->remote_mac_addr_mid = swab16(get_unaligned(p + 1));
+               p_tcp2->remote_mac_addr_lo = swab16(get_unaligned(p + 2));
+
+               p_tcp2->vlan_id = cpu_to_le16(p_conn->vlan_id);
+               p_tcp2->flags = p_conn->tcp_flags;
+
+               p_tcp2->ip_version = p_conn->ip_version;
+               for (i = 0; i < 4; i++) {
+                       dval = p_conn->remote_ip[i];
+                       p_tcp2->remote_ip[i] = cpu_to_le32(dval);
+                       dval = p_conn->local_ip[i];
+                       p_tcp2->local_ip[i] = cpu_to_le32(dval);
+               }
+
+               p_tcp2->flow_label = cpu_to_le32(p_conn->flow_label);
+               p_tcp2->ttl = p_conn->ttl;
+               p_tcp2->tos_or_tc = p_conn->tos_or_tc;
+               p_tcp2->remote_port = cpu_to_le16(p_conn->remote_port);
+               p_tcp2->local_port = cpu_to_le16(p_conn->local_port);
+               p_tcp2->mss = cpu_to_le16(p_conn->mss);
+               p_tcp2->rcv_wnd_scale = p_conn->rcv_wnd_scale;
+               p_tcp2->connect_mode = p_conn->connect_mode;
+               wval = p_conn->syn_ip_payload_length;
+               p_tcp2->syn_ip_payload_length = cpu_to_le16(wval);
+               p_tcp2->syn_phy_addr_lo = DMA_LO_LE(p_conn->syn_phy_addr);
+               p_tcp2->syn_phy_addr_hi = DMA_HI_LE(p_conn->syn_phy_addr);
+       }
+
+       return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_iscsi_conn_update(struct qed_hwfn *p_hwfn,
+                                   struct qed_iscsi_conn *p_conn,
+                                   enum spq_mode comp_mode,
+                                   struct qed_spq_comp_cb *p_comp_addr)
+{
+       struct iscsi_conn_update_ramrod_params *p_ramrod = NULL;
+       struct qed_spq_entry *p_ent = NULL;
+       struct qed_sp_init_data init_data;
+       int rc = -EINVAL;
+       u32 dval;
+
+       /* Get SPQ entry */
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.cid = p_conn->icid;
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = comp_mode;
+       init_data.p_comp_data = p_comp_addr;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
+                                PROTOCOLID_ISCSI, &init_data);
+       if (rc)
+               return rc;
+
+       p_ramrod = &p_ent->ramrod.iscsi_conn_update;
+       p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_UPDATE_CONN;
+       SET_FIELD(p_ramrod->hdr.flags,
+                 ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code);
+
+       p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
+       p_ramrod->fw_cid = cpu_to_le32(p_conn->icid);
+       p_ramrod->flags = p_conn->update_flag;
+       p_ramrod->max_seq_size = cpu_to_le32(p_conn->max_seq_size);
+       dval = p_conn->max_recv_pdu_length;
+       p_ramrod->max_recv_pdu_length = cpu_to_le32(dval);
+       dval = p_conn->max_send_pdu_length;
+       p_ramrod->max_send_pdu_length = cpu_to_le32(dval);
+       dval = p_conn->first_seq_length;
+       p_ramrod->first_seq_length = cpu_to_le32(dval);
+       p_ramrod->exp_stat_sn = cpu_to_le32(p_conn->exp_stat_sn);
+
+       return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_iscsi_conn_terminate(struct qed_hwfn *p_hwfn,
+                                      struct qed_iscsi_conn *p_conn,
+                                      enum spq_mode comp_mode,
+                                      struct qed_spq_comp_cb *p_comp_addr)
+{
+       struct iscsi_spe_conn_termination *p_ramrod = NULL;
+       struct qed_spq_entry *p_ent = NULL;
+       struct qed_sp_init_data init_data;
+       int rc = -EINVAL;
+
+       /* Get SPQ entry */
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.cid = p_conn->icid;
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = comp_mode;
+       init_data.p_comp_data = p_comp_addr;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                ISCSI_RAMROD_CMD_ID_TERMINATION_CONN,
+                                PROTOCOLID_ISCSI, &init_data);
+       if (rc)
+               return rc;
+
+       p_ramrod = &p_ent->ramrod.iscsi_conn_terminate;
+       p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_TERMINATION_CONN;
+       SET_FIELD(p_ramrod->hdr.flags,
+                 ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code);
+
+       p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
+       p_ramrod->fw_cid = cpu_to_le32(p_conn->icid);
+       p_ramrod->abortive = p_conn->abortive_dsconnect;
+
+       DMA_REGPAIR_LE(p_ramrod->query_params_addr,
+                      p_conn->tcp_upload_params_phys_addr);
+       DMA_REGPAIR_LE(p_ramrod->queue_cnts_addr, p_conn->queue_cnts_phys_addr);
+
+       return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_iscsi_conn_clear_sq(struct qed_hwfn *p_hwfn,
+                                     struct qed_iscsi_conn *p_conn,
+                                     enum spq_mode comp_mode,
+                                     struct qed_spq_comp_cb *p_comp_addr)
+{
+       struct iscsi_slow_path_hdr *p_ramrod = NULL;
+       struct qed_spq_entry *p_ent = NULL;
+       struct qed_sp_init_data init_data;
+       int rc = -EINVAL;
+
+       /* Get SPQ entry */
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.cid = p_conn->icid;
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = comp_mode;
+       init_data.p_comp_data = p_comp_addr;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                ISCSI_RAMROD_CMD_ID_CLEAR_SQ,
+                                PROTOCOLID_ISCSI, &init_data);
+       if (rc)
+               return rc;
+
+       p_ramrod = &p_ent->ramrod.iscsi_empty;
+       p_ramrod->op_code = ISCSI_RAMROD_CMD_ID_CLEAR_SQ;
+       SET_FIELD(p_ramrod->flags,
+                 ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code);
+
+       return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_iscsi_func_stop(struct qed_hwfn *p_hwfn,
+                                 enum spq_mode comp_mode,
+                                 struct qed_spq_comp_cb *p_comp_addr)
+{
+       struct iscsi_spe_func_dstry *p_ramrod = NULL;
+       struct qed_spq_entry *p_ent = NULL;
+       struct qed_sp_init_data init_data;
+       int rc = 0;
+
+       /* Get SPQ entry */
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.cid = qed_spq_get_cid(p_hwfn);
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = comp_mode;
+       init_data.p_comp_data = p_comp_addr;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                ISCSI_RAMROD_CMD_ID_DESTROY_FUNC,
+                                PROTOCOLID_ISCSI, &init_data);
+       if (rc)
+               return rc;
+
+       p_ramrod = &p_ent->ramrod.iscsi_destroy;
+       p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_DESTROY_FUNC;
+
+       return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static void __iomem *qed_iscsi_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
+{
+       return (u8 __iomem *)p_hwfn->doorbells +
+                            qed_db_addr(cid, DQ_DEMS_LEGACY);
+}
+
+static void __iomem *qed_iscsi_get_primary_bdq_prod(struct qed_hwfn *p_hwfn,
+                                                   u8 bdq_id)
+{
+       if (RESC_NUM(p_hwfn, QED_BDQ)) {
+               return (u8 __iomem *)p_hwfn->regview +
+                      GTT_BAR0_MAP_REG_MSDM_RAM +
+                      MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
+                                                                 QED_BDQ),
+                                                      bdq_id);
+       } else {
+               DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
+               return NULL;
+       }
+}
+
+static void __iomem *qed_iscsi_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
+                                                     u8 bdq_id)
+{
+       if (RESC_NUM(p_hwfn, QED_BDQ)) {
+               return (u8 __iomem *)p_hwfn->regview +
+                      GTT_BAR0_MAP_REG_TSDM_RAM +
+                      TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
+                                                                 QED_BDQ),
+                                                      bdq_id);
+       } else {
+               DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
+               return NULL;
+       }
+}
+
+static int qed_iscsi_setup_connection(struct qed_hwfn *p_hwfn,
+                                     struct qed_iscsi_conn *p_conn)
+{
+       if (!p_conn->queue_cnts_virt_addr)
+               goto nomem;
+       memset(p_conn->queue_cnts_virt_addr, 0,
+              sizeof(*p_conn->queue_cnts_virt_addr));
+
+       if (!p_conn->tcp_upload_params_virt_addr)
+               goto nomem;
+       memset(p_conn->tcp_upload_params_virt_addr, 0,
+              sizeof(*p_conn->tcp_upload_params_virt_addr));
+
+       if (!p_conn->r2tq.p_virt_addr)
+               goto nomem;
+       qed_chain_pbl_zero_mem(&p_conn->r2tq);
+
+       if (!p_conn->uhq.p_virt_addr)
+               goto nomem;
+       qed_chain_pbl_zero_mem(&p_conn->uhq);
+
+       if (!p_conn->xhq.p_virt_addr)
+               goto nomem;
+       qed_chain_pbl_zero_mem(&p_conn->xhq);
+
+       return 0;
+nomem:
+       return -ENOMEM;
+}
+
+static int qed_iscsi_allocate_connection(struct qed_hwfn *p_hwfn,
+                                        struct qed_iscsi_conn **p_out_conn)
+{
+       u16 uhq_num_elements = 0, xhq_num_elements = 0, r2tq_num_elements = 0;
+       struct scsi_terminate_extra_params *p_q_cnts = NULL;
+       struct qed_iscsi_pf_params *p_params = NULL;
+       struct tcp_upload_params *p_tcp = NULL;
+       struct qed_iscsi_conn *p_conn = NULL;
+       int rc = 0;
+
+       /* Try finding a free connection that can be used */
+       spin_lock_bh(&p_hwfn->p_iscsi_info->lock);
+       if (!list_empty(&p_hwfn->p_iscsi_info->free_list))
+               p_conn = list_first_entry(&p_hwfn->p_iscsi_info->free_list,
+                                         struct qed_iscsi_conn, list_entry);
+       if (p_conn) {
+               list_del(&p_conn->list_entry);
+               spin_unlock_bh(&p_hwfn->p_iscsi_info->lock);
+               *p_out_conn = p_conn;
+               return 0;
+       }
+       spin_unlock_bh(&p_hwfn->p_iscsi_info->lock);
+
+       /* Need to allocate a new connection */
+       p_params = &p_hwfn->pf_params.iscsi_pf_params;
+
+       p_conn = kzalloc(sizeof(*p_conn), GFP_KERNEL);
+       if (!p_conn)
+               return -ENOMEM;
+
+       p_q_cnts = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+                                     sizeof(*p_q_cnts),
+                                     &p_conn->queue_cnts_phys_addr,
+                                     GFP_KERNEL);
+       if (!p_q_cnts)
+               goto nomem_queue_cnts_param;
+       p_conn->queue_cnts_virt_addr = p_q_cnts;
+
+       p_tcp = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+                                  sizeof(*p_tcp),
+                                  &p_conn->tcp_upload_params_phys_addr,
+                                  GFP_KERNEL);
+       if (!p_tcp)
+               goto nomem_upload_param;
+       p_conn->tcp_upload_params_virt_addr = p_tcp;
+
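+       /* (Editorial) 0x80 is the R2TQ element size in bytes -- the same
+        * value passed as elem_size to qed_chain_alloc() below.
+        */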
+       r2tq_num_elements = p_params->num_r2tq_pages_in_ring *
+                           QED_CHAIN_PAGE_SIZE / 0x80;
+       rc = qed_chain_alloc(p_hwfn->cdev,
+                            QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+                            QED_CHAIN_MODE_PBL,
+                            QED_CHAIN_CNT_TYPE_U16,
+                            r2tq_num_elements, 0x80, &p_conn->r2tq);
+       if (rc)
+               goto nomem_r2tq;
+
+       uhq_num_elements = p_params->num_uhq_pages_in_ring *
+                          QED_CHAIN_PAGE_SIZE / sizeof(struct iscsi_uhqe);
+       rc = qed_chain_alloc(p_hwfn->cdev,
+                            QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+                            QED_CHAIN_MODE_PBL,
+                            QED_CHAIN_CNT_TYPE_U16,
+                            uhq_num_elements,
+                            sizeof(struct iscsi_uhqe), &p_conn->uhq);
+       if (rc)
+               goto nomem_uhq;
+
+       xhq_num_elements = uhq_num_elements;
+       rc = qed_chain_alloc(p_hwfn->cdev,
+                            QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+                            QED_CHAIN_MODE_PBL,
+                            QED_CHAIN_CNT_TYPE_U16,
+                            xhq_num_elements,
+                            sizeof(struct iscsi_xhqe), &p_conn->xhq);
+       if (rc)
+               goto nomem;
+
+       p_conn->free_on_delete = true;
+       *p_out_conn = p_conn;
+       return 0;
+
+nomem:
+       qed_chain_free(p_hwfn->cdev, &p_conn->uhq);
+nomem_uhq:
+       qed_chain_free(p_hwfn->cdev, &p_conn->r2tq);
+nomem_r2tq:
+       dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                         sizeof(struct tcp_upload_params),
+                         p_conn->tcp_upload_params_virt_addr,
+                         p_conn->tcp_upload_params_phys_addr);
+nomem_upload_param:
+       dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                         sizeof(struct scsi_terminate_extra_params),
+                         p_conn->queue_cnts_virt_addr,
+                         p_conn->queue_cnts_phys_addr);
+nomem_queue_cnts_param:
+       kfree(p_conn);
+
+       return -ENOMEM;
+}
+
+static int qed_iscsi_acquire_connection(struct qed_hwfn *p_hwfn,
+                                       struct qed_iscsi_conn *p_in_conn,
+                                       struct qed_iscsi_conn **p_out_conn)
+{
+       struct qed_iscsi_conn *p_conn = NULL;
+       int rc = 0;
+       u32 icid;
+
+       spin_lock_bh(&p_hwfn->p_iscsi_info->lock);
+       rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ISCSI, &icid);
+       spin_unlock_bh(&p_hwfn->p_iscsi_info->lock);
+       if (rc)
+               return rc;
+
+       /* Use input connection or allocate a new one */
+       if (p_in_conn)
+               p_conn = p_in_conn;
+       else
+               rc = qed_iscsi_allocate_connection(p_hwfn, &p_conn);
+
+       if (!rc)
+               rc = qed_iscsi_setup_connection(p_hwfn, p_conn);
+
+       if (rc) {
+               spin_lock_bh(&p_hwfn->p_iscsi_info->lock);
+               qed_cxt_release_cid(p_hwfn, icid);
+               spin_unlock_bh(&p_hwfn->p_iscsi_info->lock);
+               return rc;
+       }
+
+       p_conn->icid = icid;
+       p_conn->conn_id = (u16)icid;
+       p_conn->fw_cid = (p_hwfn->hw_info.opaque_fid << 16) | icid;
+
+       *p_out_conn = p_conn;
+
+       return rc;
+}
+
+static void qed_iscsi_release_connection(struct qed_hwfn *p_hwfn,
+                                        struct qed_iscsi_conn *p_conn)
+{
+       spin_lock_bh(&p_hwfn->p_iscsi_info->lock);
+       list_add_tail(&p_conn->list_entry, &p_hwfn->p_iscsi_info->free_list);
+       qed_cxt_release_cid(p_hwfn, p_conn->icid);
+       spin_unlock_bh(&p_hwfn->p_iscsi_info->lock);
+}
+
+static void qed_iscsi_free_connection(struct qed_hwfn *p_hwfn,
+                                     struct qed_iscsi_conn *p_conn)
+{
+       qed_chain_free(p_hwfn->cdev, &p_conn->xhq);
+       qed_chain_free(p_hwfn->cdev, &p_conn->uhq);
+       qed_chain_free(p_hwfn->cdev, &p_conn->r2tq);
+       dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                         sizeof(struct tcp_upload_params),
+                         p_conn->tcp_upload_params_virt_addr,
+                         p_conn->tcp_upload_params_phys_addr);
+       dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                         sizeof(struct scsi_terminate_extra_params),
+                         p_conn->queue_cnts_virt_addr,
+                         p_conn->queue_cnts_phys_addr);
+       kfree(p_conn);
+}
+
+struct qed_iscsi_info *qed_iscsi_alloc(struct qed_hwfn *p_hwfn)
+{
+       struct qed_iscsi_info *p_iscsi_info;
+
+       p_iscsi_info = kzalloc(sizeof(*p_iscsi_info), GFP_KERNEL);
+       if (!p_iscsi_info)
+               return NULL;
+
+       INIT_LIST_HEAD(&p_iscsi_info->free_list);
+       return p_iscsi_info;
+}
+
+void qed_iscsi_setup(struct qed_hwfn *p_hwfn,
+                    struct qed_iscsi_info *p_iscsi_info)
+{
+       spin_lock_init(&p_iscsi_info->lock);
+}
+
+void qed_iscsi_free(struct qed_hwfn *p_hwfn,
+                   struct qed_iscsi_info *p_iscsi_info)
+{
+       struct qed_iscsi_conn *p_conn = NULL;
+
+       while (!list_empty(&p_iscsi_info->free_list)) {
+               p_conn = list_first_entry(&p_iscsi_info->free_list,
+                                         struct qed_iscsi_conn, list_entry);
+               list_del(&p_conn->list_entry);
+               qed_iscsi_free_connection(p_hwfn, p_conn);
+       }
+
+       kfree(p_iscsi_info);
+}
+
+static void _qed_iscsi_get_tstats(struct qed_hwfn *p_hwfn,
+                                 struct qed_ptt *p_ptt,
+                                 struct qed_iscsi_stats *p_stats)
+{
+       struct tstorm_iscsi_stats_drv tstats;
+       u32 tstats_addr;
+
+       memset(&tstats, 0, sizeof(tstats));
+       tstats_addr = BAR0_MAP_REG_TSDM_RAM +
+                     TSTORM_ISCSI_RX_STATS_OFFSET(p_hwfn->rel_pf_id);
+       qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));
+
+       p_stats->iscsi_rx_bytes_cnt =
+           HILO_64_REGPAIR(tstats.iscsi_rx_bytes_cnt);
+       p_stats->iscsi_rx_packet_cnt =
+           HILO_64_REGPAIR(tstats.iscsi_rx_packet_cnt);
+       p_stats->iscsi_rx_new_ooo_isle_events_cnt =
+           HILO_64_REGPAIR(tstats.iscsi_rx_new_ooo_isle_events_cnt);
+       p_stats->iscsi_cmdq_threshold_cnt =
+           le32_to_cpu(tstats.iscsi_cmdq_threshold_cnt);
+       p_stats->iscsi_rq_threshold_cnt =
+           le32_to_cpu(tstats.iscsi_rq_threshold_cnt);
+       p_stats->iscsi_immq_threshold_cnt =
+           le32_to_cpu(tstats.iscsi_immq_threshold_cnt);
+}
+
+static void _qed_iscsi_get_mstats(struct qed_hwfn *p_hwfn,
+                                 struct qed_ptt *p_ptt,
+                                 struct qed_iscsi_stats *p_stats)
+{
+       struct mstorm_iscsi_stats_drv mstats;
+       u32 mstats_addr;
+
+       memset(&mstats, 0, sizeof(mstats));
+       mstats_addr = BAR0_MAP_REG_MSDM_RAM +
+                     MSTORM_ISCSI_RX_STATS_OFFSET(p_hwfn->rel_pf_id);
+       qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, sizeof(mstats));
+
+       p_stats->iscsi_rx_dropped_pdus_task_not_valid =
+           HILO_64_REGPAIR(mstats.iscsi_rx_dropped_pdus_task_not_valid);
+}
+
+static void _qed_iscsi_get_ustats(struct qed_hwfn *p_hwfn,
+                                 struct qed_ptt *p_ptt,
+                                 struct qed_iscsi_stats *p_stats)
+{
+       struct ustorm_iscsi_stats_drv ustats;
+       u32 ustats_addr;
+
+       memset(&ustats, 0, sizeof(ustats));
+       ustats_addr = BAR0_MAP_REG_USDM_RAM +
+                     USTORM_ISCSI_RX_STATS_OFFSET(p_hwfn->rel_pf_id);
+       qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));
+
+       p_stats->iscsi_rx_data_pdu_cnt =
+           HILO_64_REGPAIR(ustats.iscsi_rx_data_pdu_cnt);
+       p_stats->iscsi_rx_r2t_pdu_cnt =
+           HILO_64_REGPAIR(ustats.iscsi_rx_r2t_pdu_cnt);
+       p_stats->iscsi_rx_total_pdu_cnt =
+           HILO_64_REGPAIR(ustats.iscsi_rx_total_pdu_cnt);
+}
+
+static void _qed_iscsi_get_xstats(struct qed_hwfn *p_hwfn,
+                                 struct qed_ptt *p_ptt,
+                                 struct qed_iscsi_stats *p_stats)
+{
+       struct xstorm_iscsi_stats_drv xstats;
+       u32 xstats_addr;
+
+       memset(&xstats, 0, sizeof(xstats));
+       xstats_addr = BAR0_MAP_REG_XSDM_RAM +
+                     XSTORM_ISCSI_TX_STATS_OFFSET(p_hwfn->rel_pf_id);
+       qed_memcpy_from(p_hwfn, p_ptt, &xstats, xstats_addr, sizeof(xstats));
+
+       p_stats->iscsi_tx_go_to_slow_start_event_cnt =
+           HILO_64_REGPAIR(xstats.iscsi_tx_go_to_slow_start_event_cnt);
+       p_stats->iscsi_tx_fast_retransmit_event_cnt =
+           HILO_64_REGPAIR(xstats.iscsi_tx_fast_retransmit_event_cnt);
+}
+
+static void _qed_iscsi_get_ystats(struct qed_hwfn *p_hwfn,
+                                 struct qed_ptt *p_ptt,
+                                 struct qed_iscsi_stats *p_stats)
+{
+       struct ystorm_iscsi_stats_drv ystats;
+       u32 ystats_addr;
+
+       memset(&ystats, 0, sizeof(ystats));
+       ystats_addr = BAR0_MAP_REG_YSDM_RAM +
+                     YSTORM_ISCSI_TX_STATS_OFFSET(p_hwfn->rel_pf_id);
+       qed_memcpy_from(p_hwfn, p_ptt, &ystats, ystats_addr, sizeof(ystats));
+
+       p_stats->iscsi_tx_data_pdu_cnt =
+           HILO_64_REGPAIR(ystats.iscsi_tx_data_pdu_cnt);
+       p_stats->iscsi_tx_r2t_pdu_cnt =
+           HILO_64_REGPAIR(ystats.iscsi_tx_r2t_pdu_cnt);
+       p_stats->iscsi_tx_total_pdu_cnt =
+           HILO_64_REGPAIR(ystats.iscsi_tx_total_pdu_cnt);
+}
+
+static void _qed_iscsi_get_pstats(struct qed_hwfn *p_hwfn,
+                                 struct qed_ptt *p_ptt,
+                                 struct qed_iscsi_stats *p_stats)
+{
+       struct pstorm_iscsi_stats_drv pstats;
+       u32 pstats_addr;
+
+       memset(&pstats, 0, sizeof(pstats));
+       pstats_addr = BAR0_MAP_REG_PSDM_RAM +
+                     PSTORM_ISCSI_TX_STATS_OFFSET(p_hwfn->rel_pf_id);
+       qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));
+
+       p_stats->iscsi_tx_bytes_cnt =
+           HILO_64_REGPAIR(pstats.iscsi_tx_bytes_cnt);
+       p_stats->iscsi_tx_packet_cnt =
+           HILO_64_REGPAIR(pstats.iscsi_tx_packet_cnt);
+}
+
+static int qed_iscsi_get_stats(struct qed_hwfn *p_hwfn,
+                              struct qed_iscsi_stats *stats)
+{
+       struct qed_ptt *p_ptt;
+
+       memset(stats, 0, sizeof(*stats));
+
+       p_ptt = qed_ptt_acquire(p_hwfn);
+       if (!p_ptt) {
+               DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+               return -EAGAIN;
+       }
+
+       _qed_iscsi_get_tstats(p_hwfn, p_ptt, stats);
+       _qed_iscsi_get_mstats(p_hwfn, p_ptt, stats);
+       _qed_iscsi_get_ustats(p_hwfn, p_ptt, stats);
+
+       _qed_iscsi_get_xstats(p_hwfn, p_ptt, stats);
+       _qed_iscsi_get_ystats(p_hwfn, p_ptt, stats);
+       _qed_iscsi_get_pstats(p_hwfn, p_ptt, stats);
+
+       qed_ptt_release(p_hwfn, p_ptt);
+
+       return 0;
+}
+
+struct qed_hash_iscsi_con {
+       struct hlist_node node;
+       struct qed_iscsi_conn *con;
+};
+
+static int qed_fill_iscsi_dev_info(struct qed_dev *cdev,
+                                  struct qed_dev_iscsi_info *info)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       int rc;
+
+       memset(info, 0, sizeof(*info));
+       rc = qed_fill_dev_info(cdev, &info->common);
+
+       info->primary_dbq_rq_addr =
+           qed_iscsi_get_primary_bdq_prod(hwfn, BDQ_ID_RQ);
+       info->secondary_bdq_rq_addr =
+           qed_iscsi_get_secondary_bdq_prod(hwfn, BDQ_ID_RQ);
+
+       info->num_cqs = FEAT_NUM(hwfn, QED_ISCSI_CQ);
+
+       return rc;
+}
+
+static void qed_register_iscsi_ops(struct qed_dev *cdev,
+                                  struct qed_iscsi_cb_ops *ops, void *cookie)
+{
+       cdev->protocol_ops.iscsi = ops;
+       cdev->ops_cookie = cookie;
+}
+
+static struct qed_hash_iscsi_con *qed_iscsi_get_hash(struct qed_dev *cdev,
+                                                    u32 handle)
+{
+       struct qed_hash_iscsi_con *hash_con = NULL;
+
+       if (!(cdev->flags & QED_FLAG_STORAGE_STARTED))
+               return NULL;
+
+       hash_for_each_possible(cdev->connections, hash_con, node, handle) {
+               if (hash_con->con->icid == handle)
+                       break;
+       }
+
+       if (!hash_con || (hash_con->con->icid != handle))
+               return NULL;
+
+       return hash_con;
+}
+
+static int qed_iscsi_stop(struct qed_dev *cdev)
+{
+       int rc;
+
+       if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) {
+               DP_NOTICE(cdev, "iscsi already stopped\n");
+               return 0;
+       }
+
+       if (!hash_empty(cdev->connections)) {
+               DP_NOTICE(cdev,
+                         "Can't stop iscsi - not all connections were returned\n");
+               return -EINVAL;
+       }
+
+       /* Stop the iSCSI function */
+       rc = qed_sp_iscsi_func_stop(QED_LEADING_HWFN(cdev),
+                                   QED_SPQ_MODE_EBLOCK, NULL);
+       cdev->flags &= ~QED_FLAG_STORAGE_STARTED;
+
+       return rc;
+}
+
+static int qed_iscsi_start(struct qed_dev *cdev,
+                          struct qed_iscsi_tid *tasks,
+                          void *event_context,
+                          iscsi_event_cb_t async_event_cb)
+{
+       int rc;
+       struct qed_tid_mem *tid_info;
+
+       if (cdev->flags & QED_FLAG_STORAGE_STARTED) {
+               DP_NOTICE(cdev, "iscsi already started\n");
+               return 0;
+       }
+
+       rc = qed_sp_iscsi_func_start(QED_LEADING_HWFN(cdev),
+                                    QED_SPQ_MODE_EBLOCK, NULL, event_context,
+                                    async_event_cb);
+       if (rc) {
+               DP_NOTICE(cdev, "Failed to start iscsi\n");
+               return rc;
+       }
+
+       cdev->flags |= QED_FLAG_STORAGE_STARTED;
+       hash_init(cdev->connections);
+
+       if (!tasks)
+               return 0;
+
+       tid_info = kzalloc(sizeof(*tid_info), GFP_KERNEL);
+
+       if (!tid_info) {
+               qed_iscsi_stop(cdev);
+               return -ENOMEM;
+       }
+
+       rc = qed_cxt_get_tid_mem_info(QED_LEADING_HWFN(cdev),
+                                     tid_info);
+       if (rc) {
+               DP_NOTICE(cdev, "Failed to gather task information\n");
+               qed_iscsi_stop(cdev);
+               kfree(tid_info);
+               return rc;
+       }
+
+       /* Fill task information */
+       tasks->size = tid_info->tid_size;
+       tasks->num_tids_per_block = tid_info->num_tids_per_block;
+       memcpy(tasks->blocks, tid_info->blocks,
+              MAX_TID_BLOCKS_ISCSI * sizeof(u8 *));
+
+       kfree(tid_info);
+
+       return 0;
+}
+
+static int qed_iscsi_acquire_conn(struct qed_dev *cdev,
+                                 u32 *handle,
+                                 u32 *fw_cid, void __iomem **p_doorbell)
+{
+       struct qed_hash_iscsi_con *hash_con;
+       int rc;
+
+       /* Allocate a hashed connection */
+       hash_con = kzalloc(sizeof(*hash_con), GFP_ATOMIC);
+       if (!hash_con)
+               return -ENOMEM;
+
+       /* Acquire the connection */
+       rc = qed_iscsi_acquire_connection(QED_LEADING_HWFN(cdev), NULL,
+                                         &hash_con->con);
+       if (rc) {
+               DP_NOTICE(cdev, "Failed to acquire connection\n");
+               kfree(hash_con);
+               return rc;
+       }
+
+       /* Add the connection to the hash table */
+       *handle = hash_con->con->icid;
+       *fw_cid = hash_con->con->fw_cid;
+       hash_add(cdev->connections, &hash_con->node, *handle);
+
+       if (p_doorbell)
+               *p_doorbell = qed_iscsi_get_db_addr(QED_LEADING_HWFN(cdev),
+                                                   *handle);
+
+       return 0;
+}
+
+static int qed_iscsi_release_conn(struct qed_dev *cdev, u32 handle)
+{
+       struct qed_hash_iscsi_con *hash_con;
+
+       hash_con = qed_iscsi_get_hash(cdev, handle);
+       if (!hash_con) {
+               DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+                         handle);
+               return -EINVAL;
+       }
+
+       hlist_del(&hash_con->node);
+       qed_iscsi_release_connection(QED_LEADING_HWFN(cdev), hash_con->con);
+       kfree(hash_con);
+
+       return 0;
+}
+
+static int qed_iscsi_offload_conn(struct qed_dev *cdev,
+                                 u32 handle,
+                                 struct qed_iscsi_params_offload *conn_info)
+{
+       struct qed_hash_iscsi_con *hash_con;
+       struct qed_iscsi_conn *con;
+
+       hash_con = qed_iscsi_get_hash(cdev, handle);
+       if (!hash_con) {
+               DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+                         handle);
+               return -EINVAL;
+       }
+
+       /* Update the connection with information from the params */
+       con = hash_con->con;
+
+       ether_addr_copy(con->local_mac, conn_info->src.mac);
+       ether_addr_copy(con->remote_mac, conn_info->dst.mac);
+       memcpy(con->local_ip, conn_info->src.ip, sizeof(con->local_ip));
+       memcpy(con->remote_ip, conn_info->dst.ip, sizeof(con->remote_ip));
+       con->local_port = conn_info->src.port;
+       con->remote_port = conn_info->dst.port;
+
+       con->layer_code = conn_info->layer_code;
+       con->sq_pbl_addr = conn_info->sq_pbl_addr;
+       con->initial_ack = conn_info->initial_ack;
+       con->vlan_id = conn_info->vlan_id;
+       con->tcp_flags = conn_info->tcp_flags;
+       con->ip_version = conn_info->ip_version;
+       con->default_cq = conn_info->default_cq;
+       con->ka_max_probe_cnt = conn_info->ka_max_probe_cnt;
+       con->dup_ack_theshold = conn_info->dup_ack_theshold;
+       con->rcv_next = conn_info->rcv_next;
+       con->snd_una = conn_info->snd_una;
+       con->snd_next = conn_info->snd_next;
+       con->snd_max = conn_info->snd_max;
+       con->snd_wnd = conn_info->snd_wnd;
+       con->rcv_wnd = conn_info->rcv_wnd;
+       con->snd_wl1 = conn_info->snd_wl1;
+       con->cwnd = conn_info->cwnd;
+       con->ss_thresh = conn_info->ss_thresh;
+       con->srtt = conn_info->srtt;
+       con->rtt_var = conn_info->rtt_var;
+       con->ts_time = conn_info->ts_time;
+       con->ts_recent = conn_info->ts_recent;
+       con->ts_recent_age = conn_info->ts_recent_age;
+       con->total_rt = conn_info->total_rt;
+       con->ka_timeout_delta = conn_info->ka_timeout_delta;
+       con->rt_timeout_delta = conn_info->rt_timeout_delta;
+       con->dup_ack_cnt = conn_info->dup_ack_cnt;
+       con->snd_wnd_probe_cnt = conn_info->snd_wnd_probe_cnt;
+       con->ka_probe_cnt = conn_info->ka_probe_cnt;
+       con->rt_cnt = conn_info->rt_cnt;
+       con->flow_label = conn_info->flow_label;
+       con->ka_timeout = conn_info->ka_timeout;
+       con->ka_interval = conn_info->ka_interval;
+       con->max_rt_time = conn_info->max_rt_time;
+       con->initial_rcv_wnd = conn_info->initial_rcv_wnd;
+       con->ttl = conn_info->ttl;
+       con->tos_or_tc = conn_info->tos_or_tc;
+       con->remote_port = conn_info->remote_port;
+       con->local_port = conn_info->local_port;
+       con->mss = conn_info->mss;
+       con->snd_wnd_scale = conn_info->snd_wnd_scale;
+       con->rcv_wnd_scale = conn_info->rcv_wnd_scale;
+       con->ts_ticks_per_second = conn_info->ts_ticks_per_second;
+       con->da_timeout_value = conn_info->da_timeout_value;
+       con->ack_frequency = conn_info->ack_frequency;
+
+       /* Set default values on other connection fields */
+       con->offl_flags = 0x1;
+
+       return qed_sp_iscsi_conn_offload(QED_LEADING_HWFN(cdev), con,
+                                        QED_SPQ_MODE_EBLOCK, NULL);
+}
+
+static int qed_iscsi_update_conn(struct qed_dev *cdev,
+                                u32 handle,
+                                struct qed_iscsi_params_update *conn_info)
+{
+       struct qed_hash_iscsi_con *hash_con;
+       struct qed_iscsi_conn *con;
+
+       hash_con = qed_iscsi_get_hash(cdev, handle);
+       if (!hash_con) {
+               DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+                         handle);
+               return -EINVAL;
+       }
+
+       /* Update the connection with information from the params */
+       con = hash_con->con;
+       con->update_flag = conn_info->update_flag;
+       con->max_seq_size = conn_info->max_seq_size;
+       con->max_recv_pdu_length = conn_info->max_recv_pdu_length;
+       con->max_send_pdu_length = conn_info->max_send_pdu_length;
+       con->first_seq_length = conn_info->first_seq_length;
+       con->exp_stat_sn = conn_info->exp_stat_sn;
+
+       return qed_sp_iscsi_conn_update(QED_LEADING_HWFN(cdev), con,
+                                       QED_SPQ_MODE_EBLOCK, NULL);
+}
+
+static int qed_iscsi_clear_conn_sq(struct qed_dev *cdev, u32 handle)
+{
+       struct qed_hash_iscsi_con *hash_con;
+
+       hash_con = qed_iscsi_get_hash(cdev, handle);
+       if (!hash_con) {
+               DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+                         handle);
+               return -EINVAL;
+       }
+
+       return qed_sp_iscsi_conn_clear_sq(QED_LEADING_HWFN(cdev),
+                                         hash_con->con,
+                                         QED_SPQ_MODE_EBLOCK, NULL);
+}
+
+static int qed_iscsi_destroy_conn(struct qed_dev *cdev,
+                                 u32 handle, u8 abrt_conn)
+{
+       struct qed_hash_iscsi_con *hash_con;
+
+       hash_con = qed_iscsi_get_hash(cdev, handle);
+       if (!hash_con) {
+               DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+                         handle);
+               return -EINVAL;
+       }
+
+       hash_con->con->abortive_dsconnect = abrt_conn;
+
+       return qed_sp_iscsi_conn_terminate(QED_LEADING_HWFN(cdev),
+                                          hash_con->con,
+                                          QED_SPQ_MODE_EBLOCK, NULL);
+}
+
+static int qed_iscsi_stats(struct qed_dev *cdev, struct qed_iscsi_stats *stats)
+{
+       return qed_iscsi_get_stats(QED_LEADING_HWFN(cdev), stats);
+}
+
+void qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
+                                 struct qed_mcp_iscsi_stats *stats)
+{
+       struct qed_iscsi_stats proto_stats;
+
+       /* Retrieve FW statistics */
+       memset(&proto_stats, 0, sizeof(proto_stats));
+       if (qed_iscsi_stats(cdev, &proto_stats)) {
+               DP_VERBOSE(cdev, QED_MSG_STORAGE,
+                          "Failed to collect ISCSI statistics\n");
+               return;
+       }
+
+       /* Translate FW statistics into struct */
+       stats->rx_pdus = proto_stats.iscsi_rx_total_pdu_cnt;
+       stats->tx_pdus = proto_stats.iscsi_tx_total_pdu_cnt;
+       stats->rx_bytes = proto_stats.iscsi_rx_bytes_cnt;
+       stats->tx_bytes = proto_stats.iscsi_tx_bytes_cnt;
+}
+
+static const struct qed_iscsi_ops qed_iscsi_ops_pass = {
+       .common = &qed_common_ops_pass,
+       .ll2 = &qed_ll2_ops_pass,
+       .fill_dev_info = &qed_fill_iscsi_dev_info,
+       .register_ops = &qed_register_iscsi_ops,
+       .start = &qed_iscsi_start,
+       .stop = &qed_iscsi_stop,
+       .acquire_conn = &qed_iscsi_acquire_conn,
+       .release_conn = &qed_iscsi_release_conn,
+       .offload_conn = &qed_iscsi_offload_conn,
+       .update_conn = &qed_iscsi_update_conn,
+       .destroy_conn = &qed_iscsi_destroy_conn,
+       .clear_sq = &qed_iscsi_clear_conn_sq,
+       .get_stats = &qed_iscsi_stats,
+};
+
+const struct qed_iscsi_ops *qed_get_iscsi_ops(void)
+{
+       return &qed_iscsi_ops_pass;
+}
+EXPORT_SYMBOL(qed_get_iscsi_ops);
+
+void qed_put_iscsi_ops(void)
+{
+}
+EXPORT_SYMBOL(qed_put_iscsi_ops);
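
A minimal consumer-side sketch for orientation (editorial; the probe function
is hypothetical, while the ops members and qed_get_iscsi_ops() come from this
patch):

	static const struct qed_iscsi_ops *qed_ops;

	static int example_probe(struct qed_dev *cdev)
	{
		struct qed_dev_iscsi_info info;

		qed_ops = qed_get_iscsi_ops();
		if (!qed_ops)
			return -ENODEV;

		/* Query device capabilities, then start the iSCSI function */
		if (qed_ops->fill_dev_info(cdev, &info))
			return -EINVAL;

		return qed_ops->start(cdev, NULL, NULL, NULL);
	}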
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
new file mode 100644 (file)
index 0000000..ae98f77
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
@@ -0,0 +1,88 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _QED_ISCSI_H
+#define _QED_ISCSI_H
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/qed/tcp_common.h>
+#include <linux/qed/qed_iscsi_if.h>
+#include <linux/qed/qed_chain.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_mcp.h"
+#include "qed_sp.h"
+
+struct qed_iscsi_info {
+       spinlock_t lock; /* Connection resources. */
+       struct list_head free_list;
+       u16 max_num_outstanding_tasks;
+       void *event_context;
+       iscsi_event_cb_t event_cb;
+};
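+
+/* The lock/free_list pair above forms a simple connection pool; a minimal
+ * sketch of the acquire side, assuming the connection struct carries a
+ * list_entry member as in qed_iscsi.c:
+ *
+ *	struct qed_iscsi_conn *p_conn = NULL;
+ *
+ *	spin_lock_bh(&p_info->lock);
+ *	if (!list_empty(&p_info->free_list)) {
+ *		p_conn = list_first_entry(&p_info->free_list,
+ *					  struct qed_iscsi_conn, list_entry);
+ *		list_del(&p_conn->list_entry);
+ *	}
+ *	spin_unlock_bh(&p_info->lock);
+ */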
+
+#ifdef CONFIG_QED_LL2
+extern const struct qed_ll2_ops qed_ll2_ops_pass;
+#endif
+
+#if IS_ENABLED(CONFIG_QED_ISCSI)
+struct qed_iscsi_info *qed_iscsi_alloc(struct qed_hwfn *p_hwfn);
+
+void qed_iscsi_setup(struct qed_hwfn *p_hwfn,
+                    struct qed_iscsi_info *p_iscsi_info);
+
+void qed_iscsi_free(struct qed_hwfn *p_hwfn,
+                   struct qed_iscsi_info *p_iscsi_info);
+
+/**
+ * @brief - Fills the provided struct with iSCSI protocol statistics.
+ *
+ * @param cdev
+ * @param stats - struct to be filled with the statistics.
+ */
+void qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
+                                 struct qed_mcp_iscsi_stats *stats);
+#else /* IS_ENABLED(CONFIG_QED_ISCSI) */
+static inline struct qed_iscsi_info *qed_iscsi_alloc(
+               struct qed_hwfn *p_hwfn) { return NULL; }
+static inline void qed_iscsi_setup(struct qed_hwfn *p_hwfn,
+                                  struct qed_iscsi_info *p_iscsi_info) {}
+static inline void qed_iscsi_free(struct qed_hwfn *p_hwfn,
+                                  struct qed_iscsi_info *p_iscsi_info) {}
+static inline void
+qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
+                            struct qed_mcp_iscsi_stats *stats) {}
+#endif /* IS_ENABLED(CONFIG_QED_ISCSI) */
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 401e738543b5080a0fe1f394c19f18728c533c59..eb5e280eb1045aeec3930760553517242aa7a27a 100644 (file)
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #include <linux/types.h>
@@ -23,6 +47,7 @@
 #include <linux/workqueue.h>
 #include <linux/bitops.h>
 #include <linux/bug.h>
+#include <linux/vmalloc.h>
 #include "qed.h"
 #include <linux/qed/qed_chain.h>
 #include "qed_cxt.h"
 #define QED_MAX_SGES_NUM 16
 #define CRC32_POLY 0x1edc6f41
 
+void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
+                              struct qed_queue_cid *p_cid)
+{
+       /* VFs' CIDs are 0-based in PF-view, and uninitialized on VF */
+       if (!p_cid->is_vf && IS_PF(p_hwfn->cdev))
+               qed_cxt_release_cid(p_hwfn, p_cid->cid);
+       vfree(p_cid);
+}
+
+/* This internal variant is only meant to be called directly by PFs
+ * initializing CIDs for their VFs.
+ */
+struct qed_queue_cid *
+_qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
+                     u16 opaque_fid,
+                     u32 cid,
+                     u8 vf_qid,
+                     struct qed_queue_start_common_params *p_params)
+{
+       bool b_is_same = (p_hwfn->hw_info.opaque_fid == opaque_fid);
+       struct qed_queue_cid *p_cid;
+       int rc;
+
+       p_cid = vmalloc(sizeof(*p_cid));
+       if (!p_cid)
+               return NULL;
+       memset(p_cid, 0, sizeof(*p_cid));
+
+       p_cid->opaque_fid = opaque_fid;
+       p_cid->cid = cid;
+       p_cid->vf_qid = vf_qid;
+       p_cid->rel = *p_params;
+       p_cid->p_owner = p_hwfn;
+
+       /* Don't try calculating the absolute indices for VFs */
+       if (IS_VF(p_hwfn->cdev)) {
+               p_cid->abs = p_cid->rel;
+               goto out;
+       }
+
+       /* Calculate the engine-absolute indices of the resources.
+        * This would guarantee they're valid later on.
+        * In some cases [SBs] we already have the right values.
+        */
+       rc = qed_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
+       if (rc)
+               goto fail;
+
+       rc = qed_fw_l2_queue(p_hwfn, p_cid->rel.queue_id, &p_cid->abs.queue_id);
+       if (rc)
+               goto fail;
+
+       /* In case of a PF configuring its VF's queues, the stats-id is already
+        * absolute [since there's a single index that's suitable per-VF].
+        */
+       if (b_is_same) {
+               rc = qed_fw_vport(p_hwfn, p_cid->rel.stats_id,
+                                 &p_cid->abs.stats_id);
+               if (rc)
+                       goto fail;
+       } else {
+               p_cid->abs.stats_id = p_cid->rel.stats_id;
+       }
+
+       /* SBs relevant information was already provided as absolute */
+       p_cid->abs.sb = p_cid->rel.sb;
+       p_cid->abs.sb_idx = p_cid->rel.sb_idx;
+
+       /* This is tricky - we're actually interested in whether this is a PF
+        * entry meant for the VF.
+        */
+       if (!b_is_same)
+               p_cid->is_vf = true;
+out:
+       DP_VERBOSE(p_hwfn,
+                  QED_MSG_SP,
+                  "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
+                  p_cid->opaque_fid,
+                  p_cid->cid,
+                  p_cid->rel.vport_id,
+                  p_cid->abs.vport_id,
+                  p_cid->rel.queue_id,
+                  p_cid->abs.queue_id,
+                  p_cid->rel.stats_id,
+                  p_cid->abs.stats_id, p_cid->abs.sb, p_cid->abs.sb_idx);
+
+       return p_cid;
+
+fail:
+       vfree(p_cid);
+       return NULL;
+}
+
+static struct qed_queue_cid *
+qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn, u16 opaque_fid,
+                     struct qed_queue_start_common_params *p_params)
+{
+       struct qed_queue_cid *p_cid;
+       u32 cid = 0;
+
+       /* Get a unique firmware CID for this queue, in case it's a PF.
+        * VFs don't need a CID as the queue configuration will be done
+        * by the PF.
+        */
+       if (IS_PF(p_hwfn->cdev)) {
+               if (qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid)) {
+                       DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
+                       return NULL;
+               }
+       }
+
+       p_cid = _qed_eth_queue_to_cid(p_hwfn, opaque_fid, cid, 0, p_params);
+       if (!p_cid && IS_PF(p_hwfn->cdev))
+               qed_cxt_release_cid(p_hwfn, cid);
+
+       return p_cid;
+}
+
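+/* Lifecycle note: every qed_queue_cid returned by qed_eth_queue_to_cid()
+ * must eventually reach qed_eth_queue_cid_release(), either on queue stop
+ * or immediately on a failed start, so that a PF's firmware CID is always
+ * handed back to the context manager.
+ */
+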
 int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
                           struct qed_sp_vport_start_params *p_params)
 {
@@ -52,7 +196,7 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
        u16 rx_mode = 0;
 
        rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
-       if (rc != 0)
+       if (rc)
                return rc;
 
        memset(&init_data, 0, sizeof(init_data));
@@ -70,6 +214,7 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
        p_ramrod->vport_id      = abs_vport_id;
 
        p_ramrod->mtu                   = cpu_to_le16(p_params->mtu);
+       p_ramrod->handle_ptp_pkts       = p_params->handle_ptp_pkts;
        p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
        p_ramrod->drop_ttl0_en          = p_params->drop_ttl0;
        p_ramrod->untagged              = p_params->only_untagged;
@@ -80,8 +225,7 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
        p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);
 
        /* TPA related fields */
-       memset(&p_ramrod->tpa_param, 0,
-              sizeof(struct eth_vport_tpa_param));
+       memset(&p_ramrod->tpa_param, 0, sizeof(struct eth_vport_tpa_param));
 
        p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;
 
@@ -102,6 +246,9 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
 
        p_ramrod->tx_switching_en = p_params->tx_switching;
 
+       p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac;
+       p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;
+
        /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
        p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
                                                  p_params->concrete_fid);
@@ -109,8 +256,8 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
        return qed_spq_post(p_hwfn, p_ent, NULL);
 }
 
-int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
-                      struct qed_sp_vport_start_params *p_params)
+static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
+                             struct qed_sp_vport_start_params *p_params)
 {
        if (IS_VF(p_hwfn->cdev)) {
                return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id,
@@ -127,76 +274,103 @@ int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
 static int
 qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
                        struct vport_update_ramrod_data *p_ramrod,
-                       struct qed_rss_params *p_params)
+                       struct qed_rss_params *p_rss)
 {
-       struct eth_vport_rss_config *rss = &p_ramrod->rss_config;
-       u16 abs_l2_queue = 0, capabilities = 0;
-       int rc = 0, i;
+       struct eth_vport_rss_config *p_config;
+       u16 capabilities = 0;
+       int i, table_size;
+       int rc = 0;
 
-       if (!p_params) {
+       if (!p_rss) {
                p_ramrod->common.update_rss_flg = 0;
                return rc;
        }
+       p_config = &p_ramrod->rss_config;
 
-       BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE !=
-                    ETH_RSS_IND_TABLE_ENTRIES_NUM);
+       BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE != ETH_RSS_IND_TABLE_ENTRIES_NUM);
 
-       rc = qed_fw_rss_eng(p_hwfn, p_params->rss_eng_id, &rss->rss_id);
+       rc = qed_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id);
        if (rc)
                return rc;
 
-       p_ramrod->common.update_rss_flg = p_params->update_rss_config;
-       rss->update_rss_capabilities = p_params->update_rss_capabilities;
-       rss->update_rss_ind_table = p_params->update_rss_ind_table;
-       rss->update_rss_key = p_params->update_rss_key;
+       p_ramrod->common.update_rss_flg = p_rss->update_rss_config;
+       p_config->update_rss_capabilities = p_rss->update_rss_capabilities;
+       p_config->update_rss_ind_table = p_rss->update_rss_ind_table;
+       p_config->update_rss_key = p_rss->update_rss_key;
 
-       rss->rss_mode = p_params->rss_enable ?
-                       ETH_VPORT_RSS_MODE_REGULAR :
-                       ETH_VPORT_RSS_MODE_DISABLED;
+       p_config->rss_mode = p_rss->rss_enable ?
+                            ETH_VPORT_RSS_MODE_REGULAR :
+                            ETH_VPORT_RSS_MODE_DISABLED;
 
        SET_FIELD(capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
-                 !!(p_params->rss_caps & QED_RSS_IPV4));
+                 !!(p_rss->rss_caps & QED_RSS_IPV4));
        SET_FIELD(capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
-                 !!(p_params->rss_caps & QED_RSS_IPV6));
+                 !!(p_rss->rss_caps & QED_RSS_IPV6));
        SET_FIELD(capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
-                 !!(p_params->rss_caps & QED_RSS_IPV4_TCP));
+                 !!(p_rss->rss_caps & QED_RSS_IPV4_TCP));
        SET_FIELD(capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
-                 !!(p_params->rss_caps & QED_RSS_IPV6_TCP));
+                 !!(p_rss->rss_caps & QED_RSS_IPV6_TCP));
        SET_FIELD(capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
-                 !!(p_params->rss_caps & QED_RSS_IPV4_UDP));
+                 !!(p_rss->rss_caps & QED_RSS_IPV4_UDP));
        SET_FIELD(capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
-                 !!(p_params->rss_caps & QED_RSS_IPV6_UDP));
-       rss->tbl_size = p_params->rss_table_size_log;
+                 !!(p_rss->rss_caps & QED_RSS_IPV6_UDP));
+       p_config->tbl_size = p_rss->rss_table_size_log;
 
-       rss->capabilities = cpu_to_le16(capabilities);
+       p_config->capabilities = cpu_to_le16(capabilities);
 
        DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
                   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
                   p_ramrod->common.update_rss_flg,
-                  rss->rss_mode, rss->update_rss_capabilities,
-                  capabilities, rss->update_rss_ind_table,
-                  rss->update_rss_key);
+                  p_config->rss_mode,
+                  p_config->update_rss_capabilities,
+                  p_config->capabilities,
+                  p_config->update_rss_ind_table, p_config->update_rss_key);
 
-       for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
-               rc = qed_fw_l2_queue(p_hwfn,
-                                    (u8)p_params->rss_ind_table[i],
-                                    &abs_l2_queue);
-               if (rc)
-                       return rc;
+       table_size = min_t(int, QED_RSS_IND_TABLE_SIZE,
+                          1 << p_config->tbl_size);
+       for (i = 0; i < table_size; i++) {
+               struct qed_queue_cid *p_queue = p_rss->rss_ind_table[i];
 
-               rss->indirection_table[i] = cpu_to_le16(abs_l2_queue);
-               DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "i= %d, queue = %d\n",
-                          i, rss->indirection_table[i]);
+               if (!p_queue)
+                       return -EINVAL;
+
+               p_config->indirection_table[i] =
+                   cpu_to_le16(p_queue->abs.queue_id);
+       }
+
+       DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
+                  "Configured RSS indirection table [%d entries]:\n",
+                  table_size);
+       for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i += 0x10) {
+               DP_VERBOSE(p_hwfn,
+                          NETIF_MSG_IFUP,
+                          "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
+                          le16_to_cpu(p_config->indirection_table[i]),
+                          le16_to_cpu(p_config->indirection_table[i + 1]),
+                          le16_to_cpu(p_config->indirection_table[i + 2]),
+                          le16_to_cpu(p_config->indirection_table[i + 3]),
+                          le16_to_cpu(p_config->indirection_table[i + 4]),
+                          le16_to_cpu(p_config->indirection_table[i + 5]),
+                          le16_to_cpu(p_config->indirection_table[i + 6]),
+                          le16_to_cpu(p_config->indirection_table[i + 7]),
+                          le16_to_cpu(p_config->indirection_table[i + 8]),
+                          le16_to_cpu(p_config->indirection_table[i + 9]),
+                          le16_to_cpu(p_config->indirection_table[i + 10]),
+                          le16_to_cpu(p_config->indirection_table[i + 11]),
+                          le16_to_cpu(p_config->indirection_table[i + 12]),
+                          le16_to_cpu(p_config->indirection_table[i + 13]),
+                          le16_to_cpu(p_config->indirection_table[i + 14]),
+                          le16_to_cpu(p_config->indirection_table[i + 15]));
        }
 
        for (i = 0; i < 10; i++)
-               rss->rss_key[i] = cpu_to_le32(p_params->rss_key[i]);
+               p_config->rss_key[i] = cpu_to_le32(p_rss->rss_key[i]);
 
        return rc;
 }
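 
 /* Sizing note: the indirection table is programmed with
  * min(QED_RSS_IND_TABLE_SIZE, 1 << rss_table_size_log) entries; e.g. a log
  * size of 7 would select 1 << 7 = 128 entries, and the BUILD_BUG_ON above
  * pins QED_RSS_IND_TABLE_SIZE to the firmware's table size.
  */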
@@ -306,14 +480,14 @@ qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
        memset(&p_ramrod->approx_mcast.bins, 0,
               sizeof(p_ramrod->approx_mcast.bins));
 
-       if (p_params->update_approx_mcast_flg) {
-               p_ramrod->common.update_approx_mcast_flg = 1;
-               for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
-                       u32 *p_bins = (u32 *)p_params->bins;
-                       __le32 val = cpu_to_le32(p_bins[i]);
+       if (!p_params->update_approx_mcast_flg)
+               return;
 
-                       p_ramrod->approx_mcast.bins[i] = val;
-               }
+       p_ramrod->common.update_approx_mcast_flg = 1;
+       for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
+               u32 *p_bins = (u32 *)p_params->bins;
+
+               p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]);
        }
 }
 
@@ -336,7 +510,7 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
        }
 
        rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
-       if (rc != 0)
+       if (rc)
                return rc;
 
        memset(&init_data, 0, sizeof(init_data));
@@ -361,8 +535,8 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
        p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
        p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
        p_cmn->accept_any_vlan = p_params->accept_any_vlan;
-       p_cmn->update_accept_any_vlan_flg =
-                       p_params->update_accept_any_vlan_flg;
+       val = p_params->update_accept_any_vlan_flg;
+       p_cmn->update_accept_any_vlan_flg = val;
 
        p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
        val = p_params->update_inner_vlan_removal_flg;
@@ -411,7 +585,7 @@ int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id)
                return qed_vf_pf_vport_stop(p_hwfn);
 
        rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
-       if (rc != 0)
+       if (rc)
                return rc;
 
        memset(&init_data, 0, sizeof(init_data));
@@ -476,7 +650,7 @@ static int qed_filter_accept_cmd(struct qed_dev *cdev,
 
                rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
                                         comp_mode, p_comp_data);
-               if (rc != 0) {
+               if (rc) {
                        DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
                        return rc;
                }
@@ -494,60 +668,26 @@ static int qed_filter_accept_cmd(struct qed_dev *cdev,
        return 0;
 }
 
-static int qed_sp_release_queue_cid(
-       struct qed_hwfn *p_hwfn,
-       struct qed_hw_cid_data *p_cid_data)
-{
-       if (!p_cid_data->b_cid_allocated)
-               return 0;
-
-       qed_cxt_release_cid(p_hwfn, p_cid_data->cid);
-
-       p_cid_data->b_cid_allocated = false;
-
-       return 0;
-}
-
-int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
-                               u16 opaque_fid,
-                               u32 cid,
-                               struct qed_queue_start_common_params *params,
-                               u8 stats_id,
-                               u16 bd_max_bytes,
-                               dma_addr_t bd_chain_phys_addr,
-                               dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
+int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
+                            struct qed_queue_cid *p_cid,
+                            u16 bd_max_bytes,
+                            dma_addr_t bd_chain_phys_addr,
+                            dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
 {
        struct rx_queue_start_ramrod_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
-       struct qed_hw_cid_data *p_rx_cid;
-       u16 abs_rx_q_id = 0;
-       u8 abs_vport_id = 0;
        int rc = -EINVAL;
 
-       /* Store information for the stop */
-       p_rx_cid                = &p_hwfn->p_rx_cids[params->queue_id];
-       p_rx_cid->cid           = cid;
-       p_rx_cid->opaque_fid    = opaque_fid;
-       p_rx_cid->vport_id      = params->vport_id;
-
-       rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_vport_id);
-       if (rc != 0)
-               return rc;
-
-       rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_rx_q_id);
-       if (rc != 0)
-               return rc;
-
        DP_VERBOSE(p_hwfn, QED_MSG_SP,
-                  "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
-                  opaque_fid, cid, params->queue_id, params->vport_id,
-                  params->sb);
+                  "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
+                  p_cid->opaque_fid, p_cid->cid,
+                  p_cid->abs.queue_id, p_cid->abs.vport_id, p_cid->abs.sb);
 
        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
-       init_data.cid = cid;
-       init_data.opaque_fid = opaque_fid;
+       init_data.cid = p_cid->cid;
+       init_data.opaque_fid = p_cid->opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 
        rc = qed_sp_init_request(p_hwfn, &p_ent,
@@ -558,97 +698,99 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
 
        p_ramrod = &p_ent->ramrod.rx_queue_start;
 
-       p_ramrod->sb_id                 = cpu_to_le16(params->sb);
-       p_ramrod->sb_index              = params->sb_idx;
-       p_ramrod->vport_id              = abs_vport_id;
-       p_ramrod->stats_counter_id      = stats_id;
-       p_ramrod->rx_queue_id           = cpu_to_le16(abs_rx_q_id);
-       p_ramrod->complete_cqe_flg      = 0;
-       p_ramrod->complete_event_flg    = 1;
+       p_ramrod->sb_id = cpu_to_le16(p_cid->abs.sb);
+       p_ramrod->sb_index = p_cid->abs.sb_idx;
+       p_ramrod->vport_id = p_cid->abs.vport_id;
+       p_ramrod->stats_counter_id = p_cid->abs.stats_id;
+       p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
+       p_ramrod->complete_cqe_flg = 0;
+       p_ramrod->complete_event_flg = 1;
 
-       p_ramrod->bd_max_bytes  = cpu_to_le16(bd_max_bytes);
+       p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes);
        DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);
 
-       p_ramrod->num_of_pbl_pages      = cpu_to_le16(cqe_pbl_size);
+       p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
        DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
 
-       p_ramrod->vf_rx_prod_index = params->vf_qid;
-       if (params->vf_qid)
+       if (p_cid->is_vf) {
+               p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
                DP_VERBOSE(p_hwfn, QED_MSG_SP,
-                          "Queue is meant for VF rxq[%04x]\n", params->vf_qid);
+                          "Queue%s is meant for VF rxq[%02x]\n",
+                          !!p_cid->b_legacy_vf ? " [legacy]" : "",
+                          p_cid->vf_qid);
+               p_ramrod->vf_rx_prod_use_zone_a = !!p_cid->b_legacy_vf;
+       }
 
        return qed_spq_post(p_hwfn, p_ent, NULL);
 }
 
 static int
-qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
-                         u16 opaque_fid,
-                         struct qed_queue_start_common_params *params,
+qed_eth_pf_rx_queue_start(struct qed_hwfn *p_hwfn,
+                         struct qed_queue_cid *p_cid,
                          u16 bd_max_bytes,
                          dma_addr_t bd_chain_phys_addr,
                          dma_addr_t cqe_pbl_addr,
                          u16 cqe_pbl_size, void __iomem **pp_prod)
 {
-       struct qed_hw_cid_data *p_rx_cid;
        u32 init_prod_val = 0;
-       u16 abs_l2_queue = 0;
-       u8 abs_stats_id = 0;
-       int rc;
-
-       if (IS_VF(p_hwfn->cdev)) {
-               return qed_vf_pf_rxq_start(p_hwfn,
-                                          params->queue_id,
-                                          params->sb,
-                                          params->sb_idx,
-                                          bd_max_bytes,
-                                          bd_chain_phys_addr,
-                                          cqe_pbl_addr, cqe_pbl_size, pp_prod);
-       }
-
-       rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_l2_queue);
-       if (rc != 0)
-               return rc;
-
-       rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_stats_id);
-       if (rc != 0)
-               return rc;
 
-       *pp_prod = (u8 __iomem *)p_hwfn->regview +
-                                GTT_BAR0_MAP_REG_MSDM_RAM +
-                                MSTORM_ETH_PF_PRODS_OFFSET(abs_l2_queue);
+       *pp_prod = p_hwfn->regview +
+                  GTT_BAR0_MAP_REG_MSDM_RAM +
+                  MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id);
 
        /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
        __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
                          (u32 *)(&init_prod_val));
 
+       return qed_eth_rxq_start_ramrod(p_hwfn, p_cid,
+                                       bd_max_bytes,
+                                       bd_chain_phys_addr,
+                                       cqe_pbl_addr, cqe_pbl_size);
+}
+
+static int
+qed_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
+                      u16 opaque_fid,
+                      struct qed_queue_start_common_params *p_params,
+                      u16 bd_max_bytes,
+                      dma_addr_t bd_chain_phys_addr,
+                      dma_addr_t cqe_pbl_addr,
+                      u16 cqe_pbl_size,
+                      struct qed_rxq_start_ret_params *p_ret_params)
+{
+       struct qed_queue_cid *p_cid;
+       int rc;
+
        /* Allocate a CID for the queue */
-       p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id];
-       rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
-                                &p_rx_cid->cid);
-       if (rc) {
-               DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
-               return rc;
-       }
-       p_rx_cid->b_cid_allocated = true;
+       p_cid = qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params);
+       if (!p_cid)
+               return -ENOMEM;
 
-       rc = qed_sp_eth_rxq_start_ramrod(p_hwfn,
-                                        opaque_fid,
-                                        p_rx_cid->cid,
-                                        params,
-                                        abs_stats_id,
+       if (IS_PF(p_hwfn->cdev)) {
+               rc = qed_eth_pf_rx_queue_start(p_hwfn, p_cid,
+                                              bd_max_bytes,
+                                              bd_chain_phys_addr,
+                                              cqe_pbl_addr, cqe_pbl_size,
+                                              &p_ret_params->p_prod);
+       } else {
+               rc = qed_vf_pf_rxq_start(p_hwfn, p_cid,
                                         bd_max_bytes,
                                         bd_chain_phys_addr,
                                         cqe_pbl_addr,
-                                        cqe_pbl_size);
+                                        cqe_pbl_size, &p_ret_params->p_prod);
+       }
 
-       if (rc != 0)
-               qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
+       /* Provide the caller with a reference to use as a handle */
+       if (rc)
+               qed_eth_queue_cid_release(p_hwfn, p_cid);
+       else
+               p_ret_params->p_handle = (void *)p_cid;
 
        return rc;
 }
 
 int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
-                               u16 rx_queue_id,
+                               void **pp_rxq_handles,
                                u8 num_rxqs,
                                u8 complete_cqe_flg,
                                u8 complete_event_flg,
@@ -658,8 +800,7 @@ int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
        struct rx_queue_update_ramrod_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
-       struct qed_hw_cid_data *p_rx_cid;
-       u16 qid, abs_rx_q_id = 0;
+       struct qed_queue_cid *p_cid;
        int rc = -EINVAL;
        u8 i;
 
@@ -668,12 +809,11 @@ int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
        init_data.p_comp_data = p_comp_data;
 
        for (i = 0; i < num_rxqs; i++) {
-               qid = rx_queue_id + i;
-               p_rx_cid = &p_hwfn->p_rx_cids[qid];
+               p_cid = ((struct qed_queue_cid **)pp_rxq_handles)[i];
 
                /* Get SPQ entry */
-               init_data.cid = p_rx_cid->cid;
-               init_data.opaque_fid = p_rx_cid->opaque_fid;
+               init_data.cid = p_cid->cid;
+               init_data.opaque_fid = p_cid->opaque_fid;
 
                rc = qed_sp_init_request(p_hwfn, &p_ent,
                                         ETH_RAMROD_RX_QUEUE_UPDATE,
@@ -682,10 +822,9 @@ int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
                        return rc;
 
                p_ramrod = &p_ent->ramrod.rx_queue_update;
+               p_ramrod->vport_id = p_cid->abs.vport_id;
 
-               qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
-               qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
-               p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
+               p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
                p_ramrod->complete_cqe_flg = complete_cqe_flg;
                p_ramrod->complete_event_flg = complete_event_flg;
 
@@ -697,24 +836,19 @@ int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
        return rc;
 }
 
-int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
-                            u16 rx_queue_id,
-                            bool eq_completion_only, bool cqe_completion)
+static int
+qed_eth_pf_rx_queue_stop(struct qed_hwfn *p_hwfn,
+                        struct qed_queue_cid *p_cid,
+                        bool b_eq_completion_only, bool b_cqe_completion)
 {
-       struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
        struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
-       u16 abs_rx_q_id = 0;
-       int rc = -EINVAL;
-
-       if (IS_VF(p_hwfn->cdev))
-               return qed_vf_pf_rxq_stop(p_hwfn, rx_queue_id, cqe_completion);
+       int rc;
 
-       /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
-       init_data.cid = p_rx_cid->cid;
-       init_data.opaque_fid = p_rx_cid->opaque_fid;
+       init_data.cid = p_cid->cid;
+       init_data.opaque_fid = p_cid->opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 
        rc = qed_sp_init_request(p_hwfn, &p_ent,
@@ -724,62 +858,53 @@ int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
                return rc;
 
        p_ramrod = &p_ent->ramrod.rx_queue_stop;
-
-       qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
-       qed_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id);
-       p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
+       p_ramrod->vport_id = p_cid->abs.vport_id;
+       p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
 
        /* Cleaning the queue requires the completion to arrive there.
         * In addition, VFs require the answer to come as an EQE to the PF.
         */
-       p_ramrod->complete_cqe_flg =
-               (!!(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) &&
-                !eq_completion_only) || cqe_completion;
-       p_ramrod->complete_event_flg =
-               !(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) ||
-               eq_completion_only;
+       p_ramrod->complete_cqe_flg = (!p_cid->is_vf &&
+                                     !b_eq_completion_only) ||
+                                    b_cqe_completion;
+       p_ramrod->complete_event_flg = p_cid->is_vf || b_eq_completion_only;
 
-       rc = qed_spq_post(p_hwfn, p_ent, NULL);
-       if (rc)
-               return rc;
+       return qed_spq_post(p_hwfn, p_ent, NULL);
+}
 
-       return qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
+int qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
+                         void *p_rxq,
+                         bool eq_completion_only, bool cqe_completion)
+{
+       struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_rxq;
+       int rc = -EINVAL;
+
+       if (IS_PF(p_hwfn->cdev))
+               rc = qed_eth_pf_rx_queue_stop(p_hwfn, p_cid,
+                                             eq_completion_only,
+                                             cqe_completion);
+       else
+               rc = qed_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion);
+
+       if (!rc)
+               qed_eth_queue_cid_release(p_hwfn, p_cid);
+       return rc;
 }
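 
 /* Pairing sketch (illustrative, with hypothetical locals): a caller keeps
  * the opaque handle returned by qed_eth_rx_queue_start() and feeds it back
  * on teardown:
  *
  *	struct qed_rxq_start_ret_params ret;
  *
  *	rc = qed_eth_rx_queue_start(p_hwfn, opaque_fid, &params,
  *				    bd_max_bytes, bd_phys_addr,
  *				    cqe_pbl_addr, cqe_pbl_size, &ret);
  *	...
  *	qed_eth_rx_queue_stop(p_hwfn, ret.p_handle, false, false);
  */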
 
-int qed_sp_eth_txq_start_ramrod(struct qed_hwfn  *p_hwfn,
-                               u16  opaque_fid,
-                               u32  cid,
-                               struct qed_queue_start_common_params *p_params,
-                               u8  stats_id,
-                               dma_addr_t pbl_addr,
-                               u16 pbl_size,
-                               union qed_qm_pq_params *p_pq_params)
+int
+qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
+                        struct qed_queue_cid *p_cid,
+                        dma_addr_t pbl_addr, u16 pbl_size, u16 pq_id)
 {
        struct tx_queue_start_ramrod_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
-       struct qed_hw_cid_data *p_tx_cid;
-       u16 pq_id, abs_tx_q_id = 0;
        int rc = -EINVAL;
-       u8 abs_vport_id;
-
-       /* Store information for the stop */
-       p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
-       p_tx_cid->cid           = cid;
-       p_tx_cid->opaque_fid    = opaque_fid;
-
-       rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
-       if (rc)
-               return rc;
-
-       rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_tx_q_id);
-       if (rc)
-               return rc;
 
        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
-       init_data.cid = cid;
-       init_data.opaque_fid = opaque_fid;
+       init_data.cid = p_cid->cid;
+       init_data.opaque_fid = p_cid->opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 
        rc = qed_sp_init_request(p_hwfn, &p_ent,
@@ -788,99 +913,90 @@ int qed_sp_eth_txq_start_ramrod(struct qed_hwfn  *p_hwfn,
        if (rc)
                return rc;
 
-       p_ramrod                = &p_ent->ramrod.tx_queue_start;
-       p_ramrod->vport_id      = abs_vport_id;
+       p_ramrod = &p_ent->ramrod.tx_queue_start;
+       p_ramrod->vport_id = p_cid->abs.vport_id;
+
+       p_ramrod->sb_id = cpu_to_le16(p_cid->abs.sb);
+       p_ramrod->sb_index = p_cid->abs.sb_idx;
+       p_ramrod->stats_counter_id = p_cid->abs.stats_id;
 
-       p_ramrod->sb_id                 = cpu_to_le16(p_params->sb);
-       p_ramrod->sb_index              = p_params->sb_idx;
-       p_ramrod->stats_counter_id      = stats_id;
+       p_ramrod->queue_zone_id = cpu_to_le16(p_cid->abs.queue_id);
+       p_ramrod->same_as_last_id = cpu_to_le16(p_cid->abs.queue_id);
 
-       p_ramrod->queue_zone_id         = cpu_to_le16(abs_tx_q_id);
-       p_ramrod->pbl_size              = cpu_to_le16(pbl_size);
+       p_ramrod->pbl_size = cpu_to_le16(pbl_size);
        DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);
 
-       pq_id                   = qed_get_qm_pq(p_hwfn,
-                                               PROTOCOLID_ETH,
-                                               p_pq_params);
-       p_ramrod->qm_pq_id      = cpu_to_le16(pq_id);
+       p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
 
        return qed_spq_post(p_hwfn, p_ent, NULL);
 }
 
 static int
-qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
-                         u16 opaque_fid,
-                         struct qed_queue_start_common_params *p_params,
+qed_eth_pf_tx_queue_start(struct qed_hwfn *p_hwfn,
+                         struct qed_queue_cid *p_cid,
+                         u8 tc,
                          dma_addr_t pbl_addr,
                          u16 pbl_size, void __iomem **pp_doorbell)
 {
-       struct qed_hw_cid_data *p_tx_cid;
-       union qed_qm_pq_params pq_params;
-       u8 abs_stats_id = 0;
        int rc;
 
-       if (IS_VF(p_hwfn->cdev)) {
-               return qed_vf_pf_txq_start(p_hwfn,
-                                          p_params->queue_id,
-                                          p_params->sb,
-                                          p_params->sb_idx,
-                                          pbl_addr, pbl_size, pp_doorbell);
-       }
 
-       rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id);
+       rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
+                                     pbl_addr, pbl_size,
+                                     qed_get_cm_pq_idx_mcos(p_hwfn, tc));
        if (rc)
                return rc;
 
-       p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
-       memset(p_tx_cid, 0, sizeof(*p_tx_cid));
-       memset(&pq_params, 0, sizeof(pq_params));
+       /* Provide the caller with the necessary return values */
+       *pp_doorbell = p_hwfn->doorbells +
+                      qed_db_addr(p_cid->cid, DQ_DEMS_LEGACY);
 
-       /* Allocate a CID for the queue */
-       rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
-                                &p_tx_cid->cid);
-       if (rc) {
-               DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
-               return rc;
-       }
-       p_tx_cid->b_cid_allocated = true;
+       return 0;
+}
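+
+/* The doorbell pointer returned above is the queue CID's slot in the
+ * doorbell BAR (p_hwfn->doorbells + qed_db_addr()); the L2 driver rings
+ * TX producer updates there directly, without further qed calls.
+ */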
 
-       DP_VERBOSE(p_hwfn, QED_MSG_SP,
-                  "opaque_fid=0x%x, cid=0x%x, tx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
-                  opaque_fid, p_tx_cid->cid,
-                  p_params->queue_id, p_params->vport_id, p_params->sb);
-
-       rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
-                                        opaque_fid,
-                                        p_tx_cid->cid,
-                                        p_params,
-                                        abs_stats_id,
-                                        pbl_addr,
-                                        pbl_size,
-                                        &pq_params);
-
-       *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
-                                    qed_db_addr(p_tx_cid->cid, DQ_DEMS_LEGACY);
+static int
+qed_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
+                      u16 opaque_fid,
+                      struct qed_queue_start_common_params *p_params,
+                      u8 tc,
+                      dma_addr_t pbl_addr,
+                      u16 pbl_size,
+                      struct qed_txq_start_ret_params *p_ret_params)
+{
+       struct qed_queue_cid *p_cid;
+       int rc;
+
+       p_cid = qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params);
+       if (!p_cid)
+               return -EINVAL;
+
+       if (IS_PF(p_hwfn->cdev))
+               rc = qed_eth_pf_tx_queue_start(p_hwfn, p_cid, tc,
+                                              pbl_addr, pbl_size,
+                                              &p_ret_params->p_doorbell);
+       else
+               rc = qed_vf_pf_txq_start(p_hwfn, p_cid,
+                                        pbl_addr, pbl_size,
+                                        &p_ret_params->p_doorbell);
 
        if (rc)
-               qed_sp_release_queue_cid(p_hwfn, p_tx_cid);
+               qed_eth_queue_cid_release(p_hwfn, p_cid);
+       else
+               p_ret_params->p_handle = (void *)p_cid;
 
        return rc;
 }
 
-int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id)
+static int
+qed_eth_pf_tx_queue_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
 {
-       struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
-       int rc = -EINVAL;
-
-       if (IS_VF(p_hwfn->cdev))
-               return qed_vf_pf_txq_stop(p_hwfn, tx_queue_id);
+       int rc;
 
-       /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
-       init_data.cid = p_tx_cid->cid;
-       init_data.opaque_fid = p_tx_cid->opaque_fid;
+       init_data.cid = p_cid->cid;
+       init_data.opaque_fid = p_cid->opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 
        rc = qed_sp_init_request(p_hwfn, &p_ent,
@@ -889,15 +1005,25 @@ int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id)
        if (rc)
                return rc;
 
-       rc = qed_spq_post(p_hwfn, p_ent, NULL);
-       if (rc)
-               return rc;
+       return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_handle)
+{
+       struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_handle;
+       int rc;
+
+       if (IS_PF(p_hwfn->cdev))
+               rc = qed_eth_pf_tx_queue_stop(p_hwfn, p_cid);
+       else
+               rc = qed_vf_pf_txq_stop(p_hwfn, p_cid);
 
-       return qed_sp_release_queue_cid(p_hwfn, p_tx_cid);
+       if (!rc)
+               qed_eth_queue_cid_release(p_hwfn, p_cid);
+       return rc;
 }
 
-static enum eth_filter_action
-qed_filter_action(enum qed_filter_opcode opcode)
+static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode)
 {
        enum eth_filter_action action = MAX_ETH_FILTER_ACTION;
 
@@ -1033,19 +1159,19 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
                p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);
 
        if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
-               p_second_filter->type           = p_first_filter->type;
-               p_second_filter->mac_msb        = p_first_filter->mac_msb;
-               p_second_filter->mac_mid        = p_first_filter->mac_mid;
-               p_second_filter->mac_lsb        = p_first_filter->mac_lsb;
-               p_second_filter->vlan_id        = p_first_filter->vlan_id;
-               p_second_filter->vni            = p_first_filter->vni;
+               p_second_filter->type = p_first_filter->type;
+               p_second_filter->mac_msb = p_first_filter->mac_msb;
+               p_second_filter->mac_mid = p_first_filter->mac_mid;
+               p_second_filter->mac_lsb = p_first_filter->mac_lsb;
+               p_second_filter->vlan_id = p_first_filter->vlan_id;
+               p_second_filter->vni = p_first_filter->vni;
 
                p_first_filter->action = ETH_FILTER_ACTION_REMOVE;
 
                p_first_filter->vport_id = vport_to_remove_from;
 
-               p_second_filter->action         = ETH_FILTER_ACTION_ADD;
-               p_second_filter->vport_id       = vport_to_add_to;
+               p_second_filter->action = ETH_FILTER_ACTION_ADD;
+               p_second_filter->vport_id = vport_to_add_to;
        } else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) {
                p_first_filter->vport_id = vport_to_add_to;
                memcpy(p_second_filter, p_first_filter,
@@ -1086,7 +1212,7 @@ int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
        rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
                                     &p_ramrod, &p_ent,
                                     comp_mode, p_comp_data);
-       if (rc != 0) {
+       if (rc) {
                DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
                return rc;
        }
@@ -1094,10 +1220,8 @@ int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
        p_header->assert_on_error = p_filter_cmd->assert_on_error;
 
        rc = qed_spq_post(p_hwfn, p_ent, NULL);
-       if (rc != 0) {
-               DP_ERR(p_hwfn,
-                      "Unicast filter ADD command failed %d\n",
-                      rc);
+       if (rc) {
+               DP_ERR(p_hwfn, "Unicast filter ADD command failed %d\n", rc);
                return rc;
        }
 
@@ -1136,15 +1260,10 @@ int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
  * Return:
  ******************************************************************************/
 static u32 qed_calc_crc32c(u8 *crc32_packet,
-                          u32 crc32_length,
-                          u32 crc32_seed,
-                          u8 complement)
+                          u32 crc32_length, u32 crc32_seed, u8 complement)
 {
-       u32 byte = 0;
-       u32 bit = 0;
-       u8 msb = 0;
-       u8 current_byte = 0;
-       u32 crc32_result = crc32_seed;
+       u32 byte = 0, bit = 0, crc32_result = crc32_seed;
+       u8 msb = 0, current_byte = 0;
 
        if ((!crc32_packet) ||
            (crc32_length == 0) ||
@@ -1164,9 +1283,7 @@ static u32 qed_calc_crc32c(u8 *crc32_packet,
        return crc32_result;
 }
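 
 /* 0x1edc6f41 (CRC32_POLY above) is the Castagnoli CRC-32C polynomial; this
  * helper computes the checksum bit-serially with an optional final
  * complement instead of using a lookup table.
  */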
 
-static inline u32 qed_crc32c_le(u32 seed,
-                               u8 *mac,
-                               u32 len)
+static u32 qed_crc32c_le(u32 seed, u8 *mac, u32 len)
 {
        u32 packet_buf[2] = { 0 };
 
@@ -1196,17 +1313,14 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
        u8 abs_vport_id = 0;
        int rc, i;
 
-       if (p_filter_cmd->opcode == QED_FILTER_ADD) {
+       if (p_filter_cmd->opcode == QED_FILTER_ADD)
                rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
                                  &abs_vport_id);
-               if (rc)
-                       return rc;
-       } else {
+       else
                rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
                                  &abs_vport_id);
-               if (rc)
-                       return rc;
-       }
+       if (rc)
+               return rc;
 
        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
@@ -1244,11 +1358,11 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
 
                /* Convert to correct endianness */
                for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
+                       struct vport_update_ramrod_mcast *p_ramrod_bins;
                        u32 *p_bins = (u32 *)bins;
-                       struct vport_update_ramrod_mcast *approx_mcast;
 
-                       approx_mcast = &p_ramrod->approx_mcast;
-                       approx_mcast->bins[i] = cpu_to_le32(p_bins[i]);
+                       p_ramrod_bins = &p_ramrod->approx_mcast;
+                       p_ramrod_bins->bins[i] = cpu_to_le32(p_bins[i]);
                }
        }
 
@@ -1286,8 +1400,7 @@ static int qed_filter_mcast_cmd(struct qed_dev *cdev,
                rc = qed_sp_eth_filter_mcast(p_hwfn,
                                             opaque_fid,
                                             p_filter_cmd,
-                                            comp_mode,
-                                            p_comp_data);
+                                            comp_mode, p_comp_data);
        }
        return rc;
 }
@@ -1314,9 +1427,8 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev,
                rc = qed_sp_eth_filter_ucast(p_hwfn,
                                             opaque_fid,
                                             p_filter_cmd,
-                                            comp_mode,
-                                            p_comp_data);
-               if (rc != 0)
+                                            comp_mode, p_comp_data);
+               if (rc)
                        break;
        }
 
@@ -1355,13 +1467,20 @@ static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
        memset(&pstats, 0, sizeof(pstats));
        qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);
 
-       p_stats->tx_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
-       p_stats->tx_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
-       p_stats->tx_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
-       p_stats->tx_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
-       p_stats->tx_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
-       p_stats->tx_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
-       p_stats->tx_err_drop_pkts += HILO_64_REGPAIR(pstats.error_drop_pkts);
+       p_stats->common.tx_ucast_bytes +=
+           HILO_64_REGPAIR(pstats.sent_ucast_bytes);
+       p_stats->common.tx_mcast_bytes +=
+           HILO_64_REGPAIR(pstats.sent_mcast_bytes);
+       p_stats->common.tx_bcast_bytes +=
+           HILO_64_REGPAIR(pstats.sent_bcast_bytes);
+       p_stats->common.tx_ucast_pkts +=
+           HILO_64_REGPAIR(pstats.sent_ucast_pkts);
+       p_stats->common.tx_mcast_pkts +=
+           HILO_64_REGPAIR(pstats.sent_mcast_pkts);
+       p_stats->common.tx_bcast_pkts +=
+           HILO_64_REGPAIR(pstats.sent_bcast_pkts);
+       p_stats->common.tx_err_drop_pkts +=
+           HILO_64_REGPAIR(pstats.error_drop_pkts);
 }
 
 static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
@@ -1387,10 +1506,10 @@ static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
        memset(&tstats, 0, sizeof(tstats));
        qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);
 
-       p_stats->mftag_filter_discards +=
-               HILO_64_REGPAIR(tstats.mftag_filter_discard);
-       p_stats->mac_filter_discards +=
-               HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
+       p_stats->common.mftag_filter_discards +=
+           HILO_64_REGPAIR(tstats.mftag_filter_discard);
+       p_stats->common.mac_filter_discards +=
+           HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
 }
 
 static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
@@ -1424,12 +1543,15 @@ static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
        memset(&ustats, 0, sizeof(ustats));
        qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);
 
-       p_stats->rx_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
-       p_stats->rx_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
-       p_stats->rx_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
-       p_stats->rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
-       p_stats->rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
-       p_stats->rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
+       p_stats->common.rx_ucast_bytes +=
+           HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
+       p_stats->common.rx_mcast_bytes +=
+           HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
+       p_stats->common.rx_bcast_bytes +=
+           HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
+       p_stats->common.rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
+       p_stats->common.rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
+       p_stats->common.rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
 }
 
 static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
@@ -1463,23 +1585,26 @@ static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
        memset(&mstats, 0, sizeof(mstats));
        qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);
 
-       p_stats->no_buff_discards += HILO_64_REGPAIR(mstats.no_buff_discard);
-       p_stats->packet_too_big_discard +=
-               HILO_64_REGPAIR(mstats.packet_too_big_discard);
-       p_stats->ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
-       p_stats->tpa_coalesced_pkts +=
-               HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
-       p_stats->tpa_coalesced_events +=
-               HILO_64_REGPAIR(mstats.tpa_coalesced_events);
-       p_stats->tpa_aborts_num += HILO_64_REGPAIR(mstats.tpa_aborts_num);
-       p_stats->tpa_coalesced_bytes +=
-               HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
+       p_stats->common.no_buff_discards +=
+           HILO_64_REGPAIR(mstats.no_buff_discard);
+       p_stats->common.packet_too_big_discard +=
+           HILO_64_REGPAIR(mstats.packet_too_big_discard);
+       p_stats->common.ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
+       p_stats->common.tpa_coalesced_pkts +=
+           HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
+       p_stats->common.tpa_coalesced_events +=
+           HILO_64_REGPAIR(mstats.tpa_coalesced_events);
+       p_stats->common.tpa_aborts_num +=
+           HILO_64_REGPAIR(mstats.tpa_aborts_num);
+       p_stats->common.tpa_coalesced_bytes +=
+           HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
 }
 
 static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
                                       struct qed_ptt *p_ptt,
                                       struct qed_eth_stats *p_stats)
 {
+       struct qed_eth_stats_common *p_common = &p_stats->common;
        struct port_stats port_stats;
        int j;
 
@@ -1490,54 +1615,75 @@ static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
                        offsetof(struct public_port, stats),
                        sizeof(port_stats));
 
-       p_stats->rx_64_byte_packets             += port_stats.eth.r64;
-       p_stats->rx_65_to_127_byte_packets      += port_stats.eth.r127;
-       p_stats->rx_128_to_255_byte_packets     += port_stats.eth.r255;
-       p_stats->rx_256_to_511_byte_packets     += port_stats.eth.r511;
-       p_stats->rx_512_to_1023_byte_packets    += port_stats.eth.r1023;
-       p_stats->rx_1024_to_1518_byte_packets   += port_stats.eth.r1518;
-       p_stats->rx_1519_to_1522_byte_packets   += port_stats.eth.r1522;
-       p_stats->rx_1519_to_2047_byte_packets   += port_stats.eth.r2047;
-       p_stats->rx_2048_to_4095_byte_packets   += port_stats.eth.r4095;
-       p_stats->rx_4096_to_9216_byte_packets   += port_stats.eth.r9216;
-       p_stats->rx_9217_to_16383_byte_packets  += port_stats.eth.r16383;
-       p_stats->rx_crc_errors                  += port_stats.eth.rfcs;
-       p_stats->rx_mac_crtl_frames             += port_stats.eth.rxcf;
-       p_stats->rx_pause_frames                += port_stats.eth.rxpf;
-       p_stats->rx_pfc_frames                  += port_stats.eth.rxpp;
-       p_stats->rx_align_errors                += port_stats.eth.raln;
-       p_stats->rx_carrier_errors              += port_stats.eth.rfcr;
-       p_stats->rx_oversize_packets            += port_stats.eth.rovr;
-       p_stats->rx_jabbers                     += port_stats.eth.rjbr;
-       p_stats->rx_undersize_packets           += port_stats.eth.rund;
-       p_stats->rx_fragments                   += port_stats.eth.rfrg;
-       p_stats->tx_64_byte_packets             += port_stats.eth.t64;
-       p_stats->tx_65_to_127_byte_packets      += port_stats.eth.t127;
-       p_stats->tx_128_to_255_byte_packets     += port_stats.eth.t255;
-       p_stats->tx_256_to_511_byte_packets     += port_stats.eth.t511;
-       p_stats->tx_512_to_1023_byte_packets    += port_stats.eth.t1023;
-       p_stats->tx_1024_to_1518_byte_packets   += port_stats.eth.t1518;
-       p_stats->tx_1519_to_2047_byte_packets   += port_stats.eth.t2047;
-       p_stats->tx_2048_to_4095_byte_packets   += port_stats.eth.t4095;
-       p_stats->tx_4096_to_9216_byte_packets   += port_stats.eth.t9216;
-       p_stats->tx_9217_to_16383_byte_packets  += port_stats.eth.t16383;
-       p_stats->tx_pause_frames                += port_stats.eth.txpf;
-       p_stats->tx_pfc_frames                  += port_stats.eth.txpp;
-       p_stats->tx_lpi_entry_count             += port_stats.eth.tlpiec;
-       p_stats->tx_total_collisions            += port_stats.eth.tncl;
-       p_stats->rx_mac_bytes                   += port_stats.eth.rbyte;
-       p_stats->rx_mac_uc_packets              += port_stats.eth.rxuca;
-       p_stats->rx_mac_mc_packets              += port_stats.eth.rxmca;
-       p_stats->rx_mac_bc_packets              += port_stats.eth.rxbca;
-       p_stats->rx_mac_frames_ok               += port_stats.eth.rxpok;
-       p_stats->tx_mac_bytes                   += port_stats.eth.tbyte;
-       p_stats->tx_mac_uc_packets              += port_stats.eth.txuca;
-       p_stats->tx_mac_mc_packets              += port_stats.eth.txmca;
-       p_stats->tx_mac_bc_packets              += port_stats.eth.txbca;
-       p_stats->tx_mac_ctrl_frames             += port_stats.eth.txcf;
+       p_common->rx_64_byte_packets += port_stats.eth.r64;
+       p_common->rx_65_to_127_byte_packets += port_stats.eth.r127;
+       p_common->rx_128_to_255_byte_packets += port_stats.eth.r255;
+       p_common->rx_256_to_511_byte_packets += port_stats.eth.r511;
+       p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
+       p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
+       p_common->rx_crc_errors += port_stats.eth.rfcs;
+       p_common->rx_mac_crtl_frames += port_stats.eth.rxcf;
+       p_common->rx_pause_frames += port_stats.eth.rxpf;
+       p_common->rx_pfc_frames += port_stats.eth.rxpp;
+       p_common->rx_align_errors += port_stats.eth.raln;
+       p_common->rx_carrier_errors += port_stats.eth.rfcr;
+       p_common->rx_oversize_packets += port_stats.eth.rovr;
+       p_common->rx_jabbers += port_stats.eth.rjbr;
+       p_common->rx_undersize_packets += port_stats.eth.rund;
+       p_common->rx_fragments += port_stats.eth.rfrg;
+       p_common->tx_64_byte_packets += port_stats.eth.t64;
+       p_common->tx_65_to_127_byte_packets += port_stats.eth.t127;
+       p_common->tx_128_to_255_byte_packets += port_stats.eth.t255;
+       p_common->tx_256_to_511_byte_packets += port_stats.eth.t511;
+       p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
+       p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
+       p_common->tx_pause_frames += port_stats.eth.txpf;
+       p_common->tx_pfc_frames += port_stats.eth.txpp;
+       p_common->rx_mac_bytes += port_stats.eth.rbyte;
+       p_common->rx_mac_uc_packets += port_stats.eth.rxuca;
+       p_common->rx_mac_mc_packets += port_stats.eth.rxmca;
+       p_common->rx_mac_bc_packets += port_stats.eth.rxbca;
+       p_common->rx_mac_frames_ok += port_stats.eth.rxpok;
+       p_common->tx_mac_bytes += port_stats.eth.tbyte;
+       p_common->tx_mac_uc_packets += port_stats.eth.txuca;
+       p_common->tx_mac_mc_packets += port_stats.eth.txmca;
+       p_common->tx_mac_bc_packets += port_stats.eth.txbca;
+       p_common->tx_mac_ctrl_frames += port_stats.eth.txcf;
        for (j = 0; j < 8; j++) {
-               p_stats->brb_truncates  += port_stats.brb.brb_truncate[j];
-               p_stats->brb_discards   += port_stats.brb.brb_discard[j];
+               p_common->brb_truncates += port_stats.brb.brb_truncate[j];
+               p_common->brb_discards += port_stats.brb.brb_discard[j];
+       }
+
+       if (QED_IS_BB(p_hwfn->cdev)) {
+               struct qed_eth_stats_bb *p_bb = &p_stats->bb;
+
+               p_bb->rx_1519_to_1522_byte_packets +=
+                   port_stats.eth.u0.bb0.r1522;
+               p_bb->rx_1519_to_2047_byte_packets +=
+                   port_stats.eth.u0.bb0.r2047;
+               p_bb->rx_2048_to_4095_byte_packets +=
+                   port_stats.eth.u0.bb0.r4095;
+               p_bb->rx_4096_to_9216_byte_packets +=
+                   port_stats.eth.u0.bb0.r9216;
+               p_bb->rx_9217_to_16383_byte_packets +=
+                   port_stats.eth.u0.bb0.r16383;
+               p_bb->tx_1519_to_2047_byte_packets +=
+                   port_stats.eth.u1.bb1.t2047;
+               p_bb->tx_2048_to_4095_byte_packets +=
+                   port_stats.eth.u1.bb1.t4095;
+               p_bb->tx_4096_to_9216_byte_packets +=
+                   port_stats.eth.u1.bb1.t9216;
+               p_bb->tx_9217_to_16383_byte_packets +=
+                   port_stats.eth.u1.bb1.t16383;
+               p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec;
+               p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl;
+       } else {
+               struct qed_eth_stats_ah *p_ah = &p_stats->ah;
+
+               p_ah->rx_1519_to_max_byte_packets +=
+                   port_stats.eth.u0.ah0.r1519_to_max;
+               p_ah->tx_1519_to_max_byte_packets +=
+                   port_stats.eth.u1.ah1.t1519_to_max;
        }
 }
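
For reference, HILO_64_REGPAIR above folds a pair of 32-bit firmware registers into one 64-bit counter before accumulating. A minimal sketch of that folding, with simplified stand-in types (the driver's actual struct layout may differ):

        #include <stdint.h>

        /* Illustrative stand-ins, not the driver's definitions */
        struct regpair {
                uint32_t lo;
                uint32_t hi;
        };

        #define HILO_64_REGPAIR(rp) \
                (((uint64_t)(rp).hi << 32) | (uint64_t)(rp).lo)

        static uint64_t add_counter(uint64_t sum, struct regpair rp)
        {
                /* Fold hi/lo halves into one u64 and accumulate */
                return sum + HILO_64_REGPAIR(rp);
        }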
 
@@ -1590,8 +1736,7 @@ out:
        }
 }
 
-void qed_get_vport_stats(struct qed_dev *cdev,
-                        struct qed_eth_stats *stats)
+void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
 {
        u32 i;
 
@@ -1654,6 +1799,84 @@ void qed_reset_vport_stats(struct qed_dev *cdev)
                _qed_get_vport_stats(cdev, cdev->reset_stats);
 }
 
+static void
+qed_arfs_mode_configure(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+                       struct qed_arfs_config_params *p_cfg_params)
+{
+       if (p_cfg_params->arfs_enable) {
+               qed_set_rfs_mode_enable(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+                                       p_cfg_params->tcp, p_cfg_params->udp,
+                                       p_cfg_params->ipv4, p_cfg_params->ipv6);
+               DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                          "tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s\n",
+                          p_cfg_params->tcp ? "Enable" : "Disable",
+                          p_cfg_params->udp ? "Enable" : "Disable",
+                          p_cfg_params->ipv4 ? "Enable" : "Disable",
+                          p_cfg_params->ipv6 ? "Enable" : "Disable");
+       } else {
+               qed_set_rfs_mode_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+       }
+
+       DP_VERBOSE(p_hwfn, QED_MSG_SP, "Configured ARFS mode : %s\n",
+                  p_cfg_params->arfs_enable ? "Enable" : "Disable");
+}
+
+static int
+qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+                               struct qed_spq_comp_cb *p_cb,
+                               dma_addr_t p_addr, u16 length, u16 qid,
+                               u8 vport_id, bool b_is_add)
+{
+       struct rx_update_gft_filter_data *p_ramrod = NULL;
+       struct qed_spq_entry *p_ent = NULL;
+       struct qed_sp_init_data init_data;
+       u16 abs_rx_q_id = 0;
+       u8 abs_vport_id = 0;
+       int rc = -EINVAL;
+
+       rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+       if (rc)
+               return rc;
+
+       rc = qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
+       if (rc)
+               return rc;
+
+       /* Get SPQ entry */
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.cid = qed_spq_get_cid(p_hwfn);
+
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+       if (p_cb) {
+               init_data.comp_mode = QED_SPQ_MODE_CB;
+               init_data.p_comp_data = p_cb;
+       } else {
+               init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+       }
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                ETH_RAMROD_GFT_UPDATE_FILTER,
+                                PROTOCOLID_ETH, &init_data);
+       if (rc)
+               return rc;
+
+       p_ramrod = &p_ent->ramrod.rx_update_gft;
+       DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr);
+       p_ramrod->pkt_hdr_length = cpu_to_le16(length);
+       p_ramrod->rx_qid_or_action_icid = cpu_to_le16(abs_rx_q_id);
+       p_ramrod->vport_id = abs_vport_id;
+       p_ramrod->filter_type = RFS_FILTER_TYPE;
+       p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER : GFT_DELETE_FILTER;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                  "V[%0x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n",
+                  abs_vport_id, abs_rx_q_id,
+                  b_is_add ? "Adding" : "Removing", (u64)p_addr, length);
+
+       return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
 static int qed_fill_eth_dev_info(struct qed_dev *cdev,
                                 struct qed_dev_eth_info *info)
 {
@@ -1665,24 +1888,50 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
 
        if (IS_PF(cdev)) {
                int max_vf_vlan_filters = 0;
+               int max_vf_mac_filters = 0;
 
                if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
-                       for_each_hwfn(cdev, i)
-                           info->num_queues +=
-                           FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
-                       if (cdev->int_params.fp_msix_cnt)
-                               info->num_queues =
-                                   min_t(u8, info->num_queues,
-                                         cdev->int_params.fp_msix_cnt);
+                       u16 num_queues = 0;
+
+                       /* Since the feature controls only queue-zones,
+                        * make sure we have the contexts [rx, tx, xdp] to
+                        * match.
+                        */
+                       for_each_hwfn(cdev, i) {
+                               struct qed_hwfn *hwfn = &cdev->hwfns[i];
+                               u16 l2_queues = (u16)FEAT_NUM(hwfn,
+                                                             QED_PF_L2_QUE);
+                               u16 cids;
+
+                               cids = hwfn->pf_params.eth_pf_params.num_cons;
+                               num_queues += min_t(u16, l2_queues, cids / 3);
+                       }
+
+                       /* Queues might theoretically be >256, but the
+                        * interrupts' upper limit guarantees it fits in a u8.
+                        */
+                       if (cdev->int_params.fp_msix_cnt) {
+                               u8 irqs = cdev->int_params.fp_msix_cnt;
+
+                               info->num_queues = (u8)min_t(u16,
+                                                            num_queues, irqs);
+                       }
                } else {
                        info->num_queues = cdev->num_hwfns;
                }
 
-               if (IS_QED_SRIOV(cdev))
+               if (IS_QED_SRIOV(cdev)) {
                        max_vf_vlan_filters = cdev->p_iov_info->total_vfs *
                                              QED_ETH_VF_NUM_VLAN_FILTERS;
-               info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN) -
+                       max_vf_mac_filters = cdev->p_iov_info->total_vfs *
+                                            QED_ETH_VF_NUM_MAC_FILTERS;
+               }
+               info->num_vlan_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
+                                                 QED_VLAN) -
                                         max_vf_vlan_filters;
+               info->num_mac_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
+                                                QED_MAC) -
+                                       max_vf_mac_filters;
 
                ether_addr_copy(info->port_mac,
                                cdev->hwfns[0].hw_info.hw_mac_addr);
@@ -1696,14 +1945,18 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
                }
 
                qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
-                                           &info->num_vlan_filters);
+                                           (u8 *)&info->num_vlan_filters);
+               qed_vf_get_num_mac_filters(&cdev->hwfns[0],
+                                          (u8 *)&info->num_mac_filters);
                qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac);
+
+               info->is_legacy = !!cdev->hwfns[0].vf_iov_info->b_pre_fp_hsi;
        }
 
        qed_fill_dev_info(cdev, &info->common);
 
        if (IS_VF(cdev))
-               memset(info->common.hw_mac, 0, ETH_ALEN);
+               eth_zero_addr(info->common.hw_mac);
 
        return 0;
 }
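
The num_queues computation above clamps the L2 queue count by available connection contexts, since each queue needs an rx, a tx and an xdp context. A toy version of the same arithmetic, with assumed numbers:

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                /* Assumed example values, not real hardware limits */
                uint16_t l2_queues = 96;  /* queue-zones granted by the feature */
                uint16_t cids = 192;      /* connection contexts available */

                /* Each queue consumes 3 contexts: rx, tx and xdp */
                uint16_t num_queues = l2_queues < cids / 3 ? l2_queues : cids / 3;

                printf("usable queues: %u\n", num_queues); /* prints 64 */
                return 0;
        }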
@@ -1743,6 +1996,7 @@ static int qed_start_vport(struct qed_dev *cdev,
                start.drop_ttl0 = params->drop_ttl0;
                start.opaque_fid = p_hwfn->hw_info.opaque_fid;
                start.concrete_fid = p_hwfn->hw_info.concrete_fid;
+               start.handle_ptp_pkts = params->handle_ptp_pkts;
                start.vport_id = params->vport_id;
                start.max_buffers_per_cqe = 16;
                start.mtu = params->mtu;
@@ -1753,7 +2007,11 @@ static int qed_start_vport(struct qed_dev *cdev,
                        return rc;
                }
 
-               qed_hw_start_fastpath(p_hwfn);
+               rc = qed_hw_start_fastpath(p_hwfn);
+               if (rc) {
+                       DP_ERR(cdev, "Failed to start VPORT fastpath\n");
+                       return rc;
+               }
 
                DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
                           "Started V-PORT %d with MTU %d\n",
@@ -1766,8 +2024,7 @@ static int qed_start_vport(struct qed_dev *cdev,
        return 0;
 }
 
-static int qed_stop_vport(struct qed_dev *cdev,
-                         u8 vport_id)
+static int qed_stop_vport(struct qed_dev *cdev, u8 vport_id)
 {
        int rc, i;
 
@@ -1775,8 +2032,7 @@ static int qed_stop_vport(struct qed_dev *cdev,
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 
                rc = qed_sp_vport_stop(p_hwfn,
-                                      p_hwfn->hw_info.opaque_fid,
-                                      vport_id);
+                                      p_hwfn->hw_info.opaque_fid, vport_id);
 
                if (rc) {
                        DP_ERR(cdev, "Failed to stop VPORT\n");
@@ -1786,25 +2042,89 @@ static int qed_stop_vport(struct qed_dev *cdev,
        return 0;
 }
 
+static int qed_update_vport_rss(struct qed_dev *cdev,
+                               struct qed_update_vport_rss_params *input,
+                               struct qed_rss_params *rss)
+{
+       int i, fn;
+
+       /* Update configuration with what's correct regardless of CMT */
+       rss->update_rss_config = 1;
+       rss->rss_enable = 1;
+       rss->update_rss_capabilities = 1;
+       rss->update_rss_ind_table = 1;
+       rss->update_rss_key = 1;
+       rss->rss_caps = input->rss_caps;
+       memcpy(rss->rss_key, input->rss_key, QED_RSS_KEY_SIZE * sizeof(u32));
+
+       /* In the regular scenario, we'd simply take the input handlers.
+        * But in CMT, we have to split the handlers according to the
+        * engine they were configured on, and then determine whether
+        * RSS is really required, since two queues on CMT [one per
+        * engine] don't require RSS.
+        */
+       if (cdev->num_hwfns == 1) {
+               memcpy(rss->rss_ind_table,
+                      input->rss_ind_table,
+                      QED_RSS_IND_TABLE_SIZE * sizeof(void *));
+               rss->rss_table_size_log = 7;
+               return 0;
+       }
+
+       /* Start by copying the non-specific information to the 2nd copy */
+       memcpy(&rss[1], &rss[0], sizeof(struct qed_rss_params));
+
+       /* CMT should be round-robin */
+       for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+               struct qed_queue_cid *cid = input->rss_ind_table[i];
+               struct qed_rss_params *t_rss;
+
+               if (cid->p_owner == QED_LEADING_HWFN(cdev))
+                       t_rss = &rss[0];
+               else
+                       t_rss = &rss[1];
+
+               t_rss->rss_ind_table[i / cdev->num_hwfns] = cid;
+       }
+
+       /* Make sure RSS is actually required */
+       for_each_hwfn(cdev, fn) {
+               for (i = 1; i < QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns; i++) {
+                       if (rss[fn].rss_ind_table[i] !=
+                           rss[fn].rss_ind_table[0])
+                               break;
+               }
+               if (i == QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns) {
+                       DP_VERBOSE(cdev, NETIF_MSG_IFUP,
+                                  "CMT - 1 queue per-hwfn; Disabling RSS\n");
+                       return -EINVAL;
+               }
+               rss[fn].rss_table_size_log = 6;
+       }
+
+       return 0;
+}
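
A compact illustration of the CMT split qed_update_vport_rss performs: indirection-table entries alternate round-robin between the two engines, and each engine keeps a compacted half-size table (hence rss_table_size_log = 6, i.e. 64 entries). Types and numbers below are illustrative, not the driver's:

        #include <stdio.h>

        #define IND_TABLE_SIZE 128
        #define NUM_HWFNS 2

        int main(void)
        {
                int cids[IND_TABLE_SIZE];
                int per_engine[NUM_HWFNS][IND_TABLE_SIZE / NUM_HWFNS];
                int i;

                for (i = 0; i < IND_TABLE_SIZE; i++)
                        cids[i] = i % 8;        /* pretend 8 queue handles */

                /* Round-robin: even entries to engine 0, odd to engine 1,
                 * each compacted into a contiguous 64-entry table.
                 */
                for (i = 0; i < IND_TABLE_SIZE; i++)
                        per_engine[i % NUM_HWFNS][i / NUM_HWFNS] = cids[i];

                printf("engine 1, entry 3 -> handle %d\n", per_engine[1][3]);
                return 0;
        }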
+
 static int qed_update_vport(struct qed_dev *cdev,
                            struct qed_update_vport_params *params)
 {
        struct qed_sp_vport_update_params sp_params;
-       struct qed_rss_params sp_rss_params;
-       int rc, i;
+       struct qed_rss_params *rss;
+       int rc = 0, i;
 
        if (!cdev)
                return -ENODEV;
 
+       rss = vzalloc(sizeof(*rss) * cdev->num_hwfns);
+       if (!rss)
+               return -ENOMEM;
+
        memset(&sp_params, 0, sizeof(sp_params));
-       memset(&sp_rss_params, 0, sizeof(sp_rss_params));
 
        /* Translate protocol params into sp params */
        sp_params.vport_id = params->vport_id;
-       sp_params.update_vport_active_rx_flg =
-               params->update_vport_active_flg;
-       sp_params.update_vport_active_tx_flg =
-               params->update_vport_active_flg;
+       sp_params.update_vport_active_rx_flg = params->update_vport_active_flg;
+       sp_params.update_vport_active_tx_flg = params->update_vport_active_flg;
        sp_params.vport_active_rx_flg = params->vport_active_flg;
        sp_params.vport_active_tx_flg = params->vport_active_flg;
        sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
@@ -1813,67 +2133,24 @@ static int qed_update_vport(struct qed_dev *cdev,
        sp_params.update_accept_any_vlan_flg =
                params->update_accept_any_vlan_flg;
 
-       /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns.
-        * We need to re-fix the rss values per engine for CMT.
-        */
-       if (cdev->num_hwfns > 1 && params->update_rss_flg) {
-               struct qed_update_vport_rss_params *rss =
-                       &params->rss_params;
-               int k, max = 0;
-
-               /* Find largest entry, since it's possible RSS needs to
-                * be disabled [in case only 1 queue per-hwfn]
-                */
-               for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
-                       max = (max > rss->rss_ind_table[k]) ?
-                               max : rss->rss_ind_table[k];
-
-               /* Either fix RSS values or disable RSS */
-               if (cdev->num_hwfns < max + 1) {
-                       int divisor = (max + cdev->num_hwfns - 1) /
-                               cdev->num_hwfns;
-
-                       DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
-                                  "CMT - fixing RSS values (modulo %02x)\n",
-                                  divisor);
-
-                       for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
-                               rss->rss_ind_table[k] =
-                                       rss->rss_ind_table[k] % divisor;
-               } else {
-                       DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
-                                  "CMT - 1 queue per-hwfn; Disabling RSS\n");
+       /* Prepare the RSS configuration */
+       if (params->update_rss_flg)
+               if (qed_update_vport_rss(cdev, &params->rss_params, rss))
                        params->update_rss_flg = 0;
-               }
-       }
-
-       /* Now, update the RSS configuration for actual configuration */
-       if (params->update_rss_flg) {
-               sp_rss_params.update_rss_config = 1;
-               sp_rss_params.rss_enable = 1;
-               sp_rss_params.update_rss_capabilities = 1;
-               sp_rss_params.update_rss_ind_table = 1;
-               sp_rss_params.update_rss_key = 1;
-               sp_rss_params.rss_caps = params->rss_params.rss_caps;
-               sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */
-               memcpy(sp_rss_params.rss_ind_table,
-                      params->rss_params.rss_ind_table,
-                      QED_RSS_IND_TABLE_SIZE * sizeof(u16));
-               memcpy(sp_rss_params.rss_key, params->rss_params.rss_key,
-                      QED_RSS_KEY_SIZE * sizeof(u32));
-       }
-       sp_params.rss_params = &sp_rss_params;
 
        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 
+               if (params->update_rss_flg)
+                       sp_params.rss_params = &rss[i];
+
                sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
                rc = qed_sp_vport_update(p_hwfn, &sp_params,
                                         QED_SPQ_MODE_EBLOCK,
                                         NULL);
                if (rc) {
                        DP_ERR(cdev, "Failed to update VPORT\n");
-                       return rc;
+                       goto out;
                }
 
                DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
@@ -1882,63 +2159,59 @@ static int qed_update_vport(struct qed_dev *cdev,
                           params->update_vport_active_flg);
        }
 
-       return 0;
+out:
+       vfree(rss);
+       return rc;
 }
 
 static int qed_start_rxq(struct qed_dev *cdev,
-                        struct qed_queue_start_common_params *params,
+                        u8 rss_num,
+                        struct qed_queue_start_common_params *p_params,
                         u16 bd_max_bytes,
                         dma_addr_t bd_chain_phys_addr,
                         dma_addr_t cqe_pbl_addr,
                         u16 cqe_pbl_size,
-                        void __iomem **pp_prod)
+                        struct qed_rxq_start_ret_params *ret_params)
 {
-       int rc, hwfn_index;
        struct qed_hwfn *p_hwfn;
+       int rc, hwfn_index;
 
-       hwfn_index = params->rss_id % cdev->num_hwfns;
+       hwfn_index = rss_num % cdev->num_hwfns;
        p_hwfn = &cdev->hwfns[hwfn_index];
 
-       /* Fix queue ID in 100g mode */
-       params->queue_id /= cdev->num_hwfns;
-
-       rc = qed_sp_eth_rx_queue_start(p_hwfn,
-                                      p_hwfn->hw_info.opaque_fid,
-                                      params,
-                                      bd_max_bytes,
-                                      bd_chain_phys_addr,
-                                      cqe_pbl_addr,
-                                      cqe_pbl_size,
-                                      pp_prod);
+       p_params->queue_id = p_params->queue_id / cdev->num_hwfns;
+       p_params->stats_id = p_params->vport_id;
 
+       rc = qed_eth_rx_queue_start(p_hwfn,
+                                   p_hwfn->hw_info.opaque_fid,
+                                   p_params,
+                                   bd_max_bytes,
+                                   bd_chain_phys_addr,
+                                   cqe_pbl_addr, cqe_pbl_size, ret_params);
        if (rc) {
-               DP_ERR(cdev, "Failed to start RXQ#%d\n", params->queue_id);
+               DP_ERR(cdev, "Failed to start RXQ#%d\n", p_params->queue_id);
                return rc;
        }
 
        DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
-                  "Started RX-Q %d [rss %d] on V-PORT %d and SB %d\n",
-                  params->queue_id, params->rss_id, params->vport_id,
-                  params->sb);
+                  "Started RX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
+                  p_params->queue_id, rss_num, p_params->vport_id,
+                  p_params->sb);
 
        return 0;
 }
 
-static int qed_stop_rxq(struct qed_dev *cdev,
-                       struct qed_stop_rxq_params *params)
+static int qed_stop_rxq(struct qed_dev *cdev, u8 rss_id, void *handle)
 {
        int rc, hwfn_index;
        struct qed_hwfn *p_hwfn;
 
-       hwfn_index      = params->rss_id % cdev->num_hwfns;
-       p_hwfn          = &cdev->hwfns[hwfn_index];
+       hwfn_index = rss_id % cdev->num_hwfns;
+       p_hwfn = &cdev->hwfns[hwfn_index];
 
-       rc = qed_sp_eth_rx_queue_stop(p_hwfn,
-                                     params->rx_queue_id / cdev->num_hwfns,
-                                     params->eq_completion_only,
-                                     false);
+       rc = qed_eth_rx_queue_stop(p_hwfn, handle, false, false);
        if (rc) {
-               DP_ERR(cdev, "Failed to stop RXQ#%d\n", params->rx_queue_id);
+               DP_ERR(cdev, "Failed to stop RXQ#%02x\n", rss_id);
                return rc;
        }
 
@@ -1946,26 +2219,24 @@ static int qed_stop_rxq(struct qed_dev *cdev,
 }
 
 static int qed_start_txq(struct qed_dev *cdev,
+                        u8 rss_num,
                         struct qed_queue_start_common_params *p_params,
                         dma_addr_t pbl_addr,
                         u16 pbl_size,
-                        void __iomem **pp_doorbell)
+                        struct qed_txq_start_ret_params *ret_params)
 {
        struct qed_hwfn *p_hwfn;
        int rc, hwfn_index;
 
-       hwfn_index      = p_params->rss_id % cdev->num_hwfns;
-       p_hwfn          = &cdev->hwfns[hwfn_index];
-
-       /* Fix queue ID in 100g mode */
-       p_params->queue_id /= cdev->num_hwfns;
+       hwfn_index = rss_num % cdev->num_hwfns;
+       p_hwfn = &cdev->hwfns[hwfn_index];
+       p_params->queue_id = p_params->queue_id / cdev->num_hwfns;
+       p_params->stats_id = p_params->vport_id;
 
-       rc = qed_sp_eth_tx_queue_start(p_hwfn,
-                                      p_hwfn->hw_info.opaque_fid,
-                                      p_params,
-                                      pbl_addr,
-                                      pbl_size,
-                                      pp_doorbell);
+       rc = qed_eth_tx_queue_start(p_hwfn,
+                                   p_hwfn->hw_info.opaque_fid,
+                                   p_params, 0,
+                                   pbl_addr, pbl_size, ret_params);
 
        if (rc) {
                DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id);
@@ -1973,8 +2244,8 @@ static int qed_start_txq(struct qed_dev *cdev,
        }
 
        DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
-                  "Started TX-Q %d [rss %d] on V-PORT %d and SB %d\n",
-                  p_params->queue_id, p_params->rss_id, p_params->vport_id,
+                  "Started TX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
+                  p_params->queue_id, rss_num, p_params->vport_id,
                   p_params->sb);
 
        return 0;
@@ -1983,24 +2254,28 @@ static int qed_start_txq(struct qed_dev *cdev,
 #define QED_HW_STOP_RETRY_LIMIT (10)
 static int qed_fastpath_stop(struct qed_dev *cdev)
 {
-       qed_hw_stop_fastpath(cdev);
+       int rc;
+
+       rc = qed_hw_stop_fastpath(cdev);
+       if (rc) {
+               DP_ERR(cdev, "Failed to stop Fastpath\n");
+               return rc;
+       }
 
        return 0;
 }
 
-static int qed_stop_txq(struct qed_dev *cdev,
-                       struct qed_stop_txq_params *params)
+static int qed_stop_txq(struct qed_dev *cdev, u8 rss_id, void *handle)
 {
        struct qed_hwfn *p_hwfn;
        int rc, hwfn_index;
 
-       hwfn_index      = params->rss_id % cdev->num_hwfns;
-       p_hwfn          = &cdev->hwfns[hwfn_index];
+       hwfn_index = rss_id % cdev->num_hwfns;
+       p_hwfn = &cdev->hwfns[hwfn_index];
 
-       rc = qed_sp_eth_tx_queue_stop(p_hwfn,
-                                     params->tx_queue_id / cdev->num_hwfns);
+       rc = qed_eth_tx_queue_stop(p_hwfn, handle);
        if (rc) {
-               DP_ERR(cdev, "Failed to stop TXQ#%d\n", params->tx_queue_id);
+               DP_ERR(cdev, "Failed to stop TXQ#%02x\n", rss_id);
                return rc;
        }
 
@@ -2047,20 +2322,23 @@ static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
 
        memset(&accept_flags, 0, sizeof(accept_flags));
 
-       accept_flags.update_rx_mode_config      = 1;
-       accept_flags.update_tx_mode_config      = 1;
-       accept_flags.rx_accept_filter           = QED_ACCEPT_UCAST_MATCHED |
-                                                 QED_ACCEPT_MCAST_MATCHED |
-                                                 QED_ACCEPT_BCAST;
+       accept_flags.update_rx_mode_config = 1;
+       accept_flags.update_tx_mode_config = 1;
+       accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
+                                       QED_ACCEPT_MCAST_MATCHED |
+                                       QED_ACCEPT_BCAST;
        accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
                                        QED_ACCEPT_MCAST_MATCHED |
                                        QED_ACCEPT_BCAST;
 
-       if (type == QED_FILTER_RX_MODE_TYPE_PROMISC)
+       if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
                accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
                                                 QED_ACCEPT_MCAST_UNMATCHED;
-       else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC)
+               accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
+       } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
                accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
+               accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
+       }
 
        return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false,
                                     QED_SPQ_MODE_CB, NULL);
@@ -2072,9 +2350,8 @@ static int qed_configure_filter_ucast(struct qed_dev *cdev,
        struct qed_filter_ucast ucast;
 
        if (!params->vlan_valid && !params->mac_valid) {
-               DP_NOTICE(
-                       cdev,
-                       "Tried configuring a unicast filter, but both MAC and VLAN are not set\n");
+               DP_NOTICE(cdev,
+                         "Tried configuring a unicast filter, but both MAC and VLAN are not set\n");
                return -EINVAL;
        }
 
@@ -2135,8 +2412,7 @@ static int qed_configure_filter_mcast(struct qed_dev *cdev,
        for (i = 0; i < mcast.num_mc_addrs; i++)
                ether_addr_copy(mcast.mac[i], params->mac[i]);
 
-       return qed_filter_mcast_cmd(cdev, &mcast,
-                                   QED_SPQ_MODE_CB, NULL);
+       return qed_filter_mcast_cmd(cdev, &mcast, QED_SPQ_MODE_CB, NULL);
 }
 
 static int qed_configure_filter(struct qed_dev *cdev,
@@ -2153,15 +2429,66 @@ static int qed_configure_filter(struct qed_dev *cdev,
                accept_flags = params->filter.accept_flags;
                return qed_configure_filter_rx_mode(cdev, accept_flags);
        default:
-               DP_NOTICE(cdev, "Unknown filter type %d\n",
-                         (int)params->type);
+               DP_NOTICE(cdev, "Unknown filter type %d\n", (int)params->type);
                return -EINVAL;
        }
 }
 
+static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher)
+{
+       struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_arfs_config_params arfs_config_params;
+
+       memset(&arfs_config_params, 0, sizeof(arfs_config_params));
+       arfs_config_params.tcp = true;
+       arfs_config_params.udp = true;
+       arfs_config_params.ipv4 = true;
+       arfs_config_params.ipv6 = true;
+       arfs_config_params.arfs_enable = en_searcher;
+
+       qed_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
+                               &arfs_config_params);
+       return 0;
+}
+
+static void
+qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn,
+                            void *cookie, union event_ring_data *data,
+                            u8 fw_return_code)
+{
+       struct qed_common_cb_ops *op = p_hwfn->cdev->protocol_ops.common;
+       void *dev = p_hwfn->cdev->ops_cookie;
+
+       op->arfs_filter_op(dev, cookie, fw_return_code);
+}
+
+static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie,
+                                        dma_addr_t mapping, u16 length,
+                                        u16 vport_id, u16 rx_queue_id,
+                                        bool add_filter)
+{
+       struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_spq_comp_cb cb;
+       int rc = -EINVAL;
+
+       cb.function = qed_arfs_sp_response_handler;
+       cb.cookie = cookie;
+
+       rc = qed_configure_rfs_ntuple_filter(p_hwfn, p_hwfn->p_arfs_ptt,
+                                            &cb, mapping, length, rx_queue_id,
+                                            vport_id, add_filter);
+       if (rc)
+               DP_NOTICE(p_hwfn,
+                         "Failed to issue a-RFS filter configuration\n");
+       else
+               DP_VERBOSE(p_hwfn, NETIF_MSG_DRV,
+                          "Successfully issued a-RFS filter configuration\n");
+
+       return rc;
+}
+
 static int qed_fp_cqe_completion(struct qed_dev *dev,
-                                u8 rss_id,
-                                struct eth_slow_path_rx_cqe *cqe)
+                                u8 rss_id, struct eth_slow_path_rx_cqe *cqe)
 {
        return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns],
                                      cqe);
@@ -2175,6 +2502,8 @@ extern const struct qed_iov_hv_ops qed_iov_ops_pass;
 extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass;
 #endif
 
+extern const struct qed_eth_ptp_ops qed_ptp_ops_pass;
+
 static const struct qed_eth_ops qed_eth_ops_pass = {
        .common = &qed_common_ops_pass,
 #ifdef CONFIG_QED_SRIOV
@@ -2183,6 +2512,7 @@ static const struct qed_eth_ops qed_eth_ops_pass = {
 #ifdef CONFIG_DCB
        .dcb = &qed_dcbnl_ops_pass,
 #endif
+       .ptp = &qed_ptp_ops_pass,
        .fill_dev_info = &qed_fill_eth_dev_info,
        .register_ops = &qed_register_eth_ops,
        .check_mac = &qed_check_mac,
@@ -2198,6 +2528,8 @@ static const struct qed_eth_ops qed_eth_ops_pass = {
        .eth_cqe_completion = &qed_fp_cqe_completion,
        .get_vport_stats = &qed_get_vport_stats,
        .tunn_config = &qed_tunn_configure,
+       .ntuple_filter_config = &qed_ntuple_arfs_filter_config,
+       .configure_arfs_searcher = &qed_configure_arfs_searcher,
 };
 
 const struct qed_eth_ops *qed_get_eth_ops(void)
index 0021145434511f445c8c973d8df963fe29fc9f35..6f44229899ebc06acdcfa44fb8dab7b6e625006f 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 #ifndef _QED_L2_H
 #define _QED_L2_H
 #include "qed.h"
 #include "qed_hw.h"
 #include "qed_sp.h"
+struct qed_rss_params {
+       u8 update_rss_config;
+       u8 rss_enable;
+       u8 rss_eng_id;
+       u8 update_rss_capabilities;
+       u8 update_rss_ind_table;
+       u8 update_rss_key;
+       u8 rss_caps;
+       u8 rss_table_size_log;
+
+       /* Indirection table consists of rx queue handles */
+       void *rss_ind_table[QED_RSS_IND_TABLE_SIZE];
+       u32 rss_key[QED_RSS_KEY_SIZE];
+};
 
 struct qed_sge_tpa_params {
        u8 max_buffers_per_cqe;
@@ -78,11 +116,34 @@ struct qed_filter_mcast {
        unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN];
 };
 
-int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
-                            u16 rx_queue_id,
-                            bool eq_completion_only, bool cqe_completion);
+/**
+ * @brief qed_eth_rx_queue_stop - This ramrod closes an Rx queue
+ *
+ * @param p_hwfn
+ * @param p_rxq                        Handle of the queue to close
+ * @param eq_completion_only   If true, completion will always be
+ *                             on the EQe; if false, completion
+ *                             will be on the EQe when the p_hwfn
+ *                             opaque differs from the RXQ opaque,
+ *                             otherwise on the CQe.
+ * @param cqe_completion       If true, completion will be
+ *                             received on the CQe.
+ * @return int
+ */
+int
+qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
+                     void *p_rxq,
+                     bool eq_completion_only, bool cqe_completion);
 
-int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id);
+/**
+ * @brief qed_eth_tx_queue_stop - closes a Tx queue
+ *
+ * @param p_hwfn
+ * @param p_txq - handle of the Tx queue to be closed
+ *
+ * @return int
+ */
+int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_txq);
 
 enum qed_tpa_mode {
        QED_TPA_MODE_NONE,
@@ -95,6 +156,7 @@ struct qed_sp_vport_start_params {
        enum qed_tpa_mode tpa_mode;
        bool remove_inner_vlan;
        bool tx_switching;
+       bool handle_ptp_pkts;
        bool only_untagged;
        bool drop_ttl0;
        u8 max_buffers_per_cqe;
@@ -102,23 +164,13 @@ struct qed_sp_vport_start_params {
        u16 opaque_fid;
        u8 vport_id;
        u16 mtu;
+       bool check_mac;
+       bool check_ethtype;
 };
 
 int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
                           struct qed_sp_vport_start_params *p_params);
 
-struct qed_rss_params {
-       u8      update_rss_config;
-       u8      rss_enable;
-       u8      rss_eng_id;
-       u8      update_rss_capabilities;
-       u8      update_rss_ind_table;
-       u8      update_rss_key;
-       u8      rss_caps;
-       u8      rss_table_size_log;
-       u16     rss_ind_table[QED_RSS_IND_TABLE_SIZE];
-       u32     rss_key[QED_RSS_KEY_SIZE];
-};
 
 struct qed_filter_accept_flags {
        u8      update_rx_mode_config;
@@ -133,6 +185,14 @@ struct qed_filter_accept_flags {
 #define QED_ACCEPT_BCAST                0x20
 };
 
+struct qed_arfs_config_params {
+       bool tcp;
+       bool udp;
+       bool ipv4;
+       bool ipv6;
+       bool arfs_enable;
+};
+
 struct qed_sp_vport_update_params {
        u16                             opaque_fid;
        u8                              vport_id;
@@ -194,45 +254,102 @@ int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
  * @note At the moment - only used by non-linux VFs.
  *
  * @param p_hwfn
- * @param rx_queue_id          RX Queue ID
- * @param num_rxqs             Allow to update multiple rx
- *                             queues, from rx_queue_id to
- *                             (rx_queue_id + num_rxqs)
+ * @param pp_rxq_handlers      An array of queue handles to be updated.
+ * @param num_rxqs             Number of queues to update.
  * @param complete_cqe_flg     Post completion to the CQE Ring if set
  * @param complete_event_flg   Post completion to the Event Ring if set
+ * @param comp_mode
+ * @param p_comp_data
  *
  * @return int
  */
 
 int
 qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
-                           u16 rx_queue_id,
+                           void **pp_rxq_handlers,
                            u8 num_rxqs,
                            u8 complete_cqe_flg,
                            u8 complete_event_flg,
                            enum spq_mode comp_mode,
                            struct qed_spq_comp_cb *p_comp_data);
 
-int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
-                          struct qed_sp_vport_start_params *p_params);
+void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats);
+
+void qed_reset_vport_stats(struct qed_dev *cdev);
 
-int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
-                               u16 opaque_fid,
-                               u32 cid,
-                               struct qed_queue_start_common_params *params,
-                               u8 stats_id,
-                               u16 bd_max_bytes,
-                               dma_addr_t bd_chain_phys_addr,
-                               dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size);
-
-int qed_sp_eth_txq_start_ramrod(struct qed_hwfn  *p_hwfn,
-                               u16  opaque_fid,
-                               u32  cid,
-                               struct qed_queue_start_common_params *p_params,
-                               u8  stats_id,
-                               dma_addr_t pbl_addr,
-                               u16 pbl_size,
-                               union qed_qm_pq_params *p_pq_params);
+struct qed_queue_cid {
+       /* 'Relative' is a relative term ;-). Usually the indices [not counting
+        * SBs] would be PF-relative, but there are some cases where that isn't
+        * the case - specifically for a PF configuring its VF indices it's
+        * possible some fields [E.g., stats-id] in 'rel' would already be abs.
+        */
+       struct qed_queue_start_common_params rel;
+       struct qed_queue_start_common_params abs;
+       u32 cid;
+       u16 opaque_fid;
+
+       /* VFs queues are mapped differently, so we need to know the
+        * relative queue associated with them [0-based].
+        * Notice this is relevant on the *PF* queue-cid of its VF's queues,
+        * and not on the VF itself.
+        */
+       bool is_vf;
+       u8 vf_qid;
+
+       /* Legacy VFs might have Rx producer located elsewhere */
+       bool b_legacy_vf;
+
+       struct qed_hwfn *p_owner;
+};
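
The rel/abs pair in qed_queue_cid exists because firmware wants engine-absolute indices while callers pass PF-relative ones; helpers such as qed_fw_vport() and qed_fw_l2_queue() above do the translation. A hypothetical one-liner capturing the usual conversion (names are illustrative, not the driver's API):

        #include <stdint.h>

        /* Illustrative only: absolute = PF's resource base + relative */
        static inline uint16_t rel_to_abs(uint16_t pf_base, uint16_t rel_qid)
        {
                return (uint16_t)(pf_base + rel_qid);
        }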
+
+void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
+                              struct qed_queue_cid *p_cid);
+
+struct qed_queue_cid *
+_qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
+                     u16 opaque_fid,
+                     u32 cid,
+                     u8 vf_qid,
+                     struct qed_queue_start_common_params *p_params);
+
+int
+qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
+                      struct qed_sp_vport_start_params *p_params);
+
+/**
+ * @brief - Starts an Rx queue, when queue_cid is already prepared
+ *
+ * @param p_hwfn
+ * @param p_cid
+ * @param bd_max_bytes
+ * @param bd_chain_phys_addr
+ * @param cqe_pbl_addr
+ * @param cqe_pbl_size
+ *
+ * @return int
+ */
+int
+qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
+                        struct qed_queue_cid *p_cid,
+                        u16 bd_max_bytes,
+                        dma_addr_t bd_chain_phys_addr,
+                        dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size);
+
+/**
+ * @brief - Starts a Tx queue, where queue_cid is already prepared
+ *
+ * @param p_hwfn
+ * @param p_cid
+ * @param pbl_addr
+ * @param pbl_size
+ * @param p_pq_params - parameters for choosing the PQ for this Tx queue
+ *
+ * @return int
+ */
+int
+qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
+                        struct qed_queue_cid *p_cid,
+                        dma_addr_t pbl_addr, u16 pbl_size, u16 pq_id);
 
 u8 qed_mcast_bin_from_mac(u8 *mac);
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
new file mode 100644 (file)
index 0000000..3bd1195
--- /dev/null
@@ -0,0 +1,2355 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/dma-mapping.h>
+#include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/version.h>
+#include <linux/workqueue.h>
+#include <net/ipv6.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/qed/qed_ll2_if.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_int.h"
+#include "qed_ll2.h"
+#include "qed_mcp.h"
+#include "qed_ooo.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+#include "qed_roce.h"
+
+#define QED_LL2_RX_REGISTERED(ll2)     ((ll2)->rx_queue.b_cb_registred)
+#define QED_LL2_TX_REGISTERED(ll2)     ((ll2)->tx_queue.b_cb_registred)
+
+#define QED_LL2_TX_SIZE (256)
+#define QED_LL2_RX_SIZE (4096)
+
+struct qed_cb_ll2_info {
+       int rx_cnt;
+       u32 rx_size;
+       u8 handle;
+       bool frags_mapped;
+
+       /* Lock protecting LL2 buffer lists in sleepless context */
+       spinlock_t lock;
+       struct list_head list;
+
+       const struct qed_ll2_cb_ops *cbs;
+       void *cb_cookie;
+};
+
+struct qed_ll2_buffer {
+       struct list_head list;
+       void *data;
+       dma_addr_t phys_addr;
+};
+
+static void qed_ll2b_complete_tx_packet(struct qed_hwfn *p_hwfn,
+                                       u8 connection_handle,
+                                       void *cookie,
+                                       dma_addr_t first_frag_addr,
+                                       bool b_last_fragment,
+                                       bool b_last_packet)
+{
+       struct qed_dev *cdev = p_hwfn->cdev;
+       struct sk_buff *skb = cookie;
+
+       /* All we need to do is release the mapping */
+       dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr,
+                        skb_headlen(skb), DMA_TO_DEVICE);
+
+       if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb)
+               cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
+                                     b_last_fragment);
+
+       if (cdev->ll2->frags_mapped)
+               /* Case where mapped frags were received, need to
+                * free skb with nr_frags marked as 0
+                */
+               skb_shinfo(skb)->nr_frags = 0;
+
+       dev_kfree_skb_any(skb);
+}
+
+static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
+                               u8 **data, dma_addr_t *phys_addr)
+{
+       *data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
+       if (!(*data)) {
+               DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
+               return -ENOMEM;
+       }
+
+       *phys_addr = dma_map_single(&cdev->pdev->dev,
+                                   ((*data) + NET_SKB_PAD),
+                                   cdev->ll2->rx_size, DMA_FROM_DEVICE);
+       if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) {
+               DP_INFO(cdev, "Failed to map LL2 buffer data\n");
+               kfree((*data));
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
+                                struct qed_ll2_buffer *buffer)
+{
+       spin_lock_bh(&cdev->ll2->lock);
+
+       dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
+                        cdev->ll2->rx_size, DMA_FROM_DEVICE);
+       kfree(buffer->data);
+       list_del(&buffer->list);
+
+       cdev->ll2->rx_cnt--;
+       if (!cdev->ll2->rx_cnt)
+               DP_INFO(cdev, "All LL2 entries were removed\n");
+
+       spin_unlock_bh(&cdev->ll2->lock);
+
+       return 0;
+}
+
+static void qed_ll2_kill_buffers(struct qed_dev *cdev)
+{
+       struct qed_ll2_buffer *buffer, *tmp_buffer;
+
+       list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list)
+               qed_ll2_dealloc_buffer(cdev, buffer);
+}
+
+static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
+                                       u8 connection_handle,
+                                       struct qed_ll2_rx_packet *p_pkt,
+                                       struct core_rx_fast_path_cqe *p_cqe,
+                                       bool b_last_packet)
+{
+       u16 packet_length = le16_to_cpu(p_cqe->packet_length);
+       struct qed_ll2_buffer *buffer = p_pkt->cookie;
+       struct qed_dev *cdev = p_hwfn->cdev;
+       u16 vlan = le16_to_cpu(p_cqe->vlan);
+       u32 opaque_data_0, opaque_data_1;
+       u8 pad = p_cqe->placement_offset;
+       dma_addr_t new_phys_addr;
+       struct sk_buff *skb;
+       bool reuse = false;
+       int rc = -EINVAL;
+       u8 *new_data;
+
+       opaque_data_0 = le32_to_cpu(p_cqe->opaque_data.data[0]);
+       opaque_data_1 = le32_to_cpu(p_cqe->opaque_data.data[1]);
+
+       DP_VERBOSE(p_hwfn,
+                  (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
+                  "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
+                  (u64)p_pkt->rx_buf_addr, pad, packet_length,
+                  le16_to_cpu(p_cqe->parse_flags.flags), vlan,
+                  opaque_data_0, opaque_data_1);
+
+       if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
+               print_hex_dump(KERN_INFO, "",
+                              DUMP_PREFIX_OFFSET, 16, 1,
+                              buffer->data, packet_length, false);
+       }
+
+       /* Determine if data is valid */
+       if (packet_length < ETH_HLEN)
+               reuse = true;
+
+       /* Allocate a replacement for buffer; Reuse upon failure */
+       if (!reuse)
+               rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data,
+                                         &new_phys_addr);
+
+       /* If need to reuse or there's no replacement buffer, repost this */
+       if (rc)
+               goto out_post;
+       dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
+                        cdev->ll2->rx_size, DMA_FROM_DEVICE);
+
+       skb = build_skb(buffer->data, 0);
+       if (!skb) {
+               rc = -ENOMEM;
+               goto out_post;
+       }
+
+       pad += NET_SKB_PAD;
+       skb_reserve(skb, pad);
+       skb_put(skb, packet_length);
+       skb_checksum_none_assert(skb);
+
+       /* Get partial Ethernet information instead of eth_type_trans(),
+        * since we don't have an associated net_device.
+        */
+       skb_reset_mac_header(skb);
+       skb->protocol = eth_hdr(skb)->h_proto;
+
+       /* Pass SKB onward */
+       if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
+               if (vlan)
+                       __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
+               cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
+                                     opaque_data_0, opaque_data_1);
+       }
+
+       /* Update Buffer information and update FW producer */
+       buffer->data = new_data;
+       buffer->phys_addr = new_phys_addr;
+
+out_post:
+       rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev), cdev->ll2->handle,
+                                   buffer->phys_addr, 0,  buffer, 1);
+
+       if (rc)
+               qed_ll2_dealloc_buffer(cdev, buffer);
+}
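
qed_ll2b_complete_rx_packet follows a recycle-on-failure pattern: the buffer is handed up the stack only if a replacement could be allocated; otherwise the same buffer is reposted and the packet dropped, so the Rx ring never runs dry. A stand-alone skeleton of that pattern (all names are illustrative stand-ins, not the driver's API):

        #include <stdio.h>
        #include <stdlib.h>

        struct buf { void *data; };

        static struct buf *alloc_buf(void)
        {
                return calloc(1, sizeof(struct buf));
        }

        static void repost(struct buf *b)  { printf("repost %p\n", (void *)b); }
        static void deliver(struct buf *b) { printf("deliver %p\n", (void *)b); free(b); }

        static void rx_complete(struct buf *b)
        {
                struct buf *replacement = alloc_buf();

                if (!replacement) {
                        repost(b);           /* drop packet, keep ring full */
                        return;
                }

                deliver(b);                  /* old buffer goes up the stack */
                repost(replacement);         /* ring stays fully populated */
        }

        int main(void)
        {
                struct buf *b = alloc_buf();

                if (b)
                        rx_complete(b);
                return 0;
        }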
+
+static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
+                                                   u8 connection_handle,
+                                                   bool b_lock,
+                                                   bool b_only_active)
+{
+       struct qed_ll2_info *p_ll2_conn, *p_ret = NULL;
+
+       if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS)
+               return NULL;
+
+       if (!p_hwfn->p_ll2_info)
+               return NULL;
+
+       p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];
+
+       if (b_only_active) {
+               if (b_lock)
+                       mutex_lock(&p_ll2_conn->mutex);
+               if (p_ll2_conn->b_active)
+                       p_ret = p_ll2_conn;
+               if (b_lock)
+                       mutex_unlock(&p_ll2_conn->mutex);
+       } else {
+               p_ret = p_ll2_conn;
+       }
+
+       return p_ret;
+}
+
+static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
+                                                 u8 connection_handle)
+{
+       return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true);
+}
+
+static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn,
+                                                      u8 connection_handle)
+{
+       return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true);
+}
+
+static struct qed_ll2_info *
+qed_ll2_handle_sanity_inactive(struct qed_hwfn *p_hwfn, u8 connection_handle)
+{
+       return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false);
+}
+
+static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
+{
+       bool b_last_packet = false, b_last_frag = false;
+       struct qed_ll2_tx_packet *p_pkt = NULL;
+       struct qed_ll2_info *p_ll2_conn;
+       struct qed_ll2_tx_queue *p_tx;
+       dma_addr_t tx_frag;
+
+       p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
+       if (!p_ll2_conn)
+               return;
+
+       p_tx = &p_ll2_conn->tx_queue;
+
+       while (!list_empty(&p_tx->active_descq)) {
+               p_pkt = list_first_entry(&p_tx->active_descq,
+                                        struct qed_ll2_tx_packet, list_entry);
+               if (!p_pkt)
+                       break;
+
+               list_del(&p_pkt->list_entry);
+               b_last_packet = list_empty(&p_tx->active_descq);
+               list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
+               if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
+                       struct qed_ooo_buffer *p_buffer;
+
+                       p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
+                       qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
+                                               p_buffer);
+               } else {
+                       p_tx->cur_completing_packet = *p_pkt;
+                       p_tx->cur_completing_bd_idx = 1;
+                       b_last_frag =
+                               p_tx->cur_completing_bd_idx == p_pkt->bd_used;
+                       tx_frag = p_pkt->bds_set[0].tx_frag;
+                       if (p_ll2_conn->conn.gsi_enable)
+                               qed_ll2b_release_tx_gsi_packet(p_hwfn,
+                                                              p_ll2_conn->my_id,
+                                                              p_pkt->cookie,
+                                                              tx_frag,
+                                                              b_last_frag,
+                                                              b_last_packet);
+                       else
+                               qed_ll2b_complete_tx_packet(p_hwfn,
+                                                           p_ll2_conn->my_id,
+                                                           p_pkt->cookie,
+                                                           tx_frag,
+                                                           b_last_frag,
+                                                           b_last_packet);
+               }
+       }
+}
+
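+/* TX completion handler, registered as a status-block callback. Consumes
+ * the BDs covered by the firmware consumer index and completes each packet,
+ * dropping the TX lock around the completion callbacks.
+ */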
+static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
+{
+       struct qed_ll2_info *p_ll2_conn = p_cookie;
+       struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
+       u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0;
+       struct qed_ll2_tx_packet *p_pkt;
+       bool b_last_frag = false;
+       unsigned long flags;
+       dma_addr_t tx_frag;
+       int rc = -EINVAL;
+
+       spin_lock_irqsave(&p_tx->lock, flags);
+       if (p_tx->b_completing_packet) {
+               rc = -EBUSY;
+               goto out;
+       }
+
+       new_idx = le16_to_cpu(*p_tx->p_fw_cons);
+       num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
+       while (num_bds) {
+               if (list_empty(&p_tx->active_descq))
+                       goto out;
+
+               p_pkt = list_first_entry(&p_tx->active_descq,
+                                        struct qed_ll2_tx_packet, list_entry);
+               if (!p_pkt)
+                       goto out;
+
+               p_tx->b_completing_packet = true;
+               p_tx->cur_completing_packet = *p_pkt;
+               num_bds_in_packet = p_pkt->bd_used;
+               list_del(&p_pkt->list_entry);
+
+               if (num_bds < num_bds_in_packet) {
+                       DP_NOTICE(p_hwfn,
+                                 "Rest of BDs does not cover whole packet\n");
+                       goto out;
+               }
+
+               num_bds -= num_bds_in_packet;
+               p_tx->bds_idx += num_bds_in_packet;
+               while (num_bds_in_packet--)
+                       qed_chain_consume(&p_tx->txq_chain);
+
+               p_tx->cur_completing_bd_idx = 1;
+               b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
+               list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
+
+               spin_unlock_irqrestore(&p_tx->lock, flags);
+               tx_frag = p_pkt->bds_set[0].tx_frag;
+               if (p_ll2_conn->conn.gsi_enable)
+                       qed_ll2b_complete_tx_gsi_packet(p_hwfn,
+                                                       p_ll2_conn->my_id,
+                                                       p_pkt->cookie,
+                                                       tx_frag,
+                                                       b_last_frag, !num_bds);
+               else
+                       qed_ll2b_complete_tx_packet(p_hwfn,
+                                                   p_ll2_conn->my_id,
+                                                   p_pkt->cookie,
+                                                   tx_frag,
+                                                   b_last_frag, !num_bds);
+               spin_lock_irqsave(&p_tx->lock, flags);
+       }
+
+       p_tx->b_completing_packet = false;
+       rc = 0;
+out:
+       spin_unlock_irqrestore(&p_tx->lock, flags);
+       return rc;
+}
+
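+/* Complete a single GSI (RoCE CM) RX CQE: extract the parsed fields,
+ * recycle the descriptor onto the free list and invoke the GSI RX
+ * completion callback with the RX lock temporarily released.
+ */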
+static int
+qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn,
+                          struct qed_ll2_info *p_ll2_info,
+                          union core_rx_cqe_union *p_cqe,
+                          unsigned long lock_flags, bool b_last_cqe)
+{
+       struct qed_ll2_rx_queue *p_rx = &p_ll2_info->rx_queue;
+       struct qed_ll2_rx_packet *p_pkt = NULL;
+       u16 packet_length, parse_flags, vlan;
+       u32 src_mac_addrhi;
+       u16 src_mac_addrlo;
+
+       if (!list_empty(&p_rx->active_descq))
+               p_pkt = list_first_entry(&p_rx->active_descq,
+                                        struct qed_ll2_rx_packet, list_entry);
+       if (!p_pkt) {
+               DP_NOTICE(p_hwfn,
+                         "GSI Rx completion but active_descq is empty\n");
+               return -EIO;
+       }
+
+       list_del(&p_pkt->list_entry);
+       parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
+       packet_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
+       vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
+       src_mac_addrhi = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
+       src_mac_addrlo = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
+       if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
+               DP_NOTICE(p_hwfn,
+                         "Mismatch between active_descq and the LL2 Rx chain\n");
+       list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
+
+       spin_unlock_irqrestore(&p_rx->lock, lock_flags);
+       qed_ll2b_complete_rx_gsi_packet(p_hwfn,
+                                       p_ll2_info->my_id,
+                                       p_pkt->cookie,
+                                       p_pkt->rx_buf_addr,
+                                       packet_length,
+                                       p_cqe->rx_cqe_gsi.data_length_error,
+                                       parse_flags,
+                                       vlan,
+                                       src_mac_addrhi,
+                                       src_mac_addrlo, b_last_cqe);
+       spin_lock_irqsave(&p_rx->lock, lock_flags);
+
+       return 0;
+}
+
+static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
+                                     struct qed_ll2_info *p_ll2_conn,
+                                     union core_rx_cqe_union *p_cqe,
+                                     unsigned long *p_lock_flags,
+                                     bool b_last_cqe)
+{
+       struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
+       struct qed_ll2_rx_packet *p_pkt = NULL;
+
+       if (!list_empty(&p_rx->active_descq))
+               p_pkt = list_first_entry(&p_rx->active_descq,
+                                        struct qed_ll2_rx_packet, list_entry);
+       if (!p_pkt) {
+               DP_NOTICE(p_hwfn,
+                         "LL2 Rx completion but active_descq is empty\n");
+               return -EIO;
+       }
+       list_del(&p_pkt->list_entry);
+
+       if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
+               DP_NOTICE(p_hwfn,
+                         "Mismatch between active_descq and the LL2 Rx chain\n");
+       list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
+
+       spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
+       qed_ll2b_complete_rx_packet(p_hwfn, p_ll2_conn->my_id,
+                                   p_pkt, &p_cqe->rx_cqe_fp, b_last_cqe);
+       spin_lock_irqsave(&p_rx->lock, *p_lock_flags);
+
+       return 0;
+}
+
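+/* RX completion handler, registered as a status-block callback. Walks the
+ * RCQ chain up to the firmware consumer index, dispatching each CQE by its
+ * type (slow-path, GSI offload or regular).
+ */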
+static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
+{
+       struct qed_ll2_info *p_ll2_conn = cookie;
+       struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
+       union core_rx_cqe_union *cqe = NULL;
+       u16 cq_new_idx = 0, cq_old_idx = 0;
+       unsigned long flags = 0;
+       int rc = 0;
+
+       spin_lock_irqsave(&p_rx->lock, flags);
+       cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
+       cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
+
+       while (cq_new_idx != cq_old_idx) {
+               bool b_last_cqe;
+
+               cqe = qed_chain_consume(&p_rx->rcq_chain);
+               cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
+
+               /* Evaluate after advancing the consumer; beforehand it
+                * would always be false inside the loop.
+                */
+               b_last_cqe = (cq_new_idx == cq_old_idx);
+
+               DP_VERBOSE(p_hwfn,
+                          QED_MSG_LL2,
+                          "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n",
+                          cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type);
+
+               switch (cqe->rx_cqe_sp.type) {
+               case CORE_RX_CQE_TYPE_SLOW_PATH:
+                       DP_NOTICE(p_hwfn, "LL2 - unexpected Rx CQE slowpath\n");
+                       rc = -EINVAL;
+                       break;
+               case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
+                       rc = qed_ll2_rxq_completion_gsi(p_hwfn, p_ll2_conn,
+                                                       cqe, flags, b_last_cqe);
+                       break;
+               case CORE_RX_CQE_TYPE_REGULAR:
+                       rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn,
+                                                       cqe, &flags,
+                                                       b_last_cqe);
+                       break;
+               default:
+                       rc = -EIO;
+               }
+       }
+
+       spin_unlock_irqrestore(&p_rx->lock, flags);
+       return rc;
+}
+
+static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
+{
+       struct qed_ll2_info *p_ll2_conn = NULL;
+       struct qed_ll2_rx_packet *p_pkt = NULL;
+       struct qed_ll2_rx_queue *p_rx;
+
+       p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
+       if (!p_ll2_conn)
+               return;
+
+       p_rx = &p_ll2_conn->rx_queue;
+
+       while (!list_empty(&p_rx->active_descq)) {
+               dma_addr_t rx_buf_addr;
+               void *cookie;
+               bool b_last;
+
+               p_pkt = list_first_entry(&p_rx->active_descq,
+                                        struct qed_ll2_rx_packet, list_entry);
+               if (!p_pkt)
+                       break;
+
+               list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
+
+               if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
+                       struct qed_ooo_buffer *p_buffer;
+
+                       p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
+                       qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
+                                               p_buffer);
+               } else {
+                       rx_buf_addr = p_pkt->rx_buf_addr;
+                       cookie = p_pkt->cookie;
+
+                       b_last = list_empty(&p_rx->active_descq);
+               }
+       }
+}
+
+#if IS_ENABLED(CONFIG_QED_ISCSI)
+static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
+{
+       u8 bd_flags = 0;
+
+       if (GET_FIELD(parse_flags, PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
+               SET_FIELD(bd_flags, CORE_TX_BD_DATA_VLAN_INSERTION, 1);
+
+       return bd_flags;
+}
+
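+/* Handle RX completions on the iSCSI OOO loopback queue. For each CQE the
+ * drop-isle request is processed first; the buffer is then attached to an
+ * isle (new/right/left/join) or queued to the peninsula, depending on the
+ * TCP event opcode carried in the CQE's opaque data.
+ */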
+static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
+                                 struct qed_ll2_info *p_ll2_conn)
+{
+       struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
+       u16 packet_length = 0, parse_flags = 0, vlan = 0;
+       struct qed_ll2_rx_packet *p_pkt = NULL;
+       u32 num_ooo_add_to_peninsula = 0, cid;
+       union core_rx_cqe_union *cqe = NULL;
+       u16 cq_new_idx = 0, cq_old_idx = 0;
+       struct qed_ooo_buffer *p_buffer;
+       struct ooo_opaque *iscsi_ooo;
+       u8 placement_offset = 0;
+       u8 cqe_type;
+
+       cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
+       cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
+       if (cq_new_idx == cq_old_idx)
+               return 0;
+
+       while (cq_new_idx != cq_old_idx) {
+               struct core_rx_fast_path_cqe *p_cqe_fp;
+
+               cqe = qed_chain_consume(&p_rx->rcq_chain);
+               cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
+               cqe_type = cqe->rx_cqe_sp.type;
+
+               if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
+                       DP_NOTICE(p_hwfn,
+                                 "Got a non-regular LB LL2 completion [type 0x%02x]\n",
+                                 cqe_type);
+                       return -EINVAL;
+               }
+               p_cqe_fp = &cqe->rx_cqe_fp;
+
+               placement_offset = p_cqe_fp->placement_offset;
+               parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags);
+               packet_length = le16_to_cpu(p_cqe_fp->packet_length);
+               vlan = le16_to_cpu(p_cqe_fp->vlan);
+               iscsi_ooo = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
+               qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info,
+                                          iscsi_ooo);
+               cid = le32_to_cpu(iscsi_ooo->cid);
+
+               /* Process delete isle first */
+               if (iscsi_ooo->drop_size)
+                       qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid,
+                                            iscsi_ooo->drop_isle,
+                                            iscsi_ooo->drop_size);
+
+               if (iscsi_ooo->ooo_opcode == TCP_EVENT_NOP)
+                       continue;
+
+               /* Now process create/add/join isles */
+               if (list_empty(&p_rx->active_descq)) {
+                       DP_NOTICE(p_hwfn,
+                                 "LL2 OOO RX chain has no submitted buffers\n"
+                                 );
+                       return -EIO;
+               }
+
+               p_pkt = list_first_entry(&p_rx->active_descq,
+                                        struct qed_ll2_rx_packet, list_entry);
+
+               if ((iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE) ||
+                   (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT) ||
+                   (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT) ||
+                   (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_PEN) ||
+                   (iscsi_ooo->ooo_opcode == TCP_EVENT_JOIN)) {
+                       if (!p_pkt) {
+                               DP_NOTICE(p_hwfn,
+                                         "LL2 OOO RX packet is not valid\n");
+                               return -EIO;
+                       }
+                       list_del(&p_pkt->list_entry);
+                       p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
+                       p_buffer->packet_length = packet_length;
+                       p_buffer->parse_flags = parse_flags;
+                       p_buffer->vlan = vlan;
+                       p_buffer->placement_offset = placement_offset;
+                       qed_chain_consume(&p_rx->rxq_chain);
+                       list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
+
+                       switch (iscsi_ooo->ooo_opcode) {
+                       case TCP_EVENT_ADD_NEW_ISLE:
+                               qed_ooo_add_new_isle(p_hwfn,
+                                                    p_hwfn->p_ooo_info,
+                                                    cid,
+                                                    iscsi_ooo->ooo_isle,
+                                                    p_buffer);
+                               break;
+                       case TCP_EVENT_ADD_ISLE_RIGHT:
+                               qed_ooo_add_new_buffer(p_hwfn,
+                                                      p_hwfn->p_ooo_info,
+                                                      cid,
+                                                      iscsi_ooo->ooo_isle,
+                                                      p_buffer,
+                                                      QED_OOO_RIGHT_BUF);
+                               break;
+                       case TCP_EVENT_ADD_ISLE_LEFT:
+                               qed_ooo_add_new_buffer(p_hwfn,
+                                                      p_hwfn->p_ooo_info,
+                                                      cid,
+                                                      iscsi_ooo->ooo_isle,
+                                                      p_buffer,
+                                                      QED_OOO_LEFT_BUF);
+                               break;
+                       case TCP_EVENT_JOIN:
+                               qed_ooo_add_new_buffer(p_hwfn,
+                                                      p_hwfn->p_ooo_info,
+                                                      cid,
+                                                      iscsi_ooo->ooo_isle +
+                                                      1,
+                                                      p_buffer,
+                                                      QED_OOO_LEFT_BUF);
+                               qed_ooo_join_isles(p_hwfn,
+                                                  p_hwfn->p_ooo_info,
+                                                  cid, iscsi_ooo->ooo_isle);
+                               break;
+                       case TCP_EVENT_ADD_PEN:
+                               num_ooo_add_to_peninsula++;
+                               qed_ooo_put_ready_buffer(p_hwfn,
+                                                        p_hwfn->p_ooo_info,
+                                                        p_buffer, true);
+                               break;
+                       }
+               } else {
+                       DP_NOTICE(p_hwfn,
+                                 "Unexpected event (%d) TX OOO completion\n",
+                                 iscsi_ooo->ooo_opcode);
+               }
+       }
+
+       return 0;
+}
+
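+/* Push all OOO buffers that became ready (i.e., now in-order) back out
+ * through the loopback TX queue. If a submission fails, the buffer is
+ * returned to the ready list and the loop stops.
+ */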
+static void
+qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
+                         struct qed_ll2_info *p_ll2_conn)
+{
+       struct qed_ooo_buffer *p_buffer;
+       int rc;
+       u16 l4_hdr_offset_w;
+       dma_addr_t first_frag;
+       u16 parse_flags;
+       u8 bd_flags;
+
+       /* Submit Tx buffers here */
+       while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn,
+                                                   p_hwfn->p_ooo_info))) {
+               l4_hdr_offset_w = 0;
+               bd_flags = 0;
+
+               first_frag = p_buffer->rx_buffer_phys_addr +
+                            p_buffer->placement_offset;
+               parse_flags = p_buffer->parse_flags;
+               bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags);
+               SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1);
+               SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1);
+
+               rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1,
+                                              p_buffer->vlan, bd_flags,
+                                              l4_hdr_offset_w,
+                                              p_ll2_conn->conn.tx_dest, 0,
+                                              first_frag,
+                                              p_buffer->packet_length,
+                                              p_buffer, true);
+               if (rc) {
+                       qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info,
+                                                p_buffer, false);
+                       break;
+               }
+       }
+}
+
+static void
+qed_ooo_submit_rx_buffers(struct qed_hwfn *p_hwfn,
+                         struct qed_ll2_info *p_ll2_conn)
+{
+       struct qed_ooo_buffer *p_buffer;
+       int rc;
+
+       while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
+                                                  p_hwfn->p_ooo_info))) {
+               rc = qed_ll2_post_rx_buffer(p_hwfn,
+                                           p_ll2_conn->my_id,
+                                           p_buffer->rx_buffer_phys_addr,
+                                           0, p_buffer, true);
+               if (rc) {
+                       qed_ooo_put_free_buffer(p_hwfn,
+                                               p_hwfn->p_ooo_info, p_buffer);
+                       break;
+               }
+       }
+}
+
+static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
+{
+       struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
+       int rc;
+
+       rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn);
+       if (rc)
+               return rc;
+
+       qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
+       qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);
+
+       return 0;
+}
+
+static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
+{
+       struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
+       struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
+       struct qed_ll2_tx_packet *p_pkt = NULL;
+       struct qed_ooo_buffer *p_buffer;
+       bool b_dont_submit_rx = false;
+       u16 new_idx = 0, num_bds = 0;
+       int rc;
+
+       new_idx = le16_to_cpu(*p_tx->p_fw_cons);
+       num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
+
+       if (!num_bds)
+               return 0;
+
+       while (num_bds) {
+               if (list_empty(&p_tx->active_descq))
+                       return -EINVAL;
+
+               p_pkt = list_first_entry(&p_tx->active_descq,
+                                        struct qed_ll2_tx_packet, list_entry);
+               if (!p_pkt)
+                       return -EINVAL;
+
+               if (p_pkt->bd_used != 1) {
+                       DP_NOTICE(p_hwfn,
+                                 "Unexpectedly many BDs(%d) in TX OOO completion\n",
+                                 p_pkt->bd_used);
+                       return -EINVAL;
+               }
+
+               list_del(&p_pkt->list_entry);
+
+               num_bds--;
+               p_tx->bds_idx++;
+               qed_chain_consume(&p_tx->txq_chain);
+
+               p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
+               list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
+
+               if (b_dont_submit_rx) {
+                       qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
+                                               p_buffer);
+                       continue;
+               }
+
+               rc = qed_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id,
+                                           p_buffer->rx_buffer_phys_addr, 0,
+                                           p_buffer, true);
+               if (rc != 0) {
+                       qed_ooo_put_free_buffer(p_hwfn,
+                                               p_hwfn->p_ooo_info, p_buffer);
+                       b_dont_submit_rx = true;
+               }
+       }
+
+       qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);
+
+       return 0;
+}
+
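+/* Preallocate the DMA-coherent RX buffers used by the iSCSI OOO flow; each
+ * buffer covers the MTU plus 26 bytes of header room, rounded up to a
+ * multiple of ETH_CACHE_LINE_SIZE.
+ */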
+static int
+qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
+                              struct qed_ll2_info *p_ll2_info,
+                              u16 rx_num_ooo_buffers, u16 mtu)
+{
+       struct qed_ooo_buffer *p_buf = NULL;
+       void *p_virt;
+       u16 buf_idx;
+       int rc = 0;
+
+       if (p_ll2_info->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
+               return rc;
+
+       if (!rx_num_ooo_buffers)
+               return -EINVAL;
+
+       for (buf_idx = 0; buf_idx < rx_num_ooo_buffers; buf_idx++) {
+               p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL);
+               if (!p_buf) {
+                       rc = -ENOMEM;
+                       goto out;
+               }
+
+               p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE;
+               p_buf->rx_buffer_size = (p_buf->rx_buffer_size +
+                                        ETH_CACHE_LINE_SIZE - 1) &
+                                       ~(ETH_CACHE_LINE_SIZE - 1);
+               p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+                                           p_buf->rx_buffer_size,
+                                           &p_buf->rx_buffer_phys_addr,
+                                           GFP_KERNEL);
+               if (!p_virt) {
+                       kfree(p_buf);
+                       rc = -ENOMEM;
+                       goto out;
+               }
+
+               p_buf->rx_buffer_virt_addr = p_virt;
+               qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf);
+       }
+
+       DP_VERBOSE(p_hwfn, QED_MSG_LL2,
+                  "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
+                  rx_num_ooo_buffers, p_buf->rx_buffer_size);
+
+out:
+       return rc;
+}
+
+static void
+qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
+                                struct qed_ll2_info *p_ll2_conn)
+{
+       if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
+               return;
+
+       qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
+       qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
+}
+
+static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
+                                          struct qed_ll2_info *p_ll2_conn)
+{
+       struct qed_ooo_buffer *p_buffer;
+
+       if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
+               return;
+
+       qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
+       while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
+                                                  p_hwfn->p_ooo_info))) {
+               dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                                 p_buffer->rx_buffer_size,
+                                 p_buffer->rx_buffer_virt_addr,
+                                 p_buffer->rx_buffer_phys_addr);
+               kfree(p_buffer);
+       }
+}
+
+static void qed_ll2_stop_ooo(struct qed_dev *cdev)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
+
+       DP_VERBOSE(cdev, QED_MSG_STORAGE, "Stopping LL2 OOO queue [%02x]\n",
+                  *handle);
+
+       qed_ll2_terminate_connection(hwfn, *handle);
+       qed_ll2_release_connection(hwfn, *handle);
+       *handle = QED_LL2_UNUSED_HANDLE;
+}
+
+static int qed_ll2_start_ooo(struct qed_dev *cdev,
+                            struct qed_ll2_params *params)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
+       struct qed_ll2_conn ll2_info = { 0 };
+       int rc;
+
+       ll2_info.conn_type = QED_LL2_TYPE_ISCSI_OOO;
+       ll2_info.mtu = params->mtu;
+       ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
+       ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
+       ll2_info.tx_tc = OOO_LB_TC;
+       ll2_info.tx_dest = CORE_TX_DEST_LB;
+
+       rc = qed_ll2_acquire_connection(hwfn, &ll2_info,
+                                       QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
+                                       handle);
+       if (rc) {
+               DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
+               goto out;
+       }
+
+       rc = qed_ll2_establish_connection(hwfn, *handle);
+       if (rc) {
+               DP_INFO(cdev, "Failed to establist LL2 OOO connection\n");
+               goto fail;
+       }
+
+       return 0;
+
+fail:
+       qed_ll2_release_connection(hwfn, *handle);
+out:
+       *handle = QED_LL2_UNUSED_HANDLE;
+       return rc;
+}
+#else /* IS_ENABLED(CONFIG_QED_ISCSI) */
+static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn,
+                                    void *p_cookie) { return -EINVAL; }
+static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn,
+                                    void *p_cookie) { return -EINVAL; }
+static inline int
+qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
+                              struct qed_ll2_info *p_ll2_info,
+                              u16 rx_num_ooo_buffers, u16 mtu) { return 0; }
+static inline void
+qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
+                                struct qed_ll2_info *p_ll2_conn) { return; }
+static inline void
+qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
+                              struct qed_ll2_info *p_ll2_conn) { return; }
+static inline void qed_ll2_stop_ooo(struct qed_dev *cdev) { return; }
+static inline int qed_ll2_start_ooo(struct qed_dev *cdev,
+                                   struct qed_ll2_params *params)
+{
+       return -EINVAL;
+}
+#endif /* IS_ENABLED(CONFIG_QED_ISCSI) */
+
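+/* Post the CORE_RAMROD_RX_QUEUE_START ramrod on the slow-path queue,
+ * describing the RX BD chain and CQE PBL to firmware together with the
+ * queue's error-handling and classification attributes.
+ */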
+static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
+                                    struct qed_ll2_info *p_ll2_conn,
+                                    u8 action_on_error)
+{
+       enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
+       struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
+       struct core_rx_start_ramrod_data *p_ramrod = NULL;
+       struct qed_spq_entry *p_ent = NULL;
+       struct qed_sp_init_data init_data;
+       u16 cqe_pbl_size;
+       int rc = 0;
+
+       /* Get SPQ entry */
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.cid = p_ll2_conn->cid;
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                CORE_RAMROD_RX_QUEUE_START,
+                                PROTOCOLID_CORE, &init_data);
+       if (rc)
+               return rc;
+
+       p_ramrod = &p_ent->ramrod.core_rx_queue_start;
+
+       p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
+       p_ramrod->sb_index = p_rx->rx_sb_index;
+       p_ramrod->complete_event_flg = 1;
+
+       p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
+       DMA_REGPAIR_LE(p_ramrod->bd_base,
+                      p_rx->rxq_chain.p_phys_addr);
+       cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
+       p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
+       DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
+                      qed_chain_get_pbl_phys(&p_rx->rcq_chain));
+
+       p_ramrod->drop_ttl0_flg = p_ll2_conn->conn.rx_drop_ttl0_flg;
+       p_ramrod->inner_vlan_removal_en = p_ll2_conn->conn.rx_vlan_removal_en;
+       p_ramrod->queue_id = p_ll2_conn->queue_id;
+       p_ramrod->main_func_queue =
+               (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0 : 1;
+
+       if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) &&
+           p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE)) {
+               p_ramrod->mf_si_bcast_accept_all = 1;
+               p_ramrod->mf_si_mcast_accept_all = 1;
+       } else {
+               p_ramrod->mf_si_bcast_accept_all = 0;
+               p_ramrod->mf_si_mcast_accept_all = 0;
+       }
+
+       p_ramrod->action_on_error.error_type = action_on_error;
+       p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
+       return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
+                                    struct qed_ll2_info *p_ll2_conn)
+{
+       enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
+       struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
+       struct core_tx_start_ramrod_data *p_ramrod = NULL;
+       struct qed_spq_entry *p_ent = NULL;
+       struct qed_sp_init_data init_data;
+       u16 pq_id = 0, pbl_size;
+       int rc = -EINVAL;
+
+       if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
+               return 0;
+
+       if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
+               p_ll2_conn->tx_stats_en = 0;
+       else
+               p_ll2_conn->tx_stats_en = 1;
+
+       /* Get SPQ entry */
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.cid = p_ll2_conn->cid;
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                CORE_RAMROD_TX_QUEUE_START,
+                                PROTOCOLID_CORE, &init_data);
+       if (rc)
+               return rc;
+
+       p_ramrod = &p_ent->ramrod.core_tx_queue_start;
+
+       p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
+       p_ramrod->sb_index = p_tx->tx_sb_index;
+       p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
+       p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
+       p_ramrod->stats_id = p_ll2_conn->tx_stats_id;
+
+       DMA_REGPAIR_LE(p_ramrod->pbl_base_addr,
+                      qed_chain_get_pbl_phys(&p_tx->txq_chain));
+       pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
+       p_ramrod->pbl_size = cpu_to_le16(pbl_size);
+
+       switch (p_ll2_conn->conn.tx_tc) {
+       case LB_TC:
+               pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
+               break;
+       case OOO_LB_TC:
+               pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OOO);
+               break;
+       default:
+               pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+               break;
+       }
+
+       p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
+
+       switch (conn_type) {
+       case QED_LL2_TYPE_FCOE:
+               p_ramrod->conn_type = PROTOCOLID_FCOE;
+               break;
+       case QED_LL2_TYPE_ISCSI:
+       case QED_LL2_TYPE_ISCSI_OOO:
+               p_ramrod->conn_type = PROTOCOLID_ISCSI;
+               break;
+       case QED_LL2_TYPE_ROCE:
+               p_ramrod->conn_type = PROTOCOLID_ROCE;
+               break;
+       default:
+               p_ramrod->conn_type = PROTOCOLID_ETH;
+               DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
+       }
+
+       p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
+       return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn,
+                                   struct qed_ll2_info *p_ll2_conn)
+{
+       struct core_rx_stop_ramrod_data *p_ramrod = NULL;
+       struct qed_spq_entry *p_ent = NULL;
+       struct qed_sp_init_data init_data;
+       int rc = -EINVAL;
+
+       /* Get SPQ entry */
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.cid = p_ll2_conn->cid;
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                CORE_RAMROD_RX_QUEUE_STOP,
+                                PROTOCOLID_CORE, &init_data);
+       if (rc)
+               return rc;
+
+       p_ramrod = &p_ent->ramrod.core_rx_queue_stop;
+
+       p_ramrod->complete_event_flg = 1;
+       p_ramrod->queue_id = p_ll2_conn->queue_id;
+
+       return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
+                                   struct qed_ll2_info *p_ll2_conn)
+{
+       struct qed_spq_entry *p_ent = NULL;
+       struct qed_sp_init_data init_data;
+       int rc = -EINVAL;
+
+       /* Get SPQ entry */
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.cid = p_ll2_conn->cid;
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                CORE_RAMROD_TX_QUEUE_STOP,
+                                PROTOCOLID_CORE, &init_data);
+       if (rc)
+               return rc;
+
+       return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int
+qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
+                             struct qed_ll2_info *p_ll2_info, u16 rx_num_desc)
+{
+       struct qed_ll2_rx_packet *p_descq;
+       u32 capacity;
+       int rc = 0;
+
+       if (!rx_num_desc)
+               goto out;
+
+       rc = qed_chain_alloc(p_hwfn->cdev,
+                            QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+                            QED_CHAIN_MODE_NEXT_PTR,
+                            QED_CHAIN_CNT_TYPE_U16,
+                            rx_num_desc,
+                            sizeof(struct core_rx_bd),
+                            &p_ll2_info->rx_queue.rxq_chain);
+       if (rc) {
+               DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
+               goto out;
+       }
+
+       capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain);
+       p_descq = kcalloc(capacity, sizeof(struct qed_ll2_rx_packet),
+                         GFP_KERNEL);
+       if (!p_descq) {
+               rc = -ENOMEM;
+               DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n");
+               goto out;
+       }
+       p_ll2_info->rx_queue.descq_array = p_descq;
+
+       rc = qed_chain_alloc(p_hwfn->cdev,
+                            QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+                            QED_CHAIN_MODE_PBL,
+                            QED_CHAIN_CNT_TYPE_U16,
+                            rx_num_desc,
+                            sizeof(struct core_rx_fast_path_cqe),
+                            &p_ll2_info->rx_queue.rcq_chain);
+       if (rc) {
+               DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
+               goto out;
+       }
+
+       DP_VERBOSE(p_hwfn, QED_MSG_LL2,
+                  "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
+                  p_ll2_info->conn.conn_type, rx_num_desc);
+
+out:
+       return rc;
+}
+
+static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
+                                        struct qed_ll2_info *p_ll2_info,
+                                        u16 tx_num_desc)
+{
+       struct qed_ll2_tx_packet *p_descq;
+       u32 capacity;
+       int rc = 0;
+
+       if (!tx_num_desc)
+               goto out;
+
+       rc = qed_chain_alloc(p_hwfn->cdev,
+                            QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+                            QED_CHAIN_MODE_PBL,
+                            QED_CHAIN_CNT_TYPE_U16,
+                            tx_num_desc,
+                            sizeof(struct core_tx_bd),
+                            &p_ll2_info->tx_queue.txq_chain);
+       if (rc)
+               goto out;
+
+       capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
+       p_descq = kcalloc(capacity, sizeof(struct qed_ll2_tx_packet),
+                         GFP_KERNEL);
+       if (!p_descq) {
+               rc = -ENOMEM;
+               goto out;
+       }
+       p_ll2_info->tx_queue.descq_array = p_descq;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_LL2,
+                  "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
+                  p_ll2_info->conn.conn_type, tx_num_desc);
+
+out:
+       if (rc)
+               DP_NOTICE(p_hwfn,
+                         "Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
+                         tx_num_desc);
+       return rc;
+}
+
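+/* Reserve a free LL2 connection slot, allocate its RX/TX chains (plus OOO
+ * buffers where relevant) and register the matching completion callbacks.
+ *
+ * Illustrative call sequence (a sketch, not part of this patch; descriptor
+ * counts and connection parameters are assumptions):
+ *
+ *     struct qed_ll2_conn conn = { .conn_type = QED_LL2_TYPE_ROCE,
+ *                                  .mtu = 1500, .gsi_enable = 1 };
+ *     u8 handle;
+ *
+ *     rc = qed_ll2_acquire_connection(p_hwfn, &conn, 8, 8, &handle);
+ *     if (!rc)
+ *             rc = qed_ll2_establish_connection(p_hwfn, handle);
+ *     then qed_ll2_post_rx_buffer() / qed_ll2_prepare_tx_packet() as needed.
+ */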
+int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
+                              struct qed_ll2_conn *p_params,
+                              u16 rx_num_desc,
+                              u16 tx_num_desc,
+                              u8 *p_connection_handle)
+{
+       qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
+       struct qed_ll2_info *p_ll2_info = NULL;
+       int rc;
+       u8 i;
+
+       if (!p_connection_handle || !p_hwfn->p_ll2_info)
+               return -EINVAL;
+
+       /* Find a free connection to be used */
+       for (i = 0; (i < QED_MAX_NUM_OF_LL2_CONNECTIONS); i++) {
+               mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
+               if (p_hwfn->p_ll2_info[i].b_active) {
+                       mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
+                       continue;
+               }
+
+               p_hwfn->p_ll2_info[i].b_active = true;
+               p_ll2_info = &p_hwfn->p_ll2_info[i];
+               mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
+               break;
+       }
+       if (!p_ll2_info)
+               return -EBUSY;
+
+       p_ll2_info->conn = *p_params;
+
+       rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc);
+       if (rc)
+               goto q_allocate_fail;
+
+       rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info, tx_num_desc);
+       if (rc)
+               goto q_allocate_fail;
+
+       rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info,
+                                           rx_num_desc * 2, p_params->mtu);
+       if (rc)
+               goto q_allocate_fail;
+
+       /* Register callbacks for the Rx/Tx queues */
+       if (p_params->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
+               comp_rx_cb = qed_ll2_lb_rxq_completion;
+               comp_tx_cb = qed_ll2_lb_txq_completion;
+       } else {
+               comp_rx_cb = qed_ll2_rxq_completion;
+               comp_tx_cb = qed_ll2_txq_completion;
+       }
+
+       if (rx_num_desc) {
+               qed_int_register_cb(p_hwfn, comp_rx_cb,
+                                   &p_hwfn->p_ll2_info[i],
+                                   &p_ll2_info->rx_queue.rx_sb_index,
+                                   &p_ll2_info->rx_queue.p_fw_cons);
+               p_ll2_info->rx_queue.b_cb_registred = true;
+       }
+
+       if (tx_num_desc) {
+               qed_int_register_cb(p_hwfn,
+                                   comp_tx_cb,
+                                   &p_hwfn->p_ll2_info[i],
+                                   &p_ll2_info->tx_queue.tx_sb_index,
+                                   &p_ll2_info->tx_queue.p_fw_cons);
+               p_ll2_info->tx_queue.b_cb_registred = true;
+       }
+
+       *p_connection_handle = i;
+       return rc;
+
+q_allocate_fail:
+       qed_ll2_release_connection(p_hwfn, i);
+       return -ENOMEM;
+}
+
+static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
+                                          struct qed_ll2_info *p_ll2_conn)
+{
+       u8 action_on_error = 0;
+
+       if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
+               return 0;
+
+       DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0);
+
+       SET_FIELD(action_on_error,
+                 CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG,
+                 p_ll2_conn->conn.ai_err_packet_too_big);
+       SET_FIELD(action_on_error,
+                 CORE_RX_ACTION_ON_ERROR_NO_BUFF,
+                 p_ll2_conn->conn.ai_err_no_buf);
+
+       return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
+}
+
+int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
+{
+       struct qed_ll2_info *p_ll2_conn;
+       struct qed_ll2_rx_queue *p_rx;
+       struct qed_ll2_tx_queue *p_tx;
+       struct qed_ptt *p_ptt;
+       int rc = -EINVAL;
+       u32 i, capacity;
+       u8 qid;
+
+       p_ptt = qed_ptt_acquire(p_hwfn);
+       if (!p_ptt)
+               return -EAGAIN;
+
+       p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
+       if (!p_ll2_conn) {
+               rc = -EINVAL;
+               goto out;
+       }
+
+       p_rx = &p_ll2_conn->rx_queue;
+       p_tx = &p_ll2_conn->tx_queue;
+
+       qed_chain_reset(&p_rx->rxq_chain);
+       qed_chain_reset(&p_rx->rcq_chain);
+       INIT_LIST_HEAD(&p_rx->active_descq);
+       INIT_LIST_HEAD(&p_rx->free_descq);
+       INIT_LIST_HEAD(&p_rx->posting_descq);
+       spin_lock_init(&p_rx->lock);
+       capacity = qed_chain_get_capacity(&p_rx->rxq_chain);
+       for (i = 0; i < capacity; i++)
+               list_add_tail(&p_rx->descq_array[i].list_entry,
+                             &p_rx->free_descq);
+       *p_rx->p_fw_cons = 0;
+
+       qed_chain_reset(&p_tx->txq_chain);
+       INIT_LIST_HEAD(&p_tx->active_descq);
+       INIT_LIST_HEAD(&p_tx->free_descq);
+       INIT_LIST_HEAD(&p_tx->sending_descq);
+       spin_lock_init(&p_tx->lock);
+       capacity = qed_chain_get_capacity(&p_tx->txq_chain);
+       for (i = 0; i < capacity; i++)
+               list_add_tail(&p_tx->descq_array[i].list_entry,
+                             &p_tx->free_descq);
+       p_tx->cur_completing_bd_idx = 0;
+       p_tx->bds_idx = 0;
+       p_tx->b_completing_packet = false;
+       p_tx->cur_send_packet = NULL;
+       p_tx->cur_send_frag_num = 0;
+       p_tx->cur_completing_frag_num = 0;
+       *p_tx->p_fw_cons = 0;
+
+       rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
+       if (rc)
+               goto out;
+
+       qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle;
+       p_ll2_conn->queue_id = qid;
+       p_ll2_conn->tx_stats_id = qid;
+       p_rx->set_prod_addr = (u8 __iomem *)p_hwfn->regview +
+                                           GTT_BAR0_MAP_REG_TSDM_RAM +
+                                           TSTORM_LL2_RX_PRODS_OFFSET(qid);
+       p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
+                                           qed_db_addr(p_ll2_conn->cid,
+                                                       DQ_DEMS_LEGACY);
+
+       rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
+       if (rc)
+               goto out;
+
+       rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);
+       if (rc)
+               goto out;
+
+       if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
+               qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1);
+
+       qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);
+
+       if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) {
+               qed_llh_add_protocol_filter(p_hwfn, p_ptt,
+                                           0x8906, 0,
+                                           QED_LLH_FILTER_ETHERTYPE);
+               qed_llh_add_protocol_filter(p_hwfn, p_ptt,
+                                           0x8914, 0,
+                                           QED_LLH_FILTER_ETHERTYPE);
+       }
+
+out:
+       qed_ptt_release(p_hwfn, p_ptt);
+       return rc;
+}
+
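+/* Move buffers queued on posting_descq (plus an optional current packet)
+ * onto the active list, then publish the new BD and CQE producer values to
+ * firmware through the TSTORM RX-producers address.
+ */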
+static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
+                                            struct qed_ll2_rx_queue *p_rx,
+                                            struct qed_ll2_rx_packet *p_curp)
+{
+       struct qed_ll2_rx_packet *p_posting_packet = NULL;
+       struct core_ll2_rx_prod rx_prod = { 0, 0, 0 };
+       bool b_notify_fw = false;
+       u16 bd_prod, cq_prod;
+
+       /* This handles the flushing of already posted buffers */
+       while (!list_empty(&p_rx->posting_descq)) {
+               p_posting_packet = list_first_entry(&p_rx->posting_descq,
+                                                   struct qed_ll2_rx_packet,
+                                                   list_entry);
+               list_move_tail(&p_posting_packet->list_entry,
+                              &p_rx->active_descq);
+               b_notify_fw = true;
+       }
+
+       /* This handles the supplied packet [if there is one] */
+       if (p_curp) {
+               list_add_tail(&p_curp->list_entry, &p_rx->active_descq);
+               b_notify_fw = true;
+       }
+
+       if (!b_notify_fw)
+               return;
+
+       bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
+       cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
+       rx_prod.bd_prod = cpu_to_le16(bd_prod);
+       rx_prod.cqe_prod = cpu_to_le16(cq_prod);
+       DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
+}
+
+int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
+                          u8 connection_handle,
+                          dma_addr_t addr,
+                          u16 buf_len, void *cookie, u8 notify_fw)
+{
+       struct core_rx_bd_with_buff_len *p_curb = NULL;
+       struct qed_ll2_rx_packet *p_curp = NULL;
+       struct qed_ll2_info *p_ll2_conn;
+       struct qed_ll2_rx_queue *p_rx;
+       unsigned long flags;
+       void *p_data;
+       int rc = 0;
+
+       p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
+       if (!p_ll2_conn)
+               return -EINVAL;
+       p_rx = &p_ll2_conn->rx_queue;
+
+       spin_lock_irqsave(&p_rx->lock, flags);
+       if (!list_empty(&p_rx->free_descq))
+               p_curp = list_first_entry(&p_rx->free_descq,
+                                         struct qed_ll2_rx_packet, list_entry);
+       if (p_curp) {
+               if (qed_chain_get_elem_left(&p_rx->rxq_chain) &&
+                   qed_chain_get_elem_left(&p_rx->rcq_chain)) {
+                       p_data = qed_chain_produce(&p_rx->rxq_chain);
+                       p_curb = (struct core_rx_bd_with_buff_len *)p_data;
+                       qed_chain_produce(&p_rx->rcq_chain);
+               }
+       }
+
+       /* If we're lacking entries, let's try to flush buffers to FW */
+       if (!p_curp || !p_curb) {
+               rc = -EBUSY;
+               p_curp = NULL;
+               goto out_notify;
+       }
+
+       /* We have an Rx packet we can fill */
+       DMA_REGPAIR_LE(p_curb->addr, addr);
+       p_curb->buff_length = cpu_to_le16(buf_len);
+       p_curp->rx_buf_addr = addr;
+       p_curp->cookie = cookie;
+       p_curp->rxq_bd = p_curb;
+       p_curp->buf_length = buf_len;
+       list_del(&p_curp->list_entry);
+
+       /* Check if we only want to enqueue this packet without informing FW */
+       if (!notify_fw) {
+               list_add_tail(&p_curp->list_entry, &p_rx->posting_descq);
+               goto out;
+       }
+
+out_notify:
+       qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp);
+out:
+       spin_unlock_irqrestore(&p_rx->lock, flags);
+       return rc;
+}
+
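+/* Fill the shadow TX descriptor for a new packet: record its cookie, BD
+ * count and notification policy, and set the first fragment. Additional
+ * fragments are supplied via qed_ll2_set_fragment_of_tx_packet().
+ */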
+static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
+                                         struct qed_ll2_tx_queue *p_tx,
+                                         struct qed_ll2_tx_packet *p_curp,
+                                         u8 num_of_bds,
+                                         dma_addr_t first_frag,
+                                         u16 first_frag_len, void *p_cookie,
+                                         u8 notify_fw)
+{
+       list_del(&p_curp->list_entry);
+       p_curp->cookie = p_cookie;
+       p_curp->bd_used = num_of_bds;
+       p_curp->notify_fw = notify_fw;
+       p_tx->cur_send_packet = p_curp;
+       p_tx->cur_send_frag_num = 0;
+
+       p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = first_frag;
+       p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = first_frag_len;
+       p_tx->cur_send_frag_num++;
+}
+
+static void
+qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
+                                struct qed_ll2_info *p_ll2,
+                                struct qed_ll2_tx_packet *p_curp,
+                                u8 num_of_bds,
+                                enum core_tx_dest tx_dest,
+                                u16 vlan,
+                                u8 bd_flags,
+                                u16 l4_hdr_offset_w,
+                                enum core_roce_flavor_type roce_flavor,
+                                dma_addr_t first_frag,
+                                u16 first_frag_len)
+{
+       struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
+       u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
+       struct core_tx_bd *start_bd = NULL;
+       u16 bd_data = 0, frag_idx;
+
+       start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
+       start_bd->nw_vlan_or_lb_echo = cpu_to_le16(vlan);
+       SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
+                 cpu_to_le16(l4_hdr_offset_w));
+       SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
+       bd_data |= bd_flags;
+       SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
+       SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, num_of_bds);
+       SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor);
+       start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
+       DMA_REGPAIR_LE(start_bd->addr, first_frag);
+       start_bd->nbytes = cpu_to_le16(first_frag_len);
+
+       DP_VERBOSE(p_hwfn,
+                  (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
+                  "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
+                  p_ll2->queue_id,
+                  p_ll2->cid,
+                  p_ll2->conn.conn_type,
+                  prod_idx,
+                  first_frag_len,
+                  num_of_bds,
+                  le32_to_cpu(start_bd->addr.hi),
+                  le32_to_cpu(start_bd->addr.lo));
+
+       if (p_ll2->tx_queue.cur_send_frag_num == num_of_bds)
+               return;
+
+       /* Need to provide the packet with additional BDs for frags */
+       for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
+            frag_idx < num_of_bds; frag_idx++) {
+               struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;
+
+               *p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
+               (*p_bd)->bd_data.as_bitfield = 0;
+               (*p_bd)->bitfield1 = 0;
+               p_curp->bds_set[frag_idx].tx_frag = 0;
+               p_curp->bds_set[frag_idx].frag_len = 0;
+       }
+}
+
+/* This should be called while the Txq spinlock is being held */
+static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
+                                    struct qed_ll2_info *p_ll2_conn)
+{
+       bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw;
+       struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
+       struct qed_ll2_tx_packet *p_pkt = NULL;
+       struct core_db_data db_msg = { 0, 0, 0 };
+       u16 bd_prod;
+
+       /* If there are missing BDs, don't do anything now */
+       if (p_ll2_conn->tx_queue.cur_send_frag_num !=
+           p_ll2_conn->tx_queue.cur_send_packet->bd_used)
+               return;
+
+       /* Push the current packet to the list and clean after it */
+       list_add_tail(&p_ll2_conn->tx_queue.cur_send_packet->list_entry,
+                     &p_ll2_conn->tx_queue.sending_descq);
+       p_ll2_conn->tx_queue.cur_send_packet = NULL;
+       p_ll2_conn->tx_queue.cur_send_frag_num = 0;
+
+       /* Notify FW of packet only if requested to */
+       if (!b_notify)
+               return;
+
+       bd_prod = qed_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain);
+
+       while (!list_empty(&p_tx->sending_descq)) {
+               p_pkt = list_first_entry(&p_tx->sending_descq,
+                                        struct qed_ll2_tx_packet, list_entry);
+               if (!p_pkt)
+                       break;
+
+               list_move_tail(&p_pkt->list_entry, &p_tx->active_descq);
+       }
+
+       SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
+       SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
+       SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_VAL_SEL,
+                 DQ_XCM_CORE_TX_BD_PROD_CMD);
+       db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
+       db_msg.spq_prod = cpu_to_le16(bd_prod);
+
+       /* Make sure the BDs data is updated before ringing the doorbell */
+       wmb();
+
+       DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&db_msg));
+
+       DP_VERBOSE(p_hwfn,
+                  (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
+                  "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
+                  p_ll2_conn->queue_id,
+                  p_ll2_conn->cid, p_ll2_conn->conn.conn_type, db_msg.spq_prod);
+}
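+
+/* Editor's note: to a first approximation, DIRECT_REG_WR() is a writel()
+ * to the mapped doorbell address, roughly:
+ *
+ *	#define DIRECT_REG_WR(reg_addr, val) \
+ *		writel((val), (void __iomem *)(reg_addr))
+ *
+ * The wmb() above ensures the BD writes in host memory are visible to the
+ * device before the producer update reaches it.
+ */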
+
+int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
+                             u8 connection_handle,
+                             u8 num_of_bds,
+                             u16 vlan,
+                             u8 bd_flags,
+                             u16 l4_hdr_offset_w,
+                             enum qed_ll2_tx_dest e_tx_dest,
+                             enum qed_ll2_roce_flavor_type qed_roce_flavor,
+                             dma_addr_t first_frag,
+                             u16 first_frag_len, void *cookie, u8 notify_fw)
+{
+       struct qed_ll2_tx_packet *p_curp = NULL;
+       struct qed_ll2_info *p_ll2_conn = NULL;
+       enum core_roce_flavor_type roce_flavor;
+       struct qed_ll2_tx_queue *p_tx;
+       struct qed_chain *p_tx_chain;
+       enum core_tx_dest tx_dest;
+       unsigned long flags;
+       int rc = 0;
+
+       p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
+       if (!p_ll2_conn)
+               return -EINVAL;
+       p_tx = &p_ll2_conn->tx_queue;
+       p_tx_chain = &p_tx->txq_chain;
+
+       if (num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET)
+               return -EIO;
+
+       spin_lock_irqsave(&p_tx->lock, flags);
+       if (p_tx->cur_send_packet) {
+               rc = -EEXIST;
+               goto out;
+       }
+
+       /* Get entry, but only if we have tx elements for it */
+       if (!list_empty(&p_tx->free_descq))
+               p_curp = list_first_entry(&p_tx->free_descq,
+                                         struct qed_ll2_tx_packet, list_entry);
+       if (p_curp && qed_chain_get_elem_left(p_tx_chain) < num_of_bds)
+               p_curp = NULL;
+
+       if (!p_curp) {
+               rc = -EBUSY;
+               goto out;
+       }
+
+       tx_dest = e_tx_dest == QED_LL2_TX_DEST_NW ? CORE_TX_DEST_NW :
+                                                   CORE_TX_DEST_LB;
+       if (qed_roce_flavor == QED_LL2_ROCE) {
+               roce_flavor = CORE_ROCE;
+       } else if (qed_roce_flavor == QED_LL2_RROCE) {
+               roce_flavor = CORE_RROCE;
+       } else {
+               rc = -EINVAL;
+               goto out;
+       }
+
+       /* Prepare packet and BD, and perhaps send a doorbell to FW */
+       qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp,
+                                     num_of_bds, first_frag,
+                                     first_frag_len, cookie, notify_fw);
+       qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp,
+                                        num_of_bds, tx_dest,
+                                        vlan, bd_flags, l4_hdr_offset_w,
+                                        roce_flavor,
+                                        first_frag, first_frag_len);
+
+       qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
+
+out:
+       spin_unlock_irqrestore(&p_tx->lock, flags);
+       return rc;
+}
+
+int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn,
+                                     u8 connection_handle,
+                                     dma_addr_t addr, u16 nbytes)
+{
+       struct qed_ll2_tx_packet *p_cur_send_packet = NULL;
+       struct qed_ll2_info *p_ll2_conn = NULL;
+       u16 cur_send_frag_num = 0;
+       struct core_tx_bd *p_bd;
+       unsigned long flags;
+
+       p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
+       if (!p_ll2_conn)
+               return -EINVAL;
+
+       if (!p_ll2_conn->tx_queue.cur_send_packet)
+               return -EINVAL;
+
+       p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
+       cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;
+
+       if (cur_send_frag_num >= p_cur_send_packet->bd_used)
+               return -EINVAL;
+
+       /* Fill the BD information, and possibly notify FW */
+       p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd;
+       DMA_REGPAIR_LE(p_bd->addr, addr);
+       p_bd->nbytes = cpu_to_le16(nbytes);
+       p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr;
+       p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes;
+
+       p_ll2_conn->tx_queue.cur_send_frag_num++;
+
+       spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags);
+       qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
+       spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags);
+
+       return 0;
+}
+
+int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
+{
+       struct qed_ll2_info *p_ll2_conn = NULL;
+       int rc = -EINVAL;
+       struct qed_ptt *p_ptt;
+
+       p_ptt = qed_ptt_acquire(p_hwfn);
+       if (!p_ptt)
+               return -EAGAIN;
+
+       p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
+       if (!p_ll2_conn) {
+               rc = -EINVAL;
+               goto out;
+       }
+
+       /* Stop Tx & Rx of connection, if needed */
+       if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
+               rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
+               if (rc)
+                       goto out;
+               qed_ll2_txq_flush(p_hwfn, connection_handle);
+       }
+
+       if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
+               rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
+               if (rc)
+                       goto out;
+               qed_ll2_rxq_flush(p_hwfn, connection_handle);
+       }
+
+       if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
+               qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
+
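+	/* 0x8906 is the FCoE ethertype; 0x8914 is the FIP ethertype */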
+       if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) {
+               qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
+                                              0x8906, 0,
+                                              QED_LLH_FILTER_ETHERTYPE);
+               qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
+                                              0x8914, 0,
+                                              QED_LLH_FILTER_ETHERTYPE);
+       }
+
+out:
+       qed_ptt_release(p_hwfn, p_ptt);
+       return rc;
+}
+
+void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
+{
+       struct qed_ll2_info *p_ll2_conn = NULL;
+
+       p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
+       if (!p_ll2_conn)
+               return;
+
+       if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
+               p_ll2_conn->rx_queue.b_cb_registred = false;
+               qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
+       }
+
+       if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
+               p_ll2_conn->tx_queue.b_cb_registred = false;
+               qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
+       }
+
+       kfree(p_ll2_conn->tx_queue.descq_array);
+       qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);
+
+       kfree(p_ll2_conn->rx_queue.descq_array);
+       qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain);
+       qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain);
+
+       qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);
+
+       qed_ll2_release_connection_ooo(p_hwfn, p_ll2_conn);
+
+       mutex_lock(&p_ll2_conn->mutex);
+       p_ll2_conn->b_active = false;
+       mutex_unlock(&p_ll2_conn->mutex);
+}
+
+struct qed_ll2_info *qed_ll2_alloc(struct qed_hwfn *p_hwfn)
+{
+       struct qed_ll2_info *p_ll2_connections;
+       u8 i;
+
+       /* Allocate LL2's set struct */
+       p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS,
+                                   sizeof(struct qed_ll2_info), GFP_KERNEL);
+       if (!p_ll2_connections) {
+               DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n");
+               return NULL;
+       }
+
+       for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
+               p_ll2_connections[i].my_id = i;
+
+       return p_ll2_connections;
+}
+
+void qed_ll2_setup(struct qed_hwfn *p_hwfn,
+                  struct qed_ll2_info *p_ll2_connections)
+{
+       int i;
+
+       for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
+               mutex_init(&p_ll2_connections[i].mutex);
+}
+
+void qed_ll2_free(struct qed_hwfn *p_hwfn,
+                 struct qed_ll2_info *p_ll2_connections)
+{
+       kfree(p_ll2_connections);
+}
+
+static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
+                               struct qed_ptt *p_ptt,
+                               struct qed_ll2_info *p_ll2_conn,
+                               struct qed_ll2_stats *p_stats)
+{
+       struct core_ll2_tstorm_per_queue_stat tstats;
+       u8 qid = p_ll2_conn->queue_id;
+       u32 tstats_addr;
+
+       memset(&tstats, 0, sizeof(tstats));
+       tstats_addr = BAR0_MAP_REG_TSDM_RAM +
+                     CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid);
+       qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));
+
+       p_stats->packet_too_big_discard =
+                       HILO_64_REGPAIR(tstats.packet_too_big_discard);
+       p_stats->no_buff_discard = HILO_64_REGPAIR(tstats.no_buff_discard);
+}
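+
+/* Editor's note: HILO_64_REGPAIR() folds a firmware {hi, lo} little-endian
+ * register pair into a single u64; roughly (a sketch, not the verbatim
+ * macro):
+ *
+ *	#define HILO_64_REGPAIR(regpair) \
+ *		((((u64)le32_to_cpu((regpair).hi)) << 32) + \
+ *		 le32_to_cpu((regpair).lo))
+ */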
+
+static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
+                               struct qed_ptt *p_ptt,
+                               struct qed_ll2_info *p_ll2_conn,
+                               struct qed_ll2_stats *p_stats)
+{
+       struct core_ll2_ustorm_per_queue_stat ustats;
+       u8 qid = p_ll2_conn->queue_id;
+       u32 ustats_addr;
+
+       memset(&ustats, 0, sizeof(ustats));
+       ustats_addr = BAR0_MAP_REG_USDM_RAM +
+                     CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
+       qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));
+
+       p_stats->rcv_ucast_bytes = HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
+       p_stats->rcv_mcast_bytes = HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
+       p_stats->rcv_bcast_bytes = HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
+       p_stats->rcv_ucast_pkts = HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
+       p_stats->rcv_mcast_pkts = HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
+       p_stats->rcv_bcast_pkts = HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
+}
+
+static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
+                               struct qed_ptt *p_ptt,
+                               struct qed_ll2_info *p_ll2_conn,
+                               struct qed_ll2_stats *p_stats)
+{
+       struct core_ll2_pstorm_per_queue_stat pstats;
+       u8 stats_id = p_ll2_conn->tx_stats_id;
+       u32 pstats_addr;
+
+       memset(&pstats, 0, sizeof(pstats));
+       pstats_addr = BAR0_MAP_REG_PSDM_RAM +
+                     CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id);
+       qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));
+
+       p_stats->sent_ucast_bytes = HILO_64_REGPAIR(pstats.sent_ucast_bytes);
+       p_stats->sent_mcast_bytes = HILO_64_REGPAIR(pstats.sent_mcast_bytes);
+       p_stats->sent_bcast_bytes = HILO_64_REGPAIR(pstats.sent_bcast_bytes);
+       p_stats->sent_ucast_pkts = HILO_64_REGPAIR(pstats.sent_ucast_pkts);
+       p_stats->sent_mcast_pkts = HILO_64_REGPAIR(pstats.sent_mcast_pkts);
+       p_stats->sent_bcast_pkts = HILO_64_REGPAIR(pstats.sent_bcast_pkts);
+}
+
+int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
+                     u8 connection_handle, struct qed_ll2_stats *p_stats)
+{
+       struct qed_ll2_info *p_ll2_conn = NULL;
+       struct qed_ptt *p_ptt;
+
+       memset(p_stats, 0, sizeof(*p_stats));
+
+       if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) ||
+           !p_hwfn->p_ll2_info)
+               return -EINVAL;
+
+       p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];
+
+       p_ptt = qed_ptt_acquire(p_hwfn);
+       if (!p_ptt) {
+               DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+               return -EINVAL;
+       }
+
+       _qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
+       _qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
+       if (p_ll2_conn->tx_stats_en)
+               _qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
+
+       qed_ptt_release(p_hwfn, p_ptt);
+       return 0;
+}
+
+static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
+                                   const struct qed_ll2_cb_ops *ops,
+                                   void *cookie)
+{
+       cdev->ll2->cbs = ops;
+       cdev->ll2->cb_cookie = cookie;
+}
+
+static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
+{
+       struct qed_ll2_conn ll2_info;
+       struct qed_ll2_buffer *buffer, *tmp_buffer;
+       enum qed_ll2_conn_type conn_type;
+       struct qed_ptt *p_ptt;
+       int rc, i;
+       u8 gsi_enable = 1;
+
+       /* Initialize LL2 locks & lists */
+       INIT_LIST_HEAD(&cdev->ll2->list);
+       spin_lock_init(&cdev->ll2->lock);
+       cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
+                            L1_CACHE_BYTES + params->mtu;
+       cdev->ll2->frags_mapped = params->frags_mapped;
+
+       /* Allocate memory for LL2 */
+       DP_INFO(cdev, "Allocating LL2 buffers of size %08x bytes\n",
+               cdev->ll2->rx_size);
+       for (i = 0; i < QED_LL2_RX_SIZE; i++) {
+               buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+               if (!buffer) {
+                       DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
+                       goto fail;
+               }
+
+               rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
+                                         &buffer->phys_addr);
+               if (rc) {
+                       kfree(buffer);
+                       goto fail;
+               }
+
+               list_add_tail(&buffer->list, &cdev->ll2->list);
+       }
+
+       switch (QED_LEADING_HWFN(cdev)->hw_info.personality) {
+       case QED_PCI_FCOE:
+               conn_type = QED_LL2_TYPE_FCOE;
+               gsi_enable = 0;
+               break;
+       case QED_PCI_ISCSI:
+               conn_type = QED_LL2_TYPE_ISCSI;
+               gsi_enable = 0;
+               break;
+       case QED_PCI_ETH_ROCE:
+               conn_type = QED_LL2_TYPE_ROCE;
+               break;
+       default:
+               conn_type = QED_LL2_TYPE_TEST;
+       }
+
+       /* Prepare the temporary ll2 information */
+       memset(&ll2_info, 0, sizeof(ll2_info));
+
+       ll2_info.conn_type = conn_type;
+       ll2_info.mtu = params->mtu;
+       ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
+       ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
+       ll2_info.tx_tc = 0;
+       ll2_info.tx_dest = CORE_TX_DEST_NW;
+       ll2_info.gsi_enable = gsi_enable;
+
+       rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_info,
+                                       QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
+                                       &cdev->ll2->handle);
+       if (rc) {
+               DP_INFO(cdev, "Failed to acquire LL2 connection\n");
+               goto fail;
+       }
+
+       rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
+                                         cdev->ll2->handle);
+       if (rc) {
+               DP_INFO(cdev, "Failed to establish LL2 connection\n");
+               goto release_fail;
+       }
+
+       /* Post all Rx buffers to FW */
+       spin_lock_bh(&cdev->ll2->lock);
+       list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) {
+               rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
+                                           cdev->ll2->handle,
+                                           buffer->phys_addr, 0, buffer, 1);
+               if (rc) {
+                       DP_INFO(cdev,
+                               "Failed to post an Rx buffer; Deleting it\n");
+                       dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
+                                        cdev->ll2->rx_size, DMA_FROM_DEVICE);
+                       kfree(buffer->data);
+                       list_del(&buffer->list);
+                       kfree(buffer);
+               } else {
+                       cdev->ll2->rx_cnt++;
+               }
+       }
+       spin_unlock_bh(&cdev->ll2->lock);
+
+       if (!cdev->ll2->rx_cnt) {
+               DP_INFO(cdev, "Failed passing even a single Rx buffer\n");
+               goto release_terminate;
+       }
+
+       if (!is_valid_ether_addr(params->ll2_mac_address)) {
+               DP_INFO(cdev, "Invalid Ethernet address\n");
+               goto release_terminate;
+       }
+
+       if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
+           cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) {
+               DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
+               rc = qed_ll2_start_ooo(cdev, params);
+               if (rc) {
+                       DP_INFO(cdev,
+                               "Failed to initialize the OOO LL2 queue\n");
+                       goto release_terminate;
+               }
+       }
+
+       p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+       if (!p_ptt) {
+               DP_INFO(cdev, "Failed to acquire PTT\n");
+               goto release_terminate;
+       }
+
+       rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
+                                   params->ll2_mac_address);
+       qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
+       if (rc) {
+               DP_ERR(cdev, "Failed to allocate LLH filter\n");
+               goto release_terminate_all;
+       }
+
+       ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
+       return 0;
+
+release_terminate_all:
+
+release_terminate:
+       qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
+release_fail:
+       qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
+fail:
+       qed_ll2_kill_buffers(cdev);
+       cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
+       return -EINVAL;
+}
+
+static int qed_ll2_stop(struct qed_dev *cdev)
+{
+       struct qed_ptt *p_ptt;
+       int rc;
+
+       if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
+               return 0;
+
+       p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+       if (!p_ptt) {
+               DP_INFO(cdev, "Failed to acquire PTT\n");
+               goto fail;
+       }
+
+       qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
+                                 cdev->ll2_mac_address);
+       qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
+       eth_zero_addr(cdev->ll2_mac_address);
+
+       if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
+           cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable)
+               qed_ll2_stop_ooo(cdev);
+
+       rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
+                                         cdev->ll2->handle);
+       if (rc)
+               DP_INFO(cdev, "Failed to terminate LL2 connection\n");
+
+       qed_ll2_kill_buffers(cdev);
+
+       qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
+       cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
+
+       return rc;
+fail:
+       return -EINVAL;
+}
+
+static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
+{
+       const skb_frag_t *frag;
+       int rc = -EINVAL, i;
+       dma_addr_t mapping;
+       u16 vlan = 0;
+       u8 flags = 0;
+
+       if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
+               DP_INFO(cdev, "Cannot transmit a checksumed packet\n");
+               return -EINVAL;
+       }
+
+       if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
+               DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
+                      1 + skb_shinfo(skb)->nr_frags);
+               return -EINVAL;
+       }
+
+       mapping = dma_map_single(&cdev->pdev->dev, skb->data,
+                                skb->len, DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
+               DP_NOTICE(cdev, "SKB mapping failed\n");
+               return -EINVAL;
+       }
+
+       /* Request HW to calculate IP csum */
+       if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
+             ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
+               flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);
+
+       if (skb_vlan_tag_present(skb)) {
+               vlan = skb_vlan_tag_get(skb);
+               flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT);
+       }
+
+       rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev),
+                                      cdev->ll2->handle,
+                                      1 + skb_shinfo(skb)->nr_frags,
+                                      vlan, flags, 0, QED_LL2_TX_DEST_NW,
+                                      0 /* RoCE FLAVOR */,
+                                      mapping, skb->len, skb, 1);
+       if (rc)
+               goto err;
+
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               frag = &skb_shinfo(skb)->frags[i];
+               if (!cdev->ll2->frags_mapped) {
+                       mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
+                                                  skb_frag_size(frag),
+                                                  DMA_TO_DEVICE);
+
+                       if (unlikely(dma_mapping_error(&cdev->pdev->dev,
+                                                      mapping))) {
+                               DP_NOTICE(cdev,
+                                         "Unable to map frag - dropping packet\n");
+                               goto err;
+                       }
+               } else {
+                       mapping = page_to_phys(skb_frag_page(frag)) |
+                           frag->page_offset;
+               }
+
+               rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
+                                                      cdev->ll2->handle,
+                                                      mapping,
+                                                      skb_frag_size(frag));
+
+               /* If this fails there's not much to do here: the partial
+                * packet has already been posted, so we can't free its
+                * memory and will need to wait for completion.
+                */
+               if (rc)
+                       goto err2;
+       }
+
+       return 0;
+
+err:
+       dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);
+
+err2:
+       return rc;
+}
+
+static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
+{
+       if (!cdev->ll2)
+               return -EINVAL;
+
+       return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
+                                cdev->ll2->handle, stats);
+}
+
+const struct qed_ll2_ops qed_ll2_ops_pass = {
+       .start = &qed_ll2_start,
+       .stop = &qed_ll2_stop,
+       .start_xmit = &qed_ll2_start_xmit,
+       .register_cb_ops = &qed_ll2_register_cb_ops,
+       .get_stats = &qed_ll2_stats,
+};
+
+int qed_ll2_alloc_if(struct qed_dev *cdev)
+{
+       cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL);
+       return cdev->ll2 ? 0 : -ENOMEM;
+}
+
+void qed_ll2_dealloc_if(struct qed_dev *cdev)
+{
+       kfree(cdev->ll2);
+       cdev->ll2 = NULL;
+}
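
[Editor's note] The qed_ll2_ops table registered above is the surface that
protocol drivers consume through qed_ll2_if.h. A minimal, hedged sketch of a
hypothetical consumer follows; the callback members of qed_ll2_cb_ops are left
empty, the helper name is invented, and the qed_ll2_params fields mirror their
use in qed_ll2_start() above:

	#include <linux/etherdevice.h>
	#include <linux/qed/qed_ll2_if.h>

	/* Rx/Tx completion handlers would normally be filled in here */
	static const struct qed_ll2_cb_ops my_ll2_cbs = { };

	static int my_ll2_bringup(struct qed_dev *cdev,
				  const struct qed_ll2_ops *ops, const u8 *mac)
	{
		struct qed_ll2_params params = { 0 };
		int rc;

		params.mtu = 1500;
		params.drop_ttl0_packets = true;
		params.rx_vlan_stripping = true;
		params.frags_mapped = false;
		ether_addr_copy(params.ll2_mac_address, mac);

		/* Hook completion callbacks, then bring the queues up */
		ops->register_cb_ops(cdev, &my_ll2_cbs, cdev /* cookie */);
		rc = ops->start(cdev, &params);
		if (rc)
			return rc;

		/* Later: ops->start_xmit(cdev, skb) per packet,
		 * ops->get_stats(cdev, &stats) on demand, and finally
		 * ops->stop(cdev) on teardown.
		 */
		return 0;
	}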
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
new file mode 100644 (file)
index 0000000..31a4090
--- /dev/null
@@ -0,0 +1,332 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _QED_LL2_H
+#define _QED_LL2_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_ll2_if.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_sp.h"
+
+#define QED_MAX_NUM_OF_LL2_CONNECTIONS                    (4)
+
+enum qed_ll2_roce_flavor_type {
+       QED_LL2_ROCE,
+       QED_LL2_RROCE,
+       MAX_QED_LL2_ROCE_FLAVOR_TYPE
+};
+
+enum qed_ll2_conn_type {
+       QED_LL2_TYPE_FCOE,
+       QED_LL2_TYPE_ISCSI,
+       QED_LL2_TYPE_TEST,
+       QED_LL2_TYPE_ISCSI_OOO,
+       QED_LL2_TYPE_RESERVED2,
+       QED_LL2_TYPE_ROCE,
+       QED_LL2_TYPE_RESERVED3,
+       MAX_QED_LL2_RX_CONN_TYPE
+};
+
+enum qed_ll2_tx_dest {
+       QED_LL2_TX_DEST_NW, /* Light L2 TX Destination to the Network */
+       QED_LL2_TX_DEST_LB, /* Light L2 TX Destination to the Loopback */
+       QED_LL2_TX_DEST_MAX
+};
+
+struct qed_ll2_rx_packet {
+       struct list_head list_entry;
+       struct core_rx_bd_with_buff_len *rxq_bd;
+       dma_addr_t rx_buf_addr;
+       u16 buf_length;
+       void *cookie;
+       u8 placement_offset;
+       u16 parse_flags;
+       u16 packet_length;
+       u16 vlan;
+       u32 opaque_data[2];
+};
+
+struct qed_ll2_tx_packet {
+       struct list_head list_entry;
+       u16 bd_used;
+       u16 vlan;
+       u16 l4_hdr_offset_w;
+       u8 bd_flags;
+       bool notify_fw;
+       void *cookie;
+
+       struct {
+               struct core_tx_bd *txq_bd;
+               dma_addr_t tx_frag;
+               u16 frag_len;
+       } bds_set[ETH_TX_MAX_BDS_PER_NON_LSO_PACKET];
+};
+
+struct qed_ll2_rx_queue {
+       /* Lock protecting the Rx queue manipulation */
+       spinlock_t lock;
+       struct qed_chain rxq_chain;
+       struct qed_chain rcq_chain;
+       u8 rx_sb_index;
+       bool b_cb_registred;
+       __le16 *p_fw_cons;
+       struct list_head active_descq;
+       struct list_head free_descq;
+       struct list_head posting_descq;
+       struct qed_ll2_rx_packet *descq_array;
+       void __iomem *set_prod_addr;
+};
+
+struct qed_ll2_tx_queue {
+       /* Lock protecting the Tx queue manipulation */
+       spinlock_t lock;
+       struct qed_chain txq_chain;
+       u8 tx_sb_index;
+       bool b_cb_registred;
+       __le16 *p_fw_cons;
+       struct list_head active_descq;
+       struct list_head free_descq;
+       struct list_head sending_descq;
+       struct qed_ll2_tx_packet *descq_array;
+       struct qed_ll2_tx_packet *cur_send_packet;
+       struct qed_ll2_tx_packet cur_completing_packet;
+       u16 cur_completing_bd_idx;
+       void __iomem *doorbell_addr;
+       u16 bds_idx;
+       u16 cur_send_frag_num;
+       u16 cur_completing_frag_num;
+       bool b_completing_packet;
+};
+
+struct qed_ll2_conn {
+       enum qed_ll2_conn_type conn_type;
+       u16 mtu;
+       u8 rx_drop_ttl0_flg;
+       u8 rx_vlan_removal_en;
+       u8 tx_tc;
+       enum core_tx_dest tx_dest;
+       enum core_error_handle ai_err_packet_too_big;
+       enum core_error_handle ai_err_no_buf;
+       u8 gsi_enable;
+};
+
+struct qed_ll2_info {
+       /* Lock protecting the state of LL2 */
+       struct mutex mutex;
+       struct qed_ll2_conn conn;
+       u32 cid;
+       u8 my_id;
+       u8 queue_id;
+       u8 tx_stats_id;
+       bool b_active;
+       u8 tx_stats_en;
+       struct qed_ll2_rx_queue rx_queue;
+       struct qed_ll2_tx_queue tx_queue;
+};
+
+/**
+ * @brief qed_ll2_acquire_connection - allocates resources and
+ *        starts the Rx & Tx (if relevant) queue pair. Provides
+ *        the connection handle as an output parameter.
+ *
+ * @param p_hwfn
+ * @param p_params             Contain various configuration properties
+ * @param rx_num_desc
+ * @param tx_num_desc
+ *
+ * @param p_connection_handle  Output container for LL2 connection's handle
+ *
+ * @return 0 on success, failure otherwise
+ */
+int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
+                              struct qed_ll2_conn *p_params,
+                              u16 rx_num_desc,
+                              u16 tx_num_desc,
+                              u8 *p_connection_handle);
+
+/**
+ * @brief qed_ll2_establish_connection - start previously
+ *        allocated LL2 queues pair
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param connection_handle    LL2 connection's handle obtained from
+ *                              qed_ll2_acquire_connection
+ *
+ * @return 0 on success, failure otherwise
+ */
+int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle);
+
+/**
+ * @brief qed_ll2_post_rx_buffer - submit a buffer to the LL2 Rx queue.
+ *
+ * @param p_hwfn
+ * @param connection_handle    LL2 connection's handle obtained from
+ *                             qed_ll2_acquire_connection
+ * @param addr                 physical address of the Rx buffer to submit
+ * @param buf_len              length of the Rx buffer
+ * @param cookie
+ * @param notify_fw            produce the corresponding Rx BD immediately
+ *
+ * @return 0 on success, failure otherwise
+ */
+int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
+                          u8 connection_handle,
+                          dma_addr_t addr,
+                          u16 buf_len, void *cookie, u8 notify_fw);
+
+/**
+ * @brief qed_ll2_prepare_tx_packet - requests a start Tx BD to
+ *                                   prepare a Tx packet for submission to FW.
+ *
+ * @param p_hwfn
+ * @param connection_handle    LL2 connection's handle obtained from
+ *                             qed_ll2_acquire_connection
+ * @param num_of_bds           number of requested BDs; equals the number of
+ *                             fragments in the Tx packet
+ * @param vlan                 VLAN to insert into the packet (if insertion
+ *                             is set)
+ * @param bd_flags
+ * @param l4_hdr_offset_w      L4 Header Offset from start of packet
+ *                             (in words). This is needed if both l4_csum
+ *                             and ipv6_ext are set
+ * @param e_tx_dest             indicates if the packet is to be transmitted via
+ *                              loopback or to the network
+ * @param first_frag
+ * @param first_frag_len
+ * @param cookie
+ *
+ * @param notify_fw
+ *
+ * @return 0 on success, failure otherwise
+ */
+int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
+                             u8 connection_handle,
+                             u8 num_of_bds,
+                             u16 vlan,
+                             u8 bd_flags,
+                             u16 l4_hdr_offset_w,
+                             enum qed_ll2_tx_dest e_tx_dest,
+                             enum qed_ll2_roce_flavor_type qed_roce_flavor,
+                             dma_addr_t first_frag,
+                             u16 first_frag_len, void *cookie, u8 notify_fw);
+
+/**
+ * @brief qed_ll2_release_connection - releases resources
+ *                                     allocated for LL2 connection
+ *
+ * @param p_hwfn
+ * @param connection_handle            LL2 connection's handle obtained from
+ *                                     qed_ll2_acquire_connection
+ */
+void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle);
+
+/**
+ * @brief qed_ll2_set_fragment_of_tx_packet -  provides a fragment to fill
+ *                                             one of the Tx BDs requested by
+ *                                             qed_ll2_prepare_tx_packet
+ *
+ * @param p_hwfn
+ * @param connection_handle                    LL2 connection's handle
+ *                                             obtained from
+ *                                             qed_ll2_acquire_connection
+ * @param addr
+ * @param nbytes
+ *
+ * @return 0 on success, failure otherwise
+ */
+int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn,
+                                     u8 connection_handle,
+                                     dma_addr_t addr, u16 nbytes);
+
+/**
+ * @brief qed_ll2_terminate_connection -       stops Tx/Rx queues
+ *
+ *
+ * @param p_hwfn
+ * @param connection_handle                    LL2 connection's handle
+ *                                             obtained from
+ *                                             qed_ll2_acquire_connection
+ *
+ * @return 0 on success, failure otherwise
+ */
+int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle);
+
+/**
+ * @brief qed_ll2_get_stats -  get LL2 queue's statistics
+ *
+ *
+ * @param p_hwfn
+ * @param connection_handle    LL2 connection's handle obtained from
+ *                             qed_ll2_acquire_connection
+ * @param p_stats
+ *
+ * @return 0 on success, failure otherwise
+ */
+int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
+                     u8 connection_handle, struct qed_ll2_stats *p_stats);
+
+/**
+ * @brief qed_ll2_alloc - Allocates LL2 connections set
+ *
+ * @param p_hwfn
+ *
+ * @return pointer to allocated qed_ll2_info or NULL
+ */
+struct qed_ll2_info *qed_ll2_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_ll2_setup - Inits LL2 connections set
+ *
+ * @param p_hwfn
+ * @param p_ll2_connections
+ *
+ */
+void qed_ll2_setup(struct qed_hwfn *p_hwfn,
+                  struct qed_ll2_info *p_ll2_connections);
+
+/**
+ * @brief qed_ll2_free - Releases LL2 connections set
+ *
+ * @param p_hwfn
+ * @param p_ll2_connections
+ *
+ */
+void qed_ll2_free(struct qed_hwfn *p_hwfn,
+                 struct qed_ll2_info *p_ll2_connections);
+#endif
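
[Editor's note] To tie the declarations above together, a minimal lifecycle
sketch under stated assumptions: the helper is hypothetical, descriptor counts
and DMA addresses are placeholders, and real callers receive Rx/Tx completions
via the registered status-block callbacks rather than inline:

	static int ll2_lifecycle_sketch(struct qed_hwfn *p_hwfn,
					dma_addr_t rx_phys,
					dma_addr_t tx_phys, u16 tx_len)
	{
		struct qed_ll2_conn conn = {
			.conn_type = QED_LL2_TYPE_TEST,
			.mtu = 1500,
			.tx_tc = 0,
			.tx_dest = CORE_TX_DEST_NW,
		};
		u8 handle;
		int rc;

		rc = qed_ll2_acquire_connection(p_hwfn, &conn, 32, 32, &handle);
		if (rc)
			return rc;

		rc = qed_ll2_establish_connection(p_hwfn, handle);
		if (rc)
			goto release;

		/* Hand one Rx buffer to the FW (buf_len 0, as the qed_ll2.c
		 * consumer code passes)
		 */
		rc = qed_ll2_post_rx_buffer(p_hwfn, handle, rx_phys, 0, NULL, 1);
		if (rc)
			goto terminate;

		/* Single-BD Tx with flavor QED_LL2_ROCE (the value 0 that
		 * qed_ll2_start_xmit() passes); for multi-fragment packets,
		 * call qed_ll2_set_fragment_of_tx_packet() per extra BD.
		 */
		rc = qed_ll2_prepare_tx_packet(p_hwfn, handle, 1, 0, 0, 0,
					       QED_LL2_TX_DEST_NW, QED_LL2_ROCE,
					       tx_phys, tx_len, NULL, 1);
	terminate:
		qed_ll2_terminate_connection(p_hwfn, handle);
	release:
		qed_ll2_release_connection(p_hwfn, handle);
		return rc;
	}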
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index c7dc34bfdd0a6fe347fca4ca721ad11dc7867a3c..e0838a8190b26221970e8e60ddbcb3f511c456e4 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #include <linux/stddef.h>
 #include <linux/ethtool.h>
 #include <linux/etherdevice.h>
 #include <linux/vmalloc.h>
+#include <linux/crash_dump.h>
 #include <linux/qed/qed_if.h>
+#include <linux/qed/qed_ll2_if.h>
 
 #include "qed.h"
 #include "qed_sriov.h"
 #include "qed_sp.h"
 #include "qed_dev_api.h"
+#include "qed_ll2.h"
+#include "qed_fcoe.h"
+#include "qed_iscsi.h"
+
 #include "qed_mcp.h"
 #include "qed_hw.h"
 #include "qed_selftest.h"
+#include "qed_debug.h"
+
+#define QED_ROCE_QPS                   (8192)
+#define QED_ROCE_DPIS                  (8)
 
 static char version[] =
        "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
@@ -51,8 +85,6 @@ MODULE_FIRMWARE(QED_FW_FILE_NAME);
 
 static int __init qed_init(void)
 {
-       pr_notice("qed_init called\n");
-
        pr_info("%s", version);
 
        return 0;
@@ -106,8 +138,7 @@ static void qed_free_pci(struct qed_dev *cdev)
 /* Performs PCI initializations as well as initializing PCI-related parameters
  * in the device structure. Returns 0 in case of success.
  */
-static int qed_init_pci(struct qed_dev *cdev,
-                       struct pci_dev *pdev)
+static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
 {
        u8 rev_id;
        int rc;
@@ -207,9 +238,10 @@ int qed_fill_dev_info(struct qed_dev *cdev,
        dev_info->pci_mem_start = cdev->pci_params.mem_start;
        dev_info->pci_mem_end = cdev->pci_params.mem_end;
        dev_info->pci_irq = cdev->pci_params.irq;
-       dev_info->rdma_supported =
-           (cdev->hwfns[0].hw_info.personality == QED_PCI_ETH_ROCE);
+       dev_info->rdma_supported = (cdev->hwfns[0].hw_info.personality ==
+                                   QED_PCI_ETH_ROCE);
        dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
+       dev_info->dev_type = cdev->type;
        ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
 
        if (IS_PF(cdev)) {
@@ -219,6 +251,10 @@ int qed_fill_dev_info(struct qed_dev *cdev,
                dev_info->fw_eng = FW_ENGINEERING_VERSION;
                dev_info->mf_mode = cdev->mf_mode;
                dev_info->tx_switching = true;
+
+               if (QED_LEADING_HWFN(cdev)->hw_info.b_wol_support ==
+                   QED_WOL_SUPPORT_PME)
+                       dev_info->wol_support = true;
        } else {
                qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
                                      &dev_info->fw_minor, &dev_info->fw_rev,
@@ -241,6 +277,8 @@ int qed_fill_dev_info(struct qed_dev *cdev,
                                    &dev_info->mfw_rev, NULL);
        }
 
+       dev_info->mtu = QED_LEADING_HWFN(cdev)->hw_info.mtu;
+
        return 0;
 }
 
@@ -263,8 +301,7 @@ static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
 }
 
 /* Sets the requested power state */
-static int qed_set_power_state(struct qed_dev *cdev,
-                              pci_power_t state)
+static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
 {
        if (!cdev)
                return -ENODEV;
@@ -366,8 +403,8 @@ static int qed_enable_msix(struct qed_dev *cdev,
                DP_NOTICE(cdev,
                          "Trying to enable MSI-X with less vectors (%d out of %d)\n",
                          cnt, int_params->in.num_vectors);
-               rc = pci_enable_msix_exact(cdev->pdev,
-                                          int_params->msix_table, cnt);
+               rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
+                                          cnt);
                if (!rc)
                        rc = cnt;
        }
@@ -439,6 +476,11 @@ static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
        }
 
 out:
+       if (!rc)
+               DP_INFO(cdev, "Using %s interrupts\n",
+                       int_params->out.int_mode == QED_INT_MODE_INTA ?
+                       "INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
+                       "MSI" : "MSIX");
        cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;
 
        return rc;
@@ -514,19 +556,18 @@ static irqreturn_t qed_single_int(int irq, void *dev_instance)
 int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
 {
        struct qed_dev *cdev = hwfn->cdev;
+       u32 int_mode;
        int rc = 0;
        u8 id;
 
-       if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+       int_mode = cdev->int_params.out.int_mode;
+       if (int_mode == QED_INT_MODE_MSIX) {
                id = hwfn->my_id;
                snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
                         id, cdev->pdev->bus->number,
                         PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
                rc = request_irq(cdev->int_params.msix_table[id].vector,
                                 qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
-               if (!rc)
-                       DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
-                                  "Requested slowpath MSI-X\n");
        } else {
                unsigned long flags = 0;
 
@@ -541,9 +582,29 @@ int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
                                 flags, cdev->name, cdev);
        }
 
+       if (rc)
+               DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
+       else
+               DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
+                          "Requested slowpath %s\n",
+                          (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");
+
        return rc;
 }
 
+void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
+{
+       struct qed_dev *cdev = p_hwfn->cdev;
+       u8 id = p_hwfn->my_id;
+       u32 int_mode;
+
+       int_mode = cdev->int_params.out.int_mode;
+       if (int_mode == QED_INT_MODE_MSIX)
+               synchronize_irq(cdev->int_params.msix_table[id].vector);
+       else
+               synchronize_irq(cdev->pdev->irq);
+}
+
 static void qed_slowpath_irq_free(struct qed_dev *cdev)
 {
        int i;
@@ -581,25 +642,23 @@ static int qed_nic_stop(struct qed_dev *cdev)
                }
        }
 
+       qed_dbg_pf_exit(cdev);
+
        return rc;
 }
 
-static int qed_nic_reset(struct qed_dev *cdev)
+static int qed_nic_setup(struct qed_dev *cdev)
 {
-       int rc;
-
-       rc = qed_hw_reset(cdev);
-       if (rc)
-               return rc;
-
-       qed_resc_free(cdev);
+       int rc, i;
 
-       return 0;
-}
+       /* Determine if interface is going to require LL2 */
+       if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
+               for (i = 0; i < cdev->num_hwfns; i++) {
+                       struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 
-static int qed_nic_setup(struct qed_dev *cdev)
-{
-       int rc;
+                       p_hwfn->using_ll2 = true;
+               }
+       }
 
        rc = qed_resc_alloc(cdev);
        if (rc)
@@ -657,6 +716,7 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
                                  enum qed_int_mode int_mode)
 {
        struct qed_sb_cnt_info sb_cnt_info;
+       int num_l2_queues = 0;
        int rc;
        int i;
 
@@ -687,6 +747,32 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
        cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
                                       cdev->num_hwfns;
 
+       if (!IS_ENABLED(CONFIG_QED_RDMA) ||
+           QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH_ROCE)
+               return 0;
+
+       for_each_hwfn(cdev, i)
+               num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
+
+       DP_VERBOSE(cdev, QED_MSG_RDMA,
+                  "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n",
+                  cdev->int_params.fp_msix_cnt, num_l2_queues);
+
+       if (cdev->int_params.fp_msix_cnt > num_l2_queues) {
+               cdev->int_params.rdma_msix_cnt =
+                       (cdev->int_params.fp_msix_cnt - num_l2_queues)
+                       / cdev->num_hwfns;
+               cdev->int_params.rdma_msix_base =
+                       cdev->int_params.fp_msix_base + num_l2_queues;
+               cdev->int_params.fp_msix_cnt = num_l2_queues;
+       } else {
+               cdev->int_params.rdma_msix_cnt = 0;
+       }
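+
+       /* Example with hypothetical numbers: fp_msix_cnt = 96 and 32 L2
+        * queues per hwfn on a 2-hwfn device (num_l2_queues = 64) leaves
+        * (96 - 64) / 2 = 16 RDMA vectors per hwfn, based right after the
+        * L2 range, and fp_msix_cnt is trimmed to 64.
+        */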
+
+       DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
+                  cdev->int_params.rdma_msix_cnt,
+                  cdev->int_params.rdma_msix_base);
+
        return 0;
 }
 
@@ -790,6 +876,27 @@ static void qed_update_pf_params(struct qed_dev *cdev,
 {
        int i;
 
+       if (IS_ENABLED(CONFIG_QED_RDMA)) {
+               params->rdma_pf_params.num_qps = QED_ROCE_QPS;
+               params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
+               /* divide by 3 the MRs to avoid MF ILT overflow */
+               params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
+       }
+
+       if (cdev->num_hwfns > 1 || IS_VF(cdev))
+               params->eth_pf_params.num_arfs_filters = 0;
+
+       /* In case we might support RDMA, don't allow qede to be greedy
+        * with the L2 contexts. Allow for 64 queues [rx, tx, xdp] per hwfn.
+        */
+       if (QED_LEADING_HWFN(cdev)->hw_info.personality ==
+           QED_PCI_ETH_ROCE) {
+               u16 *num_cons;
+
+               num_cons = &params->eth_pf_params.num_cons;
+               *num_cons = min_t(u16, *num_cons, 192);
+       }
+
        for (i = 0; i < cdev->num_hwfns; i++) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 
@@ -800,10 +907,13 @@ static void qed_update_pf_params(struct qed_dev *cdev,
 static int qed_slowpath_start(struct qed_dev *cdev,
                              struct qed_slowpath_params *params)
 {
+       struct qed_drv_load_params drv_load_params;
+       struct qed_hw_init_params hw_init_params;
        struct qed_tunn_start_params tunn_info;
        struct qed_mcp_drv_version drv_version;
        const u8 *data = NULL;
        struct qed_hwfn *hwfn;
+       struct qed_ptt *p_ptt;
        int rc = -EINVAL;
 
        if (qed_iov_wq_start(cdev))
@@ -818,8 +928,29 @@ static int qed_slowpath_start(struct qed_dev *cdev,
                                  QED_FW_FILE_NAME);
                        goto err;
                }
+
+#ifdef CONFIG_RFS_ACCEL
+               if (cdev->num_hwfns == 1) {
+                       p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+                       if (p_ptt) {
+                               QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt;
+                       } else {
+                               DP_NOTICE(cdev,
+                                         "Failed to acquire PTT for aRFS\n");
+                               goto err;
+                       }
+               }
+#endif
+               p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+               if (p_ptt) {
+                       QED_LEADING_HWFN(cdev)->p_ptp_ptt = p_ptt;
+               } else {
+                       DP_NOTICE(cdev, "Failed to acquire PTT for PTP\n");
+                       goto err;
+               }
        }
 
+       cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
        rc = qed_nic_setup(cdev);
        if (rc)
                goto err;
@@ -834,13 +965,13 @@ static int qed_slowpath_start(struct qed_dev *cdev,
        if (IS_PF(cdev)) {
                /* Allocate stream for unzipping */
                rc = qed_alloc_stream_mem(cdev);
-               if (rc) {
-                       DP_NOTICE(cdev, "Failed to allocate stream memory\n");
+               if (rc)
                        goto err2;
-               }
 
                /* First Dword used to differentiate between various sources */
                data = cdev->firmware->data + sizeof(u32);
+
+               qed_dbg_pf_init(cdev);
        }
 
        memset(&tunn_info, 0, sizeof(tunn_info));
@@ -855,15 +986,33 @@ static int qed_slowpath_start(struct qed_dev *cdev,
        tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN;
 
        /* Start the slowpath */
-       rc = qed_hw_init(cdev, &tunn_info, true,
-                        cdev->int_params.out.int_mode,
-                        true, data);
+       memset(&hw_init_params, 0, sizeof(hw_init_params));
+       hw_init_params.p_tunn = &tunn_info;
+       hw_init_params.b_hw_start = true;
+       hw_init_params.int_mode = cdev->int_params.out.int_mode;
+       hw_init_params.allow_npar_tx_switch = true;
+       hw_init_params.bin_fw_data = data;
+
+       memset(&drv_load_params, 0, sizeof(drv_load_params));
+       drv_load_params.is_crash_kernel = is_kdump_kernel();
+       drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
+       drv_load_params.avoid_eng_reset = false;
+       drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;
+       hw_init_params.p_drv_load_params = &drv_load_params;
+
+       rc = qed_hw_init(cdev, &hw_init_params);
        if (rc)
                goto err2;
 
        DP_INFO(cdev,
                "HW initialization and function start completed successfully\n");
 
+       /* Allocate LL2 interface if needed */
+       if (QED_LEADING_HWFN(cdev)->using_ll2) {
+               rc = qed_ll2_alloc_if(cdev);
+               if (rc)
+                       goto err3;
+       }
        if (IS_PF(cdev)) {
                hwfn = QED_LEADING_HWFN(cdev);
                drv_version.version = (params->drv_major << 24) |
@@ -884,6 +1033,8 @@ static int qed_slowpath_start(struct qed_dev *cdev,
 
        return 0;
 
+err3:
+       qed_hw_stop(cdev);
 err2:
        qed_hw_timers_stop_all(cdev);
        if (IS_PF(cdev))
@@ -896,6 +1047,16 @@ err:
        if (IS_PF(cdev))
                release_firmware(cdev->firmware);
 
+#ifdef CONFIG_RFS_ACCEL
+       if (IS_PF(cdev) && (cdev->num_hwfns == 1) &&
+           QED_LEADING_HWFN(cdev)->p_arfs_ptt)
+               qed_ptt_release(QED_LEADING_HWFN(cdev),
+                               QED_LEADING_HWFN(cdev)->p_arfs_ptt);
+#endif
+       if (IS_PF(cdev) && QED_LEADING_HWFN(cdev)->p_ptp_ptt)
+               qed_ptt_release(QED_LEADING_HWFN(cdev),
+                               QED_LEADING_HWFN(cdev)->p_ptp_ptt);
+
        qed_iov_wq_stop(cdev, false);
 
        return rc;
@@ -906,7 +1067,16 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
        if (!cdev)
                return -ENODEV;
 
+       qed_ll2_dealloc_if(cdev);
+
        if (IS_PF(cdev)) {
+#ifdef CONFIG_RFS_ACCEL
+               if (cdev->num_hwfns == 1)
+                       qed_ptt_release(QED_LEADING_HWFN(cdev),
+                                       QED_LEADING_HWFN(cdev)->p_arfs_ptt);
+#endif
+               qed_ptt_release(QED_LEADING_HWFN(cdev),
+                               QED_LEADING_HWFN(cdev)->p_ptp_ptt);
                qed_free_stream_mem(cdev);
                if (IS_QED_ETH_IF(cdev))
                        qed_sriov_disable(cdev, true);
@@ -916,7 +1086,8 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
        }
 
        qed_disable_msix(cdev);
-       qed_nic_reset(cdev);
+
+       qed_resc_free(cdev);
 
        qed_iov_wq_stop(cdev, true);
 
@@ -946,6 +1117,7 @@ static u32 qed_sb_init(struct qed_dev *cdev,
                       enum qed_sb_type type)
 {
        struct qed_hwfn *p_hwfn;
+       struct qed_ptt *p_ptt;
        int hwfn_index;
        u16 rel_sb_id;
        u8 n_hwfns;
@@ -967,15 +1139,24 @@ static u32 qed_sb_init(struct qed_dev *cdev,
                   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
                   hwfn_index, rel_sb_id, sb_id);
 
-       rc = qed_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
-                            sb_virt_addr, sb_phy_addr, rel_sb_id);
+       if (IS_PF(p_hwfn->cdev)) {
+               p_ptt = qed_ptt_acquire(p_hwfn);
+               if (!p_ptt)
+                       return -EBUSY;
+
+               rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr,
+                                    sb_phy_addr, rel_sb_id);
+               qed_ptt_release(p_hwfn, p_ptt);
+       } else {
+               rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr,
+                                    sb_phy_addr, rel_sb_id);
+       }
 
        return rc;
 }
 
 static u32 qed_sb_release(struct qed_dev *cdev,
-                         struct qed_sb_info *sb_info,
-                         u16 sb_id)
+                         struct qed_sb_info *sb_info, u16 sb_id)
 {
        struct qed_hwfn *p_hwfn;
        int hwfn_index;
@@ -1010,12 +1191,18 @@ static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
        if (!cdev)
                return -ENODEV;
 
-       if (IS_VF(cdev))
-               return 0;
-
        /* The link should be set only once per PF */
        hwfn = &cdev->hwfns[0];
 
+       /* When a VF wants to set the link, force it to read the bulletin
+        * instead. This mimics the PF behavior, where a notification [both
+        * immediate and possibly later] would be generated when changing
+        * properties.
+        */
+       if (IS_VF(cdev)) {
+               qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG);
+               return 0;
+       }
+
        ptt = qed_ptt_acquire(hwfn);
        if (!ptt)
                return -EBUSY;
@@ -1358,11 +1545,106 @@ static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
        return status;
 }
 
-struct qed_selftest_ops qed_selftest_ops_pass = {
+static int qed_update_wol(struct qed_dev *cdev, bool enabled)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_ptt *ptt;
+       int rc = 0;
+
+       if (IS_VF(cdev))
+               return 0;
+
+       ptt = qed_ptt_acquire(hwfn);
+       if (!ptt)
+               return -EAGAIN;
+
+       rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED
+                                  : QED_OV_WOL_DISABLED);
+       if (rc)
+               goto out;
+       rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
+
+out:
+       qed_ptt_release(hwfn, ptt);
+       return rc;
+}
+
+static int qed_update_drv_state(struct qed_dev *cdev, bool active)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_ptt *ptt;
+       int status = 0;
+
+       if (IS_VF(cdev))
+               return 0;
+
+       ptt = qed_ptt_acquire(hwfn);
+       if (!ptt)
+               return -EAGAIN;
+
+       status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ?
+                                               QED_OV_DRIVER_STATE_ACTIVE :
+                                               QED_OV_DRIVER_STATE_DISABLED);
+
+       qed_ptt_release(hwfn, ptt);
+
+       return status;
+}
+
+static int qed_update_mac(struct qed_dev *cdev, u8 *mac)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_ptt *ptt;
+       int status = 0;
+
+       if (IS_VF(cdev))
+               return 0;
+
+       ptt = qed_ptt_acquire(hwfn);
+       if (!ptt)
+               return -EAGAIN;
+
+       status = qed_mcp_ov_update_mac(hwfn, ptt, mac);
+       if (status)
+               goto out;
+
+       status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
+
+out:
+       qed_ptt_release(hwfn, ptt);
+       return status;
+}
+
+static int qed_update_mtu(struct qed_dev *cdev, u16 mtu)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_ptt *ptt;
+       int status = 0;
+
+       if (IS_VF(cdev))
+               return 0;
+
+       ptt = qed_ptt_acquire(hwfn);
+       if (!ptt)
+               return -EAGAIN;
+
+       status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu);
+       if (status)
+               goto out;
+
+       status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
+
+out:
+       qed_ptt_release(hwfn, ptt);
+       return status;
+}
+
+static struct qed_selftest_ops qed_selftest_ops_pass = {
        .selftest_memory = &qed_selftest_memory,
        .selftest_interrupt = &qed_selftest_interrupt,
        .selftest_register = &qed_selftest_register,
        .selftest_clock = &qed_selftest_clock,
+       .selftest_nvram = &qed_selftest_nvram,
 };
 
 const struct qed_common_ops qed_common_ops_pass = {
@@ -1380,14 +1662,51 @@ const struct qed_common_ops qed_common_ops_pass = {
        .sb_release = &qed_sb_release,
        .simd_handler_config = &qed_simd_handler_config,
        .simd_handler_clean = &qed_simd_handler_clean,
+       .dbg_grc = &qed_dbg_grc,
+       .dbg_grc_size = &qed_dbg_grc_size,
        .can_link_change = &qed_can_link_change,
        .set_link = &qed_set_link,
        .get_link = &qed_get_current_link,
        .drain = &qed_drain,
        .update_msglvl = &qed_init_dp,
+       .dbg_all_data = &qed_dbg_all_data,
+       .dbg_all_data_size = &qed_dbg_all_data_size,
        .chain_alloc = &qed_chain_alloc,
        .chain_free = &qed_chain_free,
        .get_coalesce = &qed_get_coalesce,
        .set_coalesce = &qed_set_coalesce,
        .set_led = &qed_set_led,
+       .update_drv_state = &qed_update_drv_state,
+       .update_mac = &qed_update_mac,
+       .update_mtu = &qed_update_mtu,
+       .update_wol = &qed_update_wol,
 };
+
+void qed_get_protocol_stats(struct qed_dev *cdev,
+                           enum qed_mcp_protocol_type type,
+                           union qed_mcp_protocol_stats *stats)
+{
+       struct qed_eth_stats eth_stats;
+
+       memset(stats, 0, sizeof(*stats));
+
+       switch (type) {
+       case QED_MCP_LAN_STATS:
+               qed_get_vport_stats(cdev, &eth_stats);
+               stats->lan_stats.ucast_rx_pkts =
+                                       eth_stats.common.rx_ucast_pkts;
+               stats->lan_stats.ucast_tx_pkts =
+                                       eth_stats.common.tx_ucast_pkts;
+               stats->lan_stats.fcs_err = -1;
+               break;
+       case QED_MCP_FCOE_STATS:
+               qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
+               break;
+       case QED_MCP_ISCSI_STATS:
+               qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
+               break;
+       default:
+               DP_ERR(cdev, "Invalid protocol type = %d\n", type);
+               return;
+       }
+}
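
qed_get_protocol_stats() is the collection half of the MFW statistics path;
the sending half, qed_mcp_send_protocol_stats(), appears further down in this
patch. A brief, hypothetical call for the LAN case (the caller context is
assumed, not taken from this patch):

        union qed_mcp_protocol_stats stats;

        /* fcs_err is reported as -1, i.e. unavailable: the vport
         * statistics gathered by qed_get_vport_stats() carry no FCS
         * error counter.
         */
        qed_get_protocol_stats(cdev, QED_MCP_LAN_STATS, &stats);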
index f776a77794c5154b1fab58f49aa6f1bcad0031ee..26d9a9e99babd304bcb8b9b02921f38182a7f6ec 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #include <linux/types.h>
@@ -14,6 +38,7 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/string.h>
+#include <linux/etherdevice.h>
 #include "qed.h"
 #include "qed_dcbx.h"
 #include "qed_hsi.h"
@@ -54,8 +79,7 @@ bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
        return true;
 }
 
-void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
-                          struct qed_ptt *p_ptt)
+void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
        u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
                                        PUBLIC_PORT);
@@ -68,8 +92,7 @@ void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
                   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
 }
 
-void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
-                    struct qed_ptt *p_ptt)
+void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
        u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
        u32 tmp, i;
@@ -88,19 +111,77 @@ void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
        }
 }
 
+struct qed_mcp_cmd_elem {
+       struct list_head list;
+       struct qed_mcp_mb_params *p_mb_params;
+       u16 expected_seq_num;
+       bool b_is_completed;
+};
+
+/* Must be called while cmd_lock is acquired */
+static struct qed_mcp_cmd_elem *
+qed_mcp_cmd_add_elem(struct qed_hwfn *p_hwfn,
+                    struct qed_mcp_mb_params *p_mb_params,
+                    u16 expected_seq_num)
+{
+       struct qed_mcp_cmd_elem *p_cmd_elem = NULL;
+
+       p_cmd_elem = kzalloc(sizeof(*p_cmd_elem), GFP_ATOMIC);
+       if (!p_cmd_elem)
+               goto out;
+
+       p_cmd_elem->p_mb_params = p_mb_params;
+       p_cmd_elem->expected_seq_num = expected_seq_num;
+       list_add(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
+out:
+       return p_cmd_elem;
+}
+
+/* Must be called while cmd_lock is acquired */
+static void qed_mcp_cmd_del_elem(struct qed_hwfn *p_hwfn,
+                                struct qed_mcp_cmd_elem *p_cmd_elem)
+{
+       list_del(&p_cmd_elem->list);
+       kfree(p_cmd_elem);
+}
+
+/* Must be called while cmd_lock is acquired */
+static struct qed_mcp_cmd_elem *qed_mcp_cmd_get_elem(struct qed_hwfn *p_hwfn,
+                                                    u16 seq_num)
+{
+       struct qed_mcp_cmd_elem *p_cmd_elem = NULL;
+
+       list_for_each_entry(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list) {
+               if (p_cmd_elem->expected_seq_num == seq_num)
+                       return p_cmd_elem;
+       }
+
+       return NULL;
+}
+
 int qed_mcp_free(struct qed_hwfn *p_hwfn)
 {
        if (p_hwfn->mcp_info) {
+               struct qed_mcp_cmd_elem *p_cmd_elem, *p_tmp;
+
                kfree(p_hwfn->mcp_info->mfw_mb_cur);
                kfree(p_hwfn->mcp_info->mfw_mb_shadow);
+
+               spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
+               list_for_each_entry_safe(p_cmd_elem,
+                                        p_tmp,
+                                        &p_hwfn->mcp_info->cmd_list, list) {
+                       qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
+               }
+               spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
        }
+
        kfree(p_hwfn->mcp_info);
 
        return 0;
 }
 
-static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn,
-                               struct qed_ptt *p_ptt)
+static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
        struct qed_mcp_info *p_info = p_hwfn->mcp_info;
        u32 drv_mb_offsize, mfw_mb_offsize;
@@ -138,13 +219,12 @@ static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn,
        p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
                                DRV_PULSE_SEQ_MASK;
 
-       p_info->mcp_hist = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
+       p_info->mcp_hist = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
 
        return 0;
 }
 
-int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
-                    struct qed_ptt *p_ptt)
+int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
        struct qed_mcp_info *p_info;
        u32 size;
@@ -155,6 +235,12 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
                goto err;
        p_info = p_hwfn->mcp_info;
 
+       /* Initialize the MFW spinlock */
+       spin_lock_init(&p_info->cmd_lock);
+       spin_lock_init(&p_info->link_lock);
+
+       INIT_LIST_HEAD(&p_info->cmd_list);
+
        if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
                DP_NOTICE(p_hwfn, "MCP is not initialized\n");
                /* Do not free mcp_info here, since public_base indicate that
@@ -165,88 +251,50 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
 
        size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
        p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
-       p_info->mfw_mb_shadow =
-               kzalloc(sizeof(u32) * MFW_DRV_MSG_MAX_DWORDS(
-                               p_info->mfw_mb_length), GFP_KERNEL);
+       p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
        if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
                goto err;
 
-       /* Initialize the MFW spinlock */
-       spin_lock_init(&p_info->lock);
-
        return 0;
 
 err:
-       DP_NOTICE(p_hwfn, "Failed to allocate mcp memory\n");
        qed_mcp_free(p_hwfn);
        return -ENOMEM;
 }
 
-/* Locks the MFW mailbox of a PF to ensure a single access.
- * The lock is achieved in most cases by holding a spinlock, causing other
- * threads to wait till a previous access is done.
- * In some cases (currently when a [UN]LOAD_REQ commands are sent), the single
- * access is achieved by setting a blocking flag, which will fail other
- * competing contexts to send their mailboxes.
- */
-static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn,
-                          u32 cmd)
-{
-       spin_lock_bh(&p_hwfn->mcp_info->lock);
-
-       /* The spinlock shouldn't be acquired when the mailbox command is
-        * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel
-        * pending [UN]LOAD_REQ command of another PF together with a spinlock
-        * (i.e. interrupts are disabled) - can lead to a deadlock.
-        * It is assumed that for a single PF, no other mailbox commands can be
-        * sent from another context while sending LOAD_REQ, and that any
-        * parallel commands to UNLOAD_REQ can be cancelled.
-        */
-       if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE)
-               p_hwfn->mcp_info->block_mb_sending = false;
+static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn,
+                                  struct qed_ptt *p_ptt)
+{
+       u32 generic_por_0 = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
 
-       if (p_hwfn->mcp_info->block_mb_sending) {
-               DP_NOTICE(p_hwfn,
-                         "Trying to send a MFW mailbox command [0x%x] in parallel to [UN]LOAD_REQ. Aborting.\n",
-                         cmd);
-               spin_unlock_bh(&p_hwfn->mcp_info->lock);
-               return -EBUSY;
-       }
+       /* Use MCP history register to check if MCP reset occurred between init
+        * time and now.
+        */
+       if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
+               DP_VERBOSE(p_hwfn,
+                          QED_MSG_SP,
+                          "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
+                          p_hwfn->mcp_info->mcp_hist, generic_por_0);
 
-       if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) {
-               p_hwfn->mcp_info->block_mb_sending = true;
-               spin_unlock_bh(&p_hwfn->mcp_info->lock);
+               qed_load_mcp_offsets(p_hwfn, p_ptt);
+               qed_mcp_cmd_port_init(p_hwfn, p_ptt);
        }
-
-       return 0;
-}
-
-static void qed_mcp_mb_unlock(struct qed_hwfn  *p_hwfn,
-                             u32               cmd)
-{
-       if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
-               spin_unlock_bh(&p_hwfn->mcp_info->lock);
 }
 
-int qed_mcp_reset(struct qed_hwfn *p_hwfn,
-                 struct qed_ptt *p_ptt)
+int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-       u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
-       u8 delay = CHIP_MCP_RESP_ITER_US;
-       u32 org_mcp_reset_seq, cnt = 0;
+       u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
        int rc = 0;
 
-       /* Ensure that only a single thread is accessing the mailbox at a
-        * certain time.
-        */
-       rc = qed_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
-       if (rc != 0)
-               return rc;
+       /* Ensure that only a single thread is accessing the mailbox */
+       spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
 
-       /* Set drv command along with the updated sequence */
        org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
-       DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header,
-                 (DRV_MSG_CODE_MCP_RESET | seq));
+
+       /* Set drv command along with the updated sequence */
+       qed_mcp_reread_offsets(p_hwfn, p_ptt);
+       seq = ++p_hwfn->mcp_info->drv_mb_seq;
+       DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
 
        do {
                /* Wait for MFW response */
@@ -265,71 +313,207 @@ int qed_mcp_reset(struct qed_hwfn *p_hwfn,
                rc = -EAGAIN;
        }
 
-       qed_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
+       spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
 
        return rc;
 }
 
-static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn,
-                         struct qed_ptt *p_ptt,
-                         u32 cmd,
-                         u32 param,
-                         u32 *o_mcp_resp,
-                         u32 *o_mcp_param)
+/* Must be called while cmd_lock is acquired */
+static bool qed_mcp_has_pending_cmd(struct qed_hwfn *p_hwfn)
 {
-       u8 delay = CHIP_MCP_RESP_ITER_US;
-       u32 seq, cnt = 1, actual_mb_seq;
-       int rc = 0;
+       struct qed_mcp_cmd_elem *p_cmd_elem;
 
-       /* Get actual driver mailbox sequence */
-       actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
-                       DRV_MSG_SEQ_NUMBER_MASK;
-
-       /* Use MCP history register to check if MCP reset occurred between
-        * init time and now.
+       /* There is at most one pending command at any given time; if it
+        * exists, it is placed at the HEAD of the list.
         */
-       if (p_hwfn->mcp_info->mcp_hist !=
-           qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
-               DP_VERBOSE(p_hwfn, QED_MSG_SP, "Rereading MCP offsets\n");
-               qed_load_mcp_offsets(p_hwfn, p_ptt);
-               qed_mcp_cmd_port_init(p_hwfn, p_ptt);
+       if (!list_empty(&p_hwfn->mcp_info->cmd_list)) {
+               p_cmd_elem = list_first_entry(&p_hwfn->mcp_info->cmd_list,
+                                             struct qed_mcp_cmd_elem, list);
+               return !p_cmd_elem->b_is_completed;
        }
-       seq = ++p_hwfn->mcp_info->drv_mb_seq;
 
-       /* Set drv param */
-       DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);
+       return false;
+}
 
-       /* Set drv command along with the updated sequence */
-       DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));
+/* Must be called while cmd_lock is acquired */
+static int
+qed_mcp_update_pending_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+       struct qed_mcp_mb_params *p_mb_params;
+       struct qed_mcp_cmd_elem *p_cmd_elem;
+       u32 mcp_resp;
+       u16 seq_num;
+
+       mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
+       seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);
+
+       /* Return if no new non-handled response has been received */
+       if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
+               return -EAGAIN;
+
+       p_cmd_elem = qed_mcp_cmd_get_elem(p_hwfn, seq_num);
+       if (!p_cmd_elem) {
+               DP_ERR(p_hwfn,
+                      "Failed to find a pending mailbox cmd that expects sequence number %d\n",
+                      seq_num);
+               return -EINVAL;
+       }
+
+       p_mb_params = p_cmd_elem->p_mb_params;
+
+       /* Get the MFW response along with the sequence number */
+       p_mb_params->mcp_resp = mcp_resp;
+
+       /* Get the MFW param */
+       p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
+
+       /* Get the union data */
+       if (p_mb_params->p_data_dst != NULL && p_mb_params->data_dst_size) {
+               u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
+                                     offsetof(struct public_drv_mb,
+                                              union_data);
+               qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
+                               union_data_addr, p_mb_params->data_dst_size);
+       }
+
+       p_cmd_elem->b_is_completed = true;
+
+       return 0;
+}
+
+/* Must be called while cmd_lock is acquired */
+static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
+                                   struct qed_ptt *p_ptt,
+                                   struct qed_mcp_mb_params *p_mb_params,
+                                   u16 seq_num)
+{
+       union drv_union_data union_data;
+       u32 union_data_addr;
+
+       /* Set the union data */
+       union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
+                         offsetof(struct public_drv_mb, union_data);
+       memset(&union_data, 0, sizeof(union_data));
+       if (p_mb_params->p_data_src != NULL && p_mb_params->data_src_size)
+               memcpy(&union_data, p_mb_params->p_data_src,
+                      p_mb_params->data_src_size);
+       qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
+                     sizeof(union_data));
+
+       /* Set the drv param */
+       DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);
+
+       /* Set the drv command along with the sequence number */
+       DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));
 
        DP_VERBOSE(p_hwfn, QED_MSG_SP,
-                  "wrote command (%x) to MFW MB param 0x%08x\n",
-                  (cmd | seq), param);
+                  "MFW mailbox: command 0x%08x param 0x%08x\n",
+                  (p_mb_params->cmd | seq_num), p_mb_params->param);
+}
+
+static int
+_qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
+                      struct qed_ptt *p_ptt,
+                      struct qed_mcp_mb_params *p_mb_params,
+                      u32 max_retries, u32 delay)
+{
+       struct qed_mcp_cmd_elem *p_cmd_elem;
+       u32 cnt = 0;
+       u16 seq_num;
+       int rc = 0;
 
+       /* Wait until the mailbox is non-occupied */
        do {
-               /* Wait for MFW response */
+               /* Exit the loop if there is no pending command, or if the
+                * pending command is completed during this iteration.
+                * The spinlock stays locked until the command is sent.
+                */
+
+               spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+               if (!qed_mcp_has_pending_cmd(p_hwfn))
+                       break;
+
+               rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
+               if (!rc)
+                       break;
+               else if (rc != -EAGAIN)
+                       goto err;
+
+               spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
                udelay(delay);
-               *o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
+       } while (++cnt < max_retries);
 
-               /* Give the FW up to 5 second (500*10ms) */
-       } while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
-                (cnt++ < QED_DRV_MB_MAX_RETRIES));
+       if (cnt >= max_retries) {
+               DP_NOTICE(p_hwfn,
+                         "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
+                         p_mb_params->cmd, p_mb_params->param);
+               return -EAGAIN;
+       }
 
-       DP_VERBOSE(p_hwfn, QED_MSG_SP,
-                  "[after %d ms] read (%x) seq is (%x) from FW MB\n",
-                  cnt * delay, *o_mcp_resp, seq);
-
-       /* Is this a reply to our command? */
-       if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
-               *o_mcp_resp &= FW_MSG_CODE_MASK;
-               /* Get the MCP param */
-               *o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
-       } else {
-               /* FW BUG! */
-               DP_ERR(p_hwfn, "MFW failed to respond!\n");
-               *o_mcp_resp = 0;
-               rc = -EAGAIN;
+       /* Send the mailbox command */
+       qed_mcp_reread_offsets(p_hwfn, p_ptt);
+       seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
+       p_cmd_elem = qed_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
+       if (!p_cmd_elem) {
+               rc = -ENOMEM;
+               goto err;
+       }
+
+       __qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
+       spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+       /* Wait for the MFW response */
+       do {
+               /* Exit the loop if the command is already completed, or if the
+                * command is completed during this iteration.
+                * The spinlock stays locked until the list element is removed.
+                */
+
+               udelay(delay);
+               spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+               if (p_cmd_elem->b_is_completed)
+                       break;
+
+               rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
+               if (!rc)
+                       break;
+               else if (rc != -EAGAIN)
+                       goto err;
+
+               spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
+       } while (++cnt < max_retries);
+
+       if (cnt >= max_retries) {
+               DP_NOTICE(p_hwfn,
+                         "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
+                         p_mb_params->cmd, p_mb_params->param);
+
+               spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
+               qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
+               spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+               return -EAGAIN;
        }
+
+       qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
+       spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+       DP_VERBOSE(p_hwfn,
+                  QED_MSG_SP,
+                  "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
+                  p_mb_params->mcp_resp,
+                  p_mb_params->mcp_param,
+                  (cnt * delay) / 1000, (cnt * delay) % 1000);
+
+       /* Clear the sequence number from the MFW response */
+       p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;
+
+       return 0;
+
+err:
+       spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
        return rc;
 }
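
Condensed, the control flow of _qed_mcp_cmd_and_union() above is (a reading
of the code, not new behavior):

        /* 1. Poll under cmd_lock until the single mailbox slot is free,
         *    harvesting a completed-but-unhandled response if one arrived.
         * 2. Still under the lock: re-read the MCP offsets, bump
         *    drv_mb_seq, queue a qed_mcp_cmd_elem and write the command.
         * 3. Poll until qed_mcp_update_pending_cmd() matches the firmware
         *    sequence number and copies the response and union data back.
         *
         * Both polling loops share the same cnt/max_retries budget, since
         * cnt is not reset between them.
         */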
 
@@ -337,42 +521,27 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
                                 struct qed_ptt *p_ptt,
                                 struct qed_mcp_mb_params *p_mb_params)
 {
-       u32 union_data_addr;
-       int rc;
+       size_t union_data_size = sizeof(union drv_union_data);
+       u32 max_retries = QED_DRV_MB_MAX_RETRIES;
+       u32 delay = CHIP_MCP_RESP_ITER_US;
 
        /* MCP not initialized */
        if (!qed_mcp_is_init(p_hwfn)) {
-               DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
+               DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
                return -EBUSY;
        }
 
-       union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
-                         offsetof(struct public_drv_mb, union_data);
-
-       /* Ensure that only a single thread is accessing the mailbox at a
-        * certain time.
-        */
-       rc = qed_mcp_mb_lock(p_hwfn, p_mb_params->cmd);
-       if (rc)
-               return rc;
-
-       if (p_mb_params->p_data_src != NULL)
-               qed_memcpy_to(p_hwfn, p_ptt, union_data_addr,
-                             p_mb_params->p_data_src,
-                             sizeof(*p_mb_params->p_data_src));
-
-       rc = qed_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
-                           p_mb_params->param, &p_mb_params->mcp_resp,
-                           &p_mb_params->mcp_param);
-
-       if (p_mb_params->p_data_dst != NULL)
-               qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
-                               union_data_addr,
-                               sizeof(*p_mb_params->p_data_dst));
-
-       qed_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);
+       if (p_mb_params->data_src_size > union_data_size ||
+           p_mb_params->data_dst_size > union_data_size) {
+               DP_ERR(p_hwfn,
+                      "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
+                      p_mb_params->data_src_size,
+                      p_mb_params->data_dst_size, union_data_size);
+               return -EINVAL;
+       }
 
-       return rc;
+       return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
+                                     delay);
 }
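
On the timing budget chosen above, as a rough worked figure (verify the
exact constants against the defines at the top of qed_mcp.c in your tree):

        /* worst case ~= QED_DRV_MB_MAX_RETRIES * CHIP_MCP_RESP_ITER_US us
         *            ~= 5 seconds, per the comment this patch removes
         */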
 
 int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
@@ -388,6 +557,7 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
        memset(&mb_params, 0, sizeof(mb_params));
        mb_params.cmd = cmd;
        mb_params.param = param;
+
        rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc)
                return rc;
@@ -398,171 +568,559 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
        return 0;
 }
 
-int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
-                    struct qed_ptt *p_ptt,
-                    u32 *p_load_code)
+int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
+                      struct qed_ptt *p_ptt,
+                      u32 cmd,
+                      u32 param,
+                      u32 *o_mcp_resp,
+                      u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf)
 {
-       struct qed_dev *cdev = p_hwfn->cdev;
        struct qed_mcp_mb_params mb_params;
-       union drv_union_data union_data;
+       u8 raw_data[MCP_DRV_NVM_BUF_LEN];
        int rc;
 
        memset(&mb_params, 0, sizeof(mb_params));
-       /* Load Request */
-       mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
-       mb_params.param = PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
-                         cdev->drv_type;
-       memcpy(&union_data.ver_str, cdev->ver_str, MCP_DRV_VER_STR_SIZE);
-       mb_params.p_data_src = &union_data;
-       rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+       mb_params.cmd = cmd;
+       mb_params.param = param;
+       mb_params.p_data_dst = raw_data;
 
-       /* if mcp fails to respond we must abort */
-       if (rc) {
-               DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+       /* Use the maximal value since the actual one is part of the response */
+       mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;
+
+       rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+       if (rc)
                return rc;
-       }
 
-       *p_load_code = mb_params.mcp_resp;
+       *o_mcp_resp = mb_params.mcp_resp;
+       *o_mcp_param = mb_params.mcp_param;
 
-       /* If MFW refused (e.g. other port is in diagnostic mode) we
-        * must abort. This can happen in the following cases:
-        * - Other port is in diagnostic mode
-        * - Previously loaded function on the engine is not compliant with
-        *   the requester.
-        * - MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
-        *      -
-        */
-       if (!(*p_load_code) ||
-           ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) ||
-           ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) ||
-           ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) {
-               DP_ERR(p_hwfn, "MCP refused load request, aborting\n");
-               return -EBUSY;
-       }
+       *o_txn_size = *o_mcp_param;
+       memcpy(o_buf, raw_data, *o_txn_size);
 
        return 0;
 }
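
Calling convention sketch for qed_mcp_nvm_rd_cmd() (cmd and param are
placeholders for an NVM opcode and its argument; the real values come from
the HSI headers, not from this patch):

        u32 mcp_resp, mcp_param, txn_size;
        u8 buf[MCP_DRV_NVM_BUF_LEN];
        int rc;

        /* One transaction reads at most MCP_DRV_NVM_BUF_LEN bytes; the
         * actual length comes back through txn_size (== mcp_param).
         */
        rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, cmd, param, &mcp_resp,
                                &mcp_param, &txn_size, (u32 *)buf);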
 
-static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
-                                 struct qed_ptt *p_ptt)
+static bool
+qed_mcp_can_force_load(u8 drv_role,
+                      u8 exist_drv_role,
+                      enum qed_override_force_load override_force_load)
 {
-       u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
-                                       PUBLIC_PATH);
-       u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
-       u32 path_addr = SECTION_ADDR(mfw_path_offsize,
-                                    QED_PATH_ID(p_hwfn));
-       u32 disabled_vfs[VF_MAX_STATIC / 32];
-       int i;
-
-       DP_VERBOSE(p_hwfn,
-                  QED_MSG_SP,
-                  "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
-                  mfw_path_offsize, path_addr);
+       bool can_force_load = false;
 
-       for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
-               disabled_vfs[i] = qed_rd(p_hwfn, p_ptt,
-                                        path_addr +
-                                        offsetof(struct public_path,
-                                                 mcp_vf_disabled) +
-                                        sizeof(u32) * i);
-               DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
-                          "FLR-ed VFs [%08x,...,%08x] - %08x\n",
-                          i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
+       switch (override_force_load) {
+       case QED_OVERRIDE_FORCE_LOAD_ALWAYS:
+               can_force_load = true;
+               break;
+       case QED_OVERRIDE_FORCE_LOAD_NEVER:
+               can_force_load = false;
+               break;
+       default:
+               can_force_load = (drv_role == DRV_ROLE_OS &&
+                                 exist_drv_role == DRV_ROLE_PREBOOT) ||
+                                (drv_role == DRV_ROLE_KDUMP &&
+                                 exist_drv_role == DRV_ROLE_OS);
+               break;
        }
 
-       if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs))
-               qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
+       return can_force_load;
 }
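
Under the default policy, the decision above reduces to two allowed
force-load cases, both "later stage over earlier stage":

        /* loading role       existing role        default verdict
         * DRV_ROLE_OS        DRV_ROLE_PREBOOT  -> may force
         * DRV_ROLE_KDUMP     DRV_ROLE_OS       -> may force
         * anything else      anything else     -> must not force
         */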
 
-int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
-                      struct qed_ptt *p_ptt, u32 *vfs_to_ack)
+static int qed_mcp_cancel_load_req(struct qed_hwfn *p_hwfn,
+                                  struct qed_ptt *p_ptt)
 {
-       u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
-                                       PUBLIC_FUNC);
-       u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr);
-       u32 func_addr = SECTION_ADDR(mfw_func_offsize,
-                                    MCP_PF_ID(p_hwfn));
-       struct qed_mcp_mb_params mb_params;
-       union drv_union_data union_data;
+       u32 resp = 0, param = 0;
        int rc;
-       int i;
-
-       for (i = 0; i < (VF_MAX_STATIC / 32); i++)
-               DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
-                          "Acking VFs [%08x,...,%08x] - %08x\n",
-                          i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
-
-       memset(&mb_params, 0, sizeof(mb_params));
-       mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
-       memcpy(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);
-       mb_params.p_data_src = &union_data;
-       rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
-       if (rc) {
-               DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
-               return -EBUSY;
-       }
 
-       /* Clear the ACK bits */
-       for (i = 0; i < (VF_MAX_STATIC / 32); i++)
-               qed_wr(p_hwfn, p_ptt,
-                      func_addr +
-                      offsetof(struct public_func, drv_ack_vf_disabled) +
-                      i * sizeof(u32), 0);
+       rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
+                        &resp, &param);
+       if (rc)
+               DP_NOTICE(p_hwfn,
+                         "Failed to send cancel load request, rc = %d\n", rc);
 
        return rc;
 }
 
-static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
-                                             struct qed_ptt *p_ptt)
+#define CONFIG_QEDE_BITMAP_IDX         BIT(0)
+#define CONFIG_QED_SRIOV_BITMAP_IDX    BIT(1)
+#define CONFIG_QEDR_BITMAP_IDX         BIT(2)
+#define CONFIG_QEDF_BITMAP_IDX         BIT(4)
+#define CONFIG_QEDI_BITMAP_IDX         BIT(5)
+#define CONFIG_QED_LL2_BITMAP_IDX      BIT(6)
+
+static u32 qed_get_config_bitmap(void)
 {
-       u32 transceiver_state;
+       u32 config_bitmap = 0x0;
 
-       transceiver_state = qed_rd(p_hwfn, p_ptt,
-                                  p_hwfn->mcp_info->port_addr +
-                                  offsetof(struct public_port,
-                                           transceiver_data));
+       if (IS_ENABLED(CONFIG_QEDE))
+               config_bitmap |= CONFIG_QEDE_BITMAP_IDX;
 
-       DP_VERBOSE(p_hwfn,
-                  (NETIF_MSG_HW | QED_MSG_SP),
-                  "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
-                  transceiver_state,
-                  (u32)(p_hwfn->mcp_info->port_addr +
-                        offsetof(struct public_port,
-                                 transceiver_data)));
+       if (IS_ENABLED(CONFIG_QED_SRIOV))
+               config_bitmap |= CONFIG_QED_SRIOV_BITMAP_IDX;
 
-       transceiver_state = GET_FIELD(transceiver_state,
-                                     ETH_TRANSCEIVER_STATE);
+       if (IS_ENABLED(CONFIG_QED_RDMA))
+               config_bitmap |= CONFIG_QEDR_BITMAP_IDX;
 
-       if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
-               DP_NOTICE(p_hwfn, "Transceiver is present.\n");
-       else
-               DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n");
+       if (IS_ENABLED(CONFIG_QED_FCOE))
+               config_bitmap |= CONFIG_QEDF_BITMAP_IDX;
+
+       if (IS_ENABLED(CONFIG_QED_ISCSI))
+               config_bitmap |= CONFIG_QEDI_BITMAP_IDX;
+
+       if (IS_ENABLED(CONFIG_QED_LL2))
+               config_bitmap |= CONFIG_QED_LL2_BITMAP_IDX;
+
+       return config_bitmap;
 }
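
Worked example: a kernel built with CONFIG_QEDE, CONFIG_QED_SRIOV and
CONFIG_QED_RDMA but without FCoE, iSCSI or LL2 reports

        BIT(0) | BIT(1) | BIT(2) = 0x00000007

Note that BIT(3) is skipped in the index scheme above; the bitmap is sent to
the MFW as drv_ver_1 in the load request below.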
 
-static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
-                                      struct qed_ptt *p_ptt,
-                                      bool b_reset)
+struct qed_load_req_in_params {
+       u8 hsi_ver;
+#define QED_LOAD_REQ_HSI_VER_DEFAULT   0
+#define QED_LOAD_REQ_HSI_VER_1         1
+       u32 drv_ver_0;
+       u32 drv_ver_1;
+       u32 fw_ver;
+       u8 drv_role;
+       u8 timeout_val;
+       u8 force_cmd;
+       bool avoid_eng_reset;
+};
+
+struct qed_load_req_out_params {
+       u32 load_code;
+       u32 exist_drv_ver_0;
+       u32 exist_drv_ver_1;
+       u32 exist_fw_ver;
+       u8 exist_drv_role;
+       u8 mfw_hsi_ver;
+       bool drv_exists;
+};
+
+static int
+__qed_mcp_load_req(struct qed_hwfn *p_hwfn,
+                  struct qed_ptt *p_ptt,
+                  struct qed_load_req_in_params *p_in_params,
+                  struct qed_load_req_out_params *p_out_params)
 {
-       struct qed_mcp_link_state *p_link;
-       u8 max_bw, min_bw;
-       u32 status = 0;
+       struct qed_mcp_mb_params mb_params;
+       struct load_req_stc load_req;
+       struct load_rsp_stc load_rsp;
+       u32 hsi_ver;
+       int rc;
 
-       p_link = &p_hwfn->mcp_info->link_output;
-       memset(p_link, 0, sizeof(*p_link));
-       if (!b_reset) {
-               status = qed_rd(p_hwfn, p_ptt,
-                               p_hwfn->mcp_info->port_addr +
-                               offsetof(struct public_port, link_status));
+       memset(&load_req, 0, sizeof(load_req));
+       load_req.drv_ver_0 = p_in_params->drv_ver_0;
+       load_req.drv_ver_1 = p_in_params->drv_ver_1;
+       load_req.fw_ver = p_in_params->fw_ver;
+       QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
+       QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
+                         p_in_params->timeout_val);
+       QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE,
+                         p_in_params->force_cmd);
+       QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
+                         p_in_params->avoid_eng_reset);
+
+       hsi_ver = (p_in_params->hsi_ver == QED_LOAD_REQ_HSI_VER_DEFAULT) ?
+                 DRV_ID_MCP_HSI_VER_CURRENT :
+                 (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);
+
+       memset(&mb_params, 0, sizeof(mb_params));
+       mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
+       mb_params.param = PDA_COMP | hsi_ver | p_hwfn->cdev->drv_type;
+       mb_params.p_data_src = &load_req;
+       mb_params.data_src_size = sizeof(load_req);
+       mb_params.p_data_dst = &load_rsp;
+       mb_params.data_dst_size = sizeof(load_rsp);
+
+       DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                  "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
+                  mb_params.param,
+                  QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
+                  QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
+                  QED_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
+                  QED_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));
+
+       if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1) {
+               DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                          "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
+                          load_req.drv_ver_0,
+                          load_req.drv_ver_1,
+                          load_req.fw_ver,
+                          load_req.misc0,
+                          QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE),
+                          QED_MFW_GET_FIELD(load_req.misc0,
+                                            LOAD_REQ_LOCK_TO),
+                          QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE),
+                          QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));
+       }
+
+       rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+       if (rc) {
+               DP_NOTICE(p_hwfn, "Failed to send load request, rc = %d\n", rc);
+               return rc;
+       }
+
+       DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                  "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
+       p_out_params->load_code = mb_params.mcp_resp;
+
+       if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
+           p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
+               DP_VERBOSE(p_hwfn,
+                          QED_MSG_SP,
+                          "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
+                          load_rsp.drv_ver_0,
+                          load_rsp.drv_ver_1,
+                          load_rsp.fw_ver,
+                          load_rsp.misc0,
+                          QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
+                          QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
+                          QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));
+
+               p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
+               p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
+               p_out_params->exist_fw_ver = load_rsp.fw_ver;
+               p_out_params->exist_drv_role =
+                   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
+               p_out_params->mfw_hsi_ver =
+                   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
+               p_out_params->drv_exists =
+                   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
+                   LOAD_RSP_FLAGS0_DRV_EXISTS;
+       }
+
+       return 0;
+}
+
+static int qed_get_mfw_drv_role(struct qed_hwfn *p_hwfn,
+                               enum qed_drv_role drv_role,
+                               u8 *p_mfw_drv_role)
+{
+       switch (drv_role) {
+       case QED_DRV_ROLE_OS:
+               *p_mfw_drv_role = DRV_ROLE_OS;
+               break;
+       case QED_DRV_ROLE_KDUMP:
+               *p_mfw_drv_role = DRV_ROLE_KDUMP;
+               break;
+       default:
+               DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+enum qed_load_req_force {
+       QED_LOAD_REQ_FORCE_NONE,
+       QED_LOAD_REQ_FORCE_PF,
+       QED_LOAD_REQ_FORCE_ALL,
+};
+
+static void qed_get_mfw_force_cmd(struct qed_hwfn *p_hwfn,
+                                 enum qed_load_req_force force_cmd,
+                                 u8 *p_mfw_force_cmd)
+{
+       switch (force_cmd) {
+       case QED_LOAD_REQ_FORCE_NONE:
+               *p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
+               break;
+       case QED_LOAD_REQ_FORCE_PF:
+               *p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
+               break;
+       case QED_LOAD_REQ_FORCE_ALL:
+               *p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
+               break;
+       }
+}
+
+int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
+                    struct qed_ptt *p_ptt,
+                    struct qed_load_req_params *p_params)
+{
+       struct qed_load_req_out_params out_params;
+       struct qed_load_req_in_params in_params;
+       u8 mfw_drv_role, mfw_force_cmd;
+       int rc;
+
+       memset(&in_params, 0, sizeof(in_params));
+       in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_DEFAULT;
+       in_params.drv_ver_0 = QED_VERSION;
+       in_params.drv_ver_1 = qed_get_config_bitmap();
+       in_params.fw_ver = STORM_FW_VERSION;
+       rc = qed_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
+       if (rc)
+               return rc;
+
+       in_params.drv_role = mfw_drv_role;
+       in_params.timeout_val = p_params->timeout_val;
+       qed_get_mfw_force_cmd(p_hwfn,
+                             QED_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);
+
+       in_params.force_cmd = mfw_force_cmd;
+       in_params.avoid_eng_reset = p_params->avoid_eng_reset;
+
+       memset(&out_params, 0, sizeof(out_params));
+       rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
+       if (rc)
+               return rc;
+
+       /* First handle cases where another load request should/might be sent:
+        * - MFW expects the old interface [HSI version = 1]
+        * - MFW responds that a force load request is required
+        */
+       if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
+               DP_INFO(p_hwfn,
+                       "MFW refused a load request due to HSI > 1. Resending with HSI = 1\n");
+
+               in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_1;
+               memset(&out_params, 0, sizeof(out_params));
+               rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
+               if (rc)
+                       return rc;
+       } else if (out_params.load_code ==
+                  FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
+               if (qed_mcp_can_force_load(in_params.drv_role,
+                                          out_params.exist_drv_role,
+                                          p_params->override_force_load)) {
+                       DP_INFO(p_hwfn,
+                               "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
+                               in_params.drv_role, in_params.fw_ver,
+                               in_params.drv_ver_0, in_params.drv_ver_1,
+                               out_params.exist_drv_role,
+                               out_params.exist_fw_ver,
+                               out_params.exist_drv_ver_0,
+                               out_params.exist_drv_ver_1);
+
+                       qed_get_mfw_force_cmd(p_hwfn,
+                                             QED_LOAD_REQ_FORCE_ALL,
+                                             &mfw_force_cmd);
+
+                       in_params.force_cmd = mfw_force_cmd;
+                       memset(&out_params, 0, sizeof(out_params));
+                       rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params,
+                                               &out_params);
+                       if (rc)
+                               return rc;
+               } else {
+                       DP_NOTICE(p_hwfn,
+                                 "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
+                                 in_params.drv_role, in_params.fw_ver,
+                                 in_params.drv_ver_0, in_params.drv_ver_1,
+                                 out_params.exist_drv_role,
+                                 out_params.exist_fw_ver,
+                                 out_params.exist_drv_ver_0,
+                                 out_params.exist_drv_ver_1);
+                       DP_NOTICE(p_hwfn,
+                                 "Avoid sending a force load request to prevent disruption of active PFs\n");
+
+                       qed_mcp_cancel_load_req(p_hwfn, p_ptt);
+                       return -EBUSY;
+               }
+       }
+
+       /* Now handle the other types of responses.
+        * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
+        * expected here after the additional revised load requests were sent.
+        */
+       switch (out_params.load_code) {
+       case FW_MSG_CODE_DRV_LOAD_ENGINE:
+       case FW_MSG_CODE_DRV_LOAD_PORT:
+       case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+               if (out_params.mfw_hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
+                   out_params.drv_exists) {
+                       /* The role and fw/driver version match, but the PF is
+                        * already loaded and has not been unloaded gracefully.
+                        */
+                       DP_NOTICE(p_hwfn, "PF is already loaded\n");
+                       return -EINVAL;
+               }
+               break;
+       default:
+               DP_NOTICE(p_hwfn,
+                         "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
+                         out_params.load_code);
+               return -EBUSY;
+       }
+
+       p_params->load_code = out_params.load_code;
+
+       return 0;
+}
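
The resulting load-request negotiation, end to end:

        /* 1. Send LOAD_REQ with the current HSI version and FORCE_NONE.
         * 2. On REFUSED_HSI_1, resend once with hsi_ver = 1 (old format).
         *    On REFUSED_REQUIRES_FORCE, resend with FORCE_ALL if
         *    qed_mcp_can_force_load() allows it; otherwise cancel the
         *    request and fail with -EBUSY.
         * 3. Accept ENGINE/PORT/FUNCTION responses, unless the MFW flags
         *    the PF as already loaded (a prior ungraceful unload), which
         *    fails with -EINVAL.
         */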
+
+int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+       u32 wol_param, mcp_resp, mcp_param;
+
+       switch (p_hwfn->cdev->wol_config) {
+       case QED_OV_WOL_DISABLED:
+               wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
+               break;
+       case QED_OV_WOL_ENABLED:
+               wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
+               break;
+       default:
+               DP_NOTICE(p_hwfn,
+                         "Unknown WoL configuration %02x\n",
+                         p_hwfn->cdev->wol_config);
+               /* Fallthrough */
+       case QED_OV_WOL_DEFAULT:
+               wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
+       }
+
+       return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
+                          &mcp_resp, &mcp_param);
+}
+
+int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+       struct qed_mcp_mb_params mb_params;
+       struct mcp_mac wol_mac;
+
+       memset(&mb_params, 0, sizeof(mb_params));
+       mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;
+
+       /* Set the primary MAC if WoL is enabled */
+       if (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED) {
+               u8 *p_mac = p_hwfn->cdev->wol_mac;
+
+               memset(&wol_mac, 0, sizeof(wol_mac));
+               wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
+               wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
+                                   p_mac[4] << 8 | p_mac[5];
+
+               DP_VERBOSE(p_hwfn,
+                          (QED_MSG_SP | NETIF_MSG_IFDOWN),
+                          "Setting WoL MAC: %pM --> [%08x,%08x]\n",
+                          p_mac, wol_mac.mac_upper, wol_mac.mac_lower);
+
+               mb_params.p_data_src = &wol_mac;
+               mb_params.data_src_size = sizeof(wol_mac);
+       }
+
+       return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+}
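
The MAC packing above is big-endian across the two shmem words; a worked
example for 00:11:22:33:44:55:

        mac_upper = (0x00 << 8) | 0x11 = 0x0011
        mac_lower = (0x22 << 24) | (0x33 << 16) | (0x44 << 8) | 0x55
                  = 0x22334455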
+
+static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
+                                 struct qed_ptt *p_ptt)
+{
+       u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+                                       PUBLIC_PATH);
+       u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
+       u32 path_addr = SECTION_ADDR(mfw_path_offsize,
+                                    QED_PATH_ID(p_hwfn));
+       u32 disabled_vfs[VF_MAX_STATIC / 32];
+       int i;
+
+       DP_VERBOSE(p_hwfn,
+                  QED_MSG_SP,
+                  "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
+                  mfw_path_offsize, path_addr);
+
+       for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
+               disabled_vfs[i] = qed_rd(p_hwfn, p_ptt,
+                                        path_addr +
+                                        offsetof(struct public_path,
+                                                 mcp_vf_disabled) +
+                                        sizeof(u32) * i);
+               DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
+                          "FLR-ed VFs [%08x,...,%08x] - %08x\n",
+                          i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
+       }
+
+       if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs))
+               qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
+}
+
+int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
+                      struct qed_ptt *p_ptt, u32 *vfs_to_ack)
+{
+       u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+                                       PUBLIC_FUNC);
+       u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr);
+       u32 func_addr = SECTION_ADDR(mfw_func_offsize,
+                                    MCP_PF_ID(p_hwfn));
+       struct qed_mcp_mb_params mb_params;
+       int rc;
+       int i;
+
+       for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+               DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
+                          "Acking VFs [%08x,...,%08x] - %08x\n",
+                          i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
+
+       memset(&mb_params, 0, sizeof(mb_params));
+       mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
+       mb_params.p_data_src = vfs_to_ack;
+       mb_params.data_src_size = VF_MAX_STATIC / 8;
+       rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+       if (rc) {
+               DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
+               return -EBUSY;
+       }
+
+       /* Clear the ACK bits */
+       for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+               qed_wr(p_hwfn, p_ptt,
+                      func_addr +
+                      offsetof(struct public_func, drv_ack_vf_disabled) +
+                      i * sizeof(u32), 0);
+
+       return rc;
+}
+
+static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
+                                             struct qed_ptt *p_ptt)
+{
+       u32 transceiver_state;
+
+       transceiver_state = qed_rd(p_hwfn, p_ptt,
+                                  p_hwfn->mcp_info->port_addr +
+                                  offsetof(struct public_port,
+                                           transceiver_data));
+
+       DP_VERBOSE(p_hwfn,
+                  (NETIF_MSG_HW | QED_MSG_SP),
+                  "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
+                  transceiver_state,
+                  (u32)(p_hwfn->mcp_info->port_addr +
+                         offsetof(struct public_port, transceiver_data)));
+
+       transceiver_state = GET_FIELD(transceiver_state,
+                                     ETH_TRANSCEIVER_STATE);
+
+       if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
+               DP_NOTICE(p_hwfn, "Transceiver is present.\n");
+       else
+               DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n");
+}
+
+static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
+                                      struct qed_ptt *p_ptt, bool b_reset)
+{
+       struct qed_mcp_link_state *p_link;
+       u8 max_bw, min_bw;
+       u32 status = 0;
+
+       /* Prevent SW/attentions from doing this at the same time */
+       spin_lock_bh(&p_hwfn->mcp_info->link_lock);
+
+       p_link = &p_hwfn->mcp_info->link_output;
+       memset(p_link, 0, sizeof(*p_link));
+       if (!b_reset) {
+               status = qed_rd(p_hwfn, p_ptt,
+                               p_hwfn->mcp_info->port_addr +
+                               offsetof(struct public_port, link_status));
                DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP),
                           "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
                           status,
                           (u32)(p_hwfn->mcp_info->port_addr +
-                                offsetof(struct public_port,
-                                         link_status)));
+                                offsetof(struct public_port, link_status)));
        } else {
                DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
                           "Resetting link indications\n");
-               return;
+               goto out;
        }
 
        if (p_hwfn->b_drv_link_init)
@@ -613,7 +1171,8 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
 
        /* Min bandwidth configuration */
        __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
-       qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_link->min_pf_rate);
+       qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_ptt,
+                                           p_link->min_pf_rate);
 
        p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
        p_link->an_complete = !!(status &
@@ -666,39 +1225,39 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
        p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
 
        qed_link_update(p_hwfn);
+out:
+       spin_unlock_bh(&p_hwfn->mcp_info->link_lock);
 }
 
 int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
 {
        struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
        struct qed_mcp_mb_params mb_params;
-       union drv_union_data union_data;
-       struct eth_phy_cfg *phy_cfg;
+       struct eth_phy_cfg phy_cfg;
        int rc = 0;
        u32 cmd;
 
        /* Set the shmem configuration according to params */
-       phy_cfg = &union_data.drv_phy_cfg;
-       memset(phy_cfg, 0, sizeof(*phy_cfg));
+       memset(&phy_cfg, 0, sizeof(phy_cfg));
        cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
        if (!params->speed.autoneg)
-               phy_cfg->speed = params->speed.forced_speed;
-       phy_cfg->pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
-       phy_cfg->pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
-       phy_cfg->pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
-       phy_cfg->adv_speed = params->speed.advertised_speeds;
-       phy_cfg->loopback_mode = params->loopback_mode;
+               phy_cfg.speed = params->speed.forced_speed;
+       phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
+       phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
+       phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
+       phy_cfg.adv_speed = params->speed.advertised_speeds;
+       phy_cfg.loopback_mode = params->loopback_mode;
 
        p_hwfn->b_drv_link_init = b_up;
 
        if (b_up) {
                DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
                           "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
-                          phy_cfg->speed,
-                          phy_cfg->pause,
-                          phy_cfg->adv_speed,
-                          phy_cfg->loopback_mode,
-                          phy_cfg->feature_config_flags);
+                          phy_cfg.speed,
+                          phy_cfg.pause,
+                          phy_cfg.adv_speed,
+                          phy_cfg.loopback_mode,
+                          phy_cfg.feature_config_flags);
        } else {
                DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
                           "Resetting link\n");
@@ -706,7 +1265,8 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
 
        memset(&mb_params, 0, sizeof(mb_params));
        mb_params.cmd = cmd;
-       mb_params.p_data_src = &union_data;
+       mb_params.p_data_src = &phy_cfg;
+       mb_params.data_src_size = sizeof(phy_cfg);
        rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
 
        /* if mcp fails to respond we must abort */
@@ -715,13 +1275,58 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
                return rc;
        }
 
-       /* Reset the link status if needed */
-       if (!b_up)
-               qed_mcp_handle_link_change(p_hwfn, p_ptt, true);
+       /* Mimic link-change attention, done for several reasons:
+        *  - On reset, there's no guarantee MFW would trigger
+        *    an attention.
+        *  - On initialization, older MFWs might not indicate link change
+        *    during LFA, so we'll never get an UP indication.
+        */
+       qed_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);
 
        return 0;
 }
 
+static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
+                                       struct qed_ptt *p_ptt,
+                                       enum MFW_DRV_MSG_TYPE type)
+{
+       enum qed_mcp_protocol_type stats_type;
+       union qed_mcp_protocol_stats stats;
+       struct qed_mcp_mb_params mb_params;
+       u32 hsi_param;
+
+       switch (type) {
+       case MFW_DRV_MSG_GET_LAN_STATS:
+               stats_type = QED_MCP_LAN_STATS;
+               hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
+               break;
+       case MFW_DRV_MSG_GET_FCOE_STATS:
+               stats_type = QED_MCP_FCOE_STATS;
+               hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE;
+               break;
+       case MFW_DRV_MSG_GET_ISCSI_STATS:
+               stats_type = QED_MCP_ISCSI_STATS;
+               hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI;
+               break;
+       case MFW_DRV_MSG_GET_RDMA_STATS:
+               stats_type = QED_MCP_RDMA_STATS;
+               hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA;
+               break;
+       default:
+               DP_NOTICE(p_hwfn, "Invalid protocol type %d\n", type);
+               return;
+       }
+
+       qed_get_protocol_stats(p_hwfn->cdev, stats_type, &stats);
+
+       memset(&mb_params, 0, sizeof(mb_params));
+       mb_params.cmd = DRV_MSG_CODE_GET_STATS;
+       mb_params.param = hsi_param;
+       mb_params.p_data_src = &stats;
+       mb_params.data_src_size = sizeof(stats);
+       qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+}
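
qed_mcp_send_protocol_stats() answers an MFW pull: the MFW raises an
MFW_DRV_MSG_GET_*_STATS message, the driver gathers the matching counters
through qed_get_protocol_stats() and echoes them back via
DRV_MSG_CODE_GET_STATS. A minimal sketch of the cdev-level collector this
call assumes; the qed_fill_*_stats() helpers are hypothetical placeholders:

void qed_get_protocol_stats(struct qed_dev *cdev,
			    enum qed_mcp_protocol_type type,
			    union qed_mcp_protocol_stats *stats)
{
	memset(stats, 0, sizeof(*stats));

	switch (type) {
	case QED_MCP_LAN_STATS:
		qed_fill_lan_stats(cdev, &stats->lan_stats);	/* hypothetical */
		break;
	case QED_MCP_RDMA_STATS:
		qed_fill_rdma_stats(cdev, &stats->rdma_stats);	/* hypothetical */
		break;
	default:
		break;	/* FCoE/iSCSI handled analogously */
	}
}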
+
 static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
                                  struct public_func *p_shmem_info)
 {
@@ -752,8 +1357,7 @@ static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
 
 static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
                                  struct qed_ptt *p_ptt,
-                                 struct public_func *p_data,
-                                 int pfid)
+                                 struct public_func *p_data, int pfid)
 {
        u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
                                        PUBLIC_FUNC);
@@ -763,51 +1367,20 @@ static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
 
        memset(p_data, 0, sizeof(*p_data));
 
-       size = min_t(u32, sizeof(*p_data),
-                    QED_SECTION_SIZE(mfw_path_offsize));
+       size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize));
        for (i = 0; i < size / sizeof(u32); i++)
                ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
                                            func_addr + (i << 2));
        return size;
 }
 
-int qed_hw_init_first_eth(struct qed_hwfn *p_hwfn,
-                         struct qed_ptt *p_ptt, u8 *p_pf)
-{
-       struct public_func shmem_info;
-       int i;
-
-       /* Find first Ethernet interface in port */
-       for (i = 0; i < NUM_OF_ENG_PFS(p_hwfn->cdev);
-            i += p_hwfn->cdev->num_ports_in_engines) {
-               qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
-                                      MCP_PF_ID_BY_REL(p_hwfn, i));
-
-               if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
-                       continue;
-
-               if ((shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK) ==
-                   FUNC_MF_CFG_PROTOCOL_ETHERNET) {
-                       *p_pf = (u8)i;
-                       return 0;
-               }
-       }
-
-       DP_NOTICE(p_hwfn,
-                 "Failed to find on port an ethernet interface in MF_SI mode\n");
-
-       return -EINVAL;
-}
-
-static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn,
-                             struct qed_ptt *p_ptt)
+static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
        struct qed_mcp_function_info *p_info;
        struct public_func shmem_info;
        u32 resp = 0, param = 0;
 
-       qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
-                              MCP_PF_ID(p_hwfn));
+       qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
 
        qed_read_pf_bandwidth(p_hwfn, &shmem_info);
 
@@ -867,11 +1440,17 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
                case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
                        qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
                        break;
+               case MFW_DRV_MSG_GET_LAN_STATS:
+               case MFW_DRV_MSG_GET_FCOE_STATS:
+               case MFW_DRV_MSG_GET_ISCSI_STATS:
+               case MFW_DRV_MSG_GET_RDMA_STATS:
+                       qed_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
+                       break;
                case MFW_DRV_MSG_BW_UPDATE:
                        qed_mcp_update_bw(p_hwfn, p_ptt);
                        break;
                default:
-                       DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i);
+                       DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
                        rc = -EINVAL;
                }
        }
@@ -940,8 +1519,7 @@ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
        return 0;
 }
 
-int qed_mcp_get_media_type(struct qed_dev *cdev,
-                          u32 *p_media_type)
+int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type)
 {
        struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
        struct qed_ptt  *p_ptt;
@@ -950,7 +1528,7 @@ int qed_mcp_get_media_type(struct qed_dev *cdev,
                return -EINVAL;
 
        if (!qed_mcp_is_init(p_hwfn)) {
-               DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
+               DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
                return -EBUSY;
        }
 
@@ -968,28 +1546,94 @@ int qed_mcp_get_media_type(struct qed_dev *cdev,
        return 0;
 }
 
+/* Old MFW has a global configuration for all PFs regarding RDMA support */
+static void
+qed_mcp_get_shmem_proto_legacy(struct qed_hwfn *p_hwfn,
+                              enum qed_pci_personality *p_proto)
+{
+       /* There wasn't ever a legacy MFW that published iwarp.
+        * So at this point, this is either plain l2 or RoCE.
+        */
+       if (test_bit(QED_DEV_CAP_ROCE, &p_hwfn->hw_info.device_capabilities))
+               *p_proto = QED_PCI_ETH_ROCE;
+       else
+               *p_proto = QED_PCI_ETH;
+
+       DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
+                  "According to Legacy capabilities, L2 personality is %08x\n",
+                  (u32) *p_proto);
+}
+
 static int
-qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
-                       struct public_func *p_info,
-                       enum qed_pci_personality *p_proto)
+qed_mcp_get_shmem_proto_mfw(struct qed_hwfn *p_hwfn,
+                           struct qed_ptt *p_ptt,
+                           enum qed_pci_personality *p_proto)
 {
-       int rc = 0;
+       u32 resp = 0, param = 0;
+       int rc;
 
-       switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
-       case FUNC_MF_CFG_PROTOCOL_ETHERNET:
-               if (test_bit(QED_DEV_CAP_ROCE,
-                            &p_hwfn->hw_info.device_capabilities))
-                       *p_proto = QED_PCI_ETH_ROCE;
-               else
-                       *p_proto = QED_PCI_ETH;
+       rc = qed_mcp_cmd(p_hwfn, p_ptt,
+                        DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL, 0, &resp, &param);
+       if (rc)
+               return rc;
+       if (resp != FW_MSG_CODE_OK) {
+               DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
+                          "MFW lacks support for command; Returns %08x\n",
+                          resp);
+               return -EINVAL;
+       }
+
+       switch (param) {
+       case FW_MB_PARAM_GET_PF_RDMA_NONE:
+               *p_proto = QED_PCI_ETH;
                break;
-       case FUNC_MF_CFG_PROTOCOL_ISCSI:
-               *p_proto = QED_PCI_ISCSI;
+       case FW_MB_PARAM_GET_PF_RDMA_ROCE:
+               *p_proto = QED_PCI_ETH_ROCE;
                break;
-       case FUNC_MF_CFG_PROTOCOL_ROCE:
-               DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n");
-               rc = -EINVAL;
+       case FW_MB_PARAM_GET_PF_RDMA_BOTH:
+               DP_NOTICE(p_hwfn,
+                         "Current day drivers don't support RoCE & iWARP. Default to RoCE-only\n");
+               *p_proto = QED_PCI_ETH_ROCE;
                break;
+       case FW_MB_PARAM_GET_PF_RDMA_IWARP:
+       default:
+               DP_NOTICE(p_hwfn,
+                         "MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n",
+                         param);
+               return -EINVAL;
+       }
+
+       DP_VERBOSE(p_hwfn,
+                  NETIF_MSG_IFUP,
+                  "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
+                  (u32) *p_proto, resp, param);
+       return 0;
+}
+
+static int
+qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
+                       struct public_func *p_info,
+                       struct qed_ptt *p_ptt,
+                       enum qed_pci_personality *p_proto)
+{
+       int rc = 0;
+
+       switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
+       case FUNC_MF_CFG_PROTOCOL_ETHERNET:
+               if (!IS_ENABLED(CONFIG_QED_RDMA))
+                       *p_proto = QED_PCI_ETH;
+               else if (qed_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto))
+                       qed_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
+               break;
+       case FUNC_MF_CFG_PROTOCOL_ISCSI:
+               *p_proto = QED_PCI_ISCSI;
+               break;
+       case FUNC_MF_CFG_PROTOCOL_FCOE:
+               *p_proto = QED_PCI_FCOE;
+               break;
+       case FUNC_MF_CFG_PROTOCOL_ROCE:
+               DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n");
+       /* Fallthrough */
        default:
                rc = -EINVAL;
        }
@@ -1003,14 +1647,13 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
        struct qed_mcp_function_info *info;
        struct public_func shmem_info;
 
-       qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
-                              MCP_PF_ID(p_hwfn));
+       qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
        info = &p_hwfn->mcp_info->func_info;
 
        info->pause_on_host = (shmem_info.config &
                               FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
 
-       if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info,
+       if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
                                    &info->protocol)) {
                DP_ERR(p_hwfn, "Unknown personality %08x\n",
                       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
@@ -1026,6 +1669,9 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
                info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
                info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
                info->mac[5] = (u8)(shmem_info.mac_lower);
+
+               /* Store primary MAC for later possible WoL */
+               memcpy(&p_hwfn->cdev->wol_mac, info->mac, ETH_ALEN);
        } else {
                DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
        }
@@ -1037,13 +1683,30 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
 
        info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
 
+       info->mtu = (u16)shmem_info.mtu_size;
+
+       p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_NONE;
+       p_hwfn->cdev->wol_config = (u8)QED_OV_WOL_DEFAULT;
+       if (qed_mcp_is_init(p_hwfn)) {
+               u32 resp = 0, param = 0;
+               int rc;
+
+               rc = qed_mcp_cmd(p_hwfn, p_ptt,
+                                DRV_MSG_CODE_OS_WOL, 0, &resp, &param);
+               if (rc)
+                       return rc;
+               if (resp == FW_MSG_CODE_OS_WOL_SUPPORTED)
+                       p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_PME;
+       }
+
        DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
-                  "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x\n",
+                  "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x wol %02x\n",
                info->pause_on_host, info->protocol,
                info->bandwidth_min, info->bandwidth_max,
                info->mac[0], info->mac[1], info->mac[2],
                info->mac[3], info->mac[4], info->mac[5],
-               info->wwn_port, info->wwn_node, info->ovlan);
+               info->wwn_port, info->wwn_node,
+               info->ovlan, (u8)p_hwfn->hw_info.b_wol_support);
 
        return 0;
 }
@@ -1072,15 +1735,13 @@ struct qed_mcp_link_capabilities
        return &p_hwfn->mcp_info->link_capabilities;
 }
 
-int qed_mcp_drain(struct qed_hwfn *p_hwfn,
-                 struct qed_ptt *p_ptt)
+int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
        u32 resp = 0, param = 0;
        int rc;
 
        rc = qed_mcp_cmd(p_hwfn, p_ptt,
-                        DRV_MSG_CODE_NIG_DRAIN, 1000,
-                        &resp, &param);
+                        DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);
 
        /* Wait for the drain to complete before returning */
        msleep(1020);
@@ -1089,8 +1750,7 @@ int qed_mcp_drain(struct qed_hwfn *p_hwfn,
 }
 
 int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
-                          struct qed_ptt *p_ptt,
-                          u32 *p_flash_size)
+                          struct qed_ptt *p_ptt, u32 *p_flash_size)
 {
        u32 flash_size;
 
@@ -1143,24 +1803,23 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
                         struct qed_ptt *p_ptt,
                         struct qed_mcp_drv_version *p_ver)
 {
-       struct drv_version_stc *p_drv_version;
        struct qed_mcp_mb_params mb_params;
-       union drv_union_data union_data;
+       struct drv_version_stc drv_version;
        __be32 val;
        u32 i;
        int rc;
 
-       p_drv_version = &union_data.drv_version;
-       p_drv_version->version = p_ver->version;
-
+       memset(&drv_version, 0, sizeof(drv_version));
+       drv_version.version = p_ver->version;
        for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
                val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
-               *(__be32 *)&p_drv_version->name[i * sizeof(u32)] = val;
+               *(__be32 *)&drv_version.name[i * sizeof(u32)] = val;
        }
 
        memset(&mb_params, 0, sizeof(mb_params));
        mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
-       mb_params.p_data_src = &union_data;
+       mb_params.p_data_src = &drv_version;
+       mb_params.data_src_size = sizeof(drv_version);
        rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc)
                DP_ERR(p_hwfn, "MCP response failure, aborting\n");
@@ -1168,8 +1827,216 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
        return rc;
 }
 
-int qed_mcp_set_led(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
-                   enum qed_led_mode mode)
+int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+       u32 resp = 0, param = 0;
+       int rc;
+
+       rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
+                        &param);
+       if (rc)
+               DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+
+       return rc;
+}
+
+int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+       u32 value, cpu_mode;
+
+       qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
+
+       value = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+       value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
+       qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
+       cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+
+       return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -EAGAIN : 0;
+}
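
qed_mcp_halt() asks the MFW to park its CPU, while qed_mcp_resume() clears
MCP_REG_CPU_MODE_SOFT_HALT and reads the mode register back to verify the
core actually woke up. A hedged usage sketch bracketing work that requires
a quiesced management CPU:

static int do_with_mcp_halted(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	int rc = qed_mcp_halt(p_hwfn, p_ptt);

	if (rc)
		return rc;

	/* ... register accesses that must not race the MCP ... */

	return qed_mcp_resume(p_hwfn, p_ptt);	/* -EAGAIN if still halted */
}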
+
+int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
+                                    struct qed_ptt *p_ptt,
+                                    enum qed_ov_client client)
+{
+       u32 resp = 0, param = 0;
+       u32 drv_mb_param;
+       int rc;
+
+       switch (client) {
+       case QED_OV_CLIENT_DRV:
+               drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
+               break;
+       case QED_OV_CLIENT_USER:
+               drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
+               break;
+       case QED_OV_CLIENT_VENDOR_SPEC:
+               drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
+               break;
+       default:
+               DP_NOTICE(p_hwfn, "Invalid client type %d\n", client);
+               return -EINVAL;
+       }
+
+       rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
+                        drv_mb_param, &resp, &param);
+       if (rc)
+               DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+
+       return rc;
+}
+
+int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn,
+                                  struct qed_ptt *p_ptt,
+                                  enum qed_ov_driver_state drv_state)
+{
+       u32 resp = 0, param = 0;
+       u32 drv_mb_param;
+       int rc;
+
+       switch (drv_state) {
+       case QED_OV_DRIVER_STATE_NOT_LOADED:
+               drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
+               break;
+       case QED_OV_DRIVER_STATE_DISABLED:
+               drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
+               break;
+       case QED_OV_DRIVER_STATE_ACTIVE:
+               drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
+               break;
+       default:
+               DP_NOTICE(p_hwfn, "Invalid driver state %d\n", drv_state);
+               return -EINVAL;
+       }
+
+       rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
+                        drv_mb_param, &resp, &param);
+       if (rc)
+               DP_ERR(p_hwfn, "Failed to send driver state\n");
+
+       return rc;
+}
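
The qed_mcp_ov_update_*() helpers added from here on share one shape:
translate a driver-side enum into a DRV_MB_PARAM_*/DRV_MSG_CODE_* pair and
issue a single mailbox command. For instance, a caller marking the driver
active after a successful load (illustrative):

	qed_mcp_ov_update_driver_state(p_hwfn, p_ptt,
				       QED_OV_DRIVER_STATE_ACTIVE);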
+
+int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
+                         struct qed_ptt *p_ptt, u16 mtu)
+{
+       u32 resp = 0, param = 0;
+       u32 drv_mb_param;
+       int rc;
+
+       drv_mb_param = (u32)mtu << DRV_MB_PARAM_OV_MTU_SIZE_SHIFT;
+       rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
+                        drv_mb_param, &resp, &param);
+       if (rc)
+               DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);
+
+       return rc;
+}
+
+int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
+                         struct qed_ptt *p_ptt, u8 *mac)
+{
+       struct qed_mcp_mb_params mb_params;
+       u32 mfw_mac[2];
+       int rc;
+
+       memset(&mb_params, 0, sizeof(mb_params));
+       mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
+       mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC <<
+                         DRV_MSG_CODE_VMAC_TYPE_SHIFT;
+       mb_params.param |= MCP_PF_ID(p_hwfn);
+
+       /* MCP is BE, and on LE platforms PCI would swap access to SHMEM
+        * in 32-bit granularity.
+        * So the MAC has to be set in native order [and not byte order],
+        * otherwise it would be read incorrectly by MFW after swap.
+        */
+       mfw_mac[0] = mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3];
+       mfw_mac[1] = mac[4] << 24 | mac[5] << 16;
+
+       mb_params.p_data_src = (u8 *)mfw_mac;
+       mb_params.data_src_size = 8;
+       rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+       if (rc)
+               DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);
+
+       /* Store primary MAC for later possible WoL */
+       memcpy(p_hwfn->cdev->wol_mac, mac, ETH_ALEN);
+
+       return rc;
+}
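
A quick worked check of the native-order packing above (sketch, not part of
the patch):

	u8 mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	u32 hi = mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3];
	u32 lo = mac[4] << 24 | mac[5] << 16;

	/* hi == 0x00112233, lo == 0x44550000: each u32 holds the MAC bytes
	 * in native order, so the per-u32 swap described in the comment
	 * reproduces the original byte sequence on the MFW side.
	 */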
+
+int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn,
+                         struct qed_ptt *p_ptt, enum qed_ov_wol wol)
+{
+       u32 resp = 0, param = 0;
+       u32 drv_mb_param;
+       int rc;
+
+       if (p_hwfn->hw_info.b_wol_support == QED_WOL_SUPPORT_NONE) {
+               DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                          "Can't change WoL configuration when WoL isn't supported\n");
+               return -EINVAL;
+       }
+
+       switch (wol) {
+       case QED_OV_WOL_DEFAULT:
+               drv_mb_param = DRV_MB_PARAM_WOL_DEFAULT;
+               break;
+       case QED_OV_WOL_DISABLED:
+               drv_mb_param = DRV_MB_PARAM_WOL_DISABLED;
+               break;
+       case QED_OV_WOL_ENABLED:
+               drv_mb_param = DRV_MB_PARAM_WOL_ENABLED;
+               break;
+       default:
+               DP_ERR(p_hwfn, "Invalid wol state %d\n", wol);
+               return -EINVAL;
+       }
+
+       rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_WOL,
+                        drv_mb_param, &resp, &param);
+       if (rc)
+               DP_ERR(p_hwfn, "Failed to send wol mode, rc = %d\n", rc);
+
+       /* Store the WoL update for a future unload */
+       p_hwfn->cdev->wol_config = (u8)wol;
+
+       return rc;
+}
+
+int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
+                             struct qed_ptt *p_ptt,
+                             enum qed_ov_eswitch eswitch)
+{
+       u32 resp = 0, param = 0;
+       u32 drv_mb_param;
+       int rc;
+
+       switch (eswitch) {
+       case QED_OV_ESWITCH_NONE:
+               drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
+               break;
+       case QED_OV_ESWITCH_VEB:
+               drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
+               break;
+       case QED_OV_ESWITCH_VEPA:
+               drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
+               break;
+       default:
+               DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
+               return -EINVAL;
+       }
+
+       rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
+                        drv_mb_param, &resp, &param);
+       if (rc)
+               DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);
+
+       return rc;
+}
+
+int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
+                   struct qed_ptt *p_ptt, enum qed_led_mode mode)
 {
        u32 resp = 0, param = 0, drv_mb_param;
        int rc;
@@ -1195,6 +2062,73 @@ int qed_mcp_set_led(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
        return rc;
 }
 
+int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
+                         struct qed_ptt *p_ptt, u32 mask_parities)
+{
+       u32 resp = 0, param = 0;
+       int rc;
+
+       rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
+                        mask_parities, &resp, &param);
+
+       if (rc) {
+               DP_ERR(p_hwfn,
+                      "MCP response failure for mask parities, aborting\n");
+       } else if (resp != FW_MSG_CODE_OK) {
+               DP_ERR(p_hwfn,
+                      "MCP did not acknowledge mask parity request. Old MFW?\n");
+               rc = -EINVAL;
+       }
+
+       return rc;
+}
+
+int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
+{
+       u32 bytes_left = len, offset = 0, bytes_to_copy, read_len = 0;
+       struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+       u32 resp = 0, resp_param = 0;
+       struct qed_ptt *p_ptt;
+       int rc = 0;
+
+       p_ptt = qed_ptt_acquire(p_hwfn);
+       if (!p_ptt)
+               return -EBUSY;
+
+       while (bytes_left > 0) {
+               bytes_to_copy = min_t(u32, bytes_left, MCP_DRV_NVM_BUF_LEN);
+
+               rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
+                                       DRV_MSG_CODE_NVM_READ_NVRAM,
+                                       addr + offset +
+                                       (bytes_to_copy <<
+                                        DRV_MB_PARAM_NVM_LEN_SHIFT),
+                                       &resp, &resp_param,
+                                       &read_len,
+                                       (u32 *)(p_buf + offset));
+
+               if (rc || (resp != FW_MSG_CODE_NVM_OK)) {
+                       DP_NOTICE(cdev, "MCP command rc = %d\n", rc);
+                       break;
+               }
+
+               /* This can be a lengthy process, and it's possible the
+                * scheduler isn't preemptible. Sleep a bit to prevent
+                * CPU hogging.
+                */
+               if (bytes_left % 0x1000 <
+                   (bytes_left - read_len) % 0x1000)
+                       usleep_range(1000, 2000);
+
+               offset += read_len;
+               bytes_left -= read_len;
+       }
+
+       cdev->mcp_nvm_resp = resp;
+       qed_ptt_release(p_hwfn, p_ptt);
+
+       return rc;
+}
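
qed_mcp_nvm_read() chunks the transfer to MCP_DRV_NVM_BUF_LEN bytes per
mailbox transaction and sleeps briefly whenever the loop crosses a 4 KB
boundary. A hedged usage sketch, assuming a valid cdev:

	u8 *buf = kzalloc(1024, GFP_KERNEL);

	if (buf) {
		if (qed_mcp_nvm_read(cdev, 0 /* nvm offset */, buf, 1024))
			DP_NOTICE(cdev, "NVM read failed\n");
		kfree(buf);
	}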
+
 int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
        u32 drv_mb_param = 0, rsp, param;
@@ -1236,3 +2170,445 @@ int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 
        return rc;
 }
+
+int qed_mcp_bist_nvm_test_get_num_images(struct qed_hwfn *p_hwfn,
+                                        struct qed_ptt *p_ptt,
+                                        u32 *num_images)
+{
+       u32 drv_mb_param = 0, rsp;
+       int rc = 0;
+
+       drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
+                       DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
+
+       rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
+                        drv_mb_param, &rsp, num_images);
+       if (rc)
+               return rc;
+
+       if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK)
+               rc = -EINVAL;
+
+       return rc;
+}
+
+int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn,
+                                       struct qed_ptt *p_ptt,
+                                       struct bist_nvm_image_att *p_image_att,
+                                       u32 image_index)
+{
+       u32 buf_size = 0, param, resp = 0, resp_param = 0;
+       int rc;
+
+       param = DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
+               DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT;
+       param |= image_index << DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT;
+
+       rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
+                               DRV_MSG_CODE_BIST_TEST, param,
+                               &resp, &resp_param,
+                               &buf_size,
+                               (u32 *)p_image_att);
+       if (rc)
+               return rc;
+
+       if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
+           (p_image_att->return_code != 1))
+               rc = -EINVAL;
+
+       return rc;
+}
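
The two BIST NVM helpers compose naturally: first ask how many images the
NVM holds, then fetch attributes per index. A hedged sketch of that walk:

	struct bist_nvm_image_att image_att;
	u32 num_images = 0, i;

	if (!qed_mcp_bist_nvm_test_get_num_images(p_hwfn, p_ptt, &num_images))
		for (i = 0; i < num_images; i++)
			if (!qed_mcp_bist_nvm_test_get_image_att(p_hwfn, p_ptt,
								 &image_att, i))
				DP_VERBOSE(p_hwfn, QED_MSG_SP,
					   "NVM image %u attributes OK\n", i);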
+
+static enum resource_id_enum qed_mcp_get_mfw_res_id(enum qed_resources res_id)
+{
+       enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
+
+       switch (res_id) {
+       case QED_SB:
+               mfw_res_id = RESOURCE_NUM_SB_E;
+               break;
+       case QED_L2_QUEUE:
+               mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
+               break;
+       case QED_VPORT:
+               mfw_res_id = RESOURCE_NUM_VPORT_E;
+               break;
+       case QED_RSS_ENG:
+               mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
+               break;
+       case QED_PQ:
+               mfw_res_id = RESOURCE_NUM_PQ_E;
+               break;
+       case QED_RL:
+               mfw_res_id = RESOURCE_NUM_RL_E;
+               break;
+       case QED_MAC:
+       case QED_VLAN:
+               /* Each VFC resource can accommodate both a MAC and a VLAN */
+               mfw_res_id = RESOURCE_VFC_FILTER_E;
+               break;
+       case QED_ILT:
+               mfw_res_id = RESOURCE_ILT_E;
+               break;
+       case QED_LL2_QUEUE:
+               mfw_res_id = RESOURCE_LL2_QUEUE_E;
+               break;
+       case QED_RDMA_CNQ_RAM:
+       case QED_CMDQS_CQS:
+               /* CNQ/CMDQS are the same resource */
+               mfw_res_id = RESOURCE_CQS_E;
+               break;
+       case QED_RDMA_STATS_QUEUE:
+               mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
+               break;
+       case QED_BDQ:
+               mfw_res_id = RESOURCE_BDQ_E;
+               break;
+       default:
+               break;
+       }
+
+       return mfw_res_id;
+}
+
+#define QED_RESC_ALLOC_VERSION_MAJOR    2
+#define QED_RESC_ALLOC_VERSION_MINOR    0
+#define QED_RESC_ALLOC_VERSION                              \
+       ((QED_RESC_ALLOC_VERSION_MAJOR <<                    \
+         DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \
+        (QED_RESC_ALLOC_VERSION_MINOR <<                    \
+         DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))
+
+struct qed_resc_alloc_in_params {
+       u32 cmd;
+       enum qed_resources res_id;
+       u32 resc_max_val;
+};
+
+struct qed_resc_alloc_out_params {
+       u32 mcp_resp;
+       u32 mcp_param;
+       u32 resc_num;
+       u32 resc_start;
+       u32 vf_resc_num;
+       u32 vf_resc_start;
+       u32 flags;
+};
+
+static int
+qed_mcp_resc_allocation_msg(struct qed_hwfn *p_hwfn,
+                           struct qed_ptt *p_ptt,
+                           struct qed_resc_alloc_in_params *p_in_params,
+                           struct qed_resc_alloc_out_params *p_out_params)
+{
+       struct qed_mcp_mb_params mb_params;
+       struct resource_info mfw_resc_info;
+       int rc;
+
+       memset(&mfw_resc_info, 0, sizeof(mfw_resc_info));
+
+       mfw_resc_info.res_id = qed_mcp_get_mfw_res_id(p_in_params->res_id);
+       if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
+               DP_ERR(p_hwfn,
+                      "Failed to match resource %d [%s] with the MFW resources\n",
+                      p_in_params->res_id,
+                      qed_hw_get_resc_name(p_in_params->res_id));
+               return -EINVAL;
+       }
+
+       switch (p_in_params->cmd) {
+       case DRV_MSG_SET_RESOURCE_VALUE_MSG:
+               mfw_resc_info.size = p_in_params->resc_max_val;
+               /* Fallthrough */
+       case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
+               break;
+       default:
+               DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
+                      p_in_params->cmd);
+               return -EINVAL;
+       }
+
+       memset(&mb_params, 0, sizeof(mb_params));
+       mb_params.cmd = p_in_params->cmd;
+       mb_params.param = QED_RESC_ALLOC_VERSION;
+       mb_params.p_data_src = &mfw_resc_info;
+       mb_params.data_src_size = sizeof(mfw_resc_info);
+       mb_params.p_data_dst = mb_params.p_data_src;
+       mb_params.data_dst_size = mb_params.data_src_size;
+
+       DP_VERBOSE(p_hwfn,
+                  QED_MSG_SP,
+                  "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
+                  p_in_params->cmd,
+                  p_in_params->res_id,
+                  qed_hw_get_resc_name(p_in_params->res_id),
+                  QED_MFW_GET_FIELD(mb_params.param,
+                                    DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
+                  QED_MFW_GET_FIELD(mb_params.param,
+                                    DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
+                  p_in_params->resc_max_val);
+
+       rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+       if (rc)
+               return rc;
+
+       p_out_params->mcp_resp = mb_params.mcp_resp;
+       p_out_params->mcp_param = mb_params.mcp_param;
+       p_out_params->resc_num = mfw_resc_info.size;
+       p_out_params->resc_start = mfw_resc_info.offset;
+       p_out_params->vf_resc_num = mfw_resc_info.vf_size;
+       p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
+       p_out_params->flags = mfw_resc_info.flags;
+
+       DP_VERBOSE(p_hwfn,
+                  QED_MSG_SP,
+                  "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
+                  QED_MFW_GET_FIELD(p_out_params->mcp_param,
+                                    FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
+                  QED_MFW_GET_FIELD(p_out_params->mcp_param,
+                                    FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
+                  p_out_params->resc_num,
+                  p_out_params->resc_start,
+                  p_out_params->vf_resc_num,
+                  p_out_params->vf_resc_start, p_out_params->flags);
+
+       return 0;
+}
+
+int
+qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
+                        struct qed_ptt *p_ptt,
+                        enum qed_resources res_id,
+                        u32 resc_max_val, u32 *p_mcp_resp)
+{
+       struct qed_resc_alloc_out_params out_params;
+       struct qed_resc_alloc_in_params in_params;
+       int rc;
+
+       memset(&in_params, 0, sizeof(in_params));
+       in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
+       in_params.res_id = res_id;
+       in_params.resc_max_val = resc_max_val;
+       memset(&out_params, 0, sizeof(out_params));
+       rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
+                                        &out_params);
+       if (rc)
+               return rc;
+
+       *p_mcp_resp = out_params.mcp_resp;
+
+       return 0;
+}
+
+int
+qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
+                     struct qed_ptt *p_ptt,
+                     enum qed_resources res_id,
+                     u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start)
+{
+       struct qed_resc_alloc_out_params out_params;
+       struct qed_resc_alloc_in_params in_params;
+       int rc;
+
+       memset(&in_params, 0, sizeof(in_params));
+       in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
+       in_params.res_id = res_id;
+       memset(&out_params, 0, sizeof(out_params));
+       rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
+                                        &out_params);
+       if (rc)
+               return rc;
+
+       *p_mcp_resp = out_params.mcp_resp;
+
+       if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
+               *p_resc_num = out_params.resc_num;
+               *p_resc_start = out_params.resc_start;
+       }
+
+       return 0;
+}
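
qed_mcp_set_resc_max_val() and qed_mcp_get_resc_info() are thin wrappers
over the same resc-allocation message; note the output range is valid only
when mcp_resp is FW_MSG_CODE_RESOURCE_ALLOC_OK. A hedged usage sketch
querying this PF's VPORT range:

	u32 resp = 0, num = 0, start = 0;

	if (!qed_mcp_get_resc_info(p_hwfn, p_ptt, QED_VPORT,
				   &resp, &num, &start) &&
	    resp == FW_MSG_CODE_RESOURCE_ALLOC_OK)
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "VPORT: num 0x%x start 0x%x\n", num, start);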
+
+int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+       u32 mcp_resp, mcp_param;
+
+       return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
+                          &mcp_resp, &mcp_param);
+}
+
+static int qed_mcp_resource_cmd(struct qed_hwfn *p_hwfn,
+                               struct qed_ptt *p_ptt,
+                               u32 param, u32 *p_mcp_resp, u32 *p_mcp_param)
+{
+       int rc;
+
+       rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
+                        p_mcp_resp, p_mcp_param);
+       if (rc)
+               return rc;
+
+       if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+               DP_INFO(p_hwfn,
+                       "The resource command is unsupported by the MFW\n");
+               return -EINVAL;
+       }
+
+       if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
+               u8 opcode = QED_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
+
+               DP_NOTICE(p_hwfn,
+                         "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
+                         param, opcode);
+               return -EINVAL;
+       }
+
+       return rc;
+}
+
+int
+__qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
+                   struct qed_ptt *p_ptt,
+                   struct qed_resc_lock_params *p_params)
+{
+       u32 param = 0, mcp_resp, mcp_param;
+       u8 opcode;
+       int rc;
+
+       switch (p_params->timeout) {
+       case QED_MCP_RESC_LOCK_TO_DEFAULT:
+               opcode = RESOURCE_OPCODE_REQ;
+               p_params->timeout = 0;
+               break;
+       case QED_MCP_RESC_LOCK_TO_NONE:
+               opcode = RESOURCE_OPCODE_REQ_WO_AGING;
+               p_params->timeout = 0;
+               break;
+       default:
+               opcode = RESOURCE_OPCODE_REQ_W_AGING;
+               break;
+       }
+
+       QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
+       QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
+       QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
+
+       DP_VERBOSE(p_hwfn,
+                  QED_MSG_SP,
+                  "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
+                  param, p_params->timeout, opcode, p_params->resource);
+
+       /* Attempt to acquire the resource */
+       rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
+       if (rc)
+               return rc;
+
+       /* Analyze the response */
+       p_params->owner = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
+       opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
+
+       DP_VERBOSE(p_hwfn,
+                  QED_MSG_SP,
+                  "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
+                  mcp_param, opcode, p_params->owner);
+
+       switch (opcode) {
+       case RESOURCE_OPCODE_GNT:
+               p_params->b_granted = true;
+               break;
+       case RESOURCE_OPCODE_BUSY:
+               p_params->b_granted = false;
+               break;
+       default:
+               DP_NOTICE(p_hwfn,
+                         "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
+                         mcp_param, opcode);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+int
+qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
+                 struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params)
+{
+       u32 retry_cnt = 0;
+       int rc;
+
+       do {
+               /* No need for an interval before the first iteration */
+               if (retry_cnt) {
+                       if (p_params->sleep_b4_retry) {
+                               u16 retry_interval_in_ms =
+                                   DIV_ROUND_UP(p_params->retry_interval,
+                                                1000);
+
+                               msleep(retry_interval_in_ms);
+                       } else {
+                               udelay(p_params->retry_interval);
+                       }
+               }
+
+               rc = __qed_mcp_resc_lock(p_hwfn, p_ptt, p_params);
+               if (rc)
+                       return rc;
+
+               if (p_params->b_granted)
+                       break;
+       } while (retry_cnt++ < p_params->retry_num);
+
+       return 0;
+}
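
qed_mcp_resc_lock() retries __qed_mcp_resc_lock() up to retry_num times,
either sleeping (msleep, interval rounded up to ms) or spinning (udelay)
between attempts; callers must still check b_granted, since exhausting the
retries is not an error. A hedged lock/unlock pairing with the debug-dump
resource, using qed_mcp_resc_unlock() defined just below (field values
illustrative):

	struct qed_resc_lock_params lock_params = {
		.resource = QED_RESC_LOCK_DBG_DUMP,
		.timeout = QED_MCP_RESC_LOCK_TO_DEFAULT,
		.retry_num = 10,
		.retry_interval = 10000,	/* usec */
		.sleep_b4_retry = true,
	};
	struct qed_resc_unlock_params unlock_params = {
		.resource = QED_RESC_LOCK_DBG_DUMP,
	};

	if (!qed_mcp_resc_lock(p_hwfn, p_ptt, &lock_params) &&
	    lock_params.b_granted) {
		/* ... section shared with other PFs and the MFW ... */
		qed_mcp_resc_unlock(p_hwfn, p_ptt, &unlock_params);
	}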
+
+int
+qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
+                   struct qed_ptt *p_ptt,
+                   struct qed_resc_unlock_params *p_params)
+{
+       u32 param = 0, mcp_resp, mcp_param;
+       u8 opcode;
+       int rc;
+
+       opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
+                                  : RESOURCE_OPCODE_RELEASE;
+       QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
+       QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
+
+       DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                  "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
+                  param, opcode, p_params->resource);
+
+       /* Attempt to release the resource */
+       rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
+       if (rc)
+               return rc;
+
+       /* Analyze the response */
+       opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
+
+       DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                  "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
+                  mcp_param, opcode);
+
+       switch (opcode) {
+       case RESOURCE_OPCODE_RELEASED_PREVIOUS:
+               DP_INFO(p_hwfn,
+                       "Resource unlock request for an already released resource [%d]\n",
+                       p_params->resource);
+               /* Fallthrough */
+       case RESOURCE_OPCODE_RELEASED:
+               p_params->b_released = true;
+               break;
+       case RESOURCE_OPCODE_WRONG_OWNER:
+               p_params->b_released = false;
+               break;
+       default:
+               DP_NOTICE(p_hwfn,
+                         "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
+                         mcp_param, opcode);
+               return -EINVAL;
+       }
+
+       return 0;
+}
index 7f319aa1b229c3bd12155cc0d5b76a6995115fee..76fd3d2ead3bd49c4ec1623040089d70fcfc457b 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef _QED_MCP_H
@@ -13,7 +37,9 @@
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/qed/qed_fcoe_if.h>
 #include "qed_hsi.h"
+#include "qed_dev_api.h"
 
 struct qed_mcp_link_speed_params {
        bool    autoneg;
@@ -91,6 +117,8 @@ struct qed_mcp_function_info {
 
 #define QED_MCP_VLAN_UNSET              (0xffff)
        u16                             ovlan;
+
+       u16                             mtu;
 };
 
 struct qed_mcp_nvm_common {
@@ -105,6 +133,71 @@ struct qed_mcp_drv_version {
        u8      name[MCP_DRV_VER_STR_SIZE - 4];
 };
 
+struct qed_mcp_lan_stats {
+       u64 ucast_rx_pkts;
+       u64 ucast_tx_pkts;
+       u32 fcs_err;
+};
+
+struct qed_mcp_fcoe_stats {
+       u64 rx_pkts;
+       u64 tx_pkts;
+       u32 fcs_err;
+       u32 login_failure;
+};
+
+struct qed_mcp_iscsi_stats {
+       u64 rx_pdus;
+       u64 tx_pdus;
+       u64 rx_bytes;
+       u64 tx_bytes;
+};
+
+struct qed_mcp_rdma_stats {
+       u64 rx_pkts;
+       u64 tx_pkts;
+       u64 rx_bytes;
+       u64 tx_byts;
+};
+
+enum qed_mcp_protocol_type {
+       QED_MCP_LAN_STATS,
+       QED_MCP_FCOE_STATS,
+       QED_MCP_ISCSI_STATS,
+       QED_MCP_RDMA_STATS
+};
+
+union qed_mcp_protocol_stats {
+       struct qed_mcp_lan_stats lan_stats;
+       struct qed_mcp_fcoe_stats fcoe_stats;
+       struct qed_mcp_iscsi_stats iscsi_stats;
+       struct qed_mcp_rdma_stats rdma_stats;
+};
+
+enum qed_ov_eswitch {
+       QED_OV_ESWITCH_NONE,
+       QED_OV_ESWITCH_VEB,
+       QED_OV_ESWITCH_VEPA
+};
+
+enum qed_ov_client {
+       QED_OV_CLIENT_DRV,
+       QED_OV_CLIENT_USER,
+       QED_OV_CLIENT_VENDOR_SPEC
+};
+
+enum qed_ov_driver_state {
+       QED_OV_DRIVER_STATE_NOT_LOADED,
+       QED_OV_DRIVER_STATE_DISABLED,
+       QED_OV_DRIVER_STATE_ACTIVE
+};
+
+enum qed_ov_wol {
+       QED_OV_WOL_DEFAULT,
+       QED_OV_WOL_DISABLED,
+       QED_OV_WOL_ENABLED
+};
+
 /**
  * @brief - returns the link params of the hw function
  *
@@ -235,6 +328,69 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
                         struct qed_ptt *p_ptt,
                         struct qed_mcp_drv_version *p_ver);
 
+/**
+ * @brief Notify MFW about the change in base device properties
+ *
+ *  @param p_hwfn
+ *  @param p_ptt
+ *  @param client - qed client type
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
+                                    struct qed_ptt *p_ptt,
+                                    enum qed_ov_client client);
+
+/**
+ * @brief Notify MFW about the driver state
+ *
+ *  @param p_hwfn
+ *  @param p_ptt
+ *  @param drv_state - Driver state
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn,
+                                  struct qed_ptt *p_ptt,
+                                  enum qed_ov_driver_state drv_state);
+
+/**
+ * @brief Send MTU size to MFW
+ *
+ *  @param p_hwfn
+ *  @param p_ptt
+ *  @param mtu - MTU size
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
+                         struct qed_ptt *p_ptt, u16 mtu);
+
+/**
+ * @brief Send MAC address to MFW
+ *
+ *  @param p_hwfn
+ *  @param p_ptt
+ *  @param mac - MAC address
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
+                         struct qed_ptt *p_ptt, u8 *mac);
+
+/**
+ * @brief Send WOL mode to MFW
+ *
+ *  @param p_hwfn
+ *  @param p_ptt
+ *  @param wol - WOL mode
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn,
+                         struct qed_ptt *p_ptt,
+                         enum qed_ov_wol wol);
+
 /**
  * @brief Set LED status
  *
@@ -248,6 +404,18 @@ int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
                    struct qed_ptt *p_ptt,
                    enum qed_led_mode mode);
 
+/**
+ * @brief Read from nvm
+ *
+ *  @param cdev
+ *  @param addr - nvm offset
+ *  @param p_buf - nvm read buffer
+ *  @param len - buffer len
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len);
+
 /**
  * @brief Bist register test
  *
@@ -270,6 +438,35 @@ int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn,
 int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn,
                            struct qed_ptt *p_ptt);
 
+/**
+ * @brief Bist nvm test - get number of images
+ *
+ *  @param p_hwfn       - hw function
+ *  @param p_ptt        - PTT required for register access
+ *  @param num_images   - number of images if operation was
+ *                       successful. 0 if not.
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_bist_nvm_test_get_num_images(struct qed_hwfn *p_hwfn,
+                                        struct qed_ptt *p_ptt,
+                                        u32 *num_images);
+
+/**
+ * @brief Bist nvm test - get image attributes by index
+ *
+ *  @param p_hwfn      - hw function
+ *  @param p_ptt       - PTT required for register access
+ *  @param p_image_att - Attributes of image
+ *  @param image_index - Index of image to get information for
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn,
+                                       struct qed_ptt *p_ptt,
+                                       struct bist_nvm_image_att *p_image_att,
+                                       u32 image_index);
+
 /* Using hwfn number (and not pf_num) is required since in CMT mode,
  * same pf_num may be used by two different hwfn
  * TODO - this shouldn't really be in .h file, but until all fields
@@ -282,13 +479,23 @@ int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn,
                                            rel_pfid)
 #define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)
 
-/* TODO - this is only correct as long as only BB is supported, and
- * no port-swapping is implemented; Afterwards we'll need to fix it.
- */
-#define MFW_PORT(_p_hwfn)       ((_p_hwfn)->abs_pf_id %        \
-                                ((_p_hwfn)->cdev->num_ports_in_engines * 2))
+#define MFW_PORT(_p_hwfn)       ((_p_hwfn)->abs_pf_id %                          \
+                                ((_p_hwfn)->cdev->num_ports_in_engines * \
+                                 qed_device_num_engines((_p_hwfn)->cdev)))
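
As a worked example, assuming a dual-engine adapter with
num_ports_in_engines == 2, the modulus becomes 2 * 2 = 4, so abs_pf_id
values 0..3 map to MFW ports 0..3 and PF 4 wraps back to port 0; the old
macro hard-coded the factor of 2 and, per its removed comment, was only
correct for BB devices.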
+
 struct qed_mcp_info {
-       spinlock_t                              lock;
+       /* List of mailbox commands that were sent and are awaiting a response */
+       struct list_head                        cmd_list;
+
+       /* Spinlock used for protecting the access to the mailbox commands list
+        * and the sending of the commands.
+        */
+       spinlock_t                              cmd_lock;
+
+       /* Spinlock used for syncing SW link-changes and link-changes
+        * originating from attention context.
+        */
+       spinlock_t                              link_lock;
        bool                                    block_mb_sending;
        u32                                     public_base;
        u32                                     drv_mb_addr;
@@ -303,14 +510,16 @@ struct qed_mcp_info {
        u8                                      *mfw_mb_cur;
        u8                                      *mfw_mb_shadow;
        u16                                     mfw_mb_length;
-       u16                                     mcp_hist;
+       u32                                     mcp_hist;
 };
 
 struct qed_mcp_mb_params {
        u32                     cmd;
        u32                     param;
-       union drv_union_data    *p_data_src;
-       union drv_union_data    *p_data_dst;
+       void                    *p_data_src;
+       u8                      data_src_size;
+       void                    *p_data_dst;
+       u8                      data_dst_size;
        u32                     mcp_resp;
        u32                     mcp_param;
 };
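
With p_data_src/p_data_dst now typed void * plus explicit sizes, callers
attach only the struct they actually send instead of a whole
union drv_union_data. A minimal sketch of the convention, mirroring
qed_mcp_set_link() earlier in this patch:

	struct eth_phy_cfg phy_cfg;
	struct qed_mcp_mb_params mb_params;

	memset(&phy_cfg, 0, sizeof(phy_cfg));
	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_INIT_PHY;
	mb_params.p_data_src = &phy_cfg;
	mb_params.data_src_size = sizeof(phy_cfg);
	/* rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); */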
@@ -361,27 +570,55 @@ int qed_mcp_free(struct qed_hwfn *p_hwfn);
 int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
                          struct qed_ptt *p_ptt);
 
+enum qed_drv_role {
+       QED_DRV_ROLE_OS,
+       QED_DRV_ROLE_KDUMP,
+};
+
+struct qed_load_req_params {
+       /* Input params */
+       enum qed_drv_role drv_role;
+       u8 timeout_val;
+       bool avoid_eng_reset;
+       enum qed_override_force_load override_force_load;
+
+       /* Output params */
+       u32 load_code;
+};
+
 /**
- * @brief Sends a LOAD_REQ to the MFW, and in case operation
- *        succeed, returns whether this PF is the first on the
- *        chip/engine/port or function. This function should be
- *        called when driver is ready to accept MFW events after
- *        Storms initializations are done.
+ * @brief Sends a LOAD_REQ to the MFW, and in case the operation succeeds,
+ *        returns whether this PF is the first on the engine/port or function.
  *
- * @param p_hwfn       - hw function
- * @param p_ptt        - PTT required for register access
- * @param p_load_code  - The MCP response param containing one
- *      of the following:
- *      FW_MSG_CODE_DRV_LOAD_ENGINE
- *      FW_MSG_CODE_DRV_LOAD_PORT
- *      FW_MSG_CODE_DRV_LOAD_FUNCTION
- * @return int -
- *      0 - Operation was successul.
- *      -EBUSY - Operation failed
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_params
+ *
+ * @return int - 0 - Operation was successful.
  */
 int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
                     struct qed_ptt *p_ptt,
-                    u32 *p_load_code);
+                    struct qed_load_req_params *p_params);
+
+/**
+ * @brief Sends a UNLOAD_REQ message to the MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return int - 0 - Operation was successful.
+ */
+int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
+ * @brief Sends a UNLOAD_DONE message to the MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return int - 0 - Operation was successful.
+ */
+int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
 
 /**
  * @brief Read the MFW mailbox into Current buffer.
@@ -425,6 +662,29 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
 int qed_mcp_reset(struct qed_hwfn *p_hwfn,
                  struct qed_ptt *p_ptt);
 
+/**
+ * @brief - Sends an NVM read command request to the MFW to get
+ *        a buffer.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param cmd - Command: DRV_MSG_CODE_NVM_GET_FILE_DATA or
+ *            DRV_MSG_CODE_NVM_READ_NVRAM commands
+ * @param param - [0:23] - Offset [24:31] - Size
+ * @param o_mcp_resp - MCP response
+ * @param o_mcp_param - MCP response param
+ * @param o_txn_size -  Buffer size output
+ * @param o_buf - Pointer to the buffer returned by the MFW.
+ *
+ * @return 0 upon success.
+ */
+int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
+                      struct qed_ptt *p_ptt,
+                      u32 cmd,
+                      u32 param,
+                      u32 *o_mcp_resp,
+                      u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf);
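
As a worked example of the param packing documented above: reading 0x20
bytes at NVM offset 0x100 packs param = (0x20 << 24) | 0x100 = 0x20000100,
with the size in bits [24:31] and the offset in bits [0:23].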
+
 /**
  * @brief indicates whether the MFW objects [under mcp_info] are accessible
  *
@@ -447,6 +707,26 @@ bool qed_mcp_is_init(struct qed_hwfn *p_hwfn);
 int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
                           struct qed_ptt *p_ptt, u8 vf_id, u8 num);
 
+/**
+ * @brief - Halt the MCP.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return 0 upon success.
+ */
+int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
+ * @brief - Wake up the MCP.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return 0 upon success.
+ */
+int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
 int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw);
 int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw);
 int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
@@ -458,6 +738,137 @@ int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
                                     struct qed_mcp_link_state *p_link,
                                     u8 min_bw);
 
-int qed_hw_init_first_eth(struct qed_hwfn *p_hwfn,
-                         struct qed_ptt *p_ptt, u8 *p_pf);
+int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
+                         struct qed_ptt *p_ptt, u32 mask_parities);
+
+/**
+ * @brief - Sets the MFW's max value for the given resource
+ *
+ *  @param p_hwfn
+ *  @param p_ptt
+ *  @param res_id
+ *  @param resc_max_val
+ *  @param p_mcp_resp
+ *
+ * @return int - 0 - operation was successful.
+ */
+int
+qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
+                        struct qed_ptt *p_ptt,
+                        enum qed_resources res_id,
+                        u32 resc_max_val, u32 *p_mcp_resp);
+
+/**
+ * @brief - Gets the MFW allocation info for the given resource
+ *
+ *  @param p_hwfn
+ *  @param p_ptt
+ *  @param res_id
+ *  @param p_mcp_resp
+ *  @param p_resc_num
+ *  @param p_resc_start
+ *
+ * @return int - 0 - operation was successful.
+ */
+int
+qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
+                     struct qed_ptt *p_ptt,
+                     enum qed_resources res_id,
+                     u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start);
+
+/**
+ * @brief Send eswitch mode to MFW
+ *
+ *  @param p_hwfn
+ *  @param p_ptt
+ *  @param eswitch - eswitch mode
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
+                             struct qed_ptt *p_ptt,
+                             enum qed_ov_eswitch eswitch);
+
+#define QED_MCP_RESC_LOCK_MIN_VAL       RESOURCE_DUMP
+#define QED_MCP_RESC_LOCK_MAX_VAL       31
+
+enum qed_resc_lock {
+       QED_RESC_LOCK_DBG_DUMP = QED_MCP_RESC_LOCK_MIN_VAL,
+       QED_RESC_LOCK_RESC_ALLOC = QED_MCP_RESC_LOCK_MAX_VAL
+};
+
+/**
+ * @brief - Initiates PF FLR
+ *
+ *  @param p_hwfn
+ *  @param p_ptt
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+struct qed_resc_lock_params {
+       /* Resource number [valid values are 0..31] */
+       u8 resource;
+
+       /* Lock timeout value in seconds [default, none or 1..254] */
+       u8 timeout;
+#define QED_MCP_RESC_LOCK_TO_DEFAULT    0
+#define QED_MCP_RESC_LOCK_TO_NONE       255
+
+       /* Number of times to retry locking */
+       u8 retry_num;
+
+       /* The interval in usec between retries */
+       u16 retry_interval;
+
+       /* Use sleep or delay between retries */
+       bool sleep_b4_retry;
+
+       /* Will be set as true if the resource is free and granted */
+       bool b_granted;
+
+       /* Will be filled with the resource owner.
+        * [0..15 = PF0-15, 16 = MFW]
+        */
+       u8 owner;
+};
+
+/**
+ * @brief Acquires MFW generic resource lock
+ *
+ *  @param p_hwfn
+ *  @param p_ptt
+ *  @param p_params
+ *
+ * @return int - 0 - operation was successful.
+ */
+int
+qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
+                 struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params);
+
+struct qed_resc_unlock_params {
+       /* Resource number [valid values are 0..31] */
+       u8 resource;
+
+       /* Allow releasing a resource even if it belongs to another PF */
+       bool b_force;
+
+       /* Will be set as true if the resource is released */
+       bool b_released;
+};
+
+/**
+ * @brief Releases MFW generic resource lock
+ *
+ *  @param p_hwfn
+ *  @param p_ptt
+ *  @param p_params
+ *
+ * @return int - 0 - operation was successful.
+ */
+int
+qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
+                   struct qed_ptt *p_ptt,
+                   struct qed_resc_unlock_params *p_params);
+
 #endif
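
A minimal usage sketch of the qed_mcp.h interfaces above (not part of the patch): it acquires and releases the MFW generic resource lock, and packs the documented param layout for qed_mcp_nvm_rd_cmd(). The example_ function names and the retry values are illustrative assumptions, not mandated by the interface.

static u32 example_nvm_param(u32 offset, u8 size)
{
	/* qed_mcp_nvm_rd_cmd() param: bits [0:23] offset, [24:31] size */
	return (offset & 0xffffff) | ((u32)size << 24);
}

static int example_with_resc_lock(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
{
	struct qed_resc_lock_params lock_params = {
		.resource = QED_RESC_LOCK_DBG_DUMP,
		.timeout = QED_MCP_RESC_LOCK_TO_DEFAULT,
		.retry_num = 10,		/* illustrative */
		.retry_interval = 10000,	/* usec, illustrative */
		.sleep_b4_retry = true,
	};
	struct qed_resc_unlock_params unlock_params = {
		.resource = QED_RESC_LOCK_DBG_DUMP,
	};
	int rc;

	rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &lock_params);
	if (rc)
		return rc;
	if (!lock_params.b_granted)
		return -EBUSY;	/* lock_params.owner tells who holds it */

	/* ... section serialized against other PFs and the MFW ... */

	return qed_mcp_resc_unlock(p_hwfn, p_ptt, &unlock_params);
}
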
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
new file mode 100644 (file)
index 0000000..db96670
--- /dev/null
@@ -0,0 +1,481 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_iscsi.h"
+#include "qed_ll2.h"
+#include "qed_ooo.h"
+#include "qed_cxt.h"
+
+static struct qed_ooo_archipelago *
+qed_ooo_seek_archipelago(struct qed_hwfn *p_hwfn,
+                         struct qed_ooo_info *p_ooo_info, u32 cid)
+{
+       u32 idx = (cid & 0xffff) - p_ooo_info->cid_base;
+       struct qed_ooo_archipelago *p_archipelago;
+
+       if (idx >= p_ooo_info->max_num_archipelagos)
+               return NULL;
+
+       p_archipelago = &p_ooo_info->p_archipelagos_mem[idx];
+
+       if (list_empty(&p_archipelago->isles_list))
+               return NULL;
+
+       return p_archipelago;
+}
+
+static struct qed_ooo_isle *qed_ooo_seek_isle(struct qed_hwfn *p_hwfn,
+                                             struct qed_ooo_info *p_ooo_info,
+                                             u32 cid, u8 isle)
+{
+       struct qed_ooo_archipelago *p_archipelago = NULL;
+       struct qed_ooo_isle *p_isle = NULL;
+       u8 the_num_of_isle = 1;
+
+       p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid);
+       if (!p_archipelago) {
+               DP_NOTICE(p_hwfn,
+                         "Connection %d is not found in OOO list\n", cid);
+               return NULL;
+       }
+
+       list_for_each_entry(p_isle, &p_archipelago->isles_list, list_entry) {
+               if (the_num_of_isle == isle)
+                       return p_isle;
+               the_num_of_isle++;
+       }
+
+       return NULL;
+}
+
+void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn,
+                               struct qed_ooo_info *p_ooo_info,
+                               struct ooo_opaque *p_cqe)
+{
+       struct qed_ooo_history *p_history = &p_ooo_info->ooo_history;
+
+       if (p_history->head_idx == p_history->num_of_cqes)
+               p_history->head_idx = 0;
+       p_history->p_cqes[p_history->head_idx] = *p_cqe;
+       p_history->head_idx++;
+}
+
+struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
+{
+       u16 max_num_archipelagos = 0, cid_base;
+       struct qed_ooo_info *p_ooo_info;
+       u16 max_num_isles = 0;
+       u32 i;
+
+       if (p_hwfn->hw_info.personality != QED_PCI_ISCSI) {
+               DP_NOTICE(p_hwfn,
+                         "Failed to allocate qed_ooo_info: unknown personality\n");
+               return NULL;
+       }
+
+       max_num_archipelagos = p_hwfn->pf_params.iscsi_pf_params.num_cons;
+       max_num_isles = QED_MAX_NUM_ISLES + max_num_archipelagos;
+       cid_base = (u16)qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ISCSI);
+
+       if (!max_num_archipelagos) {
+               DP_NOTICE(p_hwfn,
+                         "Failed to allocate qed_ooo_info: unknown amount of connections\n");
+               return NULL;
+       }
+
+       p_ooo_info = kzalloc(sizeof(*p_ooo_info), GFP_KERNEL);
+       if (!p_ooo_info)
+               return NULL;
+
+       p_ooo_info->cid_base = cid_base;
+       p_ooo_info->max_num_archipelagos = max_num_archipelagos;
+
+       INIT_LIST_HEAD(&p_ooo_info->free_buffers_list);
+       INIT_LIST_HEAD(&p_ooo_info->ready_buffers_list);
+       INIT_LIST_HEAD(&p_ooo_info->free_isles_list);
+
+       p_ooo_info->p_isles_mem = kcalloc(max_num_isles,
+                                         sizeof(struct qed_ooo_isle),
+                                         GFP_KERNEL);
+       if (!p_ooo_info->p_isles_mem)
+               goto no_isles_mem;
+
+       for (i = 0; i < max_num_isles; i++) {
+               INIT_LIST_HEAD(&p_ooo_info->p_isles_mem[i].buffers_list);
+               list_add_tail(&p_ooo_info->p_isles_mem[i].list_entry,
+                             &p_ooo_info->free_isles_list);
+       }
+
+       p_ooo_info->p_archipelagos_mem =
+                               kcalloc(max_num_archipelagos,
+                                       sizeof(struct qed_ooo_archipelago),
+                                       GFP_KERNEL);
+       if (!p_ooo_info->p_archipelagos_mem)
+               goto no_archipelagos_mem;
+
+       for (i = 0; i < max_num_archipelagos; i++)
+               INIT_LIST_HEAD(&p_ooo_info->p_archipelagos_mem[i].isles_list);
+
+       p_ooo_info->ooo_history.p_cqes =
+                               kcalloc(QED_MAX_NUM_OOO_HISTORY_ENTRIES,
+                                       sizeof(struct ooo_opaque),
+                                       GFP_KERNEL);
+       if (!p_ooo_info->ooo_history.p_cqes)
+               goto no_history_mem;
+
+       p_ooo_info->ooo_history.num_of_cqes = QED_MAX_NUM_OOO_HISTORY_ENTRIES;
+
+       return p_ooo_info;
+
+no_history_mem:
+       kfree(p_ooo_info->p_archipelagos_mem);
+no_archipelagos_mem:
+       kfree(p_ooo_info->p_isles_mem);
+no_isles_mem:
+       kfree(p_ooo_info);
+       return NULL;
+}
+
+void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
+                                     struct qed_ooo_info *p_ooo_info, u32 cid)
+{
+       struct qed_ooo_archipelago *p_archipelago;
+       struct qed_ooo_buffer *p_buffer;
+       struct qed_ooo_isle *p_isle;
+
+       p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid);
+       if (!p_archipelago)
+               return;
+
+       while (!list_empty(&p_archipelago->isles_list)) {
+               p_isle = list_first_entry(&p_archipelago->isles_list,
+                                         struct qed_ooo_isle, list_entry);
+
+               list_del(&p_isle->list_entry);
+
+               while (!list_empty(&p_isle->buffers_list)) {
+                       p_buffer = list_first_entry(&p_isle->buffers_list,
+                                                   struct qed_ooo_buffer,
+                                                   list_entry);
+
+                       if (!p_buffer)
+                               break;
+
+                       list_del(&p_buffer->list_entry);
+                       list_add_tail(&p_buffer->list_entry,
+                                     &p_ooo_info->free_buffers_list);
+               }
+               list_add_tail(&p_isle->list_entry,
+                             &p_ooo_info->free_isles_list);
+       }
+}
+
+void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
+                              struct qed_ooo_info *p_ooo_info)
+{
+       struct qed_ooo_archipelago *p_archipelago;
+       struct qed_ooo_buffer *p_buffer;
+       struct qed_ooo_isle *p_isle;
+       u32 i;
+
+       for (i = 0; i < p_ooo_info->max_num_archipelagos; i++) {
+               p_archipelago = &(p_ooo_info->p_archipelagos_mem[i]);
+
+               while (!list_empty(&p_archipelago->isles_list)) {
+                       p_isle = list_first_entry(&p_archipelago->isles_list,
+                                                 struct qed_ooo_isle,
+                                                 list_entry);
+
+                       list_del(&p_isle->list_entry);
+
+                       while (!list_empty(&p_isle->buffers_list)) {
+                               p_buffer =
+                                   list_first_entry(&p_isle->buffers_list,
+                                                    struct qed_ooo_buffer,
+                                                    list_entry);
+
+                               if (!p_buffer)
+                                       break;
+
+                               list_del(&p_buffer->list_entry);
+                               list_add_tail(&p_buffer->list_entry,
+                                             &p_ooo_info->free_buffers_list);
+                       }
+                       list_add_tail(&p_isle->list_entry,
+                                     &p_ooo_info->free_isles_list);
+               }
+       }
+       if (!list_empty(&p_ooo_info->ready_buffers_list))
+               list_splice_tail_init(&p_ooo_info->ready_buffers_list,
+                                     &p_ooo_info->free_buffers_list);
+}
+
+void qed_ooo_setup(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info)
+{
+       qed_ooo_release_all_isles(p_hwfn, p_ooo_info);
+       memset(p_ooo_info->ooo_history.p_cqes, 0,
+              p_ooo_info->ooo_history.num_of_cqes *
+              sizeof(struct ooo_opaque));
+       p_ooo_info->ooo_history.head_idx = 0;
+}
+
+void qed_ooo_free(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info)
+{
+       struct qed_ooo_buffer *p_buffer;
+
+       qed_ooo_release_all_isles(p_hwfn, p_ooo_info);
+       while (!list_empty(&p_ooo_info->free_buffers_list)) {
+               p_buffer = list_first_entry(&p_ooo_info->free_buffers_list,
+                                           struct qed_ooo_buffer, list_entry);
+
+               if (!p_buffer)
+                       break;
+
+               list_del(&p_buffer->list_entry);
+               dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                                 p_buffer->rx_buffer_size,
+                                 p_buffer->rx_buffer_virt_addr,
+                                 p_buffer->rx_buffer_phys_addr);
+               kfree(p_buffer);
+       }
+
+       kfree(p_ooo_info->p_isles_mem);
+       kfree(p_ooo_info->p_archipelagos_mem);
+       kfree(p_ooo_info->ooo_history.p_cqes);
+       kfree(p_ooo_info);
+}
+
+void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn,
+                            struct qed_ooo_info *p_ooo_info,
+                            struct qed_ooo_buffer *p_buffer)
+{
+       list_add_tail(&p_buffer->list_entry, &p_ooo_info->free_buffers_list);
+}
+
+struct qed_ooo_buffer *qed_ooo_get_free_buffer(struct qed_hwfn *p_hwfn,
+                                              struct qed_ooo_info *p_ooo_info)
+{
+       struct qed_ooo_buffer *p_buffer = NULL;
+
+       if (!list_empty(&p_ooo_info->free_buffers_list)) {
+               p_buffer = list_first_entry(&p_ooo_info->free_buffers_list,
+                                           struct qed_ooo_buffer, list_entry);
+
+               list_del(&p_buffer->list_entry);
+       }
+
+       return p_buffer;
+}
+
+void qed_ooo_put_ready_buffer(struct qed_hwfn *p_hwfn,
+                             struct qed_ooo_info *p_ooo_info,
+                             struct qed_ooo_buffer *p_buffer, u8 on_tail)
+{
+       if (on_tail)
+               list_add_tail(&p_buffer->list_entry,
+                             &p_ooo_info->ready_buffers_list);
+       else
+               list_add(&p_buffer->list_entry,
+                        &p_ooo_info->ready_buffers_list);
+}
+
+struct qed_ooo_buffer *qed_ooo_get_ready_buffer(struct qed_hwfn *p_hwfn,
+                                               struct qed_ooo_info *p_ooo_info)
+{
+       struct qed_ooo_buffer *p_buffer = NULL;
+
+       if (!list_empty(&p_ooo_info->ready_buffers_list)) {
+               p_buffer = list_first_entry(&p_ooo_info->ready_buffers_list,
+                                           struct qed_ooo_buffer, list_entry);
+
+               list_del(&p_buffer->list_entry);
+       }
+
+       return p_buffer;
+}
+
+void qed_ooo_delete_isles(struct qed_hwfn *p_hwfn,
+                         struct qed_ooo_info *p_ooo_info,
+                         u32 cid, u8 drop_isle, u8 drop_size)
+{
+       struct qed_ooo_archipelago *p_archipelago = NULL;
+       struct qed_ooo_isle *p_isle = NULL;
+       u8 isle_idx;
+
+       p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid);
+       for (isle_idx = 0; isle_idx < drop_size; isle_idx++) {
+               p_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, drop_isle);
+               if (!p_isle) {
+                       DP_NOTICE(p_hwfn,
+                                 "Isle %d is not found(cid %d)\n",
+                                 drop_isle, cid);
+                       return;
+               }
+               if (list_empty(&p_isle->buffers_list))
+                       DP_NOTICE(p_hwfn,
+                                 "Isle %d is empty(cid %d)\n", drop_isle, cid);
+               else
+                       list_splice_tail_init(&p_isle->buffers_list,
+                                             &p_ooo_info->free_buffers_list);
+
+               list_del(&p_isle->list_entry);
+               p_ooo_info->cur_isles_number--;
+               list_add(&p_isle->list_entry, &p_ooo_info->free_isles_list);
+       }
+}
+
+void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn,
+                         struct qed_ooo_info *p_ooo_info,
+                         u32 cid, u8 ooo_isle,
+                         struct qed_ooo_buffer *p_buffer)
+{
+       struct qed_ooo_archipelago *p_archipelago = NULL;
+       struct qed_ooo_isle *p_prev_isle = NULL;
+       struct qed_ooo_isle *p_isle = NULL;
+
+       if (ooo_isle > 1) {
+               p_prev_isle = qed_ooo_seek_isle(p_hwfn,
+                                               p_ooo_info, cid, ooo_isle - 1);
+               if (!p_prev_isle) {
+                       DP_NOTICE(p_hwfn,
+                                 "Isle %d is not found(cid %d)\n",
+                                 ooo_isle - 1, cid);
+                       return;
+               }
+       }
+       p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid);
+       if (!p_archipelago && (ooo_isle != 1)) {
+               DP_NOTICE(p_hwfn,
+                         "Connection %d is not found in OOO list\n", cid);
+               return;
+       }
+
+       if (!list_empty(&p_ooo_info->free_isles_list)) {
+               p_isle = list_first_entry(&p_ooo_info->free_isles_list,
+                                         struct qed_ooo_isle, list_entry);
+
+               list_del(&p_isle->list_entry);
+               if (!list_empty(&p_isle->buffers_list)) {
+                       DP_NOTICE(p_hwfn, "Free isle is not empty\n");
+                       INIT_LIST_HEAD(&p_isle->buffers_list);
+               }
+       } else {
+               DP_NOTICE(p_hwfn, "No more free isles\n");
+               return;
+       }
+
+       if (!p_archipelago) {
+               u32 idx = (cid & 0xffff) - p_ooo_info->cid_base;
+
+               p_archipelago = &p_ooo_info->p_archipelagos_mem[idx];
+       }
+
+       list_add(&p_buffer->list_entry, &p_isle->buffers_list);
+       p_ooo_info->cur_isles_number++;
+       p_ooo_info->gen_isles_number++;
+
+       if (p_ooo_info->cur_isles_number > p_ooo_info->max_isles_number)
+               p_ooo_info->max_isles_number = p_ooo_info->cur_isles_number;
+
+       if (!p_prev_isle)
+               list_add(&p_isle->list_entry, &p_archipelago->isles_list);
+       else
+               list_add(&p_isle->list_entry, &p_prev_isle->list_entry);
+}
+
+void qed_ooo_add_new_buffer(struct qed_hwfn *p_hwfn,
+                           struct qed_ooo_info *p_ooo_info,
+                           u32 cid,
+                           u8 ooo_isle,
+                           struct qed_ooo_buffer *p_buffer, u8 buffer_side)
+{
+       struct qed_ooo_isle *p_isle = NULL;
+
+       p_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, ooo_isle);
+       if (!p_isle) {
+               DP_NOTICE(p_hwfn,
+                         "Isle %d is not found(cid %d)\n", ooo_isle, cid);
+               return;
+       }
+
+       if (buffer_side == QED_OOO_LEFT_BUF)
+               list_add(&p_buffer->list_entry, &p_isle->buffers_list);
+       else
+               list_add_tail(&p_buffer->list_entry, &p_isle->buffers_list);
+}
+
+void qed_ooo_join_isles(struct qed_hwfn *p_hwfn,
+                       struct qed_ooo_info *p_ooo_info, u32 cid, u8 left_isle)
+{
+       struct qed_ooo_archipelago *p_archipelago = NULL;
+       struct qed_ooo_isle *p_right_isle = NULL;
+       struct qed_ooo_isle *p_left_isle = NULL;
+
+       p_right_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid,
+                                        left_isle + 1);
+       if (!p_right_isle) {
+               DP_NOTICE(p_hwfn,
+                         "Right isle %d is not found(cid %d)\n",
+                         left_isle + 1, cid);
+               return;
+       }
+
+       p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid);
+       list_del(&p_right_isle->list_entry);
+       p_ooo_info->cur_isles_number--;
+       if (left_isle) {
+               p_left_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid,
+                                               left_isle);
+               if (!p_left_isle) {
+                       DP_NOTICE(p_hwfn,
+                                 "Left isle %d is not found(cid %d)\n",
+                                 left_isle, cid);
+                       return;
+               }
+               list_splice_tail_init(&p_right_isle->buffers_list,
+                                     &p_left_isle->buffers_list);
+       } else {
+               list_splice_tail_init(&p_right_isle->buffers_list,
+                                     &p_ooo_info->ready_buffers_list);
+       }
+       list_add_tail(&p_right_isle->list_entry, &p_ooo_info->free_isles_list);
+}
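
The functions above maintain per-connection "archipelagos" of out-of-order "isles", each isle holding a contiguous run of buffered segments. As a hedged sketch of how a receive path might drive this API for one completion, consider the dispatch below; the decoded flags (new_isle, join_isles, drop_isles, right_side) are hypothetical placeholders for fields the firmware reports in its CQE, and example_handle_ooo_event is not a function in the patch.

static void example_handle_ooo_event(struct qed_hwfn *p_hwfn,
				     struct qed_ooo_info *p_ooo_info,
				     u32 cid, u8 isle, u8 drop_size,
				     struct qed_ooo_buffer *p_buffer,
				     bool new_isle, bool join_isles,
				     bool drop_isles, bool right_side)
{
	if (drop_isles) {
		/* Segments were superseded: recycle their buffers */
		qed_ooo_delete_isles(p_hwfn, p_ooo_info, cid, isle, drop_size);
	} else if (new_isle) {
		/* First segment of a new gap: open isle 'isle' */
		qed_ooo_add_new_isle(p_hwfn, p_ooo_info, cid, isle, p_buffer);
	} else if (join_isles) {
		/* A gap closed: merge isle+1 into isle, or move its
		 * buffers to the ready list when isle == 0.
		 */
		qed_ooo_join_isles(p_hwfn, p_ooo_info, cid, isle);
	} else {
		/* Segment extends an existing isle on its left or right */
		qed_ooo_add_new_buffer(p_hwfn, p_ooo_info, cid, isle, p_buffer,
				       right_side ? QED_OOO_RIGHT_BUF
						  : QED_OOO_LEFT_BUF);
	}
}
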
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.h b/drivers/net/ethernet/qlogic/qed/qed_ooo.h
new file mode 100644 (file)
index 0000000..791ad0f
--- /dev/null
@@ -0,0 +1,198 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _QED_OOO_H
+#define _QED_OOO_H
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include "qed.h"
+
+#define QED_MAX_NUM_ISLES      256
+#define QED_MAX_NUM_OOO_HISTORY_ENTRIES        512
+
+#define QED_OOO_LEFT_BUF       0
+#define QED_OOO_RIGHT_BUF      1
+
+struct qed_ooo_buffer {
+       struct list_head list_entry;
+       void *rx_buffer_virt_addr;
+       dma_addr_t rx_buffer_phys_addr;
+       u32 rx_buffer_size;
+       u16 packet_length;
+       u16 parse_flags;
+       u16 vlan;
+       u8 placement_offset;
+};
+
+struct qed_ooo_isle {
+       struct list_head list_entry;
+       struct list_head buffers_list;
+};
+
+struct qed_ooo_archipelago {
+       struct list_head isles_list;
+};
+
+struct qed_ooo_history {
+       struct ooo_opaque *p_cqes;
+       u32 head_idx;
+       u32 num_of_cqes;
+};
+
+struct qed_ooo_info {
+       struct list_head free_buffers_list;
+       struct list_head ready_buffers_list;
+       struct list_head free_isles_list;
+       struct qed_ooo_archipelago *p_archipelagos_mem;
+       struct qed_ooo_isle *p_isles_mem;
+       struct qed_ooo_history ooo_history;
+       u32 cur_isles_number;
+       u32 max_isles_number;
+       u32 gen_isles_number;
+       u16 max_num_archipelagos;
+       u16 cid_base;
+};
+
+#if IS_ENABLED(CONFIG_QED_ISCSI)
+void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn,
+                               struct qed_ooo_info *p_ooo_info,
+                               struct ooo_opaque *p_cqe);
+
+struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn);
+
+void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
+                                     struct qed_ooo_info *p_ooo_info,
+                                     u32 cid);
+
+void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
+                              struct qed_ooo_info *p_ooo_info);
+
+void qed_ooo_setup(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info);
+
+void qed_ooo_free(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info);
+
+void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn,
+                            struct qed_ooo_info *p_ooo_info,
+                            struct qed_ooo_buffer *p_buffer);
+
+struct qed_ooo_buffer *
+qed_ooo_get_free_buffer(struct qed_hwfn *p_hwfn,
+                       struct qed_ooo_info *p_ooo_info);
+
+void qed_ooo_put_ready_buffer(struct qed_hwfn *p_hwfn,
+                             struct qed_ooo_info *p_ooo_info,
+                             struct qed_ooo_buffer *p_buffer, u8 on_tail);
+
+struct qed_ooo_buffer *
+qed_ooo_get_ready_buffer(struct qed_hwfn *p_hwfn,
+                        struct qed_ooo_info *p_ooo_info);
+
+void qed_ooo_delete_isles(struct qed_hwfn *p_hwfn,
+                         struct qed_ooo_info *p_ooo_info,
+                         u32 cid, u8 drop_isle, u8 drop_size);
+
+void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn,
+                         struct qed_ooo_info *p_ooo_info,
+                         u32 cid,
+                         u8 ooo_isle, struct qed_ooo_buffer *p_buffer);
+
+void qed_ooo_add_new_buffer(struct qed_hwfn *p_hwfn,
+                           struct qed_ooo_info *p_ooo_info,
+                           u32 cid,
+                           u8 ooo_isle,
+                           struct qed_ooo_buffer *p_buffer, u8 buffer_side);
+
+void qed_ooo_join_isles(struct qed_hwfn *p_hwfn,
+                       struct qed_ooo_info *p_ooo_info, u32 cid,
+                       u8 left_isle);
+#else /* IS_ENABLED(CONFIG_QED_ISCSI) */
+static inline void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn,
+                                             struct qed_ooo_info *p_ooo_info,
+                                             struct ooo_opaque *p_cqe) {}
+
+static inline struct qed_ooo_info *qed_ooo_alloc(
+                               struct qed_hwfn *p_hwfn) { return NULL; }
+
+static inline void
+qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
+                                struct qed_ooo_info *p_ooo_info,
+                                u32 cid) {}
+
+static inline void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
+                                            struct qed_ooo_info *p_ooo_info)
+                                            {}
+
+static inline void qed_ooo_setup(struct qed_hwfn *p_hwfn,
+                                struct qed_ooo_info *p_ooo_info) {}
+
+static inline void qed_ooo_free(struct qed_hwfn *p_hwfn,
+                               struct qed_ooo_info *p_ooo_info) {}
+
+static inline void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn,
+                                          struct qed_ooo_info *p_ooo_info,
+                                          struct qed_ooo_buffer *p_buffer) {}
+
+static inline struct qed_ooo_buffer *
+qed_ooo_get_free_buffer(struct qed_hwfn *p_hwfn,
+                       struct qed_ooo_info *p_ooo_info) { return NULL; }
+
+static inline void qed_ooo_put_ready_buffer(struct qed_hwfn *p_hwfn,
+                                           struct qed_ooo_info *p_ooo_info,
+                                           struct qed_ooo_buffer *p_buffer,
+                                           u8 on_tail) {}
+
+static inline struct qed_ooo_buffer *
+qed_ooo_get_ready_buffer(struct qed_hwfn *p_hwfn,
+                        struct qed_ooo_info *p_ooo_info) { return NULL; }
+
+static inline void qed_ooo_delete_isles(struct qed_hwfn *p_hwfn,
+                                       struct qed_ooo_info *p_ooo_info,
+                                       u32 cid, u8 drop_isle, u8 drop_size) {}
+
+static inline void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn,
+                                       struct qed_ooo_info *p_ooo_info,
+                                       u32 cid, u8 ooo_isle,
+                                       struct qed_ooo_buffer *p_buffer) {}
+
+static inline void qed_ooo_add_new_buffer(struct qed_hwfn *p_hwfn,
+                                         struct qed_ooo_info *p_ooo_info,
+                                         u32 cid, u8 ooo_isle,
+                                         struct qed_ooo_buffer *p_buffer,
+                                         u8 buffer_side) {}
+
+static inline void qed_ooo_join_isles(struct qed_hwfn *p_hwfn,
+                                     struct qed_ooo_info *p_ooo_info, u32 cid,
+                                     u8 left_isle) {}
+#endif /* IS_ENABLED(CONFIG_QED_ISCSI) */
+
+#endif
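
The #else branch above stubs the whole API out when CONFIG_QED_ISCSI is disabled, so common code can call the OOO helpers unconditionally and the empty inlines compile away. A minimal sketch of a caller that works in both configurations (the example_ name is illustrative):

static void example_ooo_lifecycle(struct qed_hwfn *p_hwfn)
{
	struct qed_ooo_info *p_ooo_info;

	p_ooo_info = qed_ooo_alloc(p_hwfn);
	if (!p_ooo_info)	/* always NULL when CONFIG_QED_ISCSI=n */
		return;

	qed_ooo_setup(p_hwfn, p_ooo_info);
	/* ... run traffic; isles are created, joined and released ... */
	qed_ooo_free(p_hwfn, p_ooo_info);
}
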
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.c b/drivers/net/ethernet/qlogic/qed/qed_ptp.c
new file mode 100644 (file)
index 0000000..80c9c0b
--- /dev/null
@@ -0,0 +1,331 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/types.h>
+#include "qed.h"
+#include "qed_dev_api.h"
+#include "qed_hw.h"
+#include "qed_l2.h"
+#include "qed_ptp.h"
+#include "qed_reg_addr.h"
+
+/* 16-nanosecond time quanta to wait before making a drift adjustment */
+#define QED_DRIFT_CNTR_TIME_QUANTA_SHIFT       0
+/* Nanoseconds to add/subtract when making a drift adjustment */
+#define QED_DRIFT_CNTR_ADJUSTMENT_SHIFT                28
+/* Add/subtract the adjustment value when making a drift adjustment */
+#define QED_DRIFT_CNTR_DIRECTION_SHIFT         31
+#define QED_TIMESTAMP_MASK                     BIT(16)
+
+/* Read Rx timestamp */
+static int qed_ptp_hw_read_rx_ts(struct qed_dev *cdev, u64 *timestamp)
+{
+       struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
+       u32 val;
+
+       *timestamp = 0;
+       val = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID);
+       if (!(val & QED_TIMESTAMP_MASK)) {
+               DP_INFO(p_hwfn, "Invalid Rx timestamp, buf_seqid = %d\n", val);
+               return -EINVAL;
+       }
+
+       val = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_TS_LSB);
+       *timestamp = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_TS_MSB);
+       *timestamp <<= 32;
+       *timestamp |= val;
+
+       /* Reset timestamp register to allow new timestamp */
+       qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
+              QED_TIMESTAMP_MASK);
+
+       return 0;
+}
+
+/* Read Tx timestamp */
+static int qed_ptp_hw_read_tx_ts(struct qed_dev *cdev, u64 *timestamp)
+{
+       struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
+       u32 val;
+
+       *timestamp = 0;
+       val = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID);
+       if (!(val & QED_TIMESTAMP_MASK)) {
+               DP_INFO(p_hwfn, "Invalid Tx timestamp, buf_seqid = %d\n", val);
+               return -EINVAL;
+       }
+
+       val = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_TS_LSB);
+       *timestamp = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_TS_MSB);
+       *timestamp <<= 32;
+       *timestamp |= val;
+
+       /* Reset timestamp register to allow new timestamp */
+       qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID, QED_TIMESTAMP_MASK);
+
+       return 0;
+}
+
+/* Read Phy Hardware Clock */
+static int qed_ptp_hw_read_cc(struct qed_dev *cdev, u64 *phc_cycles)
+{
+       struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
+       u32 temp = 0;
+
+       temp = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_SYNC_TIME_LSB);
+       *phc_cycles = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_SYNC_TIME_MSB);
+       *phc_cycles <<= 32;
+       *phc_cycles |= temp;
+
+       return 0;
+}
+
+/* Filter PTP protocol packets that need to be timestamped */
+static int qed_ptp_hw_cfg_rx_filters(struct qed_dev *cdev,
+                                    enum qed_ptp_filter_type type)
+{
+       struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
+       u32 rule_mask, parm_mask;
+
+       switch (type) {
+       case QED_PTP_FILTER_L2_IPV4_IPV6:
+               parm_mask = 0x6AA;
+               rule_mask = 0x3EEE;
+               break;
+       case QED_PTP_FILTER_L2:
+               parm_mask = 0x6BF;
+               rule_mask = 0x3EFF;
+               break;
+       case QED_PTP_FILTER_IPV4_IPV6:
+               parm_mask = 0x7EA;
+               rule_mask = 0x3FFE;
+               break;
+       case QED_PTP_FILTER_IPV4:
+               parm_mask = 0x7EE;
+               rule_mask = 0x3FFE;
+               break;
+       default:
+               DP_INFO(p_hwfn, "Invalid PTP filter type %d\n", type);
+               return -EINVAL;
+       }
+
+       qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, parm_mask);
+       qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, rule_mask);
+
+       qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_TO_HOST, 0x1);
+
+       /* Reset possibly old timestamps */
+       qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
+              QED_TIMESTAMP_MASK);
+
+       return 0;
+}
+
+/* Adjust the HW clock by a rate given in parts-per-billion (ppb) units.
+ * FW/HW accepts the adjustment value in terms of 3 parameters:
+ *   Drift period - adjustment happens once in a certain number of nanoseconds.
+ *   Drift value - time is adjusted by a certain value, for example by 5 ns.
+ *   Drift direction - add or subtract the adjustment value.
+ * The routine translates ppb into the adjustment triplet in an optimal manner.
+ */
+static int qed_ptp_hw_adjfreq(struct qed_dev *cdev, s32 ppb)
+{
+       s64 best_val = 0, val, best_period = 0, period, approx_dev, dif, dif2;
+       struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
+       u32 drift_ctr_cfg = 0, drift_state;
+       int drift_dir = 1;
+
+       if (ppb < 0) {
+               ppb = -ppb;
+               drift_dir = 0;
+       }
+
+       if (ppb > 1) {
+               s64 best_dif = ppb, best_approx_dev = 1;
+
+               /* Adjustment value is up to +/-7ns, find an optimal value in
+                * this range.
+                */
+               for (val = 7; val > 0; val--) {
+                       period = div_s64(val * 1000000000, ppb);
+                       period -= 8;
+                       period >>= 4;
+                       if (period < 1)
+                               period = 1;
+                       if (period > 0xFFFFFFE)
+                               period = 0xFFFFFFE;
+
+                       /* Check both rounding ends for approximate error */
+                       approx_dev = period * 16 + 8;
+                       dif = ppb * approx_dev - val * 1000000000;
+                       dif2 = dif + 16 * ppb;
+
+                       if (dif < 0)
+                               dif = -dif;
+                       if (dif2 < 0)
+                               dif2 = -dif2;
+
+                       /* Determine which end gives better approximation */
+                       if (dif * (approx_dev + 16) > dif2 * approx_dev) {
+                               period++;
+                               approx_dev += 16;
+                               dif = dif2;
+                       }
+
+                       /* Track best approximation found so far */
+                       if (best_dif * approx_dev > dif * best_approx_dev) {
+                               best_dif = dif;
+                               best_val = val;
+                               best_period = period;
+                               best_approx_dev = approx_dev;
+                       }
+               }
+       } else if (ppb == 1) {
+               /* This is a special case as it's the only value which wouldn't
+                * fit in an s64 variable. To avoid casts, simply handle it
+                * separately.
+                */
+               best_val = 4;
+               best_period = 0xee6b27f;
+       } else {
+               best_val = 0;
+               best_period = 0xFFFFFFF;
+       }
+
+       drift_ctr_cfg = (best_period << QED_DRIFT_CNTR_TIME_QUANTA_SHIFT) |
+                       (((int)best_val) << QED_DRIFT_CNTR_ADJUSTMENT_SHIFT) |
+                       (((int)drift_dir) << QED_DRIFT_CNTR_DIRECTION_SHIFT);
+
+       qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x1);
+
+       drift_state = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR);
+       if (drift_state & 1) {
+               qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF,
+                      drift_ctr_cfg);
+       } else {
+               DP_INFO(p_hwfn, "Drift counter is not reset\n");
+               return -EINVAL;
+       }
+
+       qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x0);
+
+       return 0;
+}
+
+static int qed_ptp_hw_enable(struct qed_dev *cdev)
+{
+       struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
+
+       /* Reset PTP event detection rules - will be configured in the IOCTL */
+       qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF);
+       qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, 0x3FFF);
+       qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF);
+       qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF);
+
+       qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 7);
+       qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, 7);
+
+       qed_wr(p_hwfn, p_ptt, NIG_REG_TS_OUTPUT_ENABLE_PDA, 0x1);
+
+       /* Pause free running counter */
+       if (QED_IS_BB_B0(p_hwfn->cdev))
+               qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 2);
+       if (QED_IS_AH(p_hwfn->cdev))
+               qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 2);
+
+       qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_LSB, 0);
+       qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_MSB, 0);
+       /* Resume free running counter */
+       if (QED_IS_BB_B0(p_hwfn->cdev))
+               qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 4);
+       if (QED_IS_AH(p_hwfn->cdev)) {
+               qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 4);
+               qed_wr(p_hwfn, p_ptt, NIG_REG_PTP_LATCH_OSTS_PKT_TIME, 1);
+       }
+
+       /* Disable drift register */
+       qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF, 0x0);
+       qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x0);
+
+       /* Reset possibly old timestamps */
+       qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
+              QED_TIMESTAMP_MASK);
+       qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID, QED_TIMESTAMP_MASK);
+
+       return 0;
+}
+
+static int qed_ptp_hw_hwtstamp_tx_on(struct qed_dev *cdev)
+{
+       struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
+
+       qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x6AA);
+       qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3EEE);
+
+       return 0;
+}
+
+static int qed_ptp_hw_disable(struct qed_dev *cdev)
+{
+       struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
+
+       /* Reset PTP event detection rules */
+       qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF);
+       qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, 0x3FFF);
+
+       qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF);
+       qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF);
+
+       /* Disable the PTP feature */
+       qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, 0x0);
+       qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 0x0);
+
+       return 0;
+}
+
+const struct qed_eth_ptp_ops qed_ptp_ops_pass = {
+       .hwtstamp_tx_on = qed_ptp_hw_hwtstamp_tx_on,
+       .cfg_rx_filters = qed_ptp_hw_cfg_rx_filters,
+       .read_rx_ts = qed_ptp_hw_read_rx_ts,
+       .read_tx_ts = qed_ptp_hw_read_tx_ts,
+       .read_cc = qed_ptp_hw_read_cc,
+       .adjfreq = qed_ptp_hw_adjfreq,
+       .disable = qed_ptp_hw_disable,
+       .enable = qed_ptp_hw_enable,
+};
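
To make the drift-triplet translation in qed_ptp_hw_adjfreq() concrete, here is a standalone userspace re-derivation of the period computation for a single candidate adjustment value. It is a sketch under stated assumptions: it reuses the same integer math as the kernel loop but skips the [1, 0xFFFFFFE] clamping and the best-candidate search over val = 7..1.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t ppb = 100;	/* requested rate adjustment, illustrative */
	int64_t val = 7;	/* one candidate step size in ns */
	/* Same math as the kernel loop: period counts 16ns quanta */
	int64_t period = (val * 1000000000LL / ppb - 8) >> 4;
	int64_t quanta_ns = 16 * period + 8;

	/* The HW adds 'val' ns once every 'quanta_ns' ns */
	printf("period=%lld: add %lldns every %lldns -> ~%.6f ppb\n",
	       (long long)period, (long long)val, (long long)quanta_ns,
	       (double)val * 1e9 / (double)quanta_ns);
	return 0;
}
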
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.h b/drivers/net/ethernet/qlogic/qed/qed_ptp.h
new file mode 100644 (file)
index 0000000..63c666d
--- /dev/null
@@ -0,0 +1,47 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _QED_PTP_H
+#define _QED_PTP_H
+#include <linux/types.h>
+
+int qed_ptp_hwtstamp_tx_on(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+int qed_ptp_cfg_rx_filters(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+                          enum qed_ptp_filter_type type);
+int qed_ptp_read_rx_ts(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u64 *ts);
+int qed_ptp_read_tx_ts(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u64 *ts);
+int qed_ptp_read_cc(struct qed_hwfn *p_hwfn,
+                   struct qed_ptt *p_ptt, u64 *cycles);
+int qed_ptp_adjfreq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, s32 ppb);
+int qed_ptp_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+int qed_ptp_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index f6b86ca1ff79339ddf22205e4618f065a5c0126c..1ae73b2d6d1e1e7d96abcedc6fbdfa683ed8a566 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef REG_ADDR_H
        0x1c80000UL
 #define BAR0_MAP_REG_XSDM_RAM \
        0x1e00000UL
+#define BAR0_MAP_REG_YSDM_RAM \
+       0x1e80000UL
 #define  NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF \
        0x5011f4UL
+#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE \
+       0x1f0164UL
 #define  PRS_REG_SEARCH_TCP \
        0x1f0400UL
 #define  PRS_REG_SEARCH_UDP \
        0x1f040cUL
 #define  PRS_REG_SEARCH_OPENFLOW       \
        0x1f0434UL
+#define PRS_REG_SEARCH_TAG1 \
+       0x1f0444UL
+#define PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST \
+       0x1f0a0cUL
+#define PRS_REG_SEARCH_TCP_FIRST_FRAG \
+       0x1f0410UL
 #define  TM_REG_PF_ENABLE_CONN \
        0x2c043cUL
 #define  TM_REG_PF_ENABLE_TASK \
        0x1009c4UL
 #define  QM_REG_PF_EN \
        0x2f2ea4UL
+#define TCFC_REG_WEAK_ENABLE_VF \
+       0x2d0704UL
 #define  TCFC_REG_STRONG_ENABLE_PF \
        0x2d0708UL
+#define  TCFC_REG_STRONG_ENABLE_VF \
+       0x2d070cUL
+#define CCFC_REG_WEAK_ENABLE_VF \
+       0x2e0704UL
 #define  CCFC_REG_STRONG_ENABLE_PF \
        0x2e0708UL
-#define  PGLUE_B_REG_PGL_ADDR_88_F0 \
+#define  PGLUE_B_REG_PGL_ADDR_88_F0_BB \
        0x2aa404UL
-#define  PGLUE_B_REG_PGL_ADDR_8C_F0 \
+#define  PGLUE_B_REG_PGL_ADDR_8C_F0_BB \
        0x2aa408UL
-#define  PGLUE_B_REG_PGL_ADDR_90_F0 \
+#define  PGLUE_B_REG_PGL_ADDR_90_F0_BB \
        0x2aa40cUL
-#define  PGLUE_B_REG_PGL_ADDR_94_F0 \
+#define  PGLUE_B_REG_PGL_ADDR_94_F0_BB \
        0x2aa410UL
 #define  PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR \
        0x2aa138UL
        0x50196cUL
 #define NIG_REG_LLH_CLS_TYPE_DUALMODE \
        0x501964UL
+#define NIG_REG_LLH_FUNC_FILTER_VALUE \
+       0x501a00UL
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_SIZE \
+       32
+#define NIG_REG_LLH_FUNC_FILTER_EN \
+       0x501a80UL
+#define NIG_REG_LLH_FUNC_FILTER_EN_SIZE        \
+       16
+#define NIG_REG_LLH_FUNC_FILTER_MODE \
+       0x501ac0UL
+#define NIG_REG_LLH_FUNC_FILTER_MODE_SIZE \
+       16
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE \
+       0x501b00UL
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_SIZE \
+       16
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL        \
+       0x501b40UL
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_SIZE \
+       16
 #define  NCSI_REG_CONFIG       \
        0x040200UL
 #define  PBF_REG_INIT \
        0x1f0a1cUL
 #define PRS_REG_ROCE_DEST_QP_MAX_PF \
        0x1f0430UL
+#define PRS_REG_USE_LIGHT_L2 \
+       0x1f096cUL
 #define  PSDM_REG_ENABLE_IN1 \
        0xfa0004UL
 #define  PSEM_REG_ENABLE_IN \
        0x238804UL
 #define  RDIF_REG_STOP_ON_ERROR \
        0x300040UL
+#define RDIF_REG_DEBUG_ERROR_INFO \
+       0x300400UL
+#define RDIF_REG_DEBUG_ERROR_INFO_SIZE \
+       64
 #define  SRC_REG_SOFT_RST \
        0x23874cUL
 #define  TCFC_REG_ACTIVITY_COUNTER \
        0x1700004UL
 #define  TDIF_REG_STOP_ON_ERROR \
        0x310040UL
+#define TDIF_REG_DEBUG_ERROR_INFO \
+       0x310400UL
+#define TDIF_REG_DEBUG_ERROR_INFO_SIZE \
+       64
 #define  UCM_REG_INIT \
        0x1280000UL
 #define  UMAC_REG_IPG_HD_BKP_CNTL_BB_B0 \
 
 #define QM_REG_WFQPFWEIGHT     0x2f4e80UL
 #define QM_REG_WFQVPWEIGHT     0x2fa000UL
+
+#define PGLCS_REG_DBG_SELECT \
+       0x001d14UL
+#define PGLCS_REG_DBG_DWORD_ENABLE \
+       0x001d18UL
+#define PGLCS_REG_DBG_SHIFT \
+       0x001d1cUL
+#define PGLCS_REG_DBG_FORCE_VALID \
+       0x001d20UL
+#define PGLCS_REG_DBG_FORCE_FRAME \
+       0x001d24UL
+#define MISC_REG_RESET_PL_PDA_VMAIN_1 \
+       0x008070UL
+#define MISC_REG_RESET_PL_PDA_VMAIN_2 \
+       0x008080UL
+#define MISC_REG_RESET_PL_PDA_VAUX \
+       0x008090UL
+#define MISCS_REG_RESET_PL_UA \
+       0x009050UL
+#define MISCS_REG_RESET_PL_HV \
+       0x009060UL
+#define MISCS_REG_RESET_PL_HV_2        \
+       0x009150UL
+#define DMAE_REG_DBG_SELECT \
+       0x00c510UL
+#define DMAE_REG_DBG_DWORD_ENABLE \
+       0x00c514UL
+#define DMAE_REG_DBG_SHIFT \
+       0x00c518UL
+#define DMAE_REG_DBG_FORCE_VALID \
+       0x00c51cUL
+#define DMAE_REG_DBG_FORCE_FRAME \
+       0x00c520UL
+#define NCSI_REG_DBG_SELECT \
+       0x040474UL
+#define NCSI_REG_DBG_DWORD_ENABLE \
+       0x040478UL
+#define NCSI_REG_DBG_SHIFT \
+       0x04047cUL
+#define NCSI_REG_DBG_FORCE_VALID \
+       0x040480UL
+#define NCSI_REG_DBG_FORCE_FRAME \
+       0x040484UL
+#define GRC_REG_DBG_SELECT \
+       0x0500a4UL
+#define GRC_REG_DBG_DWORD_ENABLE \
+       0x0500a8UL
+#define GRC_REG_DBG_SHIFT \
+       0x0500acUL
+#define GRC_REG_DBG_FORCE_VALID        \
+       0x0500b0UL
+#define GRC_REG_DBG_FORCE_FRAME        \
+       0x0500b4UL
+#define UMAC_REG_DBG_SELECT \
+       0x051094UL
+#define UMAC_REG_DBG_DWORD_ENABLE \
+       0x051098UL
+#define UMAC_REG_DBG_SHIFT \
+       0x05109cUL
+#define UMAC_REG_DBG_FORCE_VALID \
+       0x0510a0UL
+#define UMAC_REG_DBG_FORCE_FRAME \
+       0x0510a4UL
+#define MCP2_REG_DBG_SELECT \
+       0x052400UL
+#define MCP2_REG_DBG_DWORD_ENABLE \
+       0x052404UL
+#define MCP2_REG_DBG_SHIFT \
+       0x052408UL
+#define MCP2_REG_DBG_FORCE_VALID \
+       0x052440UL
+#define MCP2_REG_DBG_FORCE_FRAME \
+       0x052444UL
+#define PCIE_REG_DBG_SELECT \
+       0x0547e8UL
+#define PCIE_REG_DBG_DWORD_ENABLE \
+       0x0547ecUL
+#define PCIE_REG_DBG_SHIFT \
+       0x0547f0UL
+#define PCIE_REG_DBG_FORCE_VALID \
+       0x0547f4UL
+#define PCIE_REG_DBG_FORCE_FRAME \
+       0x0547f8UL
+#define DORQ_REG_DBG_SELECT \
+       0x100ad0UL
+#define DORQ_REG_DBG_DWORD_ENABLE \
+       0x100ad4UL
+#define DORQ_REG_DBG_SHIFT \
+       0x100ad8UL
+#define DORQ_REG_DBG_FORCE_VALID \
+       0x100adcUL
+#define DORQ_REG_DBG_FORCE_FRAME \
+       0x100ae0UL
+#define IGU_REG_DBG_SELECT \
+       0x181578UL
+#define IGU_REG_DBG_DWORD_ENABLE \
+       0x18157cUL
+#define IGU_REG_DBG_SHIFT \
+       0x181580UL
+#define IGU_REG_DBG_FORCE_VALID        \
+       0x181584UL
+#define IGU_REG_DBG_FORCE_FRAME        \
+       0x181588UL
+#define CAU_REG_DBG_SELECT \
+       0x1c0ea8UL
+#define CAU_REG_DBG_DWORD_ENABLE \
+       0x1c0eacUL
+#define CAU_REG_DBG_SHIFT \
+       0x1c0eb0UL
+#define CAU_REG_DBG_FORCE_VALID        \
+       0x1c0eb4UL
+#define CAU_REG_DBG_FORCE_FRAME        \
+       0x1c0eb8UL
+#define PRS_REG_DBG_SELECT \
+       0x1f0b6cUL
+#define PRS_REG_DBG_DWORD_ENABLE \
+       0x1f0b70UL
+#define PRS_REG_DBG_SHIFT \
+       0x1f0b74UL
+#define PRS_REG_DBG_FORCE_VALID        \
+       0x1f0ba0UL
+#define PRS_REG_DBG_FORCE_FRAME        \
+       0x1f0ba4UL
+#define CNIG_REG_DBG_SELECT_K2 \
+       0x218254UL
+#define CNIG_REG_DBG_DWORD_ENABLE_K2 \
+       0x218258UL
+#define CNIG_REG_DBG_SHIFT_K2 \
+       0x21825cUL
+#define CNIG_REG_DBG_FORCE_VALID_K2 \
+       0x218260UL
+#define CNIG_REG_DBG_FORCE_FRAME_K2 \
+       0x218264UL
+#define PRM_REG_DBG_SELECT \
+       0x2306a8UL
+#define PRM_REG_DBG_DWORD_ENABLE \
+       0x2306acUL
+#define PRM_REG_DBG_SHIFT \
+       0x2306b0UL
+#define PRM_REG_DBG_FORCE_VALID        \
+       0x2306b4UL
+#define PRM_REG_DBG_FORCE_FRAME        \
+       0x2306b8UL
+#define SRC_REG_DBG_SELECT \
+       0x238700UL
+#define SRC_REG_DBG_DWORD_ENABLE \
+       0x238704UL
+#define SRC_REG_DBG_SHIFT \
+       0x238708UL
+#define SRC_REG_DBG_FORCE_VALID        \
+       0x23870cUL
+#define SRC_REG_DBG_FORCE_FRAME        \
+       0x238710UL
+#define RSS_REG_DBG_SELECT \
+       0x238c4cUL
+#define RSS_REG_DBG_DWORD_ENABLE \
+       0x238c50UL
+#define RSS_REG_DBG_SHIFT \
+       0x238c54UL
+#define RSS_REG_DBG_FORCE_VALID        \
+       0x238c58UL
+#define RSS_REG_DBG_FORCE_FRAME        \
+       0x238c5cUL
+#define RPB_REG_DBG_SELECT \
+       0x23c728UL
+#define RPB_REG_DBG_DWORD_ENABLE \
+       0x23c72cUL
+#define RPB_REG_DBG_SHIFT \
+       0x23c730UL
+#define RPB_REG_DBG_FORCE_VALID        \
+       0x23c734UL
+#define RPB_REG_DBG_FORCE_FRAME        \
+       0x23c738UL
+#define PSWRQ2_REG_DBG_SELECT \
+       0x240100UL
+#define PSWRQ2_REG_DBG_DWORD_ENABLE \
+       0x240104UL
+#define PSWRQ2_REG_DBG_SHIFT \
+       0x240108UL
+#define PSWRQ2_REG_DBG_FORCE_VALID \
+       0x24010cUL
+#define PSWRQ2_REG_DBG_FORCE_FRAME \
+       0x240110UL
+#define PSWRQ_REG_DBG_SELECT \
+       0x280020UL
+#define PSWRQ_REG_DBG_DWORD_ENABLE \
+       0x280024UL
+#define PSWRQ_REG_DBG_SHIFT \
+       0x280028UL
+#define PSWRQ_REG_DBG_FORCE_VALID \
+       0x28002cUL
+#define PSWRQ_REG_DBG_FORCE_FRAME \
+       0x280030UL
+#define PSWWR_REG_DBG_SELECT \
+       0x29a084UL
+#define PSWWR_REG_DBG_DWORD_ENABLE \
+       0x29a088UL
+#define PSWWR_REG_DBG_SHIFT \
+       0x29a08cUL
+#define PSWWR_REG_DBG_FORCE_VALID \
+       0x29a090UL
+#define PSWWR_REG_DBG_FORCE_FRAME \
+       0x29a094UL
+#define PSWRD_REG_DBG_SELECT \
+       0x29c040UL
+#define PSWRD_REG_DBG_DWORD_ENABLE \
+       0x29c044UL
+#define PSWRD_REG_DBG_SHIFT \
+       0x29c048UL
+#define PSWRD_REG_DBG_FORCE_VALID \
+       0x29c04cUL
+#define PSWRD_REG_DBG_FORCE_FRAME \
+       0x29c050UL
+#define PSWRD2_REG_DBG_SELECT \
+       0x29d400UL
+#define PSWRD2_REG_DBG_DWORD_ENABLE \
+       0x29d404UL
+#define PSWRD2_REG_DBG_SHIFT \
+       0x29d408UL
+#define PSWRD2_REG_DBG_FORCE_VALID \
+       0x29d40cUL
+#define PSWRD2_REG_DBG_FORCE_FRAME \
+       0x29d410UL
+#define PSWHST2_REG_DBG_SELECT \
+       0x29e058UL
+#define PSWHST2_REG_DBG_DWORD_ENABLE \
+       0x29e05cUL
+#define PSWHST2_REG_DBG_SHIFT \
+       0x29e060UL
+#define PSWHST2_REG_DBG_FORCE_VALID \
+       0x29e064UL
+#define PSWHST2_REG_DBG_FORCE_FRAME \
+       0x29e068UL
+#define PSWHST_REG_DBG_SELECT \
+       0x2a0100UL
+#define PSWHST_REG_DBG_DWORD_ENABLE \
+       0x2a0104UL
+#define PSWHST_REG_DBG_SHIFT \
+       0x2a0108UL
+#define PSWHST_REG_DBG_FORCE_VALID \
+       0x2a010cUL
+#define PSWHST_REG_DBG_FORCE_FRAME \
+       0x2a0110UL
+#define PGLUE_B_REG_DBG_SELECT \
+       0x2a8400UL
+#define PGLUE_B_REG_DBG_DWORD_ENABLE \
+       0x2a8404UL
+#define PGLUE_B_REG_DBG_SHIFT \
+       0x2a8408UL
+#define PGLUE_B_REG_DBG_FORCE_VALID \
+       0x2a840cUL
+#define PGLUE_B_REG_DBG_FORCE_FRAME \
+       0x2a8410UL
+#define TM_REG_DBG_SELECT \
+       0x2c07a8UL
+#define TM_REG_DBG_DWORD_ENABLE        \
+       0x2c07acUL
+#define TM_REG_DBG_SHIFT \
+       0x2c07b0UL
+#define TM_REG_DBG_FORCE_VALID \
+       0x2c07b4UL
+#define TM_REG_DBG_FORCE_FRAME \
+       0x2c07b8UL
+#define TCFC_REG_DBG_SELECT \
+       0x2d0500UL
+#define TCFC_REG_DBG_DWORD_ENABLE \
+       0x2d0504UL
+#define TCFC_REG_DBG_SHIFT \
+       0x2d0508UL
+#define TCFC_REG_DBG_FORCE_VALID \
+       0x2d050cUL
+#define TCFC_REG_DBG_FORCE_FRAME \
+       0x2d0510UL
+#define CCFC_REG_DBG_SELECT \
+       0x2e0500UL
+#define CCFC_REG_DBG_DWORD_ENABLE \
+       0x2e0504UL
+#define CCFC_REG_DBG_SHIFT \
+       0x2e0508UL
+#define CCFC_REG_DBG_FORCE_VALID \
+       0x2e050cUL
+#define CCFC_REG_DBG_FORCE_FRAME \
+       0x2e0510UL
+#define QM_REG_DBG_SELECT \
+       0x2f2e74UL
+#define QM_REG_DBG_DWORD_ENABLE        \
+       0x2f2e78UL
+#define QM_REG_DBG_SHIFT \
+       0x2f2e7cUL
+#define QM_REG_DBG_FORCE_VALID \
+       0x2f2e80UL
+#define QM_REG_DBG_FORCE_FRAME \
+       0x2f2e84UL
+#define RDIF_REG_DBG_SELECT \
+       0x300500UL
+#define RDIF_REG_DBG_DWORD_ENABLE \
+       0x300504UL
+#define RDIF_REG_DBG_SHIFT \
+       0x300508UL
+#define RDIF_REG_DBG_FORCE_VALID \
+       0x30050cUL
+#define RDIF_REG_DBG_FORCE_FRAME \
+       0x300510UL
+#define TDIF_REG_DBG_SELECT \
+       0x310500UL
+#define TDIF_REG_DBG_DWORD_ENABLE \
+       0x310504UL
+#define TDIF_REG_DBG_SHIFT \
+       0x310508UL
+#define TDIF_REG_DBG_FORCE_VALID \
+       0x31050cUL
+#define TDIF_REG_DBG_FORCE_FRAME \
+       0x310510UL
+#define BRB_REG_DBG_SELECT \
+       0x340ed0UL
+#define BRB_REG_DBG_DWORD_ENABLE \
+       0x340ed4UL
+#define BRB_REG_DBG_SHIFT \
+       0x340ed8UL
+#define BRB_REG_DBG_FORCE_VALID        \
+       0x340edcUL
+#define BRB_REG_DBG_FORCE_FRAME        \
+       0x340ee0UL
+#define XYLD_REG_DBG_SELECT \
+       0x4c1600UL
+#define XYLD_REG_DBG_DWORD_ENABLE \
+       0x4c1604UL
+#define XYLD_REG_DBG_SHIFT \
+       0x4c1608UL
+#define XYLD_REG_DBG_FORCE_VALID \
+       0x4c160cUL
+#define XYLD_REG_DBG_FORCE_FRAME \
+       0x4c1610UL
+#define YULD_REG_DBG_SELECT \
+       0x4c9600UL
+#define YULD_REG_DBG_DWORD_ENABLE \
+       0x4c9604UL
+#define YULD_REG_DBG_SHIFT \
+       0x4c9608UL
+#define YULD_REG_DBG_FORCE_VALID \
+       0x4c960cUL
+#define YULD_REG_DBG_FORCE_FRAME \
+       0x4c9610UL
+#define TMLD_REG_DBG_SELECT \
+       0x4d1600UL
+#define TMLD_REG_DBG_DWORD_ENABLE \
+       0x4d1604UL
+#define TMLD_REG_DBG_SHIFT \
+       0x4d1608UL
+#define TMLD_REG_DBG_FORCE_VALID \
+       0x4d160cUL
+#define TMLD_REG_DBG_FORCE_FRAME \
+       0x4d1610UL
+#define MULD_REG_DBG_SELECT \
+       0x4e1600UL
+#define MULD_REG_DBG_DWORD_ENABLE \
+       0x4e1604UL
+#define MULD_REG_DBG_SHIFT \
+       0x4e1608UL
+#define MULD_REG_DBG_FORCE_VALID \
+       0x4e160cUL
+#define MULD_REG_DBG_FORCE_FRAME \
+       0x4e1610UL
+#define NIG_REG_DBG_SELECT \
+       0x502140UL
+#define NIG_REG_DBG_DWORD_ENABLE \
+       0x502144UL
+#define NIG_REG_DBG_SHIFT \
+       0x502148UL
+#define NIG_REG_DBG_FORCE_VALID        \
+       0x50214cUL
+#define NIG_REG_DBG_FORCE_FRAME        \
+       0x502150UL
+#define BMB_REG_DBG_SELECT \
+       0x540a7cUL
+#define BMB_REG_DBG_DWORD_ENABLE \
+       0x540a80UL
+#define BMB_REG_DBG_SHIFT \
+       0x540a84UL
+#define BMB_REG_DBG_FORCE_VALID        \
+       0x540a88UL
+#define BMB_REG_DBG_FORCE_FRAME        \
+       0x540a8cUL
+#define PTU_REG_DBG_SELECT \
+       0x560100UL
+#define PTU_REG_DBG_DWORD_ENABLE \
+       0x560104UL
+#define PTU_REG_DBG_SHIFT \
+       0x560108UL
+#define PTU_REG_DBG_FORCE_VALID        \
+       0x56010cUL
+#define PTU_REG_DBG_FORCE_FRAME        \
+       0x560110UL
+#define CDU_REG_DBG_SELECT \
+       0x580704UL
+#define CDU_REG_DBG_DWORD_ENABLE \
+       0x580708UL
+#define CDU_REG_DBG_SHIFT \
+       0x58070cUL
+#define CDU_REG_DBG_FORCE_VALID        \
+       0x580710UL
+#define CDU_REG_DBG_FORCE_FRAME        \
+       0x580714UL
+#define WOL_REG_DBG_SELECT \
+       0x600140UL
+#define WOL_REG_DBG_DWORD_ENABLE \
+       0x600144UL
+#define WOL_REG_DBG_SHIFT \
+       0x600148UL
+#define WOL_REG_DBG_FORCE_VALID        \
+       0x60014cUL
+#define WOL_REG_DBG_FORCE_FRAME        \
+       0x600150UL
+#define BMBN_REG_DBG_SELECT \
+       0x610140UL
+#define BMBN_REG_DBG_DWORD_ENABLE \
+       0x610144UL
+#define BMBN_REG_DBG_SHIFT \
+       0x610148UL
+#define BMBN_REG_DBG_FORCE_VALID \
+       0x61014cUL
+#define BMBN_REG_DBG_FORCE_FRAME \
+       0x610150UL
+#define NWM_REG_DBG_SELECT \
+       0x8000ecUL
+#define NWM_REG_DBG_DWORD_ENABLE \
+       0x8000f0UL
+#define NWM_REG_DBG_SHIFT \
+       0x8000f4UL
+#define NWM_REG_DBG_FORCE_VALID        \
+       0x8000f8UL
+#define NWM_REG_DBG_FORCE_FRAME        \
+       0x8000fcUL
+#define PBF_REG_DBG_SELECT \
+       0xd80060UL
+#define PBF_REG_DBG_DWORD_ENABLE \
+       0xd80064UL
+#define PBF_REG_DBG_SHIFT \
+       0xd80068UL
+#define PBF_REG_DBG_FORCE_VALID        \
+       0xd8006cUL
+#define PBF_REG_DBG_FORCE_FRAME        \
+       0xd80070UL
+#define PBF_PB1_REG_DBG_SELECT \
+       0xda0728UL
+#define PBF_PB1_REG_DBG_DWORD_ENABLE \
+       0xda072cUL
+#define PBF_PB1_REG_DBG_SHIFT \
+       0xda0730UL
+#define PBF_PB1_REG_DBG_FORCE_VALID \
+       0xda0734UL
+#define PBF_PB1_REG_DBG_FORCE_FRAME \
+       0xda0738UL
+#define PBF_PB2_REG_DBG_SELECT \
+       0xda4728UL
+#define PBF_PB2_REG_DBG_DWORD_ENABLE \
+       0xda472cUL
+#define PBF_PB2_REG_DBG_SHIFT \
+       0xda4730UL
+#define PBF_PB2_REG_DBG_FORCE_VALID \
+       0xda4734UL
+#define PBF_PB2_REG_DBG_FORCE_FRAME \
+       0xda4738UL
+#define BTB_REG_DBG_SELECT \
+       0xdb08c8UL
+#define BTB_REG_DBG_DWORD_ENABLE \
+       0xdb08ccUL
+#define BTB_REG_DBG_SHIFT \
+       0xdb08d0UL
+#define BTB_REG_DBG_FORCE_VALID        \
+       0xdb08d4UL
+#define BTB_REG_DBG_FORCE_FRAME        \
+       0xdb08d8UL
+#define XSDM_REG_DBG_SELECT \
+       0xf80e28UL
+#define XSDM_REG_DBG_DWORD_ENABLE \
+       0xf80e2cUL
+#define XSDM_REG_DBG_SHIFT \
+       0xf80e30UL
+#define XSDM_REG_DBG_FORCE_VALID \
+       0xf80e34UL
+#define XSDM_REG_DBG_FORCE_FRAME \
+       0xf80e38UL
+#define YSDM_REG_DBG_SELECT \
+       0xf90e28UL
+#define YSDM_REG_DBG_DWORD_ENABLE \
+       0xf90e2cUL
+#define YSDM_REG_DBG_SHIFT \
+       0xf90e30UL
+#define YSDM_REG_DBG_FORCE_VALID \
+       0xf90e34UL
+#define YSDM_REG_DBG_FORCE_FRAME \
+       0xf90e38UL
+#define PSDM_REG_DBG_SELECT \
+       0xfa0e28UL
+#define PSDM_REG_DBG_DWORD_ENABLE \
+       0xfa0e2cUL
+#define PSDM_REG_DBG_SHIFT \
+       0xfa0e30UL
+#define PSDM_REG_DBG_FORCE_VALID \
+       0xfa0e34UL
+#define PSDM_REG_DBG_FORCE_FRAME \
+       0xfa0e38UL
+#define TSDM_REG_DBG_SELECT \
+       0xfb0e28UL
+#define TSDM_REG_DBG_DWORD_ENABLE \
+       0xfb0e2cUL
+#define TSDM_REG_DBG_SHIFT \
+       0xfb0e30UL
+#define TSDM_REG_DBG_FORCE_VALID \
+       0xfb0e34UL
+#define TSDM_REG_DBG_FORCE_FRAME \
+       0xfb0e38UL
+#define MSDM_REG_DBG_SELECT \
+       0xfc0e28UL
+#define MSDM_REG_DBG_DWORD_ENABLE \
+       0xfc0e2cUL
+#define MSDM_REG_DBG_SHIFT \
+       0xfc0e30UL
+#define MSDM_REG_DBG_FORCE_VALID \
+       0xfc0e34UL
+#define MSDM_REG_DBG_FORCE_FRAME \
+       0xfc0e38UL
+#define USDM_REG_DBG_SELECT \
+       0xfd0e28UL
+#define USDM_REG_DBG_DWORD_ENABLE \
+       0xfd0e2cUL
+#define USDM_REG_DBG_SHIFT \
+       0xfd0e30UL
+#define USDM_REG_DBG_FORCE_VALID \
+       0xfd0e34UL
+#define USDM_REG_DBG_FORCE_FRAME \
+       0xfd0e38UL
+#define XCM_REG_DBG_SELECT \
+       0x1000040UL
+#define XCM_REG_DBG_DWORD_ENABLE \
+       0x1000044UL
+#define XCM_REG_DBG_SHIFT \
+       0x1000048UL
+#define XCM_REG_DBG_FORCE_VALID        \
+       0x100004cUL
+#define XCM_REG_DBG_FORCE_FRAME        \
+       0x1000050UL
+#define YCM_REG_DBG_SELECT \
+       0x1080040UL
+#define YCM_REG_DBG_DWORD_ENABLE \
+       0x1080044UL
+#define YCM_REG_DBG_SHIFT \
+       0x1080048UL
+#define YCM_REG_DBG_FORCE_VALID        \
+       0x108004cUL
+#define YCM_REG_DBG_FORCE_FRAME        \
+       0x1080050UL
+#define PCM_REG_DBG_SELECT \
+       0x1100040UL
+#define PCM_REG_DBG_DWORD_ENABLE \
+       0x1100044UL
+#define PCM_REG_DBG_SHIFT \
+       0x1100048UL
+#define PCM_REG_DBG_FORCE_VALID        \
+       0x110004cUL
+#define PCM_REG_DBG_FORCE_FRAME        \
+       0x1100050UL
+#define TCM_REG_DBG_SELECT \
+       0x1180040UL
+#define TCM_REG_DBG_DWORD_ENABLE \
+       0x1180044UL
+#define TCM_REG_DBG_SHIFT \
+       0x1180048UL
+#define TCM_REG_DBG_FORCE_VALID        \
+       0x118004cUL
+#define TCM_REG_DBG_FORCE_FRAME        \
+       0x1180050UL
+#define MCM_REG_DBG_SELECT \
+       0x1200040UL
+#define MCM_REG_DBG_DWORD_ENABLE \
+       0x1200044UL
+#define MCM_REG_DBG_SHIFT \
+       0x1200048UL
+#define MCM_REG_DBG_FORCE_VALID        \
+       0x120004cUL
+#define MCM_REG_DBG_FORCE_FRAME        \
+       0x1200050UL
+#define UCM_REG_DBG_SELECT \
+       0x1280050UL
+#define UCM_REG_DBG_DWORD_ENABLE \
+       0x1280054UL
+#define UCM_REG_DBG_SHIFT \
+       0x1280058UL
+#define UCM_REG_DBG_FORCE_VALID        \
+       0x128005cUL
+#define UCM_REG_DBG_FORCE_FRAME        \
+       0x1280060UL
+#define XSEM_REG_DBG_SELECT \
+       0x1401528UL
+#define XSEM_REG_DBG_DWORD_ENABLE \
+       0x140152cUL
+#define XSEM_REG_DBG_SHIFT \
+       0x1401530UL
+#define XSEM_REG_DBG_FORCE_VALID \
+       0x1401534UL
+#define XSEM_REG_DBG_FORCE_FRAME \
+       0x1401538UL
+#define YSEM_REG_DBG_SELECT \
+       0x1501528UL
+#define YSEM_REG_DBG_DWORD_ENABLE \
+       0x150152cUL
+#define YSEM_REG_DBG_SHIFT \
+       0x1501530UL
+#define YSEM_REG_DBG_FORCE_VALID \
+       0x1501534UL
+#define YSEM_REG_DBG_FORCE_FRAME \
+       0x1501538UL
+#define PSEM_REG_DBG_SELECT \
+       0x1601528UL
+#define PSEM_REG_DBG_DWORD_ENABLE \
+       0x160152cUL
+#define PSEM_REG_DBG_SHIFT \
+       0x1601530UL
+#define PSEM_REG_DBG_FORCE_VALID \
+       0x1601534UL
+#define PSEM_REG_DBG_FORCE_FRAME \
+       0x1601538UL
+#define TSEM_REG_DBG_SELECT \
+       0x1701528UL
+#define TSEM_REG_DBG_DWORD_ENABLE \
+       0x170152cUL
+#define TSEM_REG_DBG_SHIFT \
+       0x1701530UL
+#define TSEM_REG_DBG_FORCE_VALID \
+       0x1701534UL
+#define TSEM_REG_DBG_FORCE_FRAME \
+       0x1701538UL
+#define MSEM_REG_DBG_SELECT \
+       0x1801528UL
+#define MSEM_REG_DBG_DWORD_ENABLE \
+       0x180152cUL
+#define MSEM_REG_DBG_SHIFT \
+       0x1801530UL
+#define MSEM_REG_DBG_FORCE_VALID \
+       0x1801534UL
+#define MSEM_REG_DBG_FORCE_FRAME \
+       0x1801538UL
+#define USEM_REG_DBG_SELECT \
+       0x1901528UL
+#define USEM_REG_DBG_DWORD_ENABLE \
+       0x190152cUL
+#define USEM_REG_DBG_SHIFT \
+       0x1901530UL
+#define USEM_REG_DBG_FORCE_VALID \
+       0x1901534UL
+#define USEM_REG_DBG_FORCE_FRAME \
+       0x1901538UL
+#define NWS_REG_DBG_SELECT \
+       0x700128UL
+#define NWS_REG_DBG_DWORD_ENABLE \
+       0x70012cUL
+#define NWS_REG_DBG_SHIFT \
+       0x700130UL
+#define NWS_REG_DBG_FORCE_VALID        \
+       0x700134UL
+#define NWS_REG_DBG_FORCE_FRAME        \
+       0x700138UL
+#define MS_REG_DBG_SELECT \
+       0x6a0228UL
+#define MS_REG_DBG_DWORD_ENABLE \
+       0x6a022cUL
+#define MS_REG_DBG_SHIFT \
+       0x6a0230UL
+#define MS_REG_DBG_FORCE_VALID \
+       0x6a0234UL
+#define MS_REG_DBG_FORCE_FRAME \
+       0x6a0238UL
+#define PCIE_REG_DBG_COMMON_SELECT \
+       0x054398UL
+#define PCIE_REG_DBG_COMMON_DWORD_ENABLE \
+       0x05439cUL
+#define PCIE_REG_DBG_COMMON_SHIFT \
+       0x0543a0UL
+#define PCIE_REG_DBG_COMMON_FORCE_VALID        \
+       0x0543a4UL
+#define PCIE_REG_DBG_COMMON_FORCE_FRAME        \
+       0x0543a8UL
+#define MISC_REG_RESET_PL_UA \
+       0x008050UL
+#define MISC_REG_RESET_PL_HV \
+       0x008060UL
+#define XCM_REG_CTX_RBC_ACCS \
+       0x1001800UL
+#define XCM_REG_AGG_CON_CTX \
+       0x1001804UL
+#define XCM_REG_SM_CON_CTX \
+       0x1001808UL
+#define YCM_REG_CTX_RBC_ACCS \
+       0x1081800UL
+#define YCM_REG_AGG_CON_CTX \
+       0x1081804UL
+#define YCM_REG_AGG_TASK_CTX \
+       0x1081808UL
+#define YCM_REG_SM_CON_CTX \
+       0x108180cUL
+#define YCM_REG_SM_TASK_CTX \
+       0x1081810UL
+#define PCM_REG_CTX_RBC_ACCS \
+       0x1101440UL
+#define PCM_REG_SM_CON_CTX \
+       0x1101444UL
+#define TCM_REG_CTX_RBC_ACCS \
+       0x11814c0UL
+#define TCM_REG_AGG_CON_CTX \
+       0x11814c4UL
+#define TCM_REG_AGG_TASK_CTX \
+       0x11814c8UL
+#define TCM_REG_SM_CON_CTX \
+       0x11814ccUL
+#define TCM_REG_SM_TASK_CTX \
+       0x11814d0UL
+#define MCM_REG_CTX_RBC_ACCS \
+       0x1201800UL
+#define MCM_REG_AGG_CON_CTX \
+       0x1201804UL
+#define MCM_REG_AGG_TASK_CTX \
+       0x1201808UL
+#define MCM_REG_SM_CON_CTX \
+       0x120180cUL
+#define MCM_REG_SM_TASK_CTX \
+       0x1201810UL
+#define UCM_REG_CTX_RBC_ACCS \
+       0x1281700UL
+#define UCM_REG_AGG_CON_CTX \
+       0x1281704UL
+#define UCM_REG_AGG_TASK_CTX \
+       0x1281708UL
+#define UCM_REG_SM_CON_CTX \
+       0x128170cUL
+#define UCM_REG_SM_TASK_CTX \
+       0x1281710UL
+#define XSEM_REG_SLOW_DBG_EMPTY        \
+       0x1401140UL
+#define XSEM_REG_SYNC_DBG_EMPTY        \
+       0x1401160UL
+#define XSEM_REG_SLOW_DBG_ACTIVE \
+       0x1401400UL
+#define XSEM_REG_SLOW_DBG_MODE \
+       0x1401404UL
+#define XSEM_REG_DBG_FRAME_MODE        \
+       0x1401408UL
+#define XSEM_REG_DBG_MODE1_CFG \
+       0x1401420UL
+#define XSEM_REG_FAST_MEMORY \
+       0x1440000UL
+#define YSEM_REG_SYNC_DBG_EMPTY        \
+       0x1501160UL
+#define YSEM_REG_SLOW_DBG_ACTIVE \
+       0x1501400UL
+#define YSEM_REG_SLOW_DBG_MODE \
+       0x1501404UL
+#define YSEM_REG_DBG_FRAME_MODE        \
+       0x1501408UL
+#define YSEM_REG_DBG_MODE1_CFG \
+       0x1501420UL
+#define YSEM_REG_FAST_MEMORY \
+       0x1540000UL
+#define PSEM_REG_SLOW_DBG_EMPTY        \
+       0x1601140UL
+#define PSEM_REG_SYNC_DBG_EMPTY        \
+       0x1601160UL
+#define PSEM_REG_SLOW_DBG_ACTIVE \
+       0x1601400UL
+#define PSEM_REG_SLOW_DBG_MODE \
+       0x1601404UL
+#define PSEM_REG_DBG_FRAME_MODE        \
+       0x1601408UL
+#define PSEM_REG_DBG_MODE1_CFG \
+       0x1601420UL
+#define PSEM_REG_FAST_MEMORY \
+       0x1640000UL
+#define TSEM_REG_SLOW_DBG_EMPTY        \
+       0x1701140UL
+#define TSEM_REG_SYNC_DBG_EMPTY        \
+       0x1701160UL
+#define TSEM_REG_SLOW_DBG_ACTIVE \
+       0x1701400UL
+#define TSEM_REG_SLOW_DBG_MODE \
+       0x1701404UL
+#define TSEM_REG_DBG_FRAME_MODE        \
+       0x1701408UL
+#define TSEM_REG_DBG_MODE1_CFG \
+       0x1701420UL
+#define TSEM_REG_FAST_MEMORY \
+       0x1740000UL
+#define MSEM_REG_SLOW_DBG_EMPTY        \
+       0x1801140UL
+#define MSEM_REG_SYNC_DBG_EMPTY        \
+       0x1801160UL
+#define MSEM_REG_SLOW_DBG_ACTIVE \
+       0x1801400UL
+#define MSEM_REG_SLOW_DBG_MODE \
+       0x1801404UL
+#define MSEM_REG_DBG_FRAME_MODE        \
+       0x1801408UL
+#define MSEM_REG_DBG_MODE1_CFG \
+       0x1801420UL
+#define MSEM_REG_FAST_MEMORY \
+       0x1840000UL
+#define USEM_REG_SLOW_DBG_EMPTY        \
+       0x1901140UL
+#define USEM_REG_SYNC_DBG_EMPTY        \
+       0x1901160UL
+#define USEM_REG_SLOW_DBG_ACTIVE \
+       0x1901400UL
+#define USEM_REG_SLOW_DBG_MODE \
+       0x1901404UL
+#define USEM_REG_DBG_FRAME_MODE        \
+       0x1901408UL
+#define USEM_REG_DBG_MODE1_CFG \
+       0x1901420UL
+#define USEM_REG_FAST_MEMORY \
+       0x1940000UL
+#define SEM_FAST_REG_INT_RAM \
+       0x020000UL
+#define SEM_FAST_REG_INT_RAM_SIZE \
+       20480
+#define GRC_REG_TRACE_FIFO_VALID_DATA \
+       0x050064UL
+#define GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW \
+       0x05040cUL
+#define GRC_REG_PROTECTION_OVERRIDE_WINDOW \
+       0x050500UL
+#define IGU_REG_ERROR_HANDLING_MEMORY \
+       0x181520UL
+#define MCP_REG_CPU_MODE \
+       0xe05000UL
+#define MCP_REG_CPU_MODE_SOFT_HALT \
+               (0x1 << 10)
+#define BRB_REG_BIG_RAM_ADDRESS \
+       0x340800UL
+#define BRB_REG_BIG_RAM_DATA \
+       0x341500UL
+#define SEM_FAST_REG_STALL_0 \
+       0x000488UL
+#define SEM_FAST_REG_STALLED \
+       0x000494UL
+#define BTB_REG_BIG_RAM_ADDRESS \
+       0xdb0800UL
+#define BTB_REG_BIG_RAM_DATA \
+       0xdb0c00UL
+#define BMB_REG_BIG_RAM_ADDRESS \
+       0x540800UL
+#define BMB_REG_BIG_RAM_DATA \
+       0x540f00UL
+#define SEM_FAST_REG_STORM_REG_FILE \
+       0x008000UL
+#define RSS_REG_RSS_RAM_ADDR \
+       0x238c30UL
+#define MISCS_REG_BLOCK_256B_EN \
+       0x009074UL
+#define MCP_REG_SCRATCH_SIZE \
+       57344
+#define MCP_REG_CPU_REG_FILE \
+       0xe05200UL
+#define MCP_REG_CPU_REG_FILE_SIZE \
+       32
+#define DBG_REG_DEBUG_TARGET \
+       0x01005cUL
+#define DBG_REG_FULL_MODE \
+       0x010060UL
+#define DBG_REG_CALENDAR_OUT_DATA \
+       0x010480UL
+#define GRC_REG_TRACE_FIFO \
+       0x050068UL
+#define IGU_REG_ERROR_HANDLING_DATA_VALID \
+       0x181530UL
+#define DBG_REG_DBG_BLOCK_ON \
+       0x010454UL
+#define DBG_REG_FRAMING_MODE \
+       0x010058UL
+#define SEM_FAST_REG_VFC_DATA_WR \
+       0x000b40UL
+#define SEM_FAST_REG_VFC_ADDR \
+       0x000b44UL
+#define SEM_FAST_REG_VFC_DATA_RD \
+       0x000b48UL
+#define RSS_REG_RSS_RAM_DATA \
+       0x238c20UL
+#define RSS_REG_RSS_RAM_DATA_SIZE \
+       4
+#define MISC_REG_BLOCK_256B_EN \
+       0x008c14UL
+#define NWS_REG_NWS_CMU        \
+       0x720000UL
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0 \
+       0x000680UL
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8 \
+       0x000684UL
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0 \
+       0x0006c0UL
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8 \
+       0x0006c4UL
+#define MS_REG_MS_CMU \
+       0x6a4000UL
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130 \
+       0x000208UL
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132 \
+       0x000210UL
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131 \
+       0x00020cUL
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133 \
+       0x000214UL
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130 \
+       0x000208UL
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131 \
+       0x00020cUL
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132 \
+       0x000210UL
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133 \
+       0x000214UL
+#define PHY_PCIE_REG_PHY0 \
+       0x620000UL
+#define PHY_PCIE_REG_PHY1 \
+       0x624000UL
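+
+/* The single-line defines below cover the NIG RoCE/PTP, DORQ doorbell
+ * and parser GFT registers referenced by this series.
+ */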
+#define NIG_REG_ROCE_DUPLICATE_TO_HOST 0x5088f0UL
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN 0x1f0968UL
+#define NIG_REG_LLH_ENG_CLS_ENG_ID_TBL 0x501b90UL
+#define DORQ_REG_PF_DPM_ENABLE 0x100510UL
+#define DORQ_REG_PF_ICID_BIT_SHIFT_NORM        0x100448UL
+#define DORQ_REG_PF_MIN_ADDR_REG1 0x100400UL
+#define DORQ_REG_PF_DPI_BIT_SHIFT 0x100450UL
+#define NIG_REG_RX_PTP_EN 0x501900UL
+#define NIG_REG_TX_PTP_EN 0x501904UL
+#define NIG_REG_LLH_PTP_TO_HOST        0x501908UL
+#define NIG_REG_LLH_PTP_TO_MCP 0x50190cUL
+#define NIG_REG_PTP_SW_TXTSEN 0x501910UL
+#define NIG_REG_LLH_PTP_ETHERTYPE_1 0x501914UL
+#define NIG_REG_LLH_PTP_MAC_DA_2_LSB 0x501918UL
+#define NIG_REG_LLH_PTP_MAC_DA_2_MSB 0x50191cUL
+#define NIG_REG_LLH_PTP_PARAM_MASK 0x501920UL
+#define NIG_REG_LLH_PTP_RULE_MASK 0x501924UL
+#define NIG_REG_TX_LLH_PTP_PARAM_MASK 0x501928UL
+#define NIG_REG_TX_LLH_PTP_RULE_MASK 0x50192cUL
+#define NIG_REG_LLH_PTP_HOST_BUF_SEQID 0x501930UL
+#define NIG_REG_LLH_PTP_HOST_BUF_TS_LSB 0x501934UL
+#define NIG_REG_LLH_PTP_HOST_BUF_TS_MSB        0x501938UL
+#define NIG_REG_LLH_PTP_MCP_BUF_SEQID 0x50193cUL
+#define NIG_REG_LLH_PTP_MCP_BUF_TS_LSB 0x501940UL
+#define NIG_REG_LLH_PTP_MCP_BUF_TS_MSB 0x501944UL
+#define NIG_REG_TX_LLH_PTP_BUF_SEQID 0x501948UL
+#define NIG_REG_TX_LLH_PTP_BUF_TS_LSB 0x50194cUL
+#define NIG_REG_TX_LLH_PTP_BUF_TS_MSB 0x501950UL
+#define NIG_REG_RX_PTP_TS_MSB_ERR 0x501954UL
+#define NIG_REG_TX_PTP_TS_MSB_ERR 0x501958UL
+#define NIG_REG_TSGEN_SYNC_TIME_LSB 0x5088c0UL
+#define NIG_REG_TSGEN_SYNC_TIME_MSB 0x5088c4UL
+#define NIG_REG_TSGEN_RST_DRIFT_CNTR 0x5088d8UL
+#define NIG_REG_TSGEN_DRIFT_CNTR_CONF 0x5088dcUL
+#define NIG_REG_TS_OUTPUT_ENABLE_PDA 0x508870UL
+#define NIG_REG_TIMESYNC_GEN_REG_BB 0x500d00UL
+#define NIG_REG_TSGEN_FREE_CNT_VALUE_LSB 0x5088a8UL
+#define NIG_REG_TSGEN_FREE_CNT_VALUE_MSB 0x5088acUL
+#define NIG_REG_PTP_LATCH_OSTS_PKT_TIME 0x509040UL
+#define PSWRQ2_REG_WR_MBS0 0x240400UL
+
+#define PGLUE_B_REG_PGL_ADDR_E8_F0_K2 0x2aaf98UL
+#define PGLUE_B_REG_PGL_ADDR_EC_F0_K2 0x2aaf9cUL
+#define PGLUE_B_REG_PGL_ADDR_F0_F0_K2 0x2aafa0UL
+#define PGLUE_B_REG_PGL_ADDR_F4_F0_K2 0x2aafa4UL
+#define NIG_REG_TSGEN_FREECNT_UPDATE_K2 0x509008UL
+#define CNIG_REG_NIG_PORT0_CONF_K2 0x218200UL
+
+#define PRS_REG_SEARCH_GFT 0x1f11bcUL
+#define PRS_REG_CM_HDR_GFT 0x1f11c8UL
+#define PRS_REG_GFT_CAM 0x1f1100UL
+#define PRS_REG_GFT_PROFILE_MASK_RAM 0x1f1000UL
+#define PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT 0
+#define PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT 8
+#define PRS_REG_LOAD_L2_FILTER 0x1f0198UL
+
 #endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
new file mode 100644 (file)
index 0000000..b8c811f
--- /dev/null
@@ -0,0 +1,2991 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/io.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/tcp.h>
+#include <linux/qed/qed_roce_if.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_int.h"
+#include "qed_ll2.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+#include "qed_roce.h"
+#include "qed_ll2.h"
+
+static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid);
+
+void qed_roce_async_event(struct qed_hwfn *p_hwfn,
+                         u8 fw_event_code, union rdma_eqe_data *rdma_data)
+{
+       if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) {
+               u16 icid =
+                   (u16)le32_to_cpu(rdma_data->rdma_destroy_qp_data.cid);
+
+               /* icid release in this async event can occur only if the icid
+                * was offloaded to the FW. In case it wasn't offloaded this is
+                * handled in qed_roce_sp_destroy_qp.
+                */
+               qed_roce_free_real_icid(p_hwfn, icid);
+       } else {
+               struct qed_rdma_events *events = &p_hwfn->p_rdma_info->events;
+
+               events->affiliated_event(p_hwfn->p_rdma_info->events.context,
+                                        fw_event_code,
+                                        &rdma_data->async_handle);
+       }
+}
+
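+/* Bitmap helpers: each RDMA resource pool (PDs, DPIs, CQs, toggle bits,
+ * TIDs, CIDs) is tracked by a struct qed_bmap; IDs are handed out with
+ * find_first_zero_bit() and released with test_and_clear_bit().
+ */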
+static int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
+                              struct qed_bmap *bmap, u32 max_count)
+{
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count);
+
+       bmap->max_count = max_count;
+
+       bmap->bitmap = kzalloc(BITS_TO_LONGS(max_count) * sizeof(long),
+                              GFP_KERNEL);
+       if (!bmap->bitmap) {
+               DP_NOTICE(p_hwfn,
+                         "qed bmap alloc failed: cannot allocate memory (bitmap)\n");
+               return -ENOMEM;
+       }
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocated bitmap %p\n",
+                  bmap->bitmap);
+       return 0;
+}
+
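+/* Hand out the lowest free ID from a bitmap. Callers serialize with
+ * p_rdma_info->lock; the usual pattern (sketch, cf. qed_rdma_alloc_pd)
+ * is:
+ *
+ *     spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ *     rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map,
+ *                                 &returned_id);
+ *     spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+ */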
+static int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
+                                 struct qed_bmap *bmap, u32 *id_num)
+{
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "bmap = %p\n", bmap);
+
+       *id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count);
+
+       if (*id_num >= bmap->max_count) {
+               DP_NOTICE(p_hwfn, "no id available max_count=%d\n",
+                         bmap->max_count);
+               return -EINVAL;
+       }
+
+       __set_bit(*id_num, bmap->bitmap);
+
+       return 0;
+}
+
+static void qed_bmap_set_id(struct qed_hwfn *p_hwfn,
+                           struct qed_bmap *bmap, u32 id_num)
+{
+       if (id_num >= bmap->max_count)
+               return;
+
+       __set_bit(id_num, bmap->bitmap);
+}
+
+static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
+                               struct qed_bmap *bmap, u32 id_num)
+{
+       bool b_acquired;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "id_num = %08x", id_num);
+       if (id_num >= bmap->max_count)
+               return;
+
+       b_acquired = test_and_clear_bit(id_num, bmap->bitmap);
+       if (!b_acquired) {
+               DP_NOTICE(p_hwfn, "ID %d already released\n", id_num);
+               return;
+       }
+}
+
+static int qed_bmap_test_id(struct qed_hwfn *p_hwfn,
+                           struct qed_bmap *bmap, u32 id_num)
+{
+       if (id_num >= bmap->max_count)
+               return -1;
+
+       return test_bit(id_num, bmap->bitmap);
+}
+
+static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
+{
+       /* The first SB id for RoCE comes after all the L2 SBs */
+       return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
+}
+
+static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
+                         struct qed_ptt *p_ptt,
+                         struct qed_rdma_start_in_params *params)
+{
+       struct qed_rdma_info *p_rdma_info;
+       u32 num_cons, num_tasks;
+       int rc = -ENOMEM;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");
+
+       /* Allocate a struct with current pf rdma info */
+       p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL);
+       if (!p_rdma_info) {
+               DP_NOTICE(p_hwfn,
+                         "qed rdma alloc failed: cannot allocate memory (rdma info). rc = %d\n",
+                         rc);
+               return rc;
+       }
+
+       p_hwfn->p_rdma_info = p_rdma_info;
+       p_rdma_info->proto = PROTOCOLID_ROCE;
+
+       num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto,
+                                              NULL);
+
+       p_rdma_info->num_qps = num_cons / 2;
+
+       num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE);
+
+       /* Each MR uses a single task */
+       p_rdma_info->num_mrs = num_tasks;
+
+       /* Queue zone lines are shared between RoCE and L2 in such a way that
+        * they can be used by each without obstructing the other.
+        */
+       p_rdma_info->queue_zone_base = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
+       p_rdma_info->max_queue_zones = (u16)RESC_NUM(p_hwfn, QED_L2_QUEUE);
+
+       /* Allocate a struct with device params and fill it */
+       p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
+       if (!p_rdma_info->dev) {
+               DP_NOTICE(p_hwfn,
+                         "qed rdma alloc failed: cannot allocate memory (rdma info dev). rc = %d\n",
+                         rc);
+               goto free_rdma_info;
+       }
+
+       /* Allocate a struct with port params and fill it */
+       p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL);
+       if (!p_rdma_info->port) {
+               DP_NOTICE(p_hwfn,
+                         "qed rdma alloc failed: cannot allocate memory (rdma info port). rc = %d\n",
+                         rc);
+               goto free_rdma_dev;
+       }
+
+       /* Allocate bit map for pd's */
+       rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS);
+       if (rc) {
+               DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+                          "Failed to allocate pd_map, rc = %d\n",
+                          rc);
+               goto free_rdma_port;
+       }
+
+       /* Allocate DPI bitmap */
+       rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map,
+                                p_hwfn->dpi_count);
+       if (rc) {
+               DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+                          "Failed to allocate DPI bitmap, rc = %d\n", rc);
+               goto free_pd_map;
+       }
+
+       /* Allocate bitmap for cq's. The maximum number of CQs is bounded to
+        * twice the number of QPs.
+        */
+       rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map,
+                                p_rdma_info->num_qps * 2);
+       if (rc) {
+               DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+                          "Failed to allocate cq bitmap, rc = %d\n", rc);
+               goto free_dpi_map;
+       }
+
+       /* Allocate bitmap for toggle bit for cq icids
+        * We toggle the bit every time we create or resize cq for a given icid.
+        * The maximum number of CQs is bounded to twice the number of QPs.
+        */
+       rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits,
+                                p_rdma_info->num_qps * 2);
+       if (rc) {
+               DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+                          "Failed to allocate toogle bits, rc = %d\n", rc);
+               goto free_cq_map;
+       }
+
+       /* Allocate bitmap for itids */
+       rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map,
+                                p_rdma_info->num_mrs);
+       if (rc) {
+               DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+                          "Failed to allocate itids bitmaps, rc = %d\n", rc);
+               goto free_toggle_map;
+       }
+
+       /* Allocate bitmap for cids used for qps. */
+       rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons);
+       if (rc) {
+               DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+                          "Failed to allocate cid bitmap, rc = %d\n", rc);
+               goto free_tid_map;
+       }
+
+       /* Allocate bitmap for cids used for responders/requesters. */
+       rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->real_cid_map, num_cons);
+       if (rc) {
+               DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+                          "Failed to allocate real cid bitmap, rc = %d\n", rc);
+               goto free_cid_map;
+       }
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
+       return 0;
+
+free_cid_map:
+       kfree(p_rdma_info->cid_map.bitmap);
+free_tid_map:
+       kfree(p_rdma_info->tid_map.bitmap);
+free_toggle_map:
+       kfree(p_rdma_info->toggle_bits.bitmap);
+free_cq_map:
+       kfree(p_rdma_info->cq_map.bitmap);
+free_dpi_map:
+       kfree(p_rdma_info->dpi_map.bitmap);
+free_pd_map:
+       kfree(p_rdma_info->pd_map.bitmap);
+free_rdma_port:
+       kfree(p_rdma_info->port);
+free_rdma_dev:
+       kfree(p_rdma_info->dev);
+free_rdma_info:
+       kfree(p_rdma_info);
+
+       return rc;
+}
+
+static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
+{
+       struct qed_bmap *rcid_map = &p_hwfn->p_rdma_info->real_cid_map;
+       struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+       int wait_count = 0;
+
+       /* When destroying a RoCE QP, control is returned to the user after
+        * the synchronous part. The asynchronous part may take a little longer.
+        * We delay for a short while if an async destroy QP is still expected.
+        * Beyond the added delay we clear the bitmap anyway.
+        */
+       while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) {
+               msleep(100);
+               if (wait_count++ > 20) {
+                       DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n");
+                       break;
+               }
+       }
+
+       kfree(p_rdma_info->cid_map.bitmap);
+       kfree(p_rdma_info->tid_map.bitmap);
+       kfree(p_rdma_info->toggle_bits.bitmap);
+       kfree(p_rdma_info->cq_map.bitmap);
+       kfree(p_rdma_info->dpi_map.bitmap);
+       kfree(p_rdma_info->pd_map.bitmap);
+
+       kfree(p_rdma_info->port);
+       kfree(p_rdma_info->dev);
+
+       kfree(p_rdma_info);
+}
+
+static void qed_rdma_free(struct qed_hwfn *p_hwfn)
+{
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");
+
+       qed_rdma_resc_free(p_hwfn);
+}
+
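+/* Build a 64-bit GUID from the port MAC using the standard EUI-64
+ * expansion: flip the universal/local bit of the first octet and splice
+ * 0xff, 0xfe between the upper and lower MAC halves.
+ */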
+static void qed_rdma_get_guid(struct qed_hwfn *p_hwfn, u8 *guid)
+{
+       guid[0] = p_hwfn->hw_info.hw_mac_addr[0] ^ 2;
+       guid[1] = p_hwfn->hw_info.hw_mac_addr[1];
+       guid[2] = p_hwfn->hw_info.hw_mac_addr[2];
+       guid[3] = 0xff;
+       guid[4] = 0xfe;
+       guid[5] = p_hwfn->hw_info.hw_mac_addr[3];
+       guid[6] = p_hwfn->hw_info.hw_mac_addr[4];
+       guid[7] = p_hwfn->hw_info.hw_mac_addr[5];
+}
+
+static void qed_rdma_init_events(struct qed_hwfn *p_hwfn,
+                                struct qed_rdma_start_in_params *params)
+{
+       struct qed_rdma_events *events;
+
+       events = &p_hwfn->p_rdma_info->events;
+
+       events->unaffiliated_event = params->events->unaffiliated_event;
+       events->affiliated_event = params->events->affiliated_event;
+       events->context = params->events->context;
+}
+
+static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
+                                 struct qed_rdma_start_in_params *params)
+{
+       struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
+       struct qed_dev *cdev = p_hwfn->cdev;
+       u32 pci_status_control;
+       u32 num_qps;
+
+       /* Vendor specific information */
+       dev->vendor_id = cdev->vendor_id;
+       dev->vendor_part_id = cdev->device_id;
+       dev->hw_ver = 0;
+       dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
+                     (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);
+
+       qed_rdma_get_guid(p_hwfn, (u8 *)&dev->sys_image_guid);
+       dev->node_guid = dev->sys_image_guid;
+
+       dev->max_sge = min_t(u32, RDMA_MAX_SGE_PER_SQ_WQE,
+                            RDMA_MAX_SGE_PER_RQ_WQE);
+
+       if (cdev->rdma_max_sge)
+               dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge);
+
+       dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE;
+
+       dev->max_inline = (cdev->rdma_max_inline) ?
+                         min_t(u32, cdev->rdma_max_inline, dev->max_inline) :
+                         dev->max_inline;
+
+       dev->max_wqe = QED_RDMA_MAX_WQE;
+       dev->max_cnq = (u8)FEAT_NUM(p_hwfn, QED_RDMA_CNQ);
+
+       /* The number of QPs may be higher than QED_ROCE_MAX_QPS, because
+        * it is up-aligned to 16 and then to ILT page size within qed cxt.
+        * This is OK in terms of ILT but we don't want to configure the FW
+        * above its abilities.
+        */
+       num_qps = ROCE_MAX_QPS;
+       num_qps = min_t(u64, num_qps, p_hwfn->p_rdma_info->num_qps);
+       dev->max_qp = num_qps;
+
+       /* CQs use the same icids that QPs use, hence they are limited by the
+        * number of icids. There are two icids per QP.
+        */
+       dev->max_cq = num_qps * 2;
+
+       /* The number of mrs is smaller by 1 since the first is reserved */
+       dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1;
+       dev->max_mr_size = QED_RDMA_MAX_MR_SIZE;
+
+       /* The maximum CQE capacity per CQ supported.
+        * The max number of CQEs assumes a two-layer PBL, where
+        * 8 is the pointer size in bytes and
+        * 32 is the size of a CQ element in bytes.
+        */
+       if (params->cq_mode == QED_RDMA_CQ_MODE_32_BITS)
+               dev->max_cqe = QED_RDMA_MAX_CQE_32_BIT;
+       else
+               dev->max_cqe = QED_RDMA_MAX_CQE_16_BIT;
+
+       dev->max_mw = 0;
+       dev->max_fmr = QED_RDMA_MAX_FMR;
+       dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8);
+       dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE;
+       dev->max_pkey = QED_RDMA_MAX_P_KEY;
+
+       dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
+                                         (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2);
+       dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
+                                        RDMA_REQ_RD_ATOMIC_ELM_SIZE;
+       dev->max_dev_resp_rd_atomic_resc = dev->max_qp_resp_rd_atomic_resc *
+                                          p_hwfn->p_rdma_info->num_qps;
+       dev->page_size_caps = QED_RDMA_PAGE_SIZE_CAPS;
+       dev->dev_ack_delay = QED_RDMA_ACK_DELAY;
+       dev->max_pd = RDMA_MAX_PDS;
+       dev->max_ah = p_hwfn->p_rdma_info->num_qps;
+       dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, QED_RDMA_STATS_QUEUE);
+
+       /* Set capabilities */
+       dev->dev_caps = 0;
+       SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RNR_NAK, 1);
+       SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1);
+       SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1);
+       SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RESIZE_CQ, 1);
+       SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1);
+       SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1);
+       SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ZBVA, 1);
+       SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1);
+
+       /* Check atomic operations support in PCI configuration space. */
+       pci_read_config_dword(cdev->pdev,
+                             cdev->pdev->pcie_cap + PCI_EXP_DEVCTL2,
+                             &pci_status_control);
+
+       if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN)
+               SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1);
+}
+
+static void qed_rdma_init_port(struct qed_hwfn *p_hwfn)
+{
+       struct qed_rdma_port *port = p_hwfn->p_rdma_info->port;
+       struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
+
+       port->port_state = p_hwfn->mcp_info->link_output.link_up ?
+                          QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;
+
+       port->max_msg_size = min_t(u64,
+                                  (dev->max_mr_mw_fmr_size *
+                                   p_hwfn->cdev->rdma_max_sge),
+                                  BIT(31));
+
+       port->pkey_bad_counter = 0;
+}
+
+static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+       u32 ll2_ethertype_en;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n");
+       p_hwfn->b_rdma_enabled_in_prs = false;
+
+       qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
+
+       p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE;
+
+       /* Writing to the rdma_prs_search_reg saved above is delayed until
+        * the first cid is allocated; see qed_cxt_dynamic_ilt_alloc() for
+        * more details.
+        */
+       ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
+       qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
+              (ll2_ethertype_en | 0x01));
+
+       if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) {
+               DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n");
+               return -EINVAL;
+       }
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n");
+       return 0;
+}
+
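+/* RDMA bring-up (sketch): qed_rdma_setup() fills device/port info and
+ * event callbacks, reserves the lkey, programs the PRS registers via
+ * qed_rdma_init_hw(), and finally posts the FUNC_INIT ramrod built below
+ * with one rdma_cnq_params entry per requested CNQ.
+ */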
+static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
+                            struct qed_rdma_start_in_params *params,
+                            struct qed_ptt *p_ptt)
+{
+       struct rdma_init_func_ramrod_data *p_ramrod;
+       struct qed_rdma_cnq_params *p_cnq_pbl_list;
+       struct rdma_init_func_hdr *p_params_header;
+       struct rdma_cnq_params *p_cnq_params;
+       struct qed_sp_init_data init_data;
+       struct qed_spq_entry *p_ent;
+       u32 cnq_id, sb_id;
+       int rc;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n");
+
+       /* Save the number of cnqs for the function close ramrod */
+       p_hwfn->p_rdma_info->num_cnqs = params->desired_cnq;
+
+       /* Get SPQ entry */
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT,
+                                p_hwfn->p_rdma_info->proto, &init_data);
+       if (rc)
+               return rc;
+
+       p_ramrod = &p_ent->ramrod.roce_init_func.rdma;
+
+       p_params_header = &p_ramrod->params_header;
+       p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn,
+                                                          QED_RDMA_CNQ_RAM);
+       p_params_header->num_cnqs = params->desired_cnq;
+
+       if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS)
+               p_params_header->cq_ring_mode = 1;
+       else
+               p_params_header->cq_ring_mode = 0;
+
+       for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) {
+               sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id);
+               p_cnq_params = &p_ramrod->cnq_params[cnq_id];
+               p_cnq_pbl_list = &params->cnq_pbl_list[cnq_id];
+               p_cnq_params->sb_num =
+                       cpu_to_le16(p_hwfn->sbs_info[sb_id]->igu_sb_id);
+
+               p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi;
+               p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages;
+
+               DMA_REGPAIR_LE(p_cnq_params->pbl_base_addr,
+                              p_cnq_pbl_list->pbl_ptr);
+
+               /* we assume here that cnq_id and qz_offset are the same */
+               p_cnq_params->queue_zone_num =
+                       cpu_to_le16(p_hwfn->p_rdma_info->queue_zone_base +
+                                   cnq_id);
+       }
+
+       return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
+{
+       struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+       int rc;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");
+
+       spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+       rc = qed_rdma_bmap_alloc_id(p_hwfn,
+                                   &p_hwfn->p_rdma_info->tid_map, itid);
+       spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+       if (rc)
+               goto out;
+
+       rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
+out:
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
+       return rc;
+}
+
+static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
+{
+       struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
+
+       /* The first DPI is reserved for the Kernel */
+       __set_bit(0, p_hwfn->p_rdma_info->dpi_map.bitmap);
+
+       /* Tid 0 will be used as the key for "reserved MR".
+        * The driver should allocate memory for it so it can be loaded but no
+        * ramrod should be posted for it.
+        */
+       qed_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey);
+       if (dev->reserved_lkey != RDMA_RESERVED_LKEY) {
+               DP_NOTICE(p_hwfn,
+                         "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
+                         struct qed_ptt *p_ptt,
+                         struct qed_rdma_start_in_params *params)
+{
+       int rc;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n");
+
+       spin_lock_init(&p_hwfn->p_rdma_info->lock);
+
+       qed_rdma_init_devinfo(p_hwfn, params);
+       qed_rdma_init_port(p_hwfn);
+       qed_rdma_init_events(p_hwfn, params);
+
+       rc = qed_rdma_reserve_lkey(p_hwfn);
+       if (rc)
+               return rc;
+
+       rc = qed_rdma_init_hw(p_hwfn, p_ptt);
+       if (rc)
+               return rc;
+
+       return qed_rdma_start_fw(p_hwfn, params, p_ptt);
+}
+
+static int qed_rdma_stop(void *rdma_cxt)
+{
+       struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+       struct rdma_close_func_ramrod_data *p_ramrod;
+       struct qed_sp_init_data init_data;
+       struct qed_spq_entry *p_ent;
+       struct qed_ptt *p_ptt;
+       u32 ll2_ethertype_en;
+       int rc = -EBUSY;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop\n");
+
+       p_ptt = qed_ptt_acquire(p_hwfn);
+       if (!p_ptt) {
+               DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to acquire PTT\n");
+               return rc;
+       }
+
+       /* Disable RoCE search */
+       qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
+       p_hwfn->b_rdma_enabled_in_prs = false;
+
+       qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
+
+       ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
+
+       qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
+              (ll2_ethertype_en & 0xFFFE));
+
+       qed_ptt_release(p_hwfn, p_ptt);
+
+       /* Get SPQ entry */
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+       /* Stop RoCE */
+       rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE,
+                                p_hwfn->p_rdma_info->proto, &init_data);
+       if (rc)
+               goto out;
+
+       p_ramrod = &p_ent->ramrod.rdma_close_func;
+
+       p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs;
+       p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM);
+
+       rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+out:
+       qed_rdma_free(p_hwfn);
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop done, rc = %d\n", rc);
+       return rc;
+}
+
+static int qed_rdma_add_user(void *rdma_cxt,
+                            struct qed_rdma_add_user_out_params *out_params)
+{
+       struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+       u32 dpi_start_offset;
+       u32 returned_id = 0;
+       int rc;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding User\n");
+
+       /* Allocate DPI */
+       spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+       rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map,
+                                   &returned_id);
+       spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+       out_params->dpi = (u16)returned_id;
+
+       /* Calculate the corresponding DPI address */
+       dpi_start_offset = p_hwfn->dpi_start_offset;
+
+       out_params->dpi_addr = (u64)((u8 __iomem *)p_hwfn->doorbells +
+                                    dpi_start_offset +
+                                    ((out_params->dpi) * p_hwfn->dpi_size));
+
+       out_params->dpi_phys_addr = p_hwfn->cdev->db_phys_addr +
+                                   dpi_start_offset +
+                                   ((out_params->dpi) * p_hwfn->dpi_size);
+
+       out_params->dpi_size = p_hwfn->dpi_size;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc);
+       return rc;
+}
+
+static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
+{
+       struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+       struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA Query port\n");
+
+       /* Link may have changed */
+       p_port->port_state = p_hwfn->mcp_info->link_output.link_up ?
+                            QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;
+
+       p_port->link_speed = p_hwfn->mcp_info->link_output.speed;
+
+       return p_port;
+}
+
+static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
+{
+       struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query device\n");
+
+       /* Return struct with device parameters */
+       return p_hwfn->p_rdma_info->dev;
+}
+
+static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
+{
+       struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
+
+       spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+       qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
+       spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
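+/* Update a CNQ producer: the 16-bit value is written into the USTORM
+ * queue-zone slot for this CNQ, followed by a write barrier so the
+ * updates reach the device in order.
+ */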
+static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
+{
+       struct qed_hwfn *p_hwfn;
+       u16 qz_num;
+       u32 addr;
+
+       p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+       if (qz_offset > p_hwfn->p_rdma_info->max_queue_zones) {
+               DP_NOTICE(p_hwfn,
+                         "queue zone offset %d is too large (max is %d)\n",
+                         qz_offset, p_hwfn->p_rdma_info->max_queue_zones);
+               return;
+       }
+
+       qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
+       addr = GTT_BAR0_MAP_REG_USDM_RAM +
+              USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);
+
+       REG_WR16(p_hwfn, addr, prod);
+
+       /* keep prod updates ordered */
+       wmb();
+}
+
+static int qed_fill_rdma_dev_info(struct qed_dev *cdev,
+                                 struct qed_dev_rdma_info *info)
+{
+       memset(info, 0, sizeof(*info));
+
+       info->rdma_type = QED_RDMA_TYPE_ROCE;
+
+       qed_fill_dev_info(cdev, &info->common);
+
+       return 0;
+}
+
+static int qed_rdma_get_sb_start(struct qed_dev *cdev)
+{
+       int feat_num;
+
+       if (cdev->num_hwfns > 1)
+               feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE);
+       else
+               feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE) *
+                          cdev->num_hwfns;
+
+       return feat_num;
+}
+
+static int qed_rdma_get_min_cnq_msix(struct qed_dev *cdev)
+{
+       int n_cnq = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_RDMA_CNQ);
+       int n_msix = cdev->int_params.rdma_msix_cnt;
+
+       return min_t(int, n_cnq, n_msix);
+}
+
+static int qed_rdma_set_int(struct qed_dev *cdev, u16 cnt)
+{
+       int limit = 0;
+
+       /* Mark the fastpath as free/used */
+       cdev->int_params.fp_initialized = cnt ? true : false;
+
+       if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) {
+               DP_ERR(cdev,
+                      "qed roce supports only MSI-X interrupts (detected %d).\n",
+                      cdev->int_params.out.int_mode);
+               return -EINVAL;
+       } else if (cdev->int_params.fp_msix_cnt) {
+               limit = cdev->int_params.rdma_msix_cnt;
+       }
+
+       if (!limit)
+               return -ENOMEM;
+
+       return min_t(int, cnt, limit);
+}
+
+static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
+{
+       memset(info, 0, sizeof(*info));
+
+       if (!cdev->int_params.fp_initialized) {
+               DP_INFO(cdev,
+                       "Protocol driver requested interrupt information, but its support is not yet configured\n");
+               return -EINVAL;
+       }
+
+       if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+               int msix_base = cdev->int_params.rdma_msix_base;
+
+               info->msix_cnt = cdev->int_params.rdma_msix_cnt;
+               info->msix = &cdev->int_params.msix_table[msix_base];
+
+               DP_VERBOSE(cdev, QED_MSG_RDMA, "msix_cnt = %d msix_base=%d\n",
+                          info->msix_cnt, msix_base);
+       }
+
+       return 0;
+}
+
+static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
+{
+       struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+       u32 returned_id;
+       int rc;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD\n");
+
+       /* Allocates an unused protection domain */
+       spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+       rc = qed_rdma_bmap_alloc_id(p_hwfn,
+                                   &p_hwfn->p_rdma_info->pd_map, &returned_id);
+       spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+       *pd = (u16)returned_id;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD - done, rc = %d\n", rc);
+       return rc;
+}
+
+static void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
+{
+       struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "pd = %08x\n", pd);
+
+       /* Returns a previously allocated protection domain for reuse */
+       spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+       qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, pd);
+       spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
+static enum qed_rdma_toggle_bit
+qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
+{
+       struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
+       enum qed_rdma_toggle_bit toggle_bit;
+       u32 bmap_id;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", icid);
+
+       /* The function toggles the bit related to a given icid and
+        * returns the new toggle bit's value.
+        */
+       bmap_id = icid - qed_cxt_get_proto_cid_start(p_hwfn, p_info->proto);
+
+       spin_lock_bh(&p_info->lock);
+       toggle_bit = !test_and_change_bit(bmap_id,
+                                         p_info->toggle_bits.bitmap);
+       spin_unlock_bh(&p_info->lock);
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QED_RDMA_TOGGLE_BIT_= %d\n",
+                  toggle_bit);
+
+       return toggle_bit;
+}
+
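+/* CQ creation (sketch): allocate an icid from cq_map, make sure an ILT
+ * page backs it, then post a CREATE_CQ ramrod carrying the PBL address,
+ * the CNQ binding and the per-icid toggle bit.
+ */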
+static int qed_rdma_create_cq(void *rdma_cxt,
+                             struct qed_rdma_create_cq_in_params *params,
+                             u16 *icid)
+{
+       struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+       struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
+       struct rdma_create_cq_ramrod_data *p_ramrod;
+       enum qed_rdma_toggle_bit toggle_bit;
+       struct qed_sp_init_data init_data;
+       struct qed_spq_entry *p_ent;
+       u32 returned_id, start_cid;
+       int rc;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "cq_handle = %08x%08x\n",
+                  params->cq_handle_hi, params->cq_handle_lo);
+
+       /* Allocate icid */
+       spin_lock_bh(&p_info->lock);
+       rc = qed_rdma_bmap_alloc_id(p_hwfn,
+                                   &p_info->cq_map, &returned_id);
+       spin_unlock_bh(&p_info->lock);
+
+       if (rc) {
+               DP_NOTICE(p_hwfn, "Can't create CQ, rc = %d\n", rc);
+               return rc;
+       }
+
+       start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
+                                               p_info->proto);
+       *icid = returned_id + start_cid;
+
+       /* Check if icid requires a page allocation */
+       rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *icid);
+       if (rc)
+               goto err;
+
+       /* Get SPQ entry */
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.cid = *icid;
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+       /* Send create CQ ramrod */
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                RDMA_RAMROD_CREATE_CQ,
+                                p_info->proto, &init_data);
+       if (rc)
+               goto err;
+
+       p_ramrod = &p_ent->ramrod.rdma_create_cq;
+
+       p_ramrod->cq_handle.hi = cpu_to_le32(params->cq_handle_hi);
+       p_ramrod->cq_handle.lo = cpu_to_le32(params->cq_handle_lo);
+       p_ramrod->dpi = cpu_to_le16(params->dpi);
+       p_ramrod->is_two_level_pbl = params->pbl_two_level;
+       p_ramrod->max_cqes = cpu_to_le32(params->cq_size);
+       DMA_REGPAIR_LE(p_ramrod->pbl_addr, params->pbl_ptr);
+       p_ramrod->pbl_num_pages = cpu_to_le16(params->pbl_num_pages);
+       p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM) +
+                          params->cnq_id;
+       p_ramrod->int_timeout = params->int_timeout;
+
+       /* toggle the bit for every resize or create cq for a given icid */
+       toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
+
+       p_ramrod->toggle_bit = toggle_bit;
+
+       rc = qed_spq_post(p_hwfn, p_ent, NULL);
+       if (rc) {
+               /* restore toggle bit */
+               qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
+               goto err;
+       }
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Created CQ, rc = %d\n", rc);
+       return rc;
+
+err:
+       /* release allocated icid */
+       spin_lock_bh(&p_info->lock);
+       qed_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id);
+       spin_unlock_bh(&p_info->lock);
+       DP_NOTICE(p_hwfn, "Create CQ failed, rc = %d\n", rc);
+
+       return rc;
+}
+
+static int
+qed_rdma_destroy_cq(void *rdma_cxt,
+                   struct qed_rdma_destroy_cq_in_params *in_params,
+                   struct qed_rdma_destroy_cq_out_params *out_params)
+{
+       struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+       struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
+       struct rdma_destroy_cq_output_params *p_ramrod_res;
+       struct rdma_destroy_cq_ramrod_data *p_ramrod;
+       struct qed_sp_init_data init_data;
+       struct qed_spq_entry *p_ent;
+       dma_addr_t ramrod_res_phys;
+       int rc = -ENOMEM;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);
+
+       p_ramrod_res =
+           dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+                              sizeof(struct rdma_destroy_cq_output_params),
+                              &ramrod_res_phys, GFP_KERNEL);
+       if (!p_ramrod_res) {
+               DP_NOTICE(p_hwfn,
+                         "qed destroy cq failed: cannot allocate memory (ramrod)\n");
+               return rc;
+       }
+
+       /* Get SPQ entry */
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.cid = in_params->icid;
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+       /* Send destroy CQ ramrod */
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                RDMA_RAMROD_DESTROY_CQ,
+                                p_info->proto, &init_data);
+       if (rc)
+               goto err;
+
+       p_ramrod = &p_ent->ramrod.rdma_destroy_cq;
+       DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
+
+       rc = qed_spq_post(p_hwfn, p_ent, NULL);
+       if (rc)
+               goto err;
+
+       out_params->num_cq_notif = le16_to_cpu(p_ramrod_res->cnq_num);
+
+       dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                         sizeof(struct rdma_destroy_cq_output_params),
+                         p_ramrod_res, ramrod_res_phys);
+
+       /* Free icid */
+       spin_lock_bh(&p_info->lock);
+       qed_bmap_release_id(p_hwfn, &p_info->cq_map,
+                           in_params->icid -
+                           qed_cxt_get_proto_cid_start(p_hwfn,
+                                                       p_info->proto));
+       spin_unlock_bh(&p_info->lock);
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroyed CQ, rc = %d\n", rc);
+       return rc;
+
+err:   dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                         sizeof(struct rdma_destroy_cq_output_params),
+                         p_ramrod_res, ramrod_res_phys);
+
+       return rc;
+}
+
+static void qed_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_qed_mac)
+{
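+       /* Pack the 6-byte MAC into the three 16-bit words the FW expects,
+        * each word holding a byte pair in network order, stored as LE
+        */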
+       p_fw_mac[0] = cpu_to_le16((p_qed_mac[0] << 8) + p_qed_mac[1]);
+       p_fw_mac[1] = cpu_to_le16((p_qed_mac[2] << 8) + p_qed_mac[3]);
+       p_fw_mac[2] = cpu_to_le16((p_qed_mac[4] << 8) + p_qed_mac[5]);
+}
+
+static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
+                              __le32 *dst_gid)
+{
+       u32 i;
+
+       if (qp->roce_mode == ROCE_V2_IPV4) {
+               /* The IPv4 addresses shall be aligned to the highest word.
+                * The lower words must be zero.
+                */
+               memset(src_gid, 0, sizeof(union qed_gid));
+               memset(dst_gid, 0, sizeof(union qed_gid));
+               src_gid[3] = cpu_to_le32(qp->sgid.ipv4_addr);
+               dst_gid[3] = cpu_to_le32(qp->dgid.ipv4_addr);
+       } else {
+               /* GIDs and IPv6 addresses coincide in location and size */
+               for (i = 0; i < ARRAY_SIZE(qp->sgid.dwords); i++) {
+                       src_gid[i] = cpu_to_le32(qp->sgid.dwords[i]);
+                       dst_gid[i] = cpu_to_le32(qp->dgid.dwords[i]);
+               }
+       }
+}
+
+static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
+{
+       enum roce_flavor flavor;
+
+       switch (roce_mode) {
+       case ROCE_V1:
+               flavor = PLAIN_ROCE;
+               break;
+       case ROCE_V2_IPV4:
+               flavor = RROCE_IPV4;
+               break;
+       case ROCE_V2_IPV6:
+               flavor = RROCE_IPV6;
+               break;
+       default:
+               flavor = MAX_ROCE_FLAVOR;
+               break;
+       }
+       return flavor;
+}
+
+void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
+{
+       spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+       qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
+       qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid + 1);
+       spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
+static int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
+{
+       struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+       u32 responder_icid;
+       u32 requester_icid;
+       int rc;
+
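+       /* A RoCE QP occupies a pair of consecutive icids: the responder
+        * uses the returned cid and the requester uses cid + 1
+        */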
+       spin_lock_bh(&p_rdma_info->lock);
+       rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
+                                   &responder_icid);
+       if (rc) {
+               spin_unlock_bh(&p_rdma_info->lock);
+               return rc;
+       }
+
+       rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
+                                   &requester_icid);
+
+       spin_unlock_bh(&p_rdma_info->lock);
+       if (rc)
+               goto err;
+
+       /* the two icids should be adjacent */
+       if ((requester_icid - responder_icid) != 1) {
+               DP_NOTICE(p_hwfn, "Failed to allocate two adjacent qp's\n");
+               rc = -EINVAL;
+               goto err;
+       }
+
+       responder_icid += qed_cxt_get_proto_cid_start(p_hwfn,
+                                                     p_rdma_info->proto);
+       requester_icid += qed_cxt_get_proto_cid_start(p_hwfn,
+                                                     p_rdma_info->proto);
+
+       /* If these icids require a new ILT line allocate DMA-able context for
+        * an ILT page
+        */
+       rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, responder_icid);
+       if (rc)
+               goto err;
+
+       rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, requester_icid);
+       if (rc)
+               goto err;
+
+       *cid = (u16)responder_icid;
+       return rc;
+
+err:
+       spin_lock_bh(&p_rdma_info->lock);
+       qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, responder_icid);
+       qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, requester_icid);
+
+       spin_unlock_bh(&p_rdma_info->lock);
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+                  "Allocate CID - failed, rc = %d\n", rc);
+       return rc;
+}
+
+static void qed_roce_set_real_cid(struct qed_hwfn *p_hwfn, u32 cid)
+{
+       spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+       qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, cid);
+       spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
+static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
+                                       struct qed_rdma_qp *qp)
+{
+       struct roce_create_qp_resp_ramrod_data *p_ramrod;
+       struct qed_sp_init_data init_data;
+       enum roce_flavor roce_flavor;
+       struct qed_spq_entry *p_ent;
+       u16 regular_latency_queue;
+       enum protocol_type proto;
+       int rc;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+       /* Allocate DMA-able memory for IRQ */
+       qp->irq_num_pages = 1;
+       qp->irq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+                                    RDMA_RING_PAGE_SIZE,
+                                    &qp->irq_phys_addr, GFP_KERNEL);
+       if (!qp->irq) {
+               rc = -ENOMEM;
+               DP_NOTICE(p_hwfn,
+                         "qed create responder failed: cannot allocate memory (irq). rc = %d\n",
+                         rc);
+               return rc;
+       }
+
+       /* Get SPQ entry */
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.cid = qp->icid;
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_CREATE_QP,
+                                PROTOCOLID_ROCE, &init_data);
+       if (rc)
+               goto err;
+
+       p_ramrod = &p_ent->ramrod.roce_create_qp_resp;
+
+       p_ramrod->flags = 0;
+
+       roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
+       SET_FIELD(p_ramrod->flags,
+                 ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);
+
+       SET_FIELD(p_ramrod->flags,
+                 ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
+                 qp->incoming_rdma_read_en);
+
+       SET_FIELD(p_ramrod->flags,
+                 ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
+                 qp->incoming_rdma_write_en);
+
+       SET_FIELD(p_ramrod->flags,
+                 ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN,
+                 qp->incoming_atomic_en);
+
+       SET_FIELD(p_ramrod->flags,
+                 ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
+                 qp->e2e_flow_control_en);
+
+       SET_FIELD(p_ramrod->flags,
+                 ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG, qp->use_srq);
+
+       SET_FIELD(p_ramrod->flags,
+                 ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN,
+                 qp->fmr_and_reserved_lkey);
+
+       SET_FIELD(p_ramrod->flags,
+                 ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
+                 qp->min_rnr_nak_timer);
+
+       p_ramrod->max_ird = qp->max_rd_atomic_resp;
+       p_ramrod->traffic_class = qp->traffic_class_tos;
+       p_ramrod->hop_limit = qp->hop_limit_ttl;
+       p_ramrod->irq_num_pages = qp->irq_num_pages;
+       p_ramrod->p_key = cpu_to_le16(qp->pkey);
+       p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
+       p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
+       p_ramrod->mtu = cpu_to_le16(qp->mtu);
+       p_ramrod->initial_psn = cpu_to_le32(qp->rq_psn);
+       p_ramrod->pd = cpu_to_le16(qp->pd);
+       p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages);
+       DMA_REGPAIR_LE(p_ramrod->rq_pbl_addr, qp->rq_pbl_ptr);
+       DMA_REGPAIR_LE(p_ramrod->irq_pbl_addr, qp->irq_phys_addr);
+       qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
+       p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
+       p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
+       p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
+       p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
+       p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
+                                      qp->rq_cq_id);
+
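+       /* Both latency rings currently map to the same offload PQ */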
+       regular_latency_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+
+       p_ramrod->regular_latency_phy_queue =
+           cpu_to_le16(regular_latency_queue);
+       p_ramrod->low_latency_phy_queue =
+           cpu_to_le16(regular_latency_queue);
+
+       p_ramrod->dpi = cpu_to_le16(qp->dpi);
+
+       qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
+       qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);
+
+       p_ramrod->udp_src_port = qp->udp_src_port;
+       p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
+       p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
+       p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);
+
+       p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
+                                    qp->stats_queue;
+
+       rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+                  "rc = %d regular physical queue = 0x%x\n", rc,
+                  regular_latency_queue);
+
+       if (rc)
+               goto err;
+
+       qp->resp_offloaded = true;
+       qp->cq_prod = 0;
+
+       proto = p_hwfn->p_rdma_info->proto;
+       qed_roce_set_real_cid(p_hwfn, qp->icid -
+                             qed_cxt_get_proto_cid_start(p_hwfn, proto));
+
+       return rc;
+
+err:
+       DP_NOTICE(p_hwfn, "create responder - failed, rc = %d\n", rc);
+       dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                         qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
+                         qp->irq, qp->irq_phys_addr);
+
+       return rc;
+}
+
+static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
+                                       struct qed_rdma_qp *qp)
+{
+       struct roce_create_qp_req_ramrod_data *p_ramrod;
+       struct qed_sp_init_data init_data;
+       enum roce_flavor roce_flavor;
+       struct qed_spq_entry *p_ent;
+       u16 regular_latency_queue;
+       enum protocol_type proto;
+       int rc;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+       /* Allocate DMA-able memory for ORQ */
+       qp->orq_num_pages = 1;
+       qp->orq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+                                    RDMA_RING_PAGE_SIZE,
+                                    &qp->orq_phys_addr, GFP_KERNEL);
+       if (!qp->orq) {
+               rc = -ENOMEM;
+               DP_NOTICE(p_hwfn,
+                         "qed create requester failed: cannot allocate memory (orq). rc = %d\n",
+                         rc);
+               return rc;
+       }
+
+       /* Get SPQ entry */
+       memset(&init_data, 0, sizeof(init_data));
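+       /* The requester uses the icid adjacent to the responder's */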
+       init_data.cid = qp->icid + 1;
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                ROCE_RAMROD_CREATE_QP,
+                                PROTOCOLID_ROCE, &init_data);
+       if (rc)
+               goto err;
+
+       p_ramrod = &p_ent->ramrod.roce_create_qp_req;
+
+       p_ramrod->flags = 0;
+
+       roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
+       SET_FIELD(p_ramrod->flags,
+                 ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);
+
+       SET_FIELD(p_ramrod->flags,
+                 ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN,
+                 qp->fmr_and_reserved_lkey);
+
+       SET_FIELD(p_ramrod->flags,
+                 ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);
+
+       SET_FIELD(p_ramrod->flags,
+                 ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);
+
+       SET_FIELD(p_ramrod->flags,
+                 ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
+                 qp->rnr_retry_cnt);
+
+       p_ramrod->max_ord = qp->max_rd_atomic_req;
+       p_ramrod->traffic_class = qp->traffic_class_tos;
+       p_ramrod->hop_limit = qp->hop_limit_ttl;
+       p_ramrod->orq_num_pages = qp->orq_num_pages;
+       p_ramrod->p_key = cpu_to_le16(qp->pkey);
+       p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
+       p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
+       p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
+       p_ramrod->mtu = cpu_to_le16(qp->mtu);
+       p_ramrod->initial_psn = cpu_to_le32(qp->sq_psn);
+       p_ramrod->pd = cpu_to_le16(qp->pd);
+       p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages);
+       DMA_REGPAIR_LE(p_ramrod->sq_pbl_addr, qp->sq_pbl_ptr);
+       DMA_REGPAIR_LE(p_ramrod->orq_pbl_addr, qp->orq_phys_addr);
+       qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
+       p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
+       p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
+       p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
+       p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
+       p_ramrod->cq_cid =
+           cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
+
+       regular_latency_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+
+       p_ramrod->regular_latency_phy_queue =
+           cpu_to_le16(regular_latency_queue);
+       p_ramrod->low_latency_phy_queue =
+           cpu_to_le16(regular_latency_queue);
+
+       p_ramrod->dpi = cpu_to_le16(qp->dpi);
+
+       qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
+       qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);
+
+       p_ramrod->udp_src_port = qp->udp_src_port;
+       p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
+       p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
+                                    qp->stats_queue;
+
+       rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+
+       if (rc)
+               goto err;
+
+       qp->req_offloaded = true;
+       proto = p_hwfn->p_rdma_info->proto;
+       qed_roce_set_real_cid(p_hwfn,
+                             qp->icid + 1 -
+                             qed_cxt_get_proto_cid_start(p_hwfn, proto));
+
+       return rc;
+
+err:
+       DP_NOTICE(p_hwfn, "Create requested - failed, rc = %d\n", rc);
+       dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                         qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
+                         qp->orq, qp->orq_phys_addr);
+       return rc;
+}
+
+static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn,
+                                       struct qed_rdma_qp *qp,
+                                       bool move_to_err, u32 modify_flags)
+{
+       struct roce_modify_qp_resp_ramrod_data *p_ramrod;
+       struct qed_sp_init_data init_data;
+       struct qed_spq_entry *p_ent;
+       int rc;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+       if (move_to_err && !qp->resp_offloaded)
+               return 0;
+
+       /* Get SPQ entry */
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.cid = qp->icid;
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                ROCE_EVENT_MODIFY_QP,
+                                PROTOCOLID_ROCE, &init_data);
+       if (rc) {
+               DP_NOTICE(p_hwfn, "rc = %d\n", rc);
+               return rc;
+       }
+
+       p_ramrod = &p_ent->ramrod.roce_modify_qp_resp;
+
+       p_ramrod->flags = 0;
+
+       SET_FIELD(p_ramrod->flags,
+                 ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);
+
+       SET_FIELD(p_ramrod->flags,
+                 ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
+                 qp->incoming_rdma_read_en);
+
+       SET_FIELD(p_ramrod->flags,
+                 ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
+                 qp->incoming_rdma_write_en);
+
+       SET_FIELD(p_ramrod->flags,
+                 ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN,
+                 qp->incoming_atomic_en);
+
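+       /* Note: the CREATE ramrod's E2E field definition is reused here;
+        * this assumes the bit layout matches in the modify ramrod
+        */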
+       SET_FIELD(p_ramrod->flags,
+                 ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
+                 qp->e2e_flow_control_en);
+
+       SET_FIELD(p_ramrod->flags,
+                 ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG,
+                 GET_FIELD(modify_flags,
+                           QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN));
+
+       SET_FIELD(p_ramrod->flags,
+                 ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG,
+                 GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));
+
+       SET_FIELD(p_ramrod->flags,
+                 ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG,
+                 GET_FIELD(modify_flags,
+                           QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));
+
+       SET_FIELD(p_ramrod->flags,
+                 ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG,
+                 GET_FIELD(modify_flags,
+                           QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP));
+
+       SET_FIELD(p_ramrod->flags,
+                 ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG,
+                 GET_FIELD(modify_flags,
+                           QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER));
+
+       p_ramrod->fields = 0;
+       SET_FIELD(p_ramrod->fields,
+                 ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
+                 qp->min_rnr_nak_timer);
+
+       p_ramrod->max_ird = qp->max_rd_atomic_resp;
+       p_ramrod->traffic_class = qp->traffic_class_tos;
+       p_ramrod->hop_limit = qp->hop_limit_ttl;
+       p_ramrod->p_key = cpu_to_le16(qp->pkey);
+       p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
+       p_ramrod->mtu = cpu_to_le16(qp->mtu);
+       qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
+       rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify responder, rc = %d\n", rc);
+       return rc;
+}
+
+static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
+                                       struct qed_rdma_qp *qp,
+                                       bool move_to_sqd,
+                                       bool move_to_err, u32 modify_flags)
+{
+       struct roce_modify_qp_req_ramrod_data *p_ramrod;
+       struct qed_sp_init_data init_data;
+       struct qed_spq_entry *p_ent;
+       int rc;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+       if (move_to_err && !qp->req_offloaded)
+               return 0;
+
+       /* Get SPQ entry */
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.cid = qp->icid + 1;
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                ROCE_EVENT_MODIFY_QP,
+                                PROTOCOLID_ROCE, &init_data);
+       if (rc) {
+               DP_NOTICE(p_hwfn, "rc = %d\n", rc);
+               return rc;
+       }
+
+       p_ramrod = &p_ent->ramrod.roce_modify_qp_req;
+
+       p_ramrod->flags = 0;
+
+       SET_FIELD(p_ramrod->flags,
+                 ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);
+
+       SET_FIELD(p_ramrod->flags,
+                 ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG, move_to_sqd);
+
+       SET_FIELD(p_ramrod->flags,
+                 ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY,
+                 qp->sqd_async);
+
+       SET_FIELD(p_ramrod->flags,
+                 ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG,
+                 GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));
+
+       SET_FIELD(p_ramrod->flags,
+                 ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG,
+                 GET_FIELD(modify_flags,
+                           QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));
+
+       SET_FIELD(p_ramrod->flags,
+                 ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG,
+                 GET_FIELD(modify_flags,
+                           QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ));
+
+       SET_FIELD(p_ramrod->flags,
+                 ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG,
+                 GET_FIELD(modify_flags,
+                           QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT));
+
+       SET_FIELD(p_ramrod->flags,
+                 ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG,
+                 GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT));
+
+       SET_FIELD(p_ramrod->flags,
+                 ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG,
+                 GET_FIELD(modify_flags,
+                           QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT));
+
+       p_ramrod->fields = 0;
+       SET_FIELD(p_ramrod->fields,
+                 ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);
+
+       SET_FIELD(p_ramrod->fields,
+                 ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
+                 qp->rnr_retry_cnt);
+
+       p_ramrod->max_ord = qp->max_rd_atomic_req;
+       p_ramrod->traffic_class = qp->traffic_class_tos;
+       p_ramrod->hop_limit = qp->hop_limit_ttl;
+       p_ramrod->p_key = cpu_to_le16(qp->pkey);
+       p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
+       p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
+       p_ramrod->mtu = cpu_to_le16(qp->mtu);
+       qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
+       rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify requester, rc = %d\n", rc);
+       return rc;
+}
+
+static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
+                                           struct qed_rdma_qp *qp,
+                                           u32 *num_invalidated_mw,
+                                           u32 *cq_prod)
+{
+       struct roce_destroy_qp_resp_output_params *p_ramrod_res;
+       struct roce_destroy_qp_resp_ramrod_data *p_ramrod;
+       struct qed_sp_init_data init_data;
+       struct qed_spq_entry *p_ent;
+       dma_addr_t ramrod_res_phys;
+       int rc;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+       *num_invalidated_mw = 0;
+       *cq_prod = qp->cq_prod;
+
+       if (!qp->resp_offloaded) {
+               /* If the responder was never offloaded, we need to free the
+                * cids allocated in create_qp, as a FW async event will
+                * never arrive
+                */
+               u32 cid;
+
+               cid = qp->icid -
+                     qed_cxt_get_proto_cid_start(p_hwfn,
+                                                 p_hwfn->p_rdma_info->proto);
+               qed_roce_free_cid_pair(p_hwfn, (u16)cid);
+
+               return 0;
+       }
+
+       /* Get SPQ entry */
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.cid = qp->icid;
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                ROCE_RAMROD_DESTROY_QP,
+                                PROTOCOLID_ROCE, &init_data);
+       if (rc)
+               return rc;
+
+       p_ramrod = &p_ent->ramrod.roce_destroy_qp_resp;
+
+       p_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+                                         sizeof(*p_ramrod_res),
+                                         &ramrod_res_phys, GFP_KERNEL);
+
+       if (!p_ramrod_res) {
+               rc = -ENOMEM;
+               DP_NOTICE(p_hwfn,
+                         "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
+                         rc);
+               return rc;
+       }
+
+       DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
+
+       rc = qed_spq_post(p_hwfn, p_ent, NULL);
+       if (rc)
+               goto err;
+
+       *num_invalidated_mw = le32_to_cpu(p_ramrod_res->num_invalidated_mw);
+       *cq_prod = le32_to_cpu(p_ramrod_res->cq_prod);
+       qp->cq_prod = *cq_prod;
+
+       /* Free IRQ - only if ramrod succeeded, in case FW is still using it */
+       dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                         qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
+                         qp->irq, qp->irq_phys_addr);
+
+       qp->resp_offloaded = false;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy responder, rc = %d\n", rc);
+
+err:
+       dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                         sizeof(struct roce_destroy_qp_resp_output_params),
+                         p_ramrod_res, ramrod_res_phys);
+
+       return rc;
+}
+
+static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
+                                           struct qed_rdma_qp *qp,
+                                           u32 *num_bound_mw)
+{
+       struct roce_destroy_qp_req_output_params *p_ramrod_res;
+       struct roce_destroy_qp_req_ramrod_data *p_ramrod;
+       struct qed_sp_init_data init_data;
+       struct qed_spq_entry *p_ent;
+       dma_addr_t ramrod_res_phys;
+       int rc = -ENOMEM;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+       if (!qp->req_offloaded)
+               return 0;
+
+       p_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+                                         sizeof(*p_ramrod_res),
+                                         &ramrod_res_phys, GFP_KERNEL);
+       if (!p_ramrod_res) {
+               DP_NOTICE(p_hwfn,
+                         "qed destroy requester failed: cannot allocate memory (ramrod)\n");
+               return rc;
+       }
+
+       /* Get SPQ entry */
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.cid = qp->icid + 1;
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_DESTROY_QP,
+                                PROTOCOLID_ROCE, &init_data);
+       if (rc)
+               goto err;
+
+       p_ramrod = &p_ent->ramrod.roce_destroy_qp_req;
+       DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
+
+       rc = qed_spq_post(p_hwfn, p_ent, NULL);
+       if (rc)
+               goto err;
+
+       *num_bound_mw = le32_to_cpu(p_ramrod_res->num_bound_mw);
+
+       /* Free ORQ - only if ramrod succeeded, in case FW is still using it */
+       dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+                         qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
+                         qp->orq, qp->orq_phys_addr);
+
+       qp->req_offloaded = false;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy requester, rc = %d\n", rc);
+
+err:
+       dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
+                         p_ramrod_res, ramrod_res_phys);
+
+       return rc;
+}
+
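+/* Query the responder for its RQ PSN and error state and, when offloaded,
+ * the requester for its SQ PSN, error and draining state; any error
+ * indication moves the QP's cached state to ERR.
+ */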
+static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
+                            struct qed_rdma_qp *qp,
+                            struct qed_rdma_query_qp_out_params *out_params)
+{
+       struct roce_query_qp_resp_output_params *p_resp_ramrod_res;
+       struct roce_query_qp_req_output_params *p_req_ramrod_res;
+       struct roce_query_qp_resp_ramrod_data *p_resp_ramrod;
+       struct roce_query_qp_req_ramrod_data *p_req_ramrod;
+       struct qed_sp_init_data init_data;
+       dma_addr_t resp_ramrod_res_phys;
+       dma_addr_t req_ramrod_res_phys;
+       struct qed_spq_entry *p_ent;
+       bool rq_err_state;
+       bool sq_err_state;
+       bool sq_draining;
+       int rc = -ENOMEM;
+
+       if (!qp->resp_offloaded && !qp->req_offloaded) {
+               /* We can't send ramrod to the fw since this qp wasn't offloaded
+                * to the fw yet
+                */
+               out_params->draining = false;
+               out_params->rq_psn = qp->rq_psn;
+               out_params->sq_psn = qp->sq_psn;
+               out_params->state = qp->cur_state;
+
+               DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "No QPs as no offload\n");
+               return 0;
+       }
+
+       if (!qp->resp_offloaded) {
+               DP_NOTICE(p_hwfn,
+                         "The responder's qp should be offloaded before the requester's\n");
+               return -EINVAL;
+       }
+
+       /* Send a query responder ramrod to FW to get RQ-PSN and state */
+       p_resp_ramrod_res = (struct roce_query_qp_resp_output_params *)
+           dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+                              sizeof(*p_resp_ramrod_res),
+                              &resp_ramrod_res_phys, GFP_KERNEL);
+       if (!p_resp_ramrod_res) {
+               DP_NOTICE(p_hwfn,
+                         "qed query qp failed: cannot allocate memory (ramrod)\n");
+               return rc;
+       }
+
+       /* Get SPQ entry */
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.cid = qp->icid;
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+       rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
+                                PROTOCOLID_ROCE, &init_data);
+       if (rc)
+               goto err_resp;
+
+       p_resp_ramrod = &p_ent->ramrod.roce_query_qp_resp;
+       DMA_REGPAIR_LE(p_resp_ramrod->output_params_addr, resp_ramrod_res_phys);
+
+       rc = qed_spq_post(p_hwfn, p_ent, NULL);
+       if (rc)
+               goto err_resp;
+
+       out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn);
+       rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->err_flag),
+                                ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);
+
+       dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
+                         p_resp_ramrod_res, resp_ramrod_res_phys);
+
+       if (!qp->req_offloaded) {
+               /* Don't send query qp for the requester */
+               out_params->sq_psn = qp->sq_psn;
+               out_params->draining = false;
+
+               if (rq_err_state)
+                       qp->cur_state = QED_ROCE_QP_STATE_ERR;
+
+               out_params->state = qp->cur_state;
+
+               return 0;
+       }
+
+       /* Send a query requester ramrod to FW to get SQ-PSN and state */
+       p_req_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+                                             sizeof(*p_req_ramrod_res),
+                                             &req_ramrod_res_phys,
+                                             GFP_KERNEL);
+       if (!p_req_ramrod_res) {
+               rc = -ENOMEM;
+               DP_NOTICE(p_hwfn,
+                         "qed query qp failed: cannot allocate memory (ramrod)\n");
+               return rc;
+       }
+
+       /* Get SPQ entry */
+       init_data.cid = qp->icid + 1;
+       rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
+                                PROTOCOLID_ROCE, &init_data);
+       if (rc)
+               goto err_req;
+
+       p_req_ramrod = &p_ent->ramrod.roce_query_qp_req;
+       DMA_REGPAIR_LE(p_req_ramrod->output_params_addr, req_ramrod_res_phys);
+
+       rc = qed_spq_post(p_hwfn, p_ent, NULL);
+       if (rc)
+               goto err_req;
+
+       out_params->sq_psn = le32_to_cpu(p_req_ramrod_res->psn);
+       sq_err_state = GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
+                                ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG);
+       sq_draining =
+               GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
+                         ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG);
+
+       dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
+                         p_req_ramrod_res, req_ramrod_res_phys);
+
+       out_params->draining = false;
+
+       if (rq_err_state || sq_err_state)
+               qp->cur_state = QED_ROCE_QP_STATE_ERR;
+       else if (sq_draining)
+               out_params->draining = true;
+       out_params->state = qp->cur_state;
+
+       return 0;
+
+err_req:
+       dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
+                         p_req_ramrod_res, req_ramrod_res_phys);
+       return rc;
+err_resp:
+       dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
+                         p_resp_ramrod_res, resp_ramrod_res_phys);
+       return rc;
+}
+
+static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
+{
+       u32 num_invalidated_mw = 0;
+       u32 num_bound_mw = 0;
+       u32 cq_prod;
+       int rc;
+
+       /* Destroys the specified QP */
+       if ((qp->cur_state != QED_ROCE_QP_STATE_RESET) &&
+           (qp->cur_state != QED_ROCE_QP_STATE_ERR) &&
+           (qp->cur_state != QED_ROCE_QP_STATE_INIT)) {
+               DP_NOTICE(p_hwfn,
+                         "QP must be in error, reset or init state before destroying it\n");
+               return -EINVAL;
+       }
+
+       if (qp->cur_state != QED_ROCE_QP_STATE_RESET) {
+               rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
+                                                     &num_invalidated_mw,
+                                                     &cq_prod);
+               if (rc)
+                       return rc;
+
+               /* Send destroy requester ramrod */
+               rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
+                                                     &num_bound_mw);
+               if (rc)
+                       return rc;
+
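+               /* Sanity check: every MW the requester bound must have
+                * been invalidated when the responder was destroyed
+                */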
+               if (num_invalidated_mw != num_bound_mw) {
+                       DP_NOTICE(p_hwfn,
+                                 "number of invalidated memory windows differs from the number of bound ones\n");
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
+static int qed_rdma_query_qp(void *rdma_cxt,
+                            struct qed_rdma_qp *qp,
+                            struct qed_rdma_query_qp_out_params *out_params)
+{
+       struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+       int rc;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+       /* The following fields are filled in from qp and not FW as they can't
+        * be modified by FW
+        */
+       out_params->mtu = qp->mtu;
+       out_params->dest_qp = qp->dest_qp;
+       out_params->incoming_atomic_en = qp->incoming_atomic_en;
+       out_params->e2e_flow_control_en = qp->e2e_flow_control_en;
+       out_params->incoming_rdma_read_en = qp->incoming_rdma_read_en;
+       out_params->incoming_rdma_write_en = qp->incoming_rdma_write_en;
+       out_params->dgid = qp->dgid;
+       out_params->flow_label = qp->flow_label;
+       out_params->hop_limit_ttl = qp->hop_limit_ttl;
+       out_params->traffic_class_tos = qp->traffic_class_tos;
+       out_params->timeout = qp->ack_timeout;
+       out_params->rnr_retry = qp->rnr_retry_cnt;
+       out_params->retry_cnt = qp->retry_cnt;
+       out_params->min_rnr_nak_timer = qp->min_rnr_nak_timer;
+       out_params->pkey_index = 0;
+       out_params->max_rd_atomic = qp->max_rd_atomic_req;
+       out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp;
+       out_params->sqd_async = qp->sqd_async;
+
+       rc = qed_roce_query_qp(p_hwfn, qp, out_params);
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query QP, rc = %d\n", rc);
+       return rc;
+}
+
+static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
+{
+       struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+       int rc = 0;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+       rc = qed_roce_destroy_qp(p_hwfn, qp);
+
+       /* free qp params struct */
+       kfree(qp);
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP destroyed\n");
+       return rc;
+}
+
+static struct qed_rdma_qp *
+qed_rdma_create_qp(void *rdma_cxt,
+                  struct qed_rdma_create_qp_in_params *in_params,
+                  struct qed_rdma_create_qp_out_params *out_params)
+{
+       struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+       struct qed_rdma_qp *qp;
+       u8 max_stats_queues;
+       int rc;
+
+       if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info) {
+               DP_ERR(p_hwfn->cdev,
+                      "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
+                      rdma_cxt, in_params, out_params);
+               return NULL;
+       }
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+                  "qed rdma create qp called with qp_handle = %08x%08x\n",
+                  in_params->qp_handle_hi, in_params->qp_handle_lo);
+
+       /* Some sanity checks... */
+       max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues;
+       if (in_params->stats_queue >= max_stats_queues) {
+               DP_ERR(p_hwfn->cdev,
+                      "qed rdma create qp failed due to invalid statistics queue %d. maximum is %d\n",
+                      in_params->stats_queue, max_stats_queues);
+               return NULL;
+       }
+
+       qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+       if (!qp) {
+               DP_NOTICE(p_hwfn, "Failed to allocate qed_rdma_qp\n");
+               return NULL;
+       }
+
+       rc = qed_roce_alloc_cid(p_hwfn, &qp->icid);
+       if (rc) {
+               kfree(qp);
+               return NULL;
+       }
+
+       qp->qpid = ((0xFF << 16) | qp->icid);
+
+       DP_INFO(p_hwfn, "ROCE qpid=%x\n", qp->qpid);
+
+       qp->cur_state = QED_ROCE_QP_STATE_RESET;
+       qp->qp_handle.hi = cpu_to_le32(in_params->qp_handle_hi);
+       qp->qp_handle.lo = cpu_to_le32(in_params->qp_handle_lo);
+       qp->qp_handle_async.hi = cpu_to_le32(in_params->qp_handle_async_hi);
+       qp->qp_handle_async.lo = cpu_to_le32(in_params->qp_handle_async_lo);
+       qp->use_srq = in_params->use_srq;
+       qp->signal_all = in_params->signal_all;
+       qp->fmr_and_reserved_lkey = in_params->fmr_and_reserved_lkey;
+       qp->pd = in_params->pd;
+       qp->dpi = in_params->dpi;
+       qp->sq_cq_id = in_params->sq_cq_id;
+       qp->sq_num_pages = in_params->sq_num_pages;
+       qp->sq_pbl_ptr = in_params->sq_pbl_ptr;
+       qp->rq_cq_id = in_params->rq_cq_id;
+       qp->rq_num_pages = in_params->rq_num_pages;
+       qp->rq_pbl_ptr = in_params->rq_pbl_ptr;
+       qp->srq_id = in_params->srq_id;
+       qp->req_offloaded = false;
+       qp->resp_offloaded = false;
+       qp->e2e_flow_control_en = qp->use_srq ? false : true;
+       qp->stats_queue = in_params->stats_queue;
+
+       out_params->icid = qp->icid;
+       out_params->qp_id = qp->qpid;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Create QP, rc = %d\n", rc);
+       return qp;
+}
+
+static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
+                             struct qed_rdma_qp *qp,
+                             enum qed_roce_qp_state prev_state,
+                             struct qed_rdma_modify_qp_in_params *params)
+{
+       u32 num_invalidated_mw = 0, num_bound_mw = 0;
+       int rc = 0;
+
+       /* Perform additional operations according to the current state and the
+        * next state
+        */
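+       /* The responder is offloaded on the move to RTR and the requester
+        * on RTR->RTS; later transitions are plain modify ramrods, while a
+        * move to RESET tears down both sides.
+        */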
+       if (((prev_state == QED_ROCE_QP_STATE_INIT) ||
+            (prev_state == QED_ROCE_QP_STATE_RESET)) &&
+           (qp->cur_state == QED_ROCE_QP_STATE_RTR)) {
+               /* Init->RTR or Reset->RTR */
+               rc = qed_roce_sp_create_responder(p_hwfn, qp);
+               return rc;
+       } else if ((prev_state == QED_ROCE_QP_STATE_RTR) &&
+                  (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
+               /* RTR-> RTS */
+               rc = qed_roce_sp_create_requester(p_hwfn, qp);
+               if (rc)
+                       return rc;
+
+               /* Send modify responder ramrod */
+               rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
+                                                 params->modify_flags);
+               return rc;
+       } else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
+                  (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
+               /* RTS->RTS */
+               rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
+                                                 params->modify_flags);
+               if (rc)
+                       return rc;
+
+               rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
+                                                 params->modify_flags);
+               return rc;
+       } else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
+                  (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
+               /* RTS->SQD */
+               rc = qed_roce_sp_modify_requester(p_hwfn, qp, true, false,
+                                                 params->modify_flags);
+               return rc;
+       } else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
+                  (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
+               /* SQD->SQD */
+               rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
+                                                 params->modify_flags);
+               if (rc)
+                       return rc;
+
+               rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
+                                                 params->modify_flags);
+               return rc;
+       } else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
+                  (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
+               /* SQD->RTS */
+               rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
+                                                 params->modify_flags);
+               if (rc)
+                       return rc;
+
+               rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
+                                                 params->modify_flags);
+
+               return rc;
+       } else if (qp->cur_state == QED_ROCE_QP_STATE_ERR ||
+                  qp->cur_state == QED_ROCE_QP_STATE_SQE) {
+               /* ->ERR */
+               rc = qed_roce_sp_modify_responder(p_hwfn, qp, true,
+                                                 params->modify_flags);
+               if (rc)
+                       return rc;
+
+               rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, true,
+                                                 params->modify_flags);
+               return rc;
+       } else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) {
+               /* Any state -> RESET */
+               u32 cq_prod;
+
+               /* Send destroy responder ramrod */
+               rc = qed_roce_sp_destroy_qp_responder(p_hwfn,
+                                                     qp,
+                                                     &num_invalidated_mw,
+                                                     &cq_prod);
+
+               if (rc)
+                       return rc;
+
+               qp->cq_prod = cq_prod;
+
+               rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
+                                                     &num_bound_mw);
+
+               if (num_invalidated_mw != num_bound_mw) {
+                       DP_NOTICE(p_hwfn,
+                                 "number of invalidated memory windows differs from the number of bound ones\n");
+                       return -EINVAL;
+               }
+       } else {
+               DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n");
+       }
+
+       return rc;
+}
+
+static int qed_rdma_modify_qp(void *rdma_cxt,
+                             struct qed_rdma_qp *qp,
+                             struct qed_rdma_modify_qp_in_params *params)
+{
+       struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+       enum qed_roce_qp_state prev_state;
+       int rc = 0;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x params->new_state=%d\n",
+                  qp->icid, params->new_state);
+
+       if (GET_FIELD(params->modify_flags,
+                     QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)) {
+               qp->incoming_rdma_read_en = params->incoming_rdma_read_en;
+               qp->incoming_rdma_write_en = params->incoming_rdma_write_en;
+               qp->incoming_atomic_en = params->incoming_atomic_en;
+       }
+
+       /* Update QP structure with the updated values */
+       if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ROCE_MODE))
+               qp->roce_mode = params->roce_mode;
+       if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY))
+               qp->pkey = params->pkey;
+       if (GET_FIELD(params->modify_flags,
+                     QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN))
+               qp->e2e_flow_control_en = params->e2e_flow_control_en;
+       if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_DEST_QP))
+               qp->dest_qp = params->dest_qp;
+       if (GET_FIELD(params->modify_flags,
+                     QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)) {
+               /* Indicates that the following parameters have changed:
+                * Traffic class, flow label, hop limit, source GID,
+                * destination GID, loopback indicator
+                */
+               qp->traffic_class_tos = params->traffic_class_tos;
+               qp->flow_label = params->flow_label;
+               qp->hop_limit_ttl = params->hop_limit_ttl;
+
+               qp->sgid = params->sgid;
+               qp->dgid = params->dgid;
+               qp->udp_src_port = 0;
+               qp->vlan_id = params->vlan_id;
+               qp->mtu = params->mtu;
+               qp->lb_indication = params->lb_indication;
+               memcpy((u8 *)&qp->remote_mac_addr[0],
+                      (u8 *)&params->remote_mac_addr[0], ETH_ALEN);
+               if (params->use_local_mac) {
+                       memcpy((u8 *)&qp->local_mac_addr[0],
+                              (u8 *)&params->local_mac_addr[0], ETH_ALEN);
+               } else {
+                       memcpy((u8 *)&qp->local_mac_addr[0],
+                              (u8 *)&p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
+               }
+       }
+       if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RQ_PSN))
+               qp->rq_psn = params->rq_psn;
+       if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_SQ_PSN))
+               qp->sq_psn = params->sq_psn;
+       if (GET_FIELD(params->modify_flags,
+                     QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ))
+               qp->max_rd_atomic_req = params->max_rd_atomic_req;
+       if (GET_FIELD(params->modify_flags,
+                     QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP))
+               qp->max_rd_atomic_resp = params->max_rd_atomic_resp;
+       if (GET_FIELD(params->modify_flags,
+                     QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT))
+               qp->ack_timeout = params->ack_timeout;
+       if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT))
+               qp->retry_cnt = params->retry_cnt;
+       if (GET_FIELD(params->modify_flags,
+                     QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT))
+               qp->rnr_retry_cnt = params->rnr_retry_cnt;
+       if (GET_FIELD(params->modify_flags,
+                     QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER))
+               qp->min_rnr_nak_timer = params->min_rnr_nak_timer;
+
+       qp->sqd_async = params->sqd_async;
+
+       prev_state = qp->cur_state;
+       if (GET_FIELD(params->modify_flags,
+                     QED_RDMA_MODIFY_QP_VALID_NEW_STATE)) {
+               qp->cur_state = params->new_state;
+               DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "qp->cur_state=%d\n",
+                          qp->cur_state);
+       }
+
+       rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params);
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify QP, rc = %d\n", rc);
+       return rc;
+}
+
+static int
+qed_rdma_register_tid(void *rdma_cxt,
+                     struct qed_rdma_register_tid_in_params *params)
+{
+       struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+       struct rdma_register_tid_ramrod_data *p_ramrod;
+       struct qed_sp_init_data init_data;
+       struct qed_spq_entry *p_ent;
+       enum rdma_tid_type tid_type;
+       u8 fw_return_code;
+       int rc;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", params->itid);
+
+       /* Get SPQ entry */
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_REGISTER_MR,
+                                p_hwfn->p_rdma_info->proto, &init_data);
+       if (rc) {
+               DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+               return rc;
+       }
+
+       if (p_hwfn->p_rdma_info->last_tid < params->itid)
+               p_hwfn->p_rdma_info->last_tid = params->itid;
+
+       p_ramrod = &p_ent->ramrod.rdma_register_tid;
+
+       p_ramrod->flags = 0;
+       SET_FIELD(p_ramrod->flags,
+                 RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL,
+                 params->pbl_two_level);
+
+       SET_FIELD(p_ramrod->flags,
+                 RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED, params->zbva);
+
+       SET_FIELD(p_ramrod->flags,
+                 RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr);
+
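+       /* The page-size field is log2 of the page size relative to a 4KB
+        * base, hence the "- 12" below
+        */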
+       /* Don't initialize D/C field, as it may override other bits. */
+       if (!(params->tid_type == QED_RDMA_TID_FMR) && !(params->dma_mr))
+               SET_FIELD(p_ramrod->flags,
+                         RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG,
+                         params->page_size_log - 12);
+
+       SET_FIELD(p_ramrod->flags,
+                 RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID,
+                 p_hwfn->p_rdma_info->last_tid);
+
+       SET_FIELD(p_ramrod->flags,
+                 RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ,
+                 params->remote_read);
+
+       SET_FIELD(p_ramrod->flags,
+                 RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE,
+                 params->remote_write);
+
+       SET_FIELD(p_ramrod->flags,
+                 RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC,
+                 params->remote_atomic);
+
+       SET_FIELD(p_ramrod->flags,
+                 RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE,
+                 params->local_write);
+
+       SET_FIELD(p_ramrod->flags,
+                 RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ, params->local_read);
+
+       SET_FIELD(p_ramrod->flags,
+                 RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND,
+                 params->mw_bind);
+
+       SET_FIELD(p_ramrod->flags1,
+                 RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG,
+                 params->pbl_page_size_log - 12);
+
+       SET_FIELD(p_ramrod->flags2,
+                 RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR, params->dma_mr);
+
+       switch (params->tid_type) {
+       case QED_RDMA_TID_REGISTERED_MR:
+               tid_type = RDMA_TID_REGISTERED_MR;
+               break;
+       case QED_RDMA_TID_FMR:
+               tid_type = RDMA_TID_FMR;
+               break;
+       case QED_RDMA_TID_MW_TYPE1:
+               tid_type = RDMA_TID_MW_TYPE1;
+               break;
+       case QED_RDMA_TID_MW_TYPE2A:
+               tid_type = RDMA_TID_MW_TYPE2A;
+               break;
+       default:
+               rc = -EINVAL;
+               DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+               return rc;
+       }
+       SET_FIELD(p_ramrod->flags1,
+                 RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE, tid_type);
+
+       p_ramrod->itid = cpu_to_le32(params->itid);
+       p_ramrod->key = params->key;
+       p_ramrod->pd = cpu_to_le16(params->pd);
+       p_ramrod->length_hi = (u8)(params->length >> 32);
+       p_ramrod->length_lo = DMA_LO_LE(params->length);
+       if (params->zbva) {
+               /* Lower 32 bits of the registered MR address.
+                * In case of zero based MR, will hold FBO
+                */
+               p_ramrod->va.hi = 0;
+               p_ramrod->va.lo = cpu_to_le32(params->fbo);
+       } else {
+               DMA_REGPAIR_LE(p_ramrod->va, params->vaddr);
+       }
+       DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr);
+
+       /* DIF */
+       if (params->dif_enabled) {
+               SET_FIELD(p_ramrod->flags2,
+                         RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1);
+               DMA_REGPAIR_LE(p_ramrod->dif_error_addr,
+                              params->dif_error_addr);
+               DMA_REGPAIR_LE(p_ramrod->dif_runt_addr, params->dif_runt_addr);
+       }
+
+       rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
+       if (rc)
+               return rc;
+
+       if (fw_return_code != RDMA_RETURN_OK) {
+               DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
+               return -EINVAL;
+       }
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Register TID, rc = %d\n", rc);
+       return rc;
+}
+
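
The SET_FIELD() calls above pack values into mask/shift-defined bit ranges of the ramrod flags words. A minimal standalone sketch of the idiom follows; the field name, mask, and width are invented for the example (the real macro and per-field _MASK/_SHIFT constants live in the qed HSI headers):

/* Illustrative only: a self-contained model of the SET_FIELD idiom. */
#include <stdint.h>
#include <stdio.h>

#define EX_FIELD_MASK   0x3
#define EX_FIELD_SHIFT  4

#define EX_SET_FIELD(value, name, flag)                                   \
	do {                                                              \
		(value) &= ~((name##_MASK) << (name##_SHIFT));            \
		(value) |= (((flag) & (name##_MASK)) << (name##_SHIFT));  \
	} while (0)

int main(void)
{
	uint16_t flags = 0;

	EX_SET_FIELD(flags, EX_FIELD, 0x2);	/* pack 0x2 into bits 5:4 */
	printf("flags = 0x%x\n", flags);	/* prints flags = 0x20 */
	return 0;
}
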
+static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
+{
+       struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+       struct rdma_deregister_tid_ramrod_data *p_ramrod;
+       struct qed_sp_init_data init_data;
+       struct qed_spq_entry *p_ent;
+       struct qed_ptt *p_ptt;
+       u8 fw_return_code;
+       int rc;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
+
+       /* Get SPQ entry */
+       memset(&init_data, 0, sizeof(init_data));
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+       rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_DEREGISTER_MR,
+                                p_hwfn->p_rdma_info->proto, &init_data);
+       if (rc) {
+               DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+               return rc;
+       }
+
+       p_ramrod = &p_ent->ramrod.rdma_deregister_tid;
+       p_ramrod->itid = cpu_to_le32(itid);
+
+       rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
+       if (rc) {
+               DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+               return rc;
+       }
+
+       if (fw_return_code == RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR) {
+               DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
+               return -EINVAL;
+       } else if (fw_return_code == RDMA_RETURN_NIG_DRAIN_REQ) {
+               /* The TID is still in use; a NIG drain is required
+                * before the ramrod can be resent.
+                */
+               p_ptt = qed_ptt_acquire(p_hwfn);
+               if (!p_ptt) {
+                       rc = -EBUSY;
+                       DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+                                  "Failed to acquire PTT\n");
+                       return rc;
+               }
+
+               rc = qed_mcp_drain(p_hwfn, p_ptt);
+               if (rc) {
+                       qed_ptt_release(p_hwfn, p_ptt);
+                       DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+                                  "Drain failed\n");
+                       return rc;
+               }
+
+               qed_ptt_release(p_hwfn, p_ptt);
+
+               /* Resend the ramrod */
+               rc = qed_sp_init_request(p_hwfn, &p_ent,
+                                        RDMA_RAMROD_DEREGISTER_MR,
+                                        p_hwfn->p_rdma_info->proto,
+                                        &init_data);
+               if (rc) {
+                       DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+                                  "Failed to init sp-element\n");
+                       return rc;
+               }
+
+               rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
+               if (rc) {
+                       DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+                                  "Ramrod failed\n");
+                       return rc;
+               }
+
+               if (fw_return_code != RDMA_RETURN_OK) {
+                       DP_NOTICE(p_hwfn, "fw_return_code = %d\n",
+                                 fw_return_code);
+                       return -EINVAL;
+               }
+       }
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "De-registered TID, rc = %d\n", rc);
+       return rc;
+}
+
+static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid)
+{
+       struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+       u32 start_cid, cid, xcid;
+
+       /* An even icid belongs to a responder and an odd icid to a requester;
+        * the icid received as input can be either. We compute the partner
+        * icid ("xcid") by flipping the low bit, and clear the "cid" map
+        * entries only once both halves of the pair are free.
+        */
+       start_cid = qed_cxt_get_proto_cid_start(p_hwfn, p_rdma_info->proto);
+       cid = icid - start_cid;
+       xcid = cid ^ 1;
+
+       spin_lock_bh(&p_rdma_info->lock);
+
+       qed_bmap_release_id(p_hwfn, &p_rdma_info->real_cid_map, cid);
+       if (qed_bmap_test_id(p_hwfn, &p_rdma_info->real_cid_map, xcid) == 0) {
+               qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, cid);
+               qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, xcid);
+       }
+
+       spin_unlock_bh(&p_rdma_info->lock);
+}
+
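
The xcid computation relies on responder/requester cids being allocated as even/odd pairs, so flipping the least-significant bit of a relative cid yields its partner and both halves share the same even base. A tiny standalone illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Relative cids: even = responder, odd = requester. */
	for (uint32_t cid = 0; cid < 4; cid++) {
		uint32_t xcid = cid ^ 1;	/* partner cid */
		printf("cid %u <-> xcid %u (pair base %u)\n",
		       cid, xcid, cid & ~1u);
	}
	return 0;
}
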
+static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
+{
+       return QED_LEADING_HWFN(cdev);
+}
+
+static void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+       u32 val;
+
+       val = (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) ? 0 : 1;
+
+       qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPM_ENABLE, val);
+       DP_VERBOSE(p_hwfn, (QED_MSG_DCB | QED_MSG_RDMA),
+                  "Changing DPM_EN state to %d (DCBX=%d, DB_BAR=%d)\n",
+                  val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm);
+}
+
+void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+       p_hwfn->db_bar_no_edpm = true;
+
+       qed_rdma_dpm_conf(p_hwfn, p_ptt);
+}
+
+static int qed_rdma_start(void *rdma_cxt,
+                         struct qed_rdma_start_in_params *params)
+{
+       struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+       struct qed_ptt *p_ptt;
+       int rc = -EBUSY;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+                  "desired_cnq = %08x\n", params->desired_cnq);
+
+       p_ptt = qed_ptt_acquire(p_hwfn);
+       if (!p_ptt)
+               goto err;
+
+       rc = qed_rdma_alloc(p_hwfn, p_ptt, params);
+       if (rc)
+               goto err1;
+
+       rc = qed_rdma_setup(p_hwfn, p_ptt, params);
+       if (rc)
+               goto err2;
+
+       qed_ptt_release(p_hwfn, p_ptt);
+
+       return rc;
+
+err2:
+       qed_rdma_free(p_hwfn);
+err1:
+       qed_ptt_release(p_hwfn, p_ptt);
+err:
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA start - error, rc = %d\n", rc);
+       return rc;
+}
+
+static int qed_rdma_init(struct qed_dev *cdev,
+                        struct qed_rdma_start_in_params *params)
+{
+       return qed_rdma_start(QED_LEADING_HWFN(cdev), params);
+}
+
+static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
+{
+       struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "dpi = %08x\n", dpi);
+
+       spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+       qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi);
+       spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
+void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+                                    u8 connection_handle,
+                                    void *cookie,
+                                    dma_addr_t first_frag_addr,
+                                    bool b_last_fragment, bool b_last_packet)
+{
+       struct qed_roce_ll2_packet *packet = cookie;
+       struct qed_roce_ll2_info *roce_ll2 = p_hwfn->ll2;
+
+       roce_ll2->cbs.tx_cb(roce_ll2->cb_cookie, packet);
+}
+
+void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+                                   u8 connection_handle,
+                                   void *cookie,
+                                   dma_addr_t first_frag_addr,
+                                   bool b_last_fragment, bool b_last_packet)
+{
+       qed_ll2b_complete_tx_gsi_packet(p_hwfn, connection_handle,
+                                       cookie, first_frag_addr,
+                                       b_last_fragment, b_last_packet);
+}
+
+void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
+                                    u8 connection_handle,
+                                    void *cookie,
+                                    dma_addr_t rx_buf_addr,
+                                    u16 data_length,
+                                    u8 data_length_error,
+                                    u16 parse_flags,
+                                    u16 vlan,
+                                    u32 src_mac_addr_hi,
+                                    u16 src_mac_addr_lo, bool b_last_packet)
+{
+       struct qed_roce_ll2_info *roce_ll2 = p_hwfn->ll2;
+       struct qed_roce_ll2_rx_params params;
+       struct qed_dev *cdev = p_hwfn->cdev;
+       struct qed_roce_ll2_packet pkt;
+
+       DP_VERBOSE(cdev,
+                  QED_MSG_LL2,
+                  "roce ll2 rx complete: bus_addr=%p, len=%d, data_len_err=%d\n",
+                  (void *)(uintptr_t)rx_buf_addr,
+                  data_length, data_length_error);
+
+       memset(&pkt, 0, sizeof(pkt));
+       pkt.n_seg = 1;
+       pkt.payload[0].baddr = rx_buf_addr;
+       pkt.payload[0].len = data_length;
+
+       memset(&params, 0, sizeof(params));
+       params.vlan_id = vlan;
+       *((u32 *)&params.smac[0]) = ntohl(src_mac_addr_hi);
+       *((u16 *)&params.smac[4]) = ntohs(src_mac_addr_lo);
+
+       if (data_length_error) {
+               DP_ERR(cdev,
+                      "roce ll2 rx complete: data length error %d, length=%d\n",
+                      data_length_error, data_length);
+               params.rc = -EINVAL;
+       }
+
+       roce_ll2->cbs.rx_cb(roce_ll2->cb_cookie, &pkt, &params);
+}
+
+static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev,
+                                      u8 *old_mac_address,
+                                      u8 *new_mac_address)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_ptt *p_ptt;
+       int rc = 0;
+
+       if (!hwfn->ll2 || hwfn->ll2->handle == QED_LL2_UNUSED_HANDLE) {
+               DP_ERR(cdev,
+                      "qed roce mac filter failed - roce_info/ll2 NULL\n");
+               return -EINVAL;
+       }
+
+       p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+       if (!p_ptt) {
+               DP_ERR(cdev,
+                      "qed roce ll2 mac filter set: failed to acquire PTT\n");
+               return -EINVAL;
+       }
+
+       mutex_lock(&hwfn->ll2->lock);
+       if (old_mac_address)
+               qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
+                                         old_mac_address);
+       if (new_mac_address)
+               rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
+                                           new_mac_address);
+       mutex_unlock(&hwfn->ll2->lock);
+
+       qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
+
+       if (rc)
+               DP_ERR(cdev,
+                      "qed roce ll2 mac filter set: failed to add mac filter\n");
+
+       return rc;
+}
+
+static int qed_roce_ll2_start(struct qed_dev *cdev,
+                             struct qed_roce_ll2_params *params)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_roce_ll2_info *roce_ll2;
+       struct qed_ll2_conn ll2_params;
+       int rc;
+
+       if (!params) {
+               DP_ERR(cdev, "qed roce ll2 start: failed due to NULL params\n");
+               return -EINVAL;
+       }
+       if (!params->cbs.tx_cb || !params->cbs.rx_cb) {
+               DP_ERR(cdev,
+                      "qed roce ll2 start: failed due to NULL tx/rx. tx_cb=%p, rx_cb=%p\n",
+                      params->cbs.tx_cb, params->cbs.rx_cb);
+               return -EINVAL;
+       }
+       if (!is_valid_ether_addr(params->mac_address)) {
+               DP_ERR(cdev,
+                      "qed roce ll2 start: failed due to invalid Ethernet address %pM\n",
+                      params->mac_address);
+               return -EINVAL;
+       }
+
+       /* Initialize */
+       roce_ll2 = kzalloc(sizeof(*roce_ll2), GFP_ATOMIC);
+       if (!roce_ll2) {
+               DP_ERR(cdev, "qed roce ll2 start: failed memory allocation\n");
+               return -ENOMEM;
+       }
+       roce_ll2->handle = QED_LL2_UNUSED_HANDLE;
+       roce_ll2->cbs = params->cbs;
+       roce_ll2->cb_cookie = params->cb_cookie;
+       mutex_init(&roce_ll2->lock);
+
+       memset(&ll2_params, 0, sizeof(ll2_params));
+       ll2_params.conn_type = QED_LL2_TYPE_ROCE;
+       ll2_params.mtu = params->mtu;
+       ll2_params.rx_drop_ttl0_flg = true;
+       ll2_params.rx_vlan_removal_en = false;
+       ll2_params.tx_dest = CORE_TX_DEST_NW;
+       ll2_params.ai_err_packet_too_big = LL2_DROP_PACKET;
+       ll2_params.ai_err_no_buf = LL2_DROP_PACKET;
+       ll2_params.gsi_enable = true;
+
+       rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_params,
+                                       params->max_rx_buffers,
+                                       params->max_tx_buffers,
+                                       &roce_ll2->handle);
+       if (rc) {
+               DP_ERR(cdev,
+                      "qed roce ll2 start: failed to acquire LL2 connection (rc=%d)\n",
+                      rc);
+               goto err;
+       }
+
+       rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
+                                         roce_ll2->handle);
+       if (rc) {
+               DP_ERR(cdev,
+                      "qed roce ll2 start: failed to establish LL2 connection (rc=%d)\n",
+                      rc);
+               goto err1;
+       }
+
+       hwfn->ll2 = roce_ll2;
+
+       rc = qed_roce_ll2_set_mac_filter(cdev, NULL, params->mac_address);
+       if (rc) {
+               hwfn->ll2 = NULL;
+               goto err2;
+       }
+       ether_addr_copy(roce_ll2->mac_address, params->mac_address);
+
+       return 0;
+
+err2:
+       qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
+err1:
+       qed_ll2_release_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
+err:
+       kfree(roce_ll2);
+       return rc;
+}
+
+static int qed_roce_ll2_stop(struct qed_dev *cdev)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
+       int rc;
+
+       if (!roce_ll2 || roce_ll2->handle == QED_LL2_UNUSED_HANDLE) {
+               DP_ERR(cdev, "qed roce ll2 stop: cannot stop an unused LL2\n");
+               return -EINVAL;
+       }
+
+       /* remove LL2 MAC address filter */
+       rc = qed_roce_ll2_set_mac_filter(cdev, roce_ll2->mac_address, NULL);
+       eth_zero_addr(roce_ll2->mac_address);
+
+       rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
+                                         roce_ll2->handle);
+       if (rc)
+               DP_ERR(cdev,
+                      "qed roce ll2 stop: failed to terminate LL2 connection (rc=%d)\n",
+                      rc);
+
+       qed_ll2_release_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
+
+       roce_ll2->handle = QED_LL2_UNUSED_HANDLE;
+
+       kfree(roce_ll2);
+
+       return rc;
+}
+
+static int qed_roce_ll2_tx(struct qed_dev *cdev,
+                          struct qed_roce_ll2_packet *pkt,
+                          struct qed_roce_ll2_tx_params *params)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
+       enum qed_ll2_roce_flavor_type qed_roce_flavor;
+       u8 flags = 0;
+       int rc;
+       int i;
+
+       if (!pkt || !params) {
+               DP_ERR(cdev,
+                      "roce ll2 tx: failed tx because one of the following is NULL - drv=%p, pkt=%p, params=%p\n",
+                      cdev, pkt, params);
+               return -EINVAL;
+       }
+
+       qed_roce_flavor = (pkt->roce_mode == ROCE_V1) ? QED_LL2_ROCE
+                                                     : QED_LL2_RROCE;
+
+       if (pkt->roce_mode == ROCE_V2_IPV4)
+               flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);
+
+       /* Tx header */
+       rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), roce_ll2->handle,
+                                      1 + pkt->n_seg, 0, flags, 0,
+                                      QED_LL2_TX_DEST_NW,
+                                      qed_roce_flavor, pkt->header.baddr,
+                                      pkt->header.len, pkt, 1);
+       if (rc) {
+               DP_ERR(cdev, "roce ll2 tx: header failed (rc=%d)\n", rc);
+               return QED_ROCE_TX_HEAD_FAILURE;
+       }
+
+       /* Tx payload */
+       for (i = 0; i < pkt->n_seg; i++) {
+               rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
+                                                      roce_ll2->handle,
+                                                      pkt->payload[i].baddr,
+                                                      pkt->payload[i].len);
+               if (rc) {
+                       /* Not much to do on failure here: a partial packet
+                        * has already been posted, so the memory cannot be
+                        * freed until the completion arrives.
+                        */
+                       DP_ERR(cdev,
+                              "roce ll2 tx: payload failed (rc=%d)\n", rc);
+                       return QED_ROCE_TX_FRAG_FAILURE;
+               }
+       }
+
+       return 0;
+}
+
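
qed_roce_ll2_tx() posts the header BD first, declaring the total BD count up front, then appends one fragment per payload segment; on a mid-packet failure the buffers must stay posted until the completion fires. A standalone model of the header-then-fragments pattern, with all names invented for the example:

#include <stdint.h>
#include <stdio.h>

struct ex_seg {
	uint64_t baddr;
	uint32_t len;
};

/* Post the header BD and declare how many BDs the packet will use. */
static int ex_prepare_tx(int n_bds, const struct ex_seg *hdr)
{
	printf("header bd: addr=0x%llx len=%u (of %d bds)\n",
	       (unsigned long long)hdr->baddr, hdr->len, n_bds);
	return 0;
}

/* Append one payload BD to the packet opened by ex_prepare_tx(). */
static int ex_add_fragment(const struct ex_seg *seg)
{
	printf("payload bd: addr=0x%llx len=%u\n",
	       (unsigned long long)seg->baddr, seg->len);
	return 0;
}

int main(void)
{
	struct ex_seg header = { 0x1000, 64 };
	struct ex_seg payload[2] = { { 0x2000, 1024 }, { 0x3000, 512 } };
	int i;

	if (ex_prepare_tx(1 + 2, &header))
		return 1;
	for (i = 0; i < 2; i++)
		if (ex_add_fragment(&payload[i]))
			return 1;	/* partial post: can't free buffers yet */
	return 0;
}
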
+static int qed_roce_ll2_post_rx_buffer(struct qed_dev *cdev,
+                                      struct qed_roce_ll2_buffer *buf,
+                                      u64 cookie, u8 notify_fw)
+{
+       return qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
+                                     QED_LEADING_HWFN(cdev)->ll2->handle,
+                                     buf->baddr, buf->len,
+                                     (void *)(uintptr_t)cookie, notify_fw);
+}
+
+static int qed_roce_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
+{
+       struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
+
+       return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
+                                roce_ll2->handle, stats);
+}
+
+static const struct qed_rdma_ops qed_rdma_ops_pass = {
+       .common = &qed_common_ops_pass,
+       .fill_dev_info = &qed_fill_rdma_dev_info,
+       .rdma_get_rdma_ctx = &qed_rdma_get_rdma_ctx,
+       .rdma_init = &qed_rdma_init,
+       .rdma_add_user = &qed_rdma_add_user,
+       .rdma_remove_user = &qed_rdma_remove_user,
+       .rdma_stop = &qed_rdma_stop,
+       .rdma_query_port = &qed_rdma_query_port,
+       .rdma_query_device = &qed_rdma_query_device,
+       .rdma_get_start_sb = &qed_rdma_get_sb_start,
+       .rdma_get_rdma_int = &qed_rdma_get_int,
+       .rdma_set_rdma_int = &qed_rdma_set_int,
+       .rdma_get_min_cnq_msix = &qed_rdma_get_min_cnq_msix,
+       .rdma_cnq_prod_update = &qed_rdma_cnq_prod_update,
+       .rdma_alloc_pd = &qed_rdma_alloc_pd,
+       .rdma_dealloc_pd = &qed_rdma_free_pd,
+       .rdma_create_cq = &qed_rdma_create_cq,
+       .rdma_destroy_cq = &qed_rdma_destroy_cq,
+       .rdma_create_qp = &qed_rdma_create_qp,
+       .rdma_modify_qp = &qed_rdma_modify_qp,
+       .rdma_query_qp = &qed_rdma_query_qp,
+       .rdma_destroy_qp = &qed_rdma_destroy_qp,
+       .rdma_alloc_tid = &qed_rdma_alloc_tid,
+       .rdma_free_tid = &qed_rdma_free_tid,
+       .rdma_register_tid = &qed_rdma_register_tid,
+       .rdma_deregister_tid = &qed_rdma_deregister_tid,
+       .roce_ll2_start = &qed_roce_ll2_start,
+       .roce_ll2_stop = &qed_roce_ll2_stop,
+       .roce_ll2_tx = &qed_roce_ll2_tx,
+       .roce_ll2_post_rx_buffer = &qed_roce_ll2_post_rx_buffer,
+       .roce_ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter,
+       .roce_ll2_stats = &qed_roce_ll2_stats,
+};
+
+const struct qed_rdma_ops *qed_get_rdma_ops(void)
+{
+       return &qed_rdma_ops_pass;
+}
+EXPORT_SYMBOL(qed_get_rdma_ops);
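
This ops table is the sole interface the qedr RDMA driver binds against, retrieved through the one exported accessor. A standalone sketch of the exported-ops-table pattern follows; the names are invented, not the actual qed/qedr symbols:

#include <stdio.h>

struct rdma_ops_example {
	int (*start)(void *ctx);
	void (*stop)(void *ctx);
};

static int ex_start(void *ctx) { printf("start(%p)\n", ctx); return 0; }
static void ex_stop(void *ctx) { printf("stop(%p)\n", ctx); }

/* The provider keeps the table static and exports only the accessor. */
static const struct rdma_ops_example ops_table = {
	.start = ex_start,
	.stop = ex_stop,
};

const struct rdma_ops_example *example_get_ops(void)
{
	return &ops_table;	/* single exported entry point */
}

int main(void)
{
	const struct rdma_ops_example *ops = example_get_ops();
	int dummy;

	ops->start(&dummy);
	ops->stop(&dummy);
	return 0;
}
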
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h
new file mode 100644 (file)
index 0000000..3ccc08a
--- /dev/null
@@ -0,0 +1,220 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _QED_ROCE_H
+#define _QED_ROCE_H
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_roce_if.h>
+#include "qed.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_ll2.h"
+
+#define QED_RDMA_MAX_FMR                    (RDMA_MAX_TIDS)
+#define QED_RDMA_MAX_P_KEY                  (1)
+#define QED_RDMA_MAX_WQE                    (0x7FFF)
+#define QED_RDMA_MAX_SRQ_WQE_ELEM           (0x7FFF)
+#define QED_RDMA_PAGE_SIZE_CAPS             (0xFFFFF000)
+#define QED_RDMA_ACK_DELAY                  (15)
+#define QED_RDMA_MAX_MR_SIZE                (0x10000000000ULL)
+#define QED_RDMA_MAX_CQS                    (RDMA_MAX_CQS)
+#define QED_RDMA_MAX_MRS                    (RDMA_MAX_TIDS)
+/* Add 1 for header element */
+#define QED_RDMA_MAX_SRQ_ELEM_PER_WQE      (RDMA_MAX_SGE_PER_RQ_WQE + 1)
+#define QED_RDMA_MAX_SGE_PER_SRQ_WQE        (RDMA_MAX_SGE_PER_RQ_WQE)
+#define QED_RDMA_SRQ_WQE_ELEM_SIZE          (16)
+#define QED_RDMA_MAX_SRQS                   (32 * 1024)
+
+#define QED_RDMA_MAX_CQE_32_BIT             (0x7FFFFFFF - 1)
+#define QED_RDMA_MAX_CQE_16_BIT             (0x7FFF - 1)
+
+enum qed_rdma_toggle_bit {
+       QED_RDMA_TOGGLE_BIT_CLEAR = 0,
+       QED_RDMA_TOGGLE_BIT_SET = 1
+};
+
+struct qed_bmap {
+       unsigned long *bitmap;
+       u32 max_count;
+};
+
+struct qed_rdma_info {
+       /* spin lock to protect bitmaps */
+       spinlock_t lock;
+
+       struct qed_bmap cq_map;
+       struct qed_bmap pd_map;
+       struct qed_bmap tid_map;
+       struct qed_bmap qp_map;
+       struct qed_bmap srq_map;
+       struct qed_bmap cid_map;
+       struct qed_bmap real_cid_map;
+       struct qed_bmap dpi_map;
+       struct qed_bmap toggle_bits;
+       struct qed_rdma_events events;
+       struct qed_rdma_device *dev;
+       struct qed_rdma_port *port;
+       u32 last_tid;
+       u8 num_cnqs;
+       u32 num_qps;
+       u32 num_mrs;
+       u16 queue_zone_base;
+       u16 max_queue_zones;
+       enum protocol_type proto;
+};
+
+struct qed_rdma_qp {
+       struct regpair qp_handle;
+       struct regpair qp_handle_async;
+       u32 qpid;
+       u16 icid;
+       enum qed_roce_qp_state cur_state;
+       bool use_srq;
+       bool signal_all;
+       bool fmr_and_reserved_lkey;
+
+       bool incoming_rdma_read_en;
+       bool incoming_rdma_write_en;
+       bool incoming_atomic_en;
+       bool e2e_flow_control_en;
+
+       u16 pd;
+       u16 pkey;
+       u32 dest_qp;
+       u16 mtu;
+       u16 srq_id;
+       u8 traffic_class_tos;
+       u8 hop_limit_ttl;
+       u16 dpi;
+       u32 flow_label;
+       bool lb_indication;
+       u16 vlan_id;
+       u32 ack_timeout;
+       u8 retry_cnt;
+       u8 rnr_retry_cnt;
+       u8 min_rnr_nak_timer;
+       bool sqd_async;
+       union qed_gid sgid;
+       union qed_gid dgid;
+       enum roce_mode roce_mode;
+       u16 udp_src_port;
+       u8 stats_queue;
+
+       /* requester */
+       u8 max_rd_atomic_req;
+       u32 sq_psn;
+       u16 sq_cq_id;
+       u16 sq_num_pages;
+       dma_addr_t sq_pbl_ptr;
+       void *orq;
+       dma_addr_t orq_phys_addr;
+       u8 orq_num_pages;
+       bool req_offloaded;
+
+       /* responder */
+       u8 max_rd_atomic_resp;
+       u32 rq_psn;
+       u16 rq_cq_id;
+       u16 rq_num_pages;
+       dma_addr_t rq_pbl_ptr;
+       void *irq;
+       dma_addr_t irq_phys_addr;
+       u8 irq_num_pages;
+       bool resp_offloaded;
+       u32 cq_prod;
+
+       u8 remote_mac_addr[6];
+       u8 local_mac_addr[6];
+
+       void *shared_queue;
+       dma_addr_t shared_queue_phys_addr;
+};
+
+#if IS_ENABLED(CONFIG_QED_RDMA)
+void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+void qed_roce_async_event(struct qed_hwfn *p_hwfn,
+                         u8 fw_event_code, union rdma_eqe_data *rdma_data);
+void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+                                    u8 connection_handle,
+                                    void *cookie,
+                                    dma_addr_t first_frag_addr,
+                                    bool b_last_fragment, bool b_last_packet);
+void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+                                   u8 connection_handle,
+                                   void *cookie,
+                                   dma_addr_t first_frag_addr,
+                                   bool b_last_fragment, bool b_last_packet);
+void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
+                                    u8 connection_handle,
+                                    void *cookie,
+                                    dma_addr_t rx_buf_addr,
+                                    u16 data_length,
+                                    u8 data_length_error,
+                                    u16 parse_flags,
+                                    u16 vlan,
+                                    u32 src_mac_addr_hi,
+                                    u16 src_mac_addr_lo, bool b_last_packet);
+#else
+static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
+static inline void qed_roce_async_event(struct qed_hwfn *p_hwfn,
+                                       u8 fw_event_code,
+                                       union rdma_eqe_data *rdma_data) {}
+static inline void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+                                                  u8 connection_handle,
+                                                  void *cookie,
+                                                  dma_addr_t first_frag_addr,
+                                                  bool b_last_fragment,
+                                                  bool b_last_packet) {}
+static inline void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+                                                 u8 connection_handle,
+                                                 void *cookie,
+                                                 dma_addr_t first_frag_addr,
+                                                 bool b_last_fragment,
+                                                 bool b_last_packet) {}
+static inline void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
+                                                  u8 connection_handle,
+                                                  void *cookie,
+                                                  dma_addr_t rx_buf_addr,
+                                                  u16 data_length,
+                                                  u8 data_length_error,
+                                                  u16 parse_flags,
+                                                  u16 vlan,
+                                                  u32 src_mac_addr_hi,
+                                                  u16 src_mac_addr_lo,
+                                                  bool b_last_packet) {}
+#endif
+#endif
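
struct qed_bmap above backs all of the id maps in qed_rdma_info (cq, pd, tid, cid, dpi, ...) as simple bitmap allocators; the qed_bmap_* helpers themselves are defined elsewhere in qed. A userspace model of the idea, assuming nothing beyond what the struct shows and using invented names:

#include <stdint.h>
#include <stdio.h>

#define EX_MAX_IDS 64

struct ex_bmap {
	uint64_t bits;		/* stands in for 'unsigned long *bitmap' */
	uint32_t max_count;
};

/* Allocate the lowest free id, or -1 when the map is exhausted. */
static int ex_bmap_alloc_id(struct ex_bmap *b)
{
	for (uint32_t id = 0; id < b->max_count; id++) {
		if (!(b->bits & (1ULL << id))) {
			b->bits |= 1ULL << id;
			return (int)id;
		}
	}
	return -1;
}

static void ex_bmap_release_id(struct ex_bmap *b, uint32_t id)
{
	b->bits &= ~(1ULL << id);
}

int main(void)
{
	struct ex_bmap map = { .bits = 0, .max_count = EX_MAX_IDS };
	int a = ex_bmap_alloc_id(&map);	/* 0 */
	int b = ex_bmap_alloc_id(&map);	/* 1 */

	ex_bmap_release_id(&map, (uint32_t)a);
	printf("a=%d b=%d next=%d\n", a, b, ex_bmap_alloc_id(&map)); /* next=0 */
	return 0;
}
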
index a342bfe4280d8e176fa74dd5f12a71330114c246..1bafc05db2b89fd6ccf58cafb6c394b6c6b00d11 100644 (file)
@@ -1,7 +1,41 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2016  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/crc32.h>
 #include "qed.h"
 #include "qed_dev_api.h"
 #include "qed_mcp.h"
 #include "qed_sp.h"
+#include "qed_selftest.h"
 
 int qed_selftest_memory(struct qed_dev *cdev)
 {
@@ -74,3 +108,103 @@ int qed_selftest_clock(struct qed_dev *cdev)
 
        return rc;
 }
+
+int qed_selftest_nvram(struct qed_dev *cdev)
+{
+       struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+       u32 num_images, i, j, nvm_crc, calc_crc;
+       struct bist_nvm_image_att image_att;
+       u8 *buf = NULL;
+       __be32 val;
+       int rc;
+
+       if (!p_ptt) {
+               DP_ERR(p_hwfn, "failed to acquire ptt\n");
+               return -EBUSY;
+       }
+
+       /* Acquire from MFW the amount of available images */
+       rc = qed_mcp_bist_nvm_test_get_num_images(p_hwfn, p_ptt, &num_images);
+       if (rc || !num_images) {
+               DP_ERR(p_hwfn, "Failed getting number of images\n");
+               rc = -EINVAL;
+               goto err0;
+       }
+
+       /* Iterate over images and validate CRC */
+       for (i = 0; i < num_images; i++) {
+               /* This mailbox returns the image attributes that are
+                * required in order to read the image.
+                */
+               rc = qed_mcp_bist_nvm_test_get_image_att(p_hwfn, p_ptt,
+                                                        &image_att, i);
+               if (rc) {
+                       DP_ERR(p_hwfn,
+                              "Failed getting image index %d attributes\n",
+                              i);
+                       goto err0;
+               }
+
+               /* Once an MFW crash dump has been collected, the image's
+                * CRC is no longer valid.
+                */
+               if (image_att.image_type == NVM_TYPE_MDUMP)
+                       continue;
+
+               DP_VERBOSE(p_hwfn, QED_MSG_SP, "image index %d, size %x\n",
+                          i, image_att.len);
+
+               /* Allocate a buffer for holding the nvram image */
+               buf = kzalloc(image_att.len, GFP_KERNEL);
+               if (!buf) {
+                       rc = -ENOMEM;
+                       goto err0;
+               }
+
+               /* Read image into buffer */
+               rc = qed_mcp_nvm_read(p_hwfn->cdev, image_att.nvm_start_addr,
+                                     buf, image_att.len);
+               if (rc) {
+                       DP_ERR(p_hwfn,
+                              "Failed reading image index %d from nvm.\n", i);
+                       goto err1;
+               }
+
+               /* Convert the buffer into big-endian format (excluding the
+                * closing 4 bytes of CRC).
+                */
+               for (j = 0; j < image_att.len - 4; j += 4) {
+                       val = cpu_to_be32(*(u32 *)&buf[j]);
+                       *(u32 *)&buf[j] = (__force u32)val;
+               }
+
+               /* Calc CRC for the "actual" image buffer, i.e. not including
+                * the last 4 CRC bytes.
+                */
+               nvm_crc = *(u32 *)(buf + image_att.len - 4);
+               calc_crc = crc32(0xffffffff, buf, image_att.len - 4);
+               calc_crc = (__force u32)~cpu_to_be32(calc_crc);
+               DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                          "nvm crc 0x%x, calc_crc 0x%x\n", nvm_crc, calc_crc);
+
+               if (calc_crc != nvm_crc) {
+                       rc = -EINVAL;
+                       goto err1;
+               }
+
+               /* Done with this image; free the buffer and clear the
+                * pointer to prevent a double free on a later failure.
+                */
+               kfree(buf);
+               buf = NULL;
+       }
+
+       qed_ptt_release(p_hwfn, p_ptt);
+       return 0;
+
+err1:
+       kfree(buf);
+err0:
+       qed_ptt_release(p_hwfn, p_ptt);
+       return rc;
+}
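
A standalone sketch of the CRC step in qed_selftest_nvram(): the CRC runs over everything but the trailing 4 stored-CRC bytes, seeded with 0xffffffff and using the reflected polynomial 0xedb88320 as in the kernel's crc32(); the driver additionally complements and byte-swaps the result before comparing it against the stored value. Minimal bitwise implementation, for illustration only:

#include <stdint.h>
#include <stdio.h>

static uint32_t crc32_le(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ (0xedb88320u & -(crc & 1));
	}
	return crc;
}

int main(void)
{
	/* 4 data bytes followed by 4 (dummy) stored-CRC bytes. */
	uint8_t image[8] = { 'a', 'b', 'c', 'd', 0, 0, 0, 0 };
	uint32_t crc = crc32_le(0xffffffff, image, sizeof(image) - 4);

	printf("calc crc = 0x%08x\n", crc);
	return 0;
}
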
index 50eb0b49950f69471a388b6568197653d1d73a40..739ddb73096794e5ede557723aefe19f72646491 100644 (file)
@@ -37,4 +37,14 @@ int qed_selftest_register(struct qed_dev *cdev);
  * @return int
  */
 int qed_selftest_clock(struct qed_dev *cdev);
+
+/**
+ * @brief qed_selftest_nvram - Perform nvram test
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_selftest_nvram(struct qed_dev *cdev);
+
 #endif
index a548504c3420d86bcc2d06945b6b9f3f90d6578b..583c8d38c8d79f0a82dfdc57aae7bebfd0162d16 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef _QED_SP_H
@@ -60,7 +84,12 @@ union ramrod_data {
        struct tx_queue_stop_ramrod_data tx_queue_stop;
        struct vport_start_ramrod_data vport_start;
        struct vport_stop_ramrod_data vport_stop;
+       struct rx_update_gft_filter_data rx_update_gft;
        struct vport_update_ramrod_data vport_update;
+       struct core_rx_start_ramrod_data core_rx_queue_start;
+       struct core_rx_stop_ramrod_data core_rx_queue_stop;
+       struct core_tx_start_ramrod_data core_tx_queue_start;
+       struct core_tx_stop_ramrod_data core_tx_queue_stop;
        struct vport_filter_update_ramrod_data vport_filter_update;
 
        struct rdma_init_func_ramrod_data rdma_init_func;
@@ -76,11 +105,15 @@ union ramrod_data {
        struct roce_destroy_qp_resp_ramrod_data roce_destroy_qp_resp;
        struct roce_destroy_qp_req_ramrod_data roce_destroy_qp_req;
        struct rdma_create_cq_ramrod_data rdma_create_cq;
-       struct rdma_resize_cq_ramrod_data rdma_resize_cq;
        struct rdma_destroy_cq_ramrod_data rdma_destroy_cq;
        struct rdma_srq_create_ramrod_data rdma_create_srq;
        struct rdma_srq_destroy_ramrod_data rdma_destroy_srq;
        struct rdma_srq_modify_ramrod_data rdma_modify_srq;
+       struct roce_init_func_ramrod_data roce_init_func;
+       struct fcoe_init_ramrod_params fcoe_init;
+       struct fcoe_conn_offload_ramrod_params fcoe_conn_ofld;
+       struct fcoe_conn_terminate_ramrod_params fcoe_conn_terminate;
+       struct fcoe_stat_ramrod_params fcoe_stat;
 
        struct iscsi_slow_path_hdr iscsi_empty;
        struct iscsi_init_ramrod_params iscsi_init;
@@ -106,8 +139,8 @@ union qed_spq_req_comp {
 };
 
 struct qed_spq_comp_done {
-       u64     done;
-       u8      fw_return_code;
+       unsigned int    done;
+       u8              fw_return_code;
 };
 
 struct qed_spq_entry {
index a52f3fc051f510a9e8c3a1498041b3f4fb5241ac..7e25b85561ac9669210cf2ab470e6c849bcd5ed8 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #include <linux/types.h>
@@ -25,9 +49,7 @@
 
 int qed_sp_init_request(struct qed_hwfn *p_hwfn,
                        struct qed_spq_entry **pp_ent,
-                       u8 cmd,
-                       u8 protocol,
-                       struct qed_sp_init_data *p_data)
+                       u8 cmd, u8 protocol, struct qed_sp_init_data *p_data)
 {
        u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
        struct qed_spq_entry *p_ent = NULL;
@@ -38,7 +60,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
 
        rc = qed_spq_get_entry(p_hwfn, pp_ent);
 
-       if (rc != 0)
+       if (rc)
                return rc;
 
        p_ent = *pp_ent;
@@ -321,8 +343,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
 
        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 COMMON_RAMROD_PF_START,
-                                PROTOCOLID_COMMON,
-                                &init_data);
+                                PROTOCOLID_COMMON, &init_data);
        if (rc)
                return rc;
 
@@ -350,14 +371,13 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
 
        /* Place EQ address in RAMROD */
        DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
-                      p_hwfn->p_eq->chain.pbl.p_phys_table);
+                      p_hwfn->p_eq->chain.pbl_sp.p_phys_table);
        page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain);
        p_ramrod->event_ring_num_pages = page_cnt;
        DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
-                      p_hwfn->p_consq->chain.pbl.p_phys_table);
+                      p_hwfn->p_consq->chain.pbl_sp.p_phys_table);
 
-       qed_tunn_set_pf_start_params(p_hwfn, p_tunn,
-                                    &p_ramrod->tunnel_config);
+       qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config);
 
        if (IS_MF_SI(p_hwfn))
                p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;
@@ -366,6 +386,9 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
        case QED_PCI_ETH:
                p_ramrod->personality = PERSONALITY_ETH;
                break;
+       case QED_PCI_FCOE:
+               p_ramrod->personality = PERSONALITY_FCOE;
+               break;
        case QED_PCI_ISCSI:
                p_ramrod->personality = PERSONALITY_ISCSI;
                break;
@@ -389,8 +412,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
 
        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                   "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
-                  sb, sb_index,
-                  p_ramrod->outer_tag);
+                  sb, sb_index, p_ramrod->outer_tag);
 
        rc = qed_spq_post(p_hwfn, p_ent, NULL);
 
index d73456eab1d7e660a5a6ce404c3ceb00465fa62f..f6423a139ca074a60909db7d369c42eb34394c27 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #include <linux/types.h>
 #include "qed_hsi.h"
 #include "qed_hw.h"
 #include "qed_int.h"
+#include "qed_iscsi.h"
 #include "qed_mcp.h"
+#include "qed_ooo.h"
 #include "qed_reg_addr.h"
 #include "qed_sp.h"
 #include "qed_sriov.h"
+#include "qed_roce.h"
 
 /***************************************************************************
 * Structures & Definitions
 ***************************************************************************/
 
 #define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)
-#define SPQ_BLOCK_SLEEP_LENGTH          (1000)
+
+#define SPQ_BLOCK_DELAY_MAX_ITER        (10)
+#define SPQ_BLOCK_DELAY_US              (10)
+#define SPQ_BLOCK_SLEEP_MAX_ITER        (1000)
+#define SPQ_BLOCK_SLEEP_MS              (5)
 
 /***************************************************************************
 * Blocking Imp. (BLOCK/EBLOCK mode)
 ***************************************************************************/
 static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
                                void *cookie,
-                               union event_ring_data *data,
-                               u8 fw_return_code)
+                               union event_ring_data *data, u8 fw_return_code)
 {
        struct qed_spq_comp_done *comp_done;
 
        comp_done = (struct qed_spq_comp_done *)cookie;
 
-       comp_done->done                 = 0x1;
-       comp_done->fw_return_code       = fw_return_code;
+       comp_done->fw_return_code = fw_return_code;
 
-       /* make update visible to waiting thread */
-       smp_wmb();
+       /* Make sure completion done is visible on waiting thread */
+       smp_store_release(&comp_done->done, 0x1);
 }
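
qed_spq_blocking_cb() publishes fw_return_code before releasing the done flag, and __qed_spq_block() pairs that with READ_ONCE() plus a dependency barrier. A userspace model of the same publish/observe ordering using C11 acquire/release atomics (a stand-in for the kernel primitives, not the primitives themselves):

#include <stdatomic.h>
#include <stdio.h>

struct comp_done_example {
	atomic_uint done;
	unsigned char fw_return_code;
};

static void writer(struct comp_done_example *c, unsigned char code)
{
	c->fw_return_code = code;	/* payload first */
	atomic_store_explicit(&c->done, 1, memory_order_release);
}

static int reader(struct comp_done_example *c, unsigned char *code)
{
	if (atomic_load_explicit(&c->done, memory_order_acquire) == 1) {
		*code = c->fw_return_code;	/* ordered after the flag */
		return 0;
	}
	return -1;	/* not yet complete */
}

int main(void)
{
	struct comp_done_example c = { .done = 0 };
	unsigned char code;

	writer(&c, 7);
	if (!reader(&c, &code))
		printf("fw_return_code = %u\n", code);
	return 0;
}
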
 
-static int qed_spq_block(struct qed_hwfn *p_hwfn,
-                        struct qed_spq_entry *p_ent,
-                        u8 *p_fw_ret)
+static int __qed_spq_block(struct qed_hwfn *p_hwfn,
+                          struct qed_spq_entry *p_ent,
+                          u8 *p_fw_ret, bool sleep_between_iter)
 {
-       int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
        struct qed_spq_comp_done *comp_done;
-       int rc;
+       u32 iter_cnt;
 
        comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
-       while (sleep_count) {
-               /* validate we receive completion update */
-               smp_rmb();
-               if (comp_done->done == 1) {
+       iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
+                                     : SPQ_BLOCK_DELAY_MAX_ITER;
+
+       while (iter_cnt--) {
+               /* Validate we receive completion update */
+               if (READ_ONCE(comp_done->done) == 1) {
+                       /* Read updated FW return value */
+                       smp_read_barrier_depends();
                        if (p_fw_ret)
                                *p_fw_ret = comp_done->fw_return_code;
                        return 0;
                }
-               usleep_range(5000, 10000);
-               sleep_count--;
+
+               if (sleep_between_iter)
+                       msleep(SPQ_BLOCK_SLEEP_MS);
+               else
+                       udelay(SPQ_BLOCK_DELAY_US);
+       }
+
+       return -EBUSY;
+}
+
+static int qed_spq_block(struct qed_hwfn *p_hwfn,
+                        struct qed_spq_entry *p_ent,
+                        u8 *p_fw_ret, bool skip_quick_poll)
+{
+       struct qed_spq_comp_done *comp_done;
+       struct qed_ptt *p_ptt;
+       int rc;
+
+       /* A relatively short polling period w/o sleeping, to allow the FW to
+        * complete the ramrod and thus possibly to avoid the following sleeps.
+        */
+       if (!skip_quick_poll) {
+               rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, false);
+               if (!rc)
+                       return 0;
+       }
+
+       /* Move to polling with a sleeping period between iterations */
+       rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
+       if (!rc)
+               return 0;
+
+       p_ptt = qed_ptt_acquire(p_hwfn);
+       if (!p_ptt) {
+               DP_NOTICE(p_hwfn, "ptt, failed to acquire\n");
+               return -EAGAIN;
        }
 
        DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
-       rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
-       if (rc != 0)
+       rc = qed_mcp_drain(p_hwfn, p_ptt);
+       if (rc) {
                DP_NOTICE(p_hwfn, "MCP drain failed\n");
+               goto err;
+       }
 
        /* Retry after drain */
-       sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
-       while (sleep_count) {
-               /* validate we receive completion update */
-               smp_rmb();
-               if (comp_done->done == 1) {
-                       if (p_fw_ret)
-                               *p_fw_ret = comp_done->fw_return_code;
-                       return 0;
-               }
-               usleep_range(5000, 10000);
-               sleep_count--;
-       }
+       rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
+       if (!rc)
+               goto out;
 
-       if (comp_done->done == 1) {
+       comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
+       if (comp_done->done == 1)
                if (p_fw_ret)
                        *p_fw_ret = comp_done->fw_return_code;
-               return 0;
-       }
+out:
+       qed_ptt_release(p_hwfn, p_ptt);
+       return 0;
 
-       DP_NOTICE(p_hwfn, "Ramrod is stuck, MCP drain failed\n");
+err:
+       qed_ptt_release(p_hwfn, p_ptt);
+       DP_NOTICE(p_hwfn,
+                 "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
+                 le32_to_cpu(p_ent->elem.hdr.cid),
+                 p_ent->elem.hdr.cmd_id,
+                 p_ent->elem.hdr.protocol_id,
+                 le16_to_cpu(p_ent->elem.hdr.echo));
 
        return -EBUSY;
 }
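
The reworked qed_spq_block() waits in two phases: a short busy-poll with udelay() to catch fast completions cheaply, then a sleeping poll with msleep(), and only then the MCP drain. A userspace model of the two-phase wait, with thresholds mirroring the SPQ_BLOCK_* constants above and a plain variable standing in for the firmware-written completion flag:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define DELAY_MAX_ITER  10	/* quick phase: 10 x 10us */
#define SLEEP_MAX_ITER  1000	/* slow phase: 1000 x 5ms */

static bool poll_done(volatile bool *done, int iters, long ns)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = ns };

	while (iters--) {
		if (*done)
			return true;
		nanosleep(&ts, NULL);	/* stands in for udelay()/msleep() */
	}
	return false;
}

int main(void)
{
	volatile bool done = true;	/* pretend the FW already completed */

	if (poll_done(&done, DELAY_MAX_ITER, 10 * 1000L) ||
	    poll_done(&done, SLEEP_MAX_ITER, 5 * 1000 * 1000L))
		printf("completed\n");
	else
		printf("stuck, would request MCP drain\n");
	return 0;
}
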
@@ -109,9 +177,8 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
 /***************************************************************************
 * SPQ entries inner API
 ***************************************************************************/
-static int
-qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
-                  struct qed_spq_entry *p_ent)
+static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
+                             struct qed_spq_entry *p_ent)
 {
        p_ent->flags = 0;
 
@@ -148,11 +215,10 @@ qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
 static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
                                  struct qed_spq *p_spq)
 {
-       u16                             pq;
-       struct qed_cxt_info             cxt_info;
-       struct core_conn_context        *p_cxt;
-       union qed_qm_pq_params          pq_params;
-       int                             rc;
+       struct core_conn_context *p_cxt;
+       struct qed_cxt_info cxt_info;
+       u16 physical_q;
+       int rc;
 
        cxt_info.iid = p_spq->cid;
 
@@ -174,10 +240,8 @@ static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
                  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
 
        /* QM physical queue */
-       memset(&pq_params, 0, sizeof(pq_params));
-       pq_params.core.tc = LB_TC;
-       pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
-       p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(pq);
+       physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
+       p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(physical_q);
 
        p_cxt->xstorm_st_context.spq_base_lo =
                DMA_LO_LE(p_spq->chain.p_phys_addr);
@@ -189,8 +253,7 @@ static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
 }
 
 static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
-                          struct qed_spq *p_spq,
-                          struct qed_spq_entry *p_ent)
+                          struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
 {
        struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
        u16 echo = qed_chain_get_prod_idx(p_chain);
@@ -240,10 +303,30 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
                           struct event_ring_entry *p_eqe)
 {
        switch (p_eqe->protocol_id) {
+#if IS_ENABLED(CONFIG_QED_RDMA)
+       case PROTOCOLID_ROCE:
+               qed_roce_async_event(p_hwfn, p_eqe->opcode,
+                                    &p_eqe->data.rdma_data);
+               return 0;
+#endif
        case PROTOCOLID_COMMON:
                return qed_sriov_eqe_event(p_hwfn,
                                           p_eqe->opcode,
                                           p_eqe->echo, &p_eqe->data);
+       case PROTOCOLID_ISCSI:
+               if (!IS_ENABLED(CONFIG_QED_ISCSI))
+                       return -EINVAL;
+
+               if (p_hwfn->p_iscsi_info->event_cb) {
+                       struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
+
+                       return p_iscsi->event_cb(p_iscsi->event_context,
+                                                p_eqe->opcode, &p_eqe->data);
+               } else {
+                       DP_NOTICE(p_hwfn,
+                                 "iSCSI async completion is not set\n");
+                       return -EINVAL;
+               }
        default:
                DP_NOTICE(p_hwfn,
                          "Unknown Async completion for protocol: %d\n",
@@ -255,8 +338,7 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
 /***************************************************************************
 * EQ API
 ***************************************************************************/
-void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
-                       u16 prod)
+void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
 {
        u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
                   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);
@@ -267,9 +349,7 @@ void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
        mmiowb();
 }
 
-int qed_eq_completion(struct qed_hwfn *p_hwfn,
-                     void *cookie)
-
+int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
 {
        struct qed_eq *p_eq = cookie;
        struct qed_chain *p_chain = &p_eq->chain;
@@ -323,17 +403,14 @@ int qed_eq_completion(struct qed_hwfn *p_hwfn,
        return rc;
 }
 
-struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
-                           u16 num_elem)
+struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
 {
        struct qed_eq *p_eq;
 
        /* Allocate EQ struct */
        p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
-       if (!p_eq) {
-               DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_eq'\n");
+       if (!p_eq)
                return NULL;
-       }
 
        /* Allocate and initialize EQ chain*/
        if (qed_chain_alloc(p_hwfn->cdev,
@@ -342,17 +419,12 @@ struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
                            QED_CHAIN_CNT_TYPE_U16,
                            num_elem,
                            sizeof(union event_ring_element),
-                           &p_eq->chain)) {
-               DP_NOTICE(p_hwfn, "Failed to allocate eq chain\n");
+                           &p_eq->chain))
                goto eq_allocate_fail;
-       }
 
        /* register EQ completion on the SP SB */
-       qed_int_register_cb(p_hwfn,
-                           qed_eq_completion,
-                           p_eq,
-                           &p_eq->eq_sb_index,
-                           &p_eq->p_fw_cons);
+       qed_int_register_cb(p_hwfn, qed_eq_completion,
+                           p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);
 
        return p_eq;
 
@@ -361,14 +433,12 @@ eq_allocate_fail:
        return NULL;
 }
 
-void qed_eq_setup(struct qed_hwfn *p_hwfn,
-                 struct qed_eq *p_eq)
+void qed_eq_setup(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
 {
        qed_chain_reset(&p_eq->chain);
 }
 
-void qed_eq_free(struct qed_hwfn *p_hwfn,
-                struct qed_eq *p_eq)
+void qed_eq_free(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
 {
        if (!p_eq)
                return;
@@ -379,10 +449,9 @@ void qed_eq_free(struct qed_hwfn *p_hwfn,
 /***************************************************************************
 * CQE API - manipulate EQ functionality
 ***************************************************************************/
-static int qed_cqe_completion(
-       struct qed_hwfn *p_hwfn,
-       struct eth_slow_path_rx_cqe *cqe,
-       enum protocol_type protocol)
+static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
+                             struct eth_slow_path_rx_cqe *cqe,
+                             enum protocol_type protocol)
 {
        if (IS_VF(p_hwfn->cdev))
                return 0;
@@ -463,12 +532,9 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn)
        u32 capacity;
 
        /* SPQ struct */
-       p_spq =
-               kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
-       if (!p_spq) {
-               DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_spq'\n");
+       p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
+       if (!p_spq)
                return -ENOMEM;
-       }
 
        /* SPQ ring  */
        if (qed_chain_alloc(p_hwfn->cdev,
@@ -477,18 +543,14 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn)
                            QED_CHAIN_CNT_TYPE_U16,
                            0,   /* N/A when the mode is SINGLE */
                            sizeof(struct slow_path_element),
-                           &p_spq->chain)) {
-               DP_NOTICE(p_hwfn, "Failed to allocate spq chain\n");
+                           &p_spq->chain))
                goto spq_allocate_fail;
-       }
 
        /* allocate and fill the SPQ elements (incl. ramrod data list) */
        capacity = qed_chain_get_capacity(&p_spq->chain);
        p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
-                                   capacity *
-                                   sizeof(struct qed_spq_entry),
+                                   capacity * sizeof(struct qed_spq_entry),
                                    &p_phys, GFP_KERNEL);
-
        if (!p_virt)
                goto spq_allocate_fail;
 
@@ -525,9 +587,7 @@ void qed_spq_free(struct qed_hwfn *p_hwfn)
        kfree(p_spq);
 }
 
-int
-qed_spq_get_entry(struct qed_hwfn *p_hwfn,
-                 struct qed_spq_entry **pp_ent)
+int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
 {
        struct qed_spq *p_spq = p_hwfn->p_spq;
        struct qed_spq_entry *p_ent = NULL;
@@ -538,14 +598,15 @@ qed_spq_get_entry(struct qed_hwfn *p_hwfn,
        if (list_empty(&p_spq->free_pool)) {
                p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
                if (!p_ent) {
+                       DP_NOTICE(p_hwfn,
+                                 "Failed to allocate an SPQ entry for a pending ramrod\n");
                        rc = -ENOMEM;
                        goto out_unlock;
                }
                p_ent->queue = &p_spq->unlimited_pending;
        } else {
                p_ent = list_first_entry(&p_spq->free_pool,
-                                        struct qed_spq_entry,
-                                        list);
+                                        struct qed_spq_entry, list);
                list_del(&p_ent->list);
                p_ent->queue = &p_spq->pending;
        }
@@ -564,8 +625,7 @@ static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
        list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
 }
 
-void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
-                         struct qed_spq_entry *p_ent)
+void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent)
 {
        spin_lock_bh(&p_hwfn->p_spq->lock);
        __qed_spq_return_entry(p_hwfn, p_ent);
@@ -586,10 +646,9 @@ void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
  *
  * @return int
  */
-static int
-qed_spq_add_entry(struct qed_hwfn *p_hwfn,
-                 struct qed_spq_entry *p_ent,
-                 enum spq_priority priority)
+static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
+                            struct qed_spq_entry *p_ent,
+                            enum spq_priority priority)
 {
        struct qed_spq *p_spq = p_hwfn->p_spq;
 
@@ -604,8 +663,7 @@ qed_spq_add_entry(struct qed_hwfn *p_hwfn,
                        struct qed_spq_entry *p_en2;
 
                        p_en2 = list_first_entry(&p_spq->free_pool,
-                                                struct qed_spq_entry,
-                                                list);
+                                                struct qed_spq_entry, list);
                        list_del(&p_en2->list);
 
                        /* Copy the ring element physical pointer to the new
@@ -655,8 +713,7 @@ u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
 * Posting new Ramrods
 ***************************************************************************/
 static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
-                            struct list_head *head,
-                            u32 keep_reserve)
+                            struct list_head *head, u32 keep_reserve)
 {
        struct qed_spq *p_spq = p_hwfn->p_spq;
        int rc;
@@ -690,8 +747,7 @@ static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
                        break;
 
                p_ent = list_first_entry(&p_spq->unlimited_pending,
-                                        struct qed_spq_entry,
-                                        list);
+                                        struct qed_spq_entry, list);
                if (!p_ent)
                        return -EINVAL;
 
@@ -705,8 +761,7 @@ static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
 }
 
 int qed_spq_post(struct qed_hwfn *p_hwfn,
-                struct qed_spq_entry *p_ent,
-                u8 *fw_return_code)
+                struct qed_spq_entry *p_ent, u8 *fw_return_code)
 {
        int rc = 0;
        struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
@@ -752,7 +807,8 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
                 * access p_ent here to see whether it's successful or not.
                 * Thus, after gaining the answer perform the cleanup here.
                 */
-               rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);
+               rc = qed_spq_block(p_hwfn, p_ent, fw_return_code,
+                                  p_ent->queue == &p_spq->unlimited_pending);
 
                if (p_ent->queue == &p_spq->unlimited_pending) {
                        /* This is an allocated p_ent which does not need to
@@ -803,8 +859,7 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
                return -EINVAL;
 
        spin_lock_bh(&p_spq->lock);
-       list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending,
-                                list) {
+       list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
                if (p_ent->elem.hdr.echo == echo) {
                        u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
 
@@ -846,15 +901,22 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
 
        if (!found) {
                DP_NOTICE(p_hwfn,
-                         "Failed to find an entry this EQE completes\n");
+                         "Failed to find an entry this EQE [echo %04x] completes\n",
+                         le16_to_cpu(echo));
                return -EEXIST;
        }
 
-       DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Complete: func %p cookie %p)\n",
+       DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+                  "Complete EQE [echo %04x]: func %p cookie %p)\n",
+                  le16_to_cpu(echo),
                   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
        if (found->comp_cb.function)
                found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
                                        fw_return_code);
+       else
+               DP_VERBOSE(p_hwfn,
+                          QED_MSG_SPQ,
+                          "Got a completion without a callback function\n");
 
        if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
            (found->queue == &p_spq->unlimited_pending))
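
For context on the lookup above: every posted ramrod carries a 16-bit echo value, the EQE that completes it returns the same echo, and completion therefore reduces to searching the pending list for the matching entry (the driver additionally maps the echo to a ring slot via '% SPQ_RING_SIZE'). A rough sketch of the matching step, with hypothetical simplified types:

#include <stdint.h>
#include <stdio.h>

struct pending {
        uint16_t echo;            /* value carried in the ramrod header */
        const char *name;
};

/* Return the pending entry a completion with 'echo' refers to, or NULL
 * when nothing matches (the -EEXIST path above).
 */
static struct pending *find_by_echo(struct pending *tbl, int n, uint16_t echo)
{
        for (int i = 0; i < n; i++)
                if (tbl[i].echo == echo)
                        return &tbl[i];
        return NULL;
}

int main(void)
{
        struct pending tbl[] = { { 0x0001, "vport-start" },
                                 { 0x0002, "rxq-start" } };
        struct pending *hit = find_by_echo(tbl, 2, 0x0002);

        printf("echo 0x0002 -> %s\n", hit ? hit->name : "not found");
        return 0;
}
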
@@ -878,10 +940,8 @@ struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
 
        /* Allocate ConsQ struct */
        p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
-       if (!p_consq) {
-               DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_consq'\n");
+       if (!p_consq)
                return NULL;
-       }
 
        /* Allocate and initialize EQ chain*/
        if (qed_chain_alloc(p_hwfn->cdev,
@@ -889,10 +949,8 @@ struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
                            QED_CHAIN_MODE_PBL,
                            QED_CHAIN_CNT_TYPE_U16,
                            QED_CHAIN_PAGE_SIZE / 0x80,
-                           0x80, &p_consq->chain)) {
-               DP_NOTICE(p_hwfn, "Failed to allocate consq chain");
+                           0x80, &p_consq->chain))
                goto consq_allocate_fail;
-       }
 
        return p_consq;
 
@@ -901,14 +959,12 @@ consq_allocate_fail:
        return NULL;
 }
 
-void qed_consq_setup(struct qed_hwfn *p_hwfn,
-                    struct qed_consq *p_consq)
+void qed_consq_setup(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
 {
        qed_chain_reset(&p_consq->chain);
 }
 
-void qed_consq_free(struct qed_hwfn *p_hwfn,
-                   struct qed_consq *p_consq)
+void qed_consq_free(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
 {
        if (!p_consq)
                return;
index 15399da268d9652769f0fe76a715622e719c0aaa..92a3ee1715d9b9f0ccf62467f1212938c7a2627d 100644 (file)
@@ -1,13 +1,38 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #include <linux/etherdevice.h>
 #include <linux/crc32.h>
+#include <linux/vmalloc.h>
 #include <linux/qed/qed_iov_if.h>
 #include "qed_cxt.h"
 #include "qed_hsi.h"
@@ -60,7 +85,8 @@ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
        }
 
        fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
-       if (fp_minor > ETH_HSI_VER_MINOR) {
+       if (fp_minor > ETH_HSI_VER_MINOR &&
+           fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
                DP_VERBOSE(p_hwfn,
                           QED_MSG_IOV,
                           "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PFs version\n",
@@ -107,8 +133,9 @@ static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
        return qed_spq_post(p_hwfn, p_ent, NULL);
 }
 
-bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
-                          int rel_vf_id, bool b_enabled_only)
+static bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
+                                 int rel_vf_id,
+                                 bool b_enabled_only, bool b_non_malicious)
 {
        if (!p_hwfn->pf_iov_info) {
                DP_NOTICE(p_hwfn->cdev, "No iov info\n");
@@ -123,6 +150,10 @@ bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
            b_enabled_only)
                return false;
 
+       if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
+           b_non_malicious)
+               return false;
+
        return true;
 }
 
@@ -137,7 +168,8 @@ static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
                return NULL;
        }
 
-       if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only))
+       if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id,
+                                 b_enabled_only, false))
                vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
        else
                DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
@@ -146,26 +178,59 @@ static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
        return vf;
 }
 
+enum qed_iov_validate_q_mode {
+       QED_IOV_VALIDATE_Q_NA,
+       QED_IOV_VALIDATE_Q_ENABLE,
+       QED_IOV_VALIDATE_Q_DISABLE,
+};
+
+static bool qed_iov_validate_queue_mode(struct qed_hwfn *p_hwfn,
+                                       struct qed_vf_info *p_vf,
+                                       u16 qid,
+                                       enum qed_iov_validate_q_mode mode,
+                                       bool b_is_tx)
+{
+       if (mode == QED_IOV_VALIDATE_Q_NA)
+               return true;
+
+       if ((b_is_tx && p_vf->vf_queues[qid].p_tx_cid) ||
+           (!b_is_tx && p_vf->vf_queues[qid].p_rx_cid))
+               return mode == QED_IOV_VALIDATE_Q_ENABLE;
+
+       /* If we haven't found any valid CID, the queue is disabled */
+       return mode == QED_IOV_VALIDATE_Q_DISABLE;
+}
+
 static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn,
-                                struct qed_vf_info *p_vf, u16 rx_qid)
+                                struct qed_vf_info *p_vf,
+                                u16 rx_qid,
+                                enum qed_iov_validate_q_mode mode)
 {
-       if (rx_qid >= p_vf->num_rxqs)
+       if (rx_qid >= p_vf->num_rxqs) {
                DP_VERBOSE(p_hwfn,
                           QED_MSG_IOV,
                           "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n",
                           p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
-       return rx_qid < p_vf->num_rxqs;
+               return false;
+       }
+
+       return qed_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid, mode, false);
 }
 
 static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn,
-                                struct qed_vf_info *p_vf, u16 tx_qid)
+                                struct qed_vf_info *p_vf,
+                                u16 tx_qid,
+                                enum qed_iov_validate_q_mode mode)
 {
-       if (tx_qid >= p_vf->num_txqs)
+       if (tx_qid >= p_vf->num_txqs) {
                DP_VERBOSE(p_hwfn,
                           QED_MSG_IOV,
                           "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n",
                           p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
-       return tx_qid < p_vf->num_txqs;
+               return false;
+       }
+
+       return qed_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid, mode, true);
 }
 
 static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
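
The new QED_IOV_VALIDATE_Q_NA/ENABLE/DISABLE modes above let a single helper serve both callers that require an already-active queue (stop paths) and callers that require a still-free queue (start paths), with activity inferred from whether a CID is attached. A compact sketch of the same tri-state check, with hypothetical types:

#include <stdbool.h>
#include <stdio.h>

enum q_mode { Q_NA, Q_ENABLE, Q_DISABLE };

struct queue { void *cid; };      /* non-NULL cid == queue is active */

static bool validate_q(const struct queue *q, enum q_mode mode)
{
        if (mode == Q_NA)         /* caller doesn't care about state */
                return true;
        if (q->cid)               /* a CID exists, so the queue is active */
                return mode == Q_ENABLE;
        return mode == Q_DISABLE; /* no CID found, so it is disabled */
}

int main(void)
{
        struct queue active = { .cid = &active }, idle = { .cid = NULL };

        /* start paths want a disabled queue; stop paths an enabled one */
        printf("start on idle queue:  %d\n", validate_q(&idle, Q_DISABLE));
        printf("stop on active queue: %d\n", validate_q(&active, Q_ENABLE));
        printf("stop on idle queue:   %d\n", validate_q(&idle, Q_ENABLE));
        return 0;
}
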
@@ -185,8 +250,36 @@ static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
        return false;
 }
 
-int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
-                            int vfid, struct qed_ptt *p_ptt)
+static bool qed_iov_validate_active_rxq(struct qed_hwfn *p_hwfn,
+                                       struct qed_vf_info *p_vf)
+{
+       u8 i;
+
+       for (i = 0; i < p_vf->num_rxqs; i++)
+               if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
+                                               QED_IOV_VALIDATE_Q_ENABLE,
+                                               false))
+                       return true;
+
+       return false;
+}
+
+static bool qed_iov_validate_active_txq(struct qed_hwfn *p_hwfn,
+                                       struct qed_vf_info *p_vf)
+{
+       u8 i;
+
+       for (i = 0; i < p_vf->num_txqs; i++)
+               if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
+                                               QED_IOV_VALIDATE_Q_ENABLE,
+                                               true))
+                       return true;
+
+       return false;
+}
+
+static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
+                                   int vfid, struct qed_ptt *p_ptt)
 {
        struct qed_bulletin_content *p_bulletin;
        int crc_size = sizeof(p_bulletin->crc);
@@ -454,10 +547,8 @@ int qed_iov_alloc(struct qed_hwfn *p_hwfn)
        }
 
        p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
-       if (!p_sriov) {
-               DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n");
+       if (!p_sriov)
                return -ENOMEM;
-       }
 
        p_hwfn->pf_iov_info = p_sriov;
 
@@ -506,10 +597,9 @@ int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
 
        /* Allocate a new struct for IOV information */
        cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
-       if (!cdev->p_iov_info) {
-               DP_NOTICE(p_hwfn, "Can't support IOV due to lack of memory\n");
+       if (!cdev->p_iov_info)
                return -ENOMEM;
-       }
+
        cdev->p_iov_info->pos = pos;
 
        rc = qed_iov_pci_cfg_info(cdev);
@@ -528,14 +618,30 @@ int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
                return 0;
        }
 
-       /* Calculate the first VF index - this is a bit tricky; Basically,
-        * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
-        * after the first engine's VFs.
+       /* First VF index based on offset is tricky:
+        *  - If ARI is supported [likely], offset - (16 - pf_id) would
+        *    provide the number for eng0; 2nd engine VFs would begin
+        *    after the first engine's VFs.
+        *  - If !ARI, VFs would start on the next device,
+        *    so offset - (256 - pf_id) would provide the number.
+        * Utilize the fact that (256 - pf_id) is reached only in the
+        * !ARI case to differentiate between the two.
-       cdev->p_iov_info->first_vf_in_pf = p_hwfn->cdev->p_iov_info->offset +
-                                          p_hwfn->abs_pf_id - 16;
-       if (QED_PATH_ID(p_hwfn))
-               cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
+
+       if (p_hwfn->cdev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
+               u32 first = p_hwfn->cdev->p_iov_info->offset +
+                           p_hwfn->abs_pf_id - 16;
+
+               cdev->p_iov_info->first_vf_in_pf = first;
+
+               if (QED_PATH_ID(p_hwfn))
+                       cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
+       } else {
+               u32 first = p_hwfn->cdev->p_iov_info->offset +
+                           p_hwfn->abs_pf_id - 256;
+
+               cdev->p_iov_info->first_vf_in_pf = first;
+       }
 
        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                   "First VF in hwfn 0x%08x\n",
@@ -544,7 +650,8 @@ int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
        return 0;
 }
 
-static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
+bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn,
+                             int vfid, bool b_fail_malicious)
 {
        /* Check PF supports sriov */
        if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
@@ -552,12 +659,17 @@ static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
                return false;
 
        /* Check VF validity */
-       if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true))
+       if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
                return false;
 
        return true;
 }
 
+bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
+{
+       return _qed_iov_pf_sanity_check(p_hwfn, vfid, true);
+}
+
 static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
                                      u16 rel_vf_id, u8 to_disable)
 {
@@ -575,7 +687,7 @@ static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
        }
 }
 
-void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
+static void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
 {
        u16 i;
 
@@ -642,6 +754,11 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
        u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
        int rc;
 
+       /* It's possible VF was previously considered malicious -
+        * clear the indication even if we're only going to disable VF.
+        */
+       vf->b_malicious = false;
+
        if (vf->to_disable)
                return 0;
 
@@ -699,7 +816,7 @@ static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
                                &qzone_id);
 
                reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
-               val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
+               val = enable ? (vf->abs_vf_id | BIT(8)) : 0;
                qed_wr(p_hwfn, p_ptt, reg_addr, val);
        }
 }
@@ -793,39 +910,114 @@ static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
        vf->num_sbs = 0;
 }
 
+static void qed_iov_set_link(struct qed_hwfn *p_hwfn,
+                            u16 vfid,
+                            struct qed_mcp_link_params *params,
+                            struct qed_mcp_link_state *link,
+                            struct qed_mcp_link_capabilities *p_caps)
+{
+       struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
+                                                      vfid,
+                                                      false);
+       struct qed_bulletin_content *p_bulletin;
+
+       if (!p_vf)
+               return;
+
+       p_bulletin = p_vf->bulletin.p_virt;
+       p_bulletin->req_autoneg = params->speed.autoneg;
+       p_bulletin->req_adv_speed = params->speed.advertised_speeds;
+       p_bulletin->req_forced_speed = params->speed.forced_speed;
+       p_bulletin->req_autoneg_pause = params->pause.autoneg;
+       p_bulletin->req_forced_rx = params->pause.forced_rx;
+       p_bulletin->req_forced_tx = params->pause.forced_tx;
+       p_bulletin->req_loopback = params->loopback_mode;
+
+       p_bulletin->link_up = link->link_up;
+       p_bulletin->speed = link->speed;
+       p_bulletin->full_duplex = link->full_duplex;
+       p_bulletin->autoneg = link->an;
+       p_bulletin->autoneg_complete = link->an_complete;
+       p_bulletin->parallel_detection = link->parallel_detection;
+       p_bulletin->pfc_enabled = link->pfc_enabled;
+       p_bulletin->partner_adv_speed = link->partner_adv_speed;
+       p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
+       p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
+       p_bulletin->partner_adv_pause = link->partner_adv_pause;
+       p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
+
+       p_bulletin->capability_speed = p_caps->speed_capabilities;
+}
+
 static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
                                  struct qed_ptt *p_ptt,
-                                 u16 rel_vf_id, u16 num_rx_queues)
+                                 struct qed_iov_vf_init_params *p_params)
 {
+       struct qed_mcp_link_capabilities link_caps;
+       struct qed_mcp_link_params link_params;
+       struct qed_mcp_link_state link_state;
        u8 num_of_vf_avaiable_chains = 0;
        struct qed_vf_info *vf = NULL;
+       u16 qid, num_irqs;
        int rc = 0;
        u32 cids;
        u8 i;
 
-       vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
+       vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
        if (!vf) {
                DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n");
                return -EINVAL;
        }
 
        if (vf->b_init) {
-               DP_NOTICE(p_hwfn, "VF[%d] is already active.\n", rel_vf_id);
+               DP_NOTICE(p_hwfn, "VF[%d] is already active.\n",
+                         p_params->rel_vf_id);
                return -EINVAL;
        }
 
+       /* Perform sanity checking on the requested queue_id */
+       for (i = 0; i < p_params->num_queues; i++) {
+               u16 min_vf_qzone = FEAT_NUM(p_hwfn, QED_PF_L2_QUE);
+               u16 max_vf_qzone = min_vf_qzone +
+                   FEAT_NUM(p_hwfn, QED_VF_L2_QUE) - 1;
+
+               qid = p_params->req_rx_queue[i];
+               if (qid < min_vf_qzone || qid > max_vf_qzone) {
+                       DP_NOTICE(p_hwfn,
+                                 "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n",
+                                 qid,
+                                 p_params->rel_vf_id,
+                                 min_vf_qzone, max_vf_qzone);
+                       return -EINVAL;
+               }
+
+               qid = p_params->req_tx_queue[i];
+               if (qid > max_vf_qzone) {
+                       DP_NOTICE(p_hwfn,
+                                 "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n",
+                                 qid, p_params->rel_vf_id, max_vf_qzone);
+                       return -EINVAL;
+               }
+
+               /* If client *really* wants, Tx qid can be shared with PF */
+               if (qid < min_vf_qzone)
+                       DP_VERBOSE(p_hwfn,
+                                  QED_MSG_IOV,
+                                  "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n",
+                                  p_params->rel_vf_id, qid, i);
+       }
+
        /* Limit number of queues according to number of CIDs */
        qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
        DP_VERBOSE(p_hwfn,
                   QED_MSG_IOV,
                   "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
-                  vf->relative_vf_id, num_rx_queues, (u16) cids);
-       num_rx_queues = min_t(u16, num_rx_queues, ((u16) cids));
+                  vf->relative_vf_id, p_params->num_queues, (u16)cids);
+       num_irqs = min_t(u16, p_params->num_queues, ((u16)cids));
 
        num_of_vf_avaiable_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
                                                             p_ptt,
-                                                            vf,
-                                                            num_rx_queues);
+                                                            vf, num_irqs);
        if (!num_of_vf_avaiable_chains) {
                DP_ERR(p_hwfn, "no available igu sbs\n");
                return -ENOMEM;
@@ -836,25 +1028,31 @@ static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
        vf->num_txqs = num_of_vf_avaiable_chains;
 
        for (i = 0; i < vf->num_rxqs; i++) {
-               u16 queue_id = qed_int_queue_id_from_sb_id(p_hwfn,
-                                                          vf->igu_sbs[i]);
+               struct qed_vf_q_info *p_queue = &vf->vf_queues[i];
 
-               if (queue_id > RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
-                       DP_NOTICE(p_hwfn,
-                                 "VF[%d] will require utilizing of out-of-bounds queues - %04x\n",
-                                 vf->relative_vf_id, queue_id);
-                       return -EINVAL;
-               }
+               p_queue->fw_rx_qid = p_params->req_rx_queue[i];
+               p_queue->fw_tx_qid = p_params->req_tx_queue[i];
 
                /* CIDs are per-VF, so no problem having them 0-based. */
-               vf->vf_queues[i].fw_rx_qid = queue_id;
-               vf->vf_queues[i].fw_tx_qid = queue_id;
-               vf->vf_queues[i].fw_cid = i;
+               p_queue->fw_cid = i;
 
                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
-                          "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n",
-                          vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i);
+                          "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]  CID %04x\n",
+                          vf->relative_vf_id,
+                          i, vf->igu_sbs[i],
+                          p_queue->fw_rx_qid,
+                          p_queue->fw_tx_qid, p_queue->fw_cid);
        }
+
+       /* Update the link configuration in bulletin */
+       memcpy(&link_params, qed_mcp_get_link_params(p_hwfn),
+              sizeof(link_params));
+       memcpy(&link_state, qed_mcp_get_link_state(p_hwfn), sizeof(link_state));
+       memcpy(&link_caps, qed_mcp_get_link_capabilities(p_hwfn),
+              sizeof(link_caps));
+       qed_iov_set_link(p_hwfn, p_params->rel_vf_id,
+                        &link_params, &link_state, &link_caps);
+
        rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
        if (!rc) {
                vf->b_init = true;
@@ -866,45 +1064,6 @@ static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
        return rc;
 }
 
-static void qed_iov_set_link(struct qed_hwfn *p_hwfn,
-                            u16 vfid,
-                            struct qed_mcp_link_params *params,
-                            struct qed_mcp_link_state *link,
-                            struct qed_mcp_link_capabilities *p_caps)
-{
-       struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
-                                                      vfid,
-                                                      false);
-       struct qed_bulletin_content *p_bulletin;
-
-       if (!p_vf)
-               return;
-
-       p_bulletin = p_vf->bulletin.p_virt;
-       p_bulletin->req_autoneg = params->speed.autoneg;
-       p_bulletin->req_adv_speed = params->speed.advertised_speeds;
-       p_bulletin->req_forced_speed = params->speed.forced_speed;
-       p_bulletin->req_autoneg_pause = params->pause.autoneg;
-       p_bulletin->req_forced_rx = params->pause.forced_rx;
-       p_bulletin->req_forced_tx = params->pause.forced_tx;
-       p_bulletin->req_loopback = params->loopback_mode;
-
-       p_bulletin->link_up = link->link_up;
-       p_bulletin->speed = link->speed;
-       p_bulletin->full_duplex = link->full_duplex;
-       p_bulletin->autoneg = link->an;
-       p_bulletin->autoneg_complete = link->an_complete;
-       p_bulletin->parallel_detection = link->parallel_detection;
-       p_bulletin->pfc_enabled = link->pfc_enabled;
-       p_bulletin->partner_adv_speed = link->partner_adv_speed;
-       p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
-       p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
-       p_bulletin->partner_adv_pause = link->partner_adv_pause;
-       p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
-
-       p_bulletin->capability_speed = p_caps->speed_capabilities;
-}
-
 static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
                                     struct qed_ptt *p_ptt, u16 rel_vf_id)
 {
@@ -1038,13 +1197,17 @@ static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
                           (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
                           &params);
 
-       qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
-                          mbx->req_virt->first_tlv.reply_address,
-                          sizeof(u64) / 4, &params);
-
+       /* Once the PF copies the rc to the VF, the latter can continue
+        * and send an additional message, so we have to make sure the
+        * channel is re-set to ready prior to that.
+        */
        REG_WR(p_hwfn,
               GTT_BAR0_MAP_REG_USDM_RAM +
               USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
+
+       qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
+                          mbx->req_virt->first_tlv.reply_address,
+                          sizeof(u64) / 4, &params);
 }
 
 static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn,
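
The reordering in the hunk above is an ordering fix: the channel-ready flag must be raised before the reply's return code becomes visible, since a VF that has seen the rc may immediately send its next message and must not find the channel still marked busy. A userspace sketch of the required publish order, using C11 atomics as a stand-in for the register and DMA writes (illustrative only):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int channel_ready;
static atomic_int reply_rc;

/* PF side: mark the channel ready *before* publishing the reply, so a
 * VF that observes the rc is guaranteed to also observe ready == 1.
 */
static void pf_send_reply(int rc)
{
        atomic_store_explicit(&channel_ready, 1, memory_order_release);
        atomic_store_explicit(&reply_rc, rc, memory_order_release);
}

/* VF side: once the rc is visible, the channel must already be ready. */
static int vf_read_reply(void)
{
        int rc = atomic_load_explicit(&reply_rc, memory_order_acquire);

        return atomic_load_explicit(&channel_ready,
                                    memory_order_acquire) ? rc : -1;
}

int main(void)
{
        pf_send_reply(0);
        printf("VF sees rc=%d, channel ready\n", vf_read_reply());
        return 0;
}
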
@@ -1090,13 +1253,13 @@ static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
 
        /* Prepare response for all extended tlvs if they are found by PF */
        for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
-               if (!(tlvs_mask & (1 << i)))
+               if (!(tlvs_mask & BIT(i)))
                        continue;
 
                resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
                                   qed_iov_vport_to_tlv(p_hwfn, i), size);
 
-               if (tlvs_accepted & (1 << i))
+               if (tlvs_accepted & BIT(i))
                        resp->hdr.status = status;
                else
                        resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;
@@ -1132,9 +1295,10 @@ static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
        qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
 }
 
-struct qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
-                                                     u16 relative_vf_id,
-                                                     bool b_enabled_only)
+static struct
+qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
+                                              u16 relative_vf_id,
+                                              bool b_enabled_only)
 {
        struct qed_vf_info *vf = NULL;
 
@@ -1145,7 +1309,7 @@ struct qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
        return &vf->p_vf_info;
 }
 
-void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
+static void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
 {
        struct qed_public_vf_info *vf_info;
 
@@ -1155,7 +1319,10 @@ void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
                return;
 
        /* Clear the VF mac */
-       memset(vf_info->mac, 0, ETH_ALEN);
+       eth_zero_addr(vf_info->mac);
+
+       vf_info->rx_accept_mode = 0;
+       vf_info->tx_accept_mode = 0;
 }
 
 static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
@@ -1173,8 +1340,19 @@ static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
 
        p_vf->num_active_rxqs = 0;
 
-       for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++)
-               p_vf->vf_queues[i].rxq_active = 0;
+       for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
+               struct qed_vf_q_info *p_queue = &p_vf->vf_queues[i];
+
+               if (p_queue->p_rx_cid) {
+                       qed_eth_queue_cid_release(p_hwfn, p_queue->p_rx_cid);
+                       p_queue->p_rx_cid = NULL;
+               }
+
+               if (p_queue->p_tx_cid) {
+                       qed_eth_queue_cid_release(p_hwfn, p_queue->p_tx_cid);
+                       p_queue->p_tx_cid = NULL;
+               }
+       }
 
        memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
        memset(&p_vf->acquire, 0, sizeof(p_vf->acquire));
@@ -1241,6 +1419,16 @@ static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
                           p_req->num_vlan_filters,
                           p_resp->num_vlan_filters,
                           p_req->num_mc_filters, p_resp->num_mc_filters);
+
+               /* Some legacy OSes are incapable of correctly handling this
+                * failure.
+                */
+               if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+                    ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
+                   (p_vf->acquire.vfdev_info.os_type ==
+                    VFPF_ACQUIRE_OS_WINDOWS))
+                       return PFVF_STATUS_SUCCESS;
+
                return PFVF_STATUS_NO_RESOURCE;
        }
 
@@ -1280,22 +1468,42 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
 
        memset(resp, 0, sizeof(*resp));
 
+       /* Write the PF version so that the VF would know which version
+        * is supported - might be overridden later. This guarantees that
+        * the VF can recognize a legacy PF by the lack of versions in the
+        * reply.
+        */
+       pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
+       pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
+
+       if (vf->state != VF_FREE && vf->state != VF_STOPPED) {
+               DP_VERBOSE(p_hwfn,
+                          QED_MSG_IOV,
+                          "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
+                          vf->abs_vf_id, vf->state);
+               goto out;
+       }
+
        /* Validate FW compatibility */
        if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
-               DP_INFO(p_hwfn,
-                       "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's faspath HSI %02x.%02x\n",
-                       vf->abs_vf_id,
-                       req->vfdev_info.eth_fp_hsi_major,
-                       req->vfdev_info.eth_fp_hsi_minor,
-                       ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
-
-               /* Write the PF version so that VF would know which version
-                * is supported.
-                */
-               pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
-               pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
+               if (req->vfdev_info.capabilities &
+                   VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
+                       struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;
 
-               goto out;
+                       DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                                  "VF[%d] is pre-fastpath HSI\n",
+                                  vf->abs_vf_id);
+                       p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
+                       p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
+               } else {
+                       DP_INFO(p_hwfn,
+                               "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's faspath HSI %02x.%02x\n",
+                               vf->abs_vf_id,
+                               req->vfdev_info.eth_fp_hsi_major,
+                               req->vfdev_info.eth_fp_hsi_minor,
+                               ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
+
+                       goto out;
+               }
        }
 
        /* On 100g PFs, prevent old VFs from loading */
@@ -1334,8 +1542,11 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
        pfdev_info->fw_minor = FW_MINOR_VERSION;
        pfdev_info->fw_rev = FW_REVISION_VERSION;
        pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
-       pfdev_info->minor_fp_hsi = min_t(u8,
-                                        ETH_HSI_VER_MINOR,
+
+       /* Incorrect when legacy, but doesn't matter as legacy isn't reading
+        * this field.
+        */
+       pfdev_info->minor_fp_hsi = min_t(u8, ETH_HSI_VER_MINOR,
                                         req->vfdev_info.eth_fp_hsi_minor);
        pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
        qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);
@@ -1438,14 +1649,11 @@ static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn,
 
                filter.type = QED_FILTER_VLAN;
                filter.vlan = p_vf->shadow_config.vlans[i].vid;
-               DP_VERBOSE(p_hwfn,
-                          QED_MSG_IOV,
+               DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
                           filter.vlan, p_vf->relative_vf_id);
-               rc = qed_sp_eth_filter_ucast(p_hwfn,
-                                            p_vf->opaque_fid,
-                                            &filter,
-                                            QED_SPQ_MODE_CB, NULL);
+               rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
+                                            &filter, QED_SPQ_MODE_CB, NULL);
                if (rc) {
                        DP_NOTICE(p_hwfn,
                                  "Failed to configure VLAN [%04x] to VF [%04x]\n",
@@ -1463,7 +1671,7 @@ qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn,
 {
        int rc = 0;
 
-       if ((events & (1 << VLAN_ADDR_FORCED)) &&
+       if ((events & BIT(VLAN_ADDR_FORCED)) &&
            !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
                rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);
 
@@ -1479,7 +1687,7 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
        if (!p_vf->vport_instance)
                return -EINVAL;
 
-       if (events & (1 << MAC_ADDR_FORCED)) {
+       if (events & BIT(MAC_ADDR_FORCED)) {
                /* Since there's no way [currently] of removing the MAC,
                 * we can always assume this means we need to force it.
                 */
@@ -1502,7 +1710,7 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
                p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
        }
 
-       if (events & (1 << VLAN_ADDR_FORCED)) {
+       if (events & BIT(VLAN_ADDR_FORCED)) {
                struct qed_sp_vport_update_params vport_update;
                u8 removal;
                int i;
@@ -1550,21 +1758,21 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
 
                /* Update all the Rx queues */
                for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
-                       u16 qid;
+                       struct qed_queue_cid *p_cid;
 
-                       if (!p_vf->vf_queues[i].rxq_active)
+                       p_cid = p_vf->vf_queues[i].p_rx_cid;
+                       if (!p_cid)
                                continue;
 
-                       qid = p_vf->vf_queues[i].fw_rx_qid;
-
-                       rc = qed_sp_eth_rx_queues_update(p_hwfn, qid,
+                       rc = qed_sp_eth_rx_queues_update(p_hwfn,
+                                                        (void **)&p_cid,
                                                         1, 0, 1,
                                                         QED_SPQ_MODE_EBLOCK,
                                                         NULL);
                        if (rc) {
                                DP_NOTICE(p_hwfn,
                                          "Failed to send Rx update fo queue[0x%04x]\n",
-                                         qid);
+                                         p_cid->rel.queue_id);
                                return rc;
                        }
                }
@@ -1572,7 +1780,7 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
                if (filter.vlan)
                        p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
                else
-                       p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);
+                       p_vf->configured_features &= ~BIT(VLAN_ADDR_FORCED);
        }
 
        /* If forced features are terminated, we need to configure the shadow
@@ -1608,6 +1816,8 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
        vf->state = VF_ENABLED;
        start = &mbx->req_virt->start_vport;
 
+       qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
+
        /* Initialize Status block in CAU */
        for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
                if (!start->sb_addr[sb_id]) {
@@ -1619,10 +1829,8 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
 
                qed_int_cau_conf_sb(p_hwfn, p_ptt,
                                    start->sb_addr[sb_id],
-                                   vf->igu_sbs[sb_id],
-                                   vf->abs_vf_id, 1);
+                                   vf->igu_sbs[sb_id], vf->abs_vf_id, 1);
        }
-       qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
 
        vf->mtu = start->mtu;
        vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
@@ -1632,7 +1840,7 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
         * vfs that would still be fine, since they passed '0' as padding].
         */
        p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
-       if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
+       if (!(*p_bitmap & BIT(VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
                u8 vf_req = start->only_untagged;
 
                vf_info->bulletin.p_virt->default_only_untagged = vf_req;
@@ -1650,9 +1858,10 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
        params.vport_id = vf->vport_id;
        params.max_buffers_per_cqe = start->max_buffers_per_cqe;
        params.mtu = vf->mtu;
+       params.check_mac = true;
 
        rc = qed_sp_eth_vport_start(p_hwfn, &params);
-       if (rc != 0) {
+       if (rc) {
                DP_ERR(p_hwfn,
                       "qed_iov_vf_mbx_start_vport returned error %d\n", rc);
                status = PFVF_STATUS_FAILURE;
@@ -1678,8 +1887,18 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
        vf->vport_instance--;
        vf->spoof_chk = false;
 
+       if ((qed_iov_validate_active_rxq(p_hwfn, vf)) ||
+           (qed_iov_validate_active_txq(p_hwfn, vf))) {
+               vf->b_malicious = true;
+               DP_NOTICE(p_hwfn,
+                         "VF [%02x] - considered malicious; Unable to stop RX/TX queuess\n",
+                         vf->abs_vf_id);
+               status = PFVF_STATUS_MALICIOUS;
+               goto out;
+       }
+
        rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
-       if (rc != 0) {
+       if (rc) {
                DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
                       rc);
                status = PFVF_STATUS_FAILURE;
@@ -1689,27 +1908,39 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
        vf->configured_features = 0;
        memset(&vf->shadow_config, 0, sizeof(vf->shadow_config));
 
+out:
        qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
                             sizeof(struct pfvf_def_resp_tlv), status);
 }
 
 static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
                                          struct qed_ptt *p_ptt,
-                                         struct qed_vf_info *vf, u8 status)
+                                         struct qed_vf_info *vf,
+                                         u8 status, bool b_legacy)
 {
        struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
        struct pfvf_start_queue_resp_tlv *p_tlv;
        struct vfpf_start_rxq_tlv *req;
+       u16 length;
 
        mbx->offset = (u8 *)mbx->reply_virt;
 
+       /* Taking a bigger struct instead of adding a TLV to the list was
+        * a mistake, but one we're now stuck with, as some older
+        * clients assume the size of the previous response.
+        */
+       if (!b_legacy)
+               length = sizeof(*p_tlv);
+       else
+               length = sizeof(struct pfvf_def_resp_tlv);
+
        p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
-                           sizeof(*p_tlv));
+                           length);
        qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
                    sizeof(struct channel_list_end_tlv));
 
        /* Update the TLV with the response */
-       if (status == PFVF_STATUS_SUCCESS) {
+       if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
                req = &mbx->req_virt->start_rxq;
                p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
                                offsetof(struct mstorm_vf_zone,
@@ -1717,7 +1948,7 @@ static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
                                sizeof(struct eth_rx_prod_data) * req->rx_qid;
        }
 
-       qed_iov_send_response(p_hwfn, p_ptt, vf, sizeof(*p_tlv), status);
+       qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
 }
 
 static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
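
The length selection above encodes a wire-compatibility rule: legacy (pre-fastpath-HSI) VFs expect the old, shorter default response, so the PF sizes the START_RXQ reply accordingly and omits the producer-offset field they never read. A tiny sketch of the same selection, with hypothetical structure sizes:

#include <stdbool.h>
#include <stdio.h>

struct def_resp   { int hdr; };              /* old, short reply */
struct queue_resp { int hdr; int offset; };  /* new reply + prod offset */

/* Pick the reply length the client expects, old or new. */
static unsigned int resp_len(bool legacy_vf)
{
        return legacy_vf ? (unsigned int)sizeof(struct def_resp)
                         : (unsigned int)sizeof(struct queue_resp);
}

int main(void)
{
        printf("legacy reply:  %u bytes\n", resp_len(true));
        printf("current reply: %u bytes\n", resp_len(false));
        return 0;
}
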
@@ -1727,40 +1958,65 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
        struct qed_queue_start_common_params params;
        struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
        u8 status = PFVF_STATUS_NO_RESOURCE;
+       struct qed_vf_q_info *p_queue;
        struct vfpf_start_rxq_tlv *req;
+       bool b_legacy_vf = false;
        int rc;
 
-       memset(&params, 0, sizeof(params));
        req = &mbx->req_virt->start_rxq;
 
-       if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid) ||
+       if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
+                                 QED_IOV_VALIDATE_Q_DISABLE) ||
            !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
                goto out;
 
-       params.queue_id =  vf->vf_queues[req->rx_qid].fw_rx_qid;
-       params.vf_qid = req->rx_qid;
+       /* Acquire a new queue-cid */
+       p_queue = &vf->vf_queues[req->rx_qid];
+
+       memset(&params, 0, sizeof(params));
+       params.queue_id = p_queue->fw_rx_qid;
        params.vport_id = vf->vport_id;
+       params.stats_id = vf->abs_vf_id + 0x10;
        params.sb = req->hw_sb;
        params.sb_idx = req->sb_index;
 
-       rc = qed_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
-                                        vf->vf_queues[req->rx_qid].fw_cid,
-                                        &params,
-                                        vf->abs_vf_id + 0x10,
-                                        req->bd_max_bytes,
-                                        req->rxq_addr,
-                                        req->cqe_pbl_addr, req->cqe_pbl_size);
+       p_queue->p_rx_cid = _qed_eth_queue_to_cid(p_hwfn,
+                                                 vf->opaque_fid,
+                                                 p_queue->fw_cid,
+                                                 req->rx_qid, &params);
+       if (!p_queue->p_rx_cid)
+               goto out;
 
+       /* Legacy VFs have their producers in a different location, which
+        * they calculate on their own; they also clean the producer prior
+        * to this.
+        */
+       if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+           ETH_HSI_VER_NO_PKT_LEN_TUNN) {
+               b_legacy_vf = true;
+       } else {
+               REG_WR(p_hwfn,
+                      GTT_BAR0_MAP_REG_MSDM_RAM +
+                      MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
+                      0);
+       }
+       p_queue->p_rx_cid->b_legacy_vf = b_legacy_vf;
+
+       rc = qed_eth_rxq_start_ramrod(p_hwfn,
+                                     p_queue->p_rx_cid,
+                                     req->bd_max_bytes,
+                                     req->rxq_addr,
+                                     req->cqe_pbl_addr, req->cqe_pbl_size);
        if (rc) {
                status = PFVF_STATUS_FAILURE;
+               qed_eth_queue_cid_release(p_hwfn, p_queue->p_rx_cid);
+               p_queue->p_rx_cid = NULL;
        } else {
                status = PFVF_STATUS_SUCCESS;
-               vf->vf_queues[req->rx_qid].rxq_active = true;
                vf->num_active_rxqs++;
        }
 
 out:
-       qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status);
+       qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status, b_legacy_vf);
 }
 
 static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
@@ -1769,23 +2025,38 @@ static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
 {
        struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
        struct pfvf_start_queue_resp_tlv *p_tlv;
+       bool b_legacy = false;
+       u16 length;
 
        mbx->offset = (u8 *)mbx->reply_virt;
 
+       /* Taking a bigger struct instead of adding a TLV to the list was
+        * a mistake, but one we're now stuck with, as some older
+        * clients assume the size of the previous response.
+        */
+       if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+           ETH_HSI_VER_NO_PKT_LEN_TUNN)
+               b_legacy = true;
+
+       if (!b_legacy)
+               length = sizeof(*p_tlv);
+       else
+               length = sizeof(struct pfvf_def_resp_tlv);
+
        p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
-                           sizeof(*p_tlv));
+                           length);
        qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
                    sizeof(struct channel_list_end_tlv));
 
        /* Update the TLV with the response */
-       if (status == PFVF_STATUS_SUCCESS) {
+       if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
                u16 qid = mbx->req_virt->start_txq.tx_qid;
 
-               p_tlv->offset = qed_db_addr(p_vf->vf_queues[qid].fw_cid,
-                                           DQ_DEMS_LEGACY);
+               p_tlv->offset = qed_db_addr_vf(p_vf->vf_queues[qid].fw_cid,
+                                              DQ_DEMS_LEGACY);
        }
 
-       qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_tlv), status);
+       qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
 }
 
 static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
@@ -1795,40 +2066,44 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
        struct qed_queue_start_common_params params;
        struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
        u8 status = PFVF_STATUS_NO_RESOURCE;
-       union qed_qm_pq_params pq_params;
        struct vfpf_start_txq_tlv *req;
+       struct qed_vf_q_info *p_queue;
        int rc;
-
-       /* Prepare the parameters which would choose the right PQ */
-       memset(&pq_params, 0, sizeof(pq_params));
-       pq_params.eth.is_vf = 1;
-       pq_params.eth.vf_id = vf->relative_vf_id;
+       u16 pq;
 
        memset(&params, 0, sizeof(params));
        req = &mbx->req_virt->start_txq;
 
-       if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid) ||
+       if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid,
+                                 QED_IOV_VALIDATE_Q_DISABLE) ||
            !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
                goto out;
 
-       params.queue_id =  vf->vf_queues[req->tx_qid].fw_tx_qid;
+       /* Acquire a new queue-cid */
+       p_queue = &vf->vf_queues[req->tx_qid];
+
+       params.queue_id = p_queue->fw_tx_qid;
        params.vport_id = vf->vport_id;
+       params.stats_id = vf->abs_vf_id + 0x10;
        params.sb = req->hw_sb;
        params.sb_idx = req->sb_index;
 
-       rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
-                                        vf->opaque_fid,
-                                        vf->vf_queues[req->tx_qid].fw_cid,
-                                        &params,
-                                        vf->abs_vf_id + 0x10,
-                                        req->pbl_addr,
-                                        req->pbl_size, &pq_params);
+       p_queue->p_tx_cid = _qed_eth_queue_to_cid(p_hwfn,
+                                                 vf->opaque_fid,
+                                                 p_queue->fw_cid,
+                                                 req->tx_qid, &params);
+       if (!p_queue->p_tx_cid)
+               goto out;
 
+       pq = qed_get_cm_pq_idx_vf(p_hwfn, vf->relative_vf_id);
+       rc = qed_eth_txq_start_ramrod(p_hwfn, p_queue->p_tx_cid,
+                                     req->pbl_addr, req->pbl_size, pq);
        if (rc) {
                status = PFVF_STATUS_FAILURE;
+               qed_eth_queue_cid_release(p_hwfn, p_queue->p_tx_cid);
+               p_queue->p_tx_cid = NULL;
        } else {
                status = PFVF_STATUS_SUCCESS;
-               vf->vf_queues[req->tx_qid].txq_active = true;
        }
 
 out:
@@ -1837,52 +2112,53 @@ out:
 
 static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
                                struct qed_vf_info *vf,
-                               u16 rxq_id, u8 num_rxqs, bool cqe_completion)
+                               u16 rxq_id, bool cqe_completion)
 {
+       struct qed_vf_q_info *p_queue;
        int rc = 0;
-       int qid;
 
-       if (rxq_id + num_rxqs > ARRAY_SIZE(vf->vf_queues))
+       if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id,
+                                 QED_IOV_VALIDATE_Q_ENABLE)) {
+               DP_VERBOSE(p_hwfn,
+                          QED_MSG_IOV,
+                          "VF[%d] Tried Closing Rx 0x%04x which is inactive\n",
+                          vf->relative_vf_id, rxq_id);
                return -EINVAL;
+       }
 
-       for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
-               if (vf->vf_queues[qid].rxq_active) {
-                       rc = qed_sp_eth_rx_queue_stop(p_hwfn,
-                                                     vf->vf_queues[qid].
-                                                     fw_rx_qid, false,
-                                                     cqe_completion);
+       p_queue = &vf->vf_queues[rxq_id];
 
-                       if (rc)
-                               return rc;
-               }
-               vf->vf_queues[qid].rxq_active = false;
-               vf->num_active_rxqs--;
-       }
+       rc = qed_eth_rx_queue_stop(p_hwfn,
+                                  p_queue->p_rx_cid,
+                                  false, cqe_completion);
+       if (rc)
+               return rc;
 
-       return rc;
+       p_queue->p_rx_cid = NULL;
+       vf->num_active_rxqs--;
+
+       return 0;
 }
 
 static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
-                               struct qed_vf_info *vf, u16 txq_id, u8 num_txqs)
+                               struct qed_vf_info *vf, u16 txq_id)
 {
+       struct qed_vf_q_info *p_queue;
        int rc = 0;
-       int qid;
 
-       if (txq_id + num_txqs > ARRAY_SIZE(vf->vf_queues))
+       if (!qed_iov_validate_txq(p_hwfn, vf, txq_id,
+                                 QED_IOV_VALIDATE_Q_ENABLE))
                return -EINVAL;
 
-       for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
-               if (vf->vf_queues[qid].txq_active) {
-                       rc = qed_sp_eth_tx_queue_stop(p_hwfn,
-                                                     vf->vf_queues[qid].
-                                                     fw_tx_qid);
+       p_queue = &vf->vf_queues[txq_id];
 
-                       if (rc)
-                               return rc;
-               }
-               vf->vf_queues[qid].txq_active = false;
-       }
-       return rc;
+       rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->p_tx_cid);
+       if (rc)
+               return rc;
+
+       p_queue->p_tx_cid = NULL;
+
+       return 0;
 }
 
 static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
@@ -1891,20 +2167,28 @@ static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
 {
        u16 length = sizeof(struct pfvf_def_resp_tlv);
        struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
-       u8 status = PFVF_STATUS_SUCCESS;
+       u8 status = PFVF_STATUS_FAILURE;
        struct vfpf_stop_rxqs_tlv *req;
        int rc;
 
-       /* We give the option of starting from qid != 0, in this case we
-        * need to make sure that qid + num_qs doesn't exceed the actual
-        * amount of queues that exist.
+       /* There has never been an official driver that used this interface
+        * for stopping multiple queues, and it is now considered deprecated.
+        * Validate this isn't used here.
         */
        req = &mbx->req_virt->stop_rxqs;
-       rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
-                                 req->num_rxqs, req->cqe_completion);
-       if (rc)
-               status = PFVF_STATUS_FAILURE;
+       if (req->num_rxqs != 1) {
+               DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                          "Odd; VF[%d] tried stopping multiple Rx queues\n",
+                          vf->relative_vf_id);
+               status = PFVF_STATUS_NOT_SUPPORTED;
+               goto out;
+       }
 
+       rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
+                                 req->cqe_completion);
+       if (!rc)
+               status = PFVF_STATUS_SUCCESS;
+out:
        qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
                             length, status);
 }
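
Since the PF now rejects num_rxqs != 1 with PFVF_STATUS_NOT_SUPPORTED, a VF that wants to tear down several Rx queues has to issue one request per queue. A minimal VF-side sketch, assuming the qed_vf_pf_rxq_stop() helper of this vintage (u16 rx_qid, bool cqe_completion):

	u16 qid;
	int rc;

	/* Sketch: one CHANNEL_TLV_STOP_RXQS request per queue */
	for (qid = 0; qid < num_rxqs; qid++) {
		rc = qed_vf_pf_rxq_stop(p_hwfn, qid, cqe_completion);
		if (rc)
			return rc;
	}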
@@ -1915,19 +2199,27 @@ static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
 {
        u16 length = sizeof(struct pfvf_def_resp_tlv);
        struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
-       u8 status = PFVF_STATUS_SUCCESS;
+       u8 status = PFVF_STATUS_FAILURE;
        struct vfpf_stop_txqs_tlv *req;
        int rc;
 
-       /* We give the option of starting from qid != 0, in this case we
-        * need to make sure that qid + num_qs doesn't exceed the actual
-        * amount of queues that exist.
+       /* There has never been an official driver that used this interface
+        * for stopping multiple queues, and it is now considered deprecated.
+        * Validate this isn't used here.
         */
        req = &mbx->req_virt->stop_txqs;
-       rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
-       if (rc)
-               status = PFVF_STATUS_FAILURE;
+       if (req->num_txqs != 1) {
+               DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                          "Odd; VF[%d] tried stopping multiple Tx queues\n",
+                          vf->relative_vf_id);
+               status = PFVF_STATUS_NOT_SUPPORTED;
+               goto out;
+       }
+       rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid);
+       if (!rc)
+               status = PFVF_STATUS_SUCCESS;
 
+out:
        qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
                             length, status);
 }
@@ -1936,10 +2228,11 @@ static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
                                       struct qed_ptt *p_ptt,
                                       struct qed_vf_info *vf)
 {
+       struct qed_queue_cid *handlers[QED_MAX_VF_CHAINS_PER_PF];
        u16 length = sizeof(struct pfvf_def_resp_tlv);
        struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
        struct vfpf_update_rxq_tlv *req;
-       u8 status = PFVF_STATUS_SUCCESS;
+       u8 status = PFVF_STATUS_FAILURE;
        u8 complete_event_flg;
        u8 complete_cqe_flg;
        u16 qid;
@@ -1950,29 +2243,31 @@ static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
        complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
        complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
 
-       for (i = 0; i < req->num_rxqs; i++) {
-               qid = req->rx_qid + i;
-
-               if (!vf->vf_queues[qid].rxq_active) {
-                       DP_NOTICE(p_hwfn, "VF rx_qid = %d isn`t active!\n",
-                                 qid);
-                       status = PFVF_STATUS_FAILURE;
-                       break;
+       /* Validate inputs */
+       for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++)
+               if (!qed_iov_validate_rxq(p_hwfn, vf, i,
+                                         QED_IOV_VALIDATE_Q_ENABLE)) {
+                       DP_INFO(p_hwfn, "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
+                               vf->relative_vf_id, req->rx_qid, req->num_rxqs);
+                       goto out;
                }
 
-               rc = qed_sp_eth_rx_queues_update(p_hwfn,
-                                                vf->vf_queues[qid].fw_rx_qid,
-                                                1,
-                                                complete_cqe_flg,
-                                                complete_event_flg,
-                                                QED_SPQ_MODE_EBLOCK, NULL);
-
-               if (rc) {
-                       status = PFVF_STATUS_FAILURE;
-                       break;
-               }
+       /* Prepare the handlers */
+       for (i = 0; i < req->num_rxqs; i++) {
+               qid = req->rx_qid + i;
+               handlers[i] = vf->vf_queues[qid].p_rx_cid;
        }
 
+       rc = qed_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
+                                        req->num_rxqs,
+                                        complete_cqe_flg,
+                                        complete_event_flg,
+                                        QED_SPQ_MODE_EBLOCK, NULL);
+       if (rc)
+               goto out;
+
+       status = PFVF_STATUS_SUCCESS;
+out:
        qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
                             length, status);
 }
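
The update path now gathers the per-queue cids and issues a single batched ramrod instead of one per queue. An illustration with assumed values rx_qid = 2, num_rxqs = 3:

	/* handlers[] collects the queue-cids of VF Rx queues 2, 3 and 4 */
	handlers[0] = vf->vf_queues[2].p_rx_cid;
	handlers[1] = vf->vf_queues[3].p_rx_cid;
	handlers[2] = vf->vf_queues[4].p_rx_cid;
	/* ...then one qed_sp_eth_rx_queues_update() call covers all three */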
@@ -2045,7 +2340,7 @@ qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn,
        p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
 
        /* Ignore the VF request if we're forcing a vlan */
-       if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
+       if (!(p_vf->configured_features & BIT(VLAN_ADDR_FORCED))) {
                p_data->update_inner_vlan_removal_flg = 1;
                p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
        }
@@ -2137,12 +2432,14 @@ qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
                            struct qed_vf_info *vf,
                            struct qed_sp_vport_update_params *p_data,
                            struct qed_rss_params *p_rss,
-                           struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+                           struct qed_iov_vf_mbx *p_mbx,
+                           u16 *tlvs_mask, u16 *tlvs_accepted)
 {
        struct vfpf_vport_update_rss_tlv *p_rss_tlv;
        u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
-       u16 i, q_idx, max_q_idx;
+       bool b_reject = false;
        u16 table_size;
+       u16 i, q_idx;
 
        p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
                    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
@@ -2166,34 +2463,31 @@ qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
        p_rss->rss_eng_id = vf->relative_vf_id + 1;
        p_rss->rss_caps = p_rss_tlv->rss_caps;
        p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
-       memcpy(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table,
-              sizeof(p_rss->rss_ind_table));
        memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key));
 
        table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table),
                           (1 << p_rss_tlv->rss_table_size_log));
 
-       max_q_idx = ARRAY_SIZE(vf->vf_queues);
-
        for (i = 0; i < table_size; i++) {
-               u16 index = vf->vf_queues[0].fw_rx_qid;
+               q_idx = p_rss_tlv->rss_ind_table[i];
+               if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx,
+                                         QED_IOV_VALIDATE_Q_ENABLE)) {
+                       DP_VERBOSE(p_hwfn,
+                                  QED_MSG_IOV,
+                                  "VF[%d]: Omitting RSS due to wrong queue %04x\n",
+                                  vf->relative_vf_id, q_idx);
+                       b_reject = true;
+                       goto out;
+               }
 
-               q_idx = p_rss->rss_ind_table[i];
-               if (q_idx >= max_q_idx)
-                       DP_NOTICE(p_hwfn,
-                                 "rss_ind_table[%d] = %d, rxq is out of range\n",
-                                 i, q_idx);
-               else if (!vf->vf_queues[q_idx].rxq_active)
-                       DP_NOTICE(p_hwfn,
-                                 "rss_ind_table[%d] = %d, rxq is not active\n",
-                                 i, q_idx);
-               else
-                       index = vf->vf_queues[q_idx].fw_rx_qid;
-               p_rss->rss_ind_table[i] = index;
+               p_rss->rss_ind_table[i] = vf->vf_queues[q_idx].p_rx_cid;
        }
 
        p_data->rss_params = p_rss;
+out:
        *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS;
+       if (!b_reject)
+               *tlvs_accepted |= 1 << QED_IOV_VP_UPDATE_RSS;
 }
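
The mask/accepted split introduced here lets the PF tell the VF exactly which of the requested features it applied. A small illustration, assuming a VF requested both RSS and accept-flag updates but RSS validation failed:

	/* Assumed outcome: both TLVs present, only accept-flags passed */
	u16 tlvs_mask     = BIT(QED_IOV_VP_UPDATE_RSS) |
			    BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM);
	u16 tlvs_accepted = BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM);
	/* The response carries both, so the VF can see the RSS rejection */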
 
 static void
@@ -2244,16 +2538,49 @@ qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn,
        *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA;
 }
 
+static int qed_iov_pre_update_vport(struct qed_hwfn *hwfn,
+                                   u8 vfid,
+                                   struct qed_sp_vport_update_params *params,
+                                   u16 *tlvs)
+{
+       u8 mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
+       struct qed_filter_accept_flags *flags = &params->accept_flags;
+       struct qed_public_vf_info *vf_info;
+
+       /* Untrusted VFs can't even be trusted to know that fact.
+        * Simply indicate everything is configured fine, and trace
+        * configuration 'behind their back'.
+        */
+       if (!(*tlvs & BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM)))
+               return 0;
+
+       vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
+
+       if (flags->update_rx_mode_config) {
+               vf_info->rx_accept_mode = flags->rx_accept_filter;
+               if (!vf_info->is_trusted_configured)
+                       flags->rx_accept_filter &= ~mask;
+       }
+
+       if (flags->update_tx_mode_config) {
+               vf_info->tx_accept_mode = flags->tx_accept_filter;
+               if (!vf_info->is_trusted_configured)
+                       flags->tx_accept_filter &= ~mask;
+       }
+
+       return 0;
+}
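
The effect of the masking above, for an untrusted VF that asks for promiscuous Rx (accept-flag names as in qed_l2.h; the values are illustrative only):

	u8 mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
	u8 requested = QED_ACCEPT_UCAST_MATCHED | QED_ACCEPT_UCAST_UNMATCHED;

	vf_info->rx_accept_mode = requested;	/* shadowed for later replay */
	if (!vf_info->is_trusted_configured)
		requested &= ~mask;	/* promisc bits silently dropped */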
+
 static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
                                        struct qed_ptt *p_ptt,
                                        struct qed_vf_info *vf)
 {
+       struct qed_rss_params *p_rss_params = NULL;
        struct qed_sp_vport_update_params params;
        struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
        struct qed_sge_tpa_params sge_tpa_params;
-       struct qed_rss_params rss_params;
+       u16 tlvs_mask = 0, tlvs_accepted = 0;
        u8 status = PFVF_STATUS_SUCCESS;
-       u16 tlvs_mask = 0;
        u16 length;
        int rc;
 
@@ -2266,6 +2593,11 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
                status = PFVF_STATUS_FAILURE;
                goto out;
        }
+       p_rss_params = vzalloc(sizeof(*p_rss_params));
+       if (p_rss_params == NULL) {
+               status = PFVF_STATUS_FAILURE;
+               goto out;
+       }
 
        memset(&params, 0, sizeof(params));
        params.opaque_fid = vf->opaque_fid;
@@ -2280,20 +2612,33 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
        qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
        qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
        qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
-       qed_iov_vp_update_rss_param(p_hwfn, vf, &params, &rss_params,
-                                   mbx, &tlvs_mask);
        qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
        qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
                                        &sge_tpa_params, mbx, &tlvs_mask);
 
-       /* Just log a message if there is no single extended tlv in buffer.
-        * When all features of vport update ramrod would be requested by VF
-        * as extended TLVs in buffer then an error can be returned in response
-        * if there is no extended TLV present in buffer.
+       tlvs_accepted = tlvs_mask;
+
+       /* Some of the extended TLVs need to be validated first; in that
+        * case, they can update the mask without updating the accepted bits
+        * [so that the PF can tell the VF it has rejected the request].
         */
-       if (!tlvs_mask) {
-               DP_NOTICE(p_hwfn,
-                         "No feature tlvs found for vport update\n");
+       qed_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
+                                   mbx, &tlvs_mask, &tlvs_accepted);
+
+       if (qed_iov_pre_update_vport(p_hwfn, vf->relative_vf_id,
+                                    &params, &tlvs_accepted)) {
+               tlvs_accepted = 0;
+               status = PFVF_STATUS_NOT_SUPPORTED;
+               goto out;
+       }
+
+       if (!tlvs_accepted) {
+               if (tlvs_mask)
+                       DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                                  "Upper-layer prevents VF vport configuration\n");
+               else
+                       DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                                  "No feature tlvs found for vport update\n");
                status = PFVF_STATUS_NOT_SUPPORTED;
                goto out;
        }
@@ -2304,8 +2649,9 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
                status = PFVF_STATUS_FAILURE;
 
 out:
+       vfree(p_rss_params);
        length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
-                                                 tlvs_mask, tlvs_mask);
+                                                 tlvs_mask, tlvs_accepted);
        qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
 }
 
@@ -2340,7 +2686,7 @@ static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn,
        /* In forced mode, we're willing to remove entries - but we don't add
         * new ones.
         */
-       if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
+       if (p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED))
                return 0;
 
        if (p_params->opcode == QED_FILTER_ADD ||
@@ -2374,7 +2720,7 @@ static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
        int i;
 
        /* If we're in forced-mode, we don't allow any change */
-       if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
+       if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))
                return 0;
 
        /* First remove entries and then add new ones */
@@ -2382,8 +2728,7 @@ static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
                for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
                        if (ether_addr_equal(p_vf->shadow_config.macs[i],
                                             p_params->mac)) {
-                               memset(p_vf->shadow_config.macs[i], 0,
-                                      ETH_ALEN);
+                               eth_zero_addr(p_vf->shadow_config.macs[i]);
                                break;
                        }
                }
@@ -2396,7 +2741,7 @@ static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
        } else if (p_params->opcode == QED_FILTER_REPLACE ||
                   p_params->opcode == QED_FILTER_FLUSH) {
                for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++)
-                       memset(p_vf->shadow_config.macs[i], 0, ETH_ALEN);
+                       eth_zero_addr(p_vf->shadow_config.macs[i]);
        }
 
        /* List the new MAC address */
@@ -2441,8 +2786,8 @@ qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
        return rc;
 }
 
-int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
-                     int vfid, struct qed_filter_ucast *params)
+static int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
+                            int vfid, struct qed_filter_ucast *params)
 {
        struct qed_public_vf_info *vf;
 
@@ -2509,7 +2854,7 @@ static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
        }
 
        /* Determine if the unicast filtering is acceptable by PF */
-       if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
+       if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) &&
            (params.type == QED_FILTER_VLAN ||
             params.type == QED_FILTER_MAC_VLAN)) {
                /* Once VLAN is forced or PVID is set, do not allow
@@ -2521,7 +2866,7 @@ static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
                goto out;
        }
 
-       if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
+       if ((p_bulletin->valid_bitmap & BIT(MAC_ADDR_FORCED)) &&
            (params.type == QED_FILTER_MAC ||
             params.type == QED_FILTER_MAC_VLAN)) {
                if (!ether_addr_equal(p_bulletin->mac, params.mac) ||
@@ -2734,6 +3079,13 @@ qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
                        return rc;
                }
 
+               /* Workaround to make VF-PF channel ready, as FW
+                * doesn't do that as a part of FLR.
+                */
+               REG_WR(p_hwfn,
+                      GTT_BAR0_MAP_REG_USDM_RAM +
+                      USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);
+
                /* VF_STOPPED has to be set only after final cleanup
                 * but prior to re-enabling the VF.
                 */
@@ -2749,17 +3101,17 @@ cleanup:
                /* Mark VF for ack and clean pending state */
                if (p_vf->state == VF_RESET)
                        p_vf->state = VF_STOPPED;
-               ack_vfs[vfid / 32] |= (1 << (vfid % 32));
+               ack_vfs[vfid / 32] |= BIT((vfid % 32));
                p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
                    ~(1ULL << (rel_vf_id % 64));
-               p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
-                   ~(1ULL << (rel_vf_id % 64));
+               p_vf->vf_mbx.b_pending_msg = false;
        }
 
        return rc;
 }
 
-int qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+static int
+qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
        u32 ack_vfs[VF_MAX_STATIC / 32];
        int rc = 0;
@@ -2780,9 +3132,10 @@ int qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
        return rc;
 }
 
-int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
+bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
 {
-       u16 i, found = 0;
+       bool found = false;
+       u16 i;
 
        DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n");
        for (i = 0; i < (VF_MAX_STATIC / 32); i++)
@@ -2792,7 +3145,7 @@ int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
 
        if (!p_hwfn->cdev->p_iov_info) {
                DP_NOTICE(p_hwfn, "VF flr but no IOV\n");
-               return 0;
+               return false;
        }
 
        /* Mark VFs */
@@ -2805,7 +3158,7 @@ int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
                        continue;
 
                vfid = p_vf->abs_vf_id;
-               if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
+               if (BIT((vfid % 32)) & p_disabled_vfs[vfid / 32]) {
                        u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
                        u16 rel_vf_id = p_vf->relative_vf_id;
 
@@ -2821,7 +3174,7 @@ int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
                         * VF flr until ACKs, we're safe.
                         */
                        p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
-                       found = 1;
+                       found = true;
                }
        }
 
@@ -2865,13 +3218,23 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
        mbx = &p_vf->vf_mbx;
 
        /* qed_iov_process_mbx_request */
-       DP_VERBOSE(p_hwfn, QED_MSG_IOV,
-                  "VF[%02x]: Processing mailbox message\n", p_vf->abs_vf_id);
+       if (!mbx->b_pending_msg) {
+               DP_NOTICE(p_hwfn,
+                         "VF[%02x]: Trying to process mailbox message when none is pending\n",
+                         p_vf->abs_vf_id);
+               return;
+       }
+       mbx->b_pending_msg = false;
 
        mbx->first_tlv = mbx->req_virt->first_tlv;
 
+       DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                  "VF[%02x]: Processing mailbox message [type %04x]\n",
+                  p_vf->abs_vf_id, mbx->first_tlv.tl.type);
+
        /* check if tlv type is known */
-       if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
+       if (qed_iov_tlv_supported(mbx->first_tlv.tl.type) &&
+           !p_vf->b_malicious) {
                switch (mbx->first_tlv.tl.type) {
                case CHANNEL_TLV_ACQUIRE:
                        qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
@@ -2913,6 +3276,15 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
                        qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
                        break;
                }
+       } else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
+               DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                          "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
+                          p_vf->abs_vf_id, mbx->first_tlv.tl.type);
+
+               qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
+                                    mbx->first_tlv.tl.type,
+                                    sizeof(struct pfvf_def_resp_tlv),
+                                    PFVF_STATUS_MALICIOUS);
        } else {
                /* unknown TLV - this may belong to a VF driver from the future
                 * - a version written after this PF driver was written, which
@@ -2946,36 +3318,45 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
        }
 }
 
-void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid)
+void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events)
 {
-       u64 add_bit = 1ULL << (vfid % 64);
+       int i;
+
+       memset(events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
+
+       qed_for_each_vf(p_hwfn, i) {
+               struct qed_vf_info *p_vf;
 
-       p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
+               p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
+               if (p_vf->vf_mbx.b_pending_msg)
+                       events[i / 64] |= 1ULL << (i % 64);
+       }
 }
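
The events array uses the usual 64-bit bitmap layout. A worked example, assuming VFs 5 and 70 have pending messages:

	u64 events[QED_VF_ARRAY_LENGTH] = { 0 };

	events[5 / 64]  |= 1ULL << (5 % 64);	/* events[0] |= BIT(5) */
	events[70 / 64] |= 1ULL << (70 % 64);	/* events[1] |= BIT(6) */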
 
-static void qed_iov_pf_get_and_clear_pending_events(struct qed_hwfn *p_hwfn,
-                                                   u64 *events)
+static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn,
+                                                      u16 abs_vfid)
 {
-       u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;
+       u8 min = (u8) p_hwfn->cdev->p_iov_info->first_vf_in_pf;
+
+       if (!_qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
+               DP_VERBOSE(p_hwfn,
+                          QED_MSG_IOV,
+                          "Got indication for VF [abs 0x%08x] that cannot be handled by PF\n",
+                          abs_vfid);
+               return NULL;
+       }
 
-       memcpy(events, p_pending_events, sizeof(u64) * QED_VF_ARRAY_LENGTH);
-       memset(p_pending_events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
+       return &p_hwfn->pf_iov_info->vfs_array[(u8) abs_vfid - min];
 }
 
 static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
                              u16 abs_vfid, struct regpair *vf_msg)
 {
-       u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf;
-       struct qed_vf_info *p_vf;
-
-       if (!qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min)) {
-               DP_VERBOSE(p_hwfn,
-                          QED_MSG_IOV,
-                          "Got a message from VF [abs 0x%08x] that cannot be handled by PF\n",
-                          abs_vfid);
+       struct qed_vf_info *p_vf = qed_sriov_get_vf_from_absid(p_hwfn,
+                                                              abs_vfid);
+
+       if (!p_vf)
                return 0;
-       }
-       p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
 
        /* List the physical address of the request so that handler
         * could later on copy the message from it.
@@ -2983,12 +3364,35 @@ static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
        p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
 
        /* Mark the event and schedule the workqueue */
-       qed_iov_pf_add_pending_events(p_hwfn, p_vf->relative_vf_id);
+       p_vf->vf_mbx.b_pending_msg = true;
        qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);
 
        return 0;
 }
 
+static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
+                                    struct malicious_vf_eqe_data *p_data)
+{
+       struct qed_vf_info *p_vf;
+
+       p_vf = qed_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id);
+
+       if (!p_vf)
+               return;
+
+       if (!p_vf->b_malicious) {
+               DP_NOTICE(p_hwfn,
+                         "VF [%d] - Malicious behavior [%02x]\n",
+                         p_vf->abs_vf_id, p_data->err_id);
+
+               p_vf->b_malicious = true;
+       } else {
+               DP_INFO(p_hwfn,
+                       "VF [%d] - Malicious behavior [%02x]\n",
+                       p_vf->abs_vf_id, p_data->err_id);
+       }
+}
+
 int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
                        u8 opcode, __le16 echo, union event_ring_data *data)
 {
@@ -2996,6 +3400,9 @@ int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
        case COMMON_EVENT_VF_PF_CHANNEL:
                return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
                                          &data->vf_pf_channel.msg_addr);
+       case COMMON_EVENT_MALICIOUS_VF:
+               qed_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
+               return 0;
        default:
                DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
                        opcode);
@@ -3012,7 +3419,7 @@ u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
                goto out;
 
        for (i = rel_vf_id; i < p_iov->total_vfs; i++)
-               if (qed_iov_is_valid_vfid(p_hwfn, rel_vf_id, true))
+               if (qed_iov_is_valid_vfid(p_hwfn, rel_vf_id, true, false))
                        return i;
 
 out:
@@ -3059,19 +3466,24 @@ static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
                return;
        }
 
+       if (vf_info->b_malicious) {
+               DP_NOTICE(p_hwfn->cdev,
+                         "Can't set forced MAC to malicious VF [%d]\n", vfid);
+               return;
+       }
+
        feature = 1 << MAC_ADDR_FORCED;
        memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
 
        vf_info->bulletin.p_virt->valid_bitmap |= feature;
        /* Forced MAC will disable MAC_ADDR */
-       vf_info->bulletin.p_virt->valid_bitmap &=
-                               ~(1 << VFPF_BULLETIN_MAC_ADDR);
+       vf_info->bulletin.p_virt->valid_bitmap &= ~BIT(VFPF_BULLETIN_MAC_ADDR);
 
        qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
 }
 
-void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
-                                     u16 pvid, int vfid)
+static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
+                                            u16 pvid, int vfid)
 {
        struct qed_vf_info *vf_info;
        u64 feature;
@@ -3083,6 +3495,12 @@ void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
                return;
        }
 
+       if (vf_info->b_malicious) {
+               DP_NOTICE(p_hwfn->cdev,
+                         "Can't set forced vlan to malicious VF [%d]\n", vfid);
+               return;
+       }
+
        feature = 1 << VLAN_ADDR_FORCED;
        vf_info->bulletin.p_virt->pvid = pvid;
        if (pvid)
@@ -3104,7 +3522,7 @@ static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
        return !!p_vf_info->vport_instance;
 }
 
-bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
+static bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
 {
        struct qed_vf_info *p_vf_info;
 
@@ -3126,7 +3544,7 @@ static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid)
        return vf_info->spoof_chk;
 }
 
-int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
+static int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
 {
        struct qed_vf_info *vf;
        int rc = -EINVAL;
@@ -3163,13 +3581,14 @@ static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
        if (!p_vf || !p_vf->bulletin.p_virt)
                return NULL;
 
-       if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
+       if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)))
                return NULL;
 
        return p_vf->bulletin.p_virt->mac;
 }
 
-u16 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
+static u16
+qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
 {
        struct qed_vf_info *p_vf;
 
@@ -3177,7 +3596,7 @@ u16 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
        if (!p_vf || !p_vf->bulletin.p_virt)
                return 0;
 
-       if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
+       if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED)))
                return 0;
 
        return p_vf->bulletin.p_virt->pvid;
@@ -3201,7 +3620,8 @@ static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
        return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
 }
 
-int qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
+static int
+qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
 {
        struct qed_vf_info *vf;
        u8 vport_id;
@@ -3295,7 +3715,7 @@ int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
                qed_for_each_vf(hwfn, j) {
                        int k;
 
-                       if (!qed_iov_is_valid_vfid(hwfn, j, true))
+                       if (!qed_iov_is_valid_vfid(hwfn, j, true, false))
                                continue;
 
                        /* Wait until VF is disabled before releasing */
@@ -3322,9 +3742,28 @@ int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
        return 0;
 }
 
+static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn,
+                                       u16 vfid,
+                                       struct qed_iov_vf_init_params *params)
+{
+       u16 base, i;
+
+       /* Since resources are distributed equally per-VF, and we assume the
+        * PF has acquired the first QED_PF_L2_QUE queues, we start assigning
+        * sequentially from there.
+        */
+       base = FEAT_NUM(hwfn, QED_PF_L2_QUE) + vfid * params->num_queues;
+
+       params->rel_vf_id = vfid;
+       for (i = 0; i < params->num_queues; i++) {
+               params->req_rx_queue[i] = base + i;
+               params->req_tx_queue[i] = base + i;
+       }
+}
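
A worked example of the base computation, with assumed feature sizes: if FEAT_NUM(hwfn, QED_PF_L2_QUE) is 16 and each VF receives 4 queues, VF 2 is assigned queue-zones 24..27 for both Rx and Tx:

	/* base = 16 + 2 * 4 = 24 (assumed sizes) */
	u16 base = 16 + 2 * 4;
	for (i = 0; i < 4; i++) {
		params->req_rx_queue[i] = base + i;	/* 24, 25, 26, 27 */
		params->req_tx_queue[i] = base + i;
	}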
+
 static int qed_sriov_enable(struct qed_dev *cdev, int num)
 {
-       struct qed_sb_cnt_info sb_cnt_info;
+       struct qed_iov_vf_init_params params;
        int i, j, rc;
 
        if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
@@ -3333,11 +3772,17 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num)
                return -EINVAL;
        }
 
+       memset(&params, 0, sizeof(params));
+
        /* Initialize HW for VF access */
        for_each_hwfn(cdev, j) {
                struct qed_hwfn *hwfn = &cdev->hwfns[j];
                struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
-               int num_sbs = 0, limit = 16;
+
+               /* Make sure not to use more than 16 queues per VF */
+               params.num_queues = min_t(int,
+                                         FEAT_NUM(hwfn, QED_VF_L2_QUE) / num,
+                                         16);
 
                if (!ptt) {
                        DP_ERR(hwfn, "Failed to acquire ptt\n");
@@ -3345,19 +3790,12 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num)
                        goto err;
                }
 
-               if (IS_MF_DEFAULT(hwfn))
-                       limit = MAX_NUM_VFS_BB / hwfn->num_funcs_on_engine;
-
-               memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
-               qed_int_get_num_sbs(hwfn, &sb_cnt_info);
-               num_sbs = min_t(int, sb_cnt_info.sb_free_blk, limit);
-
                for (i = 0; i < num; i++) {
-                       if (!qed_iov_is_valid_vfid(hwfn, i, false))
+                       if (!qed_iov_is_valid_vfid(hwfn, i, false, true))
                                continue;
 
-                       rc = qed_iov_init_hw_for_vf(hwfn,
-                                                   ptt, i, num_sbs / num);
+                       qed_sriov_enable_qid_config(hwfn, i, &params);
+                       rc = qed_iov_init_hw_for_vf(hwfn, ptt, &params);
                        if (rc) {
                                DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
                                qed_ptt_release(hwfn, ptt);
@@ -3405,7 +3843,7 @@ static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid)
                return -EINVAL;
        }
 
-       if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) {
+       if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
                DP_VERBOSE(cdev, QED_MSG_IOV,
                           "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
                return -EINVAL;
@@ -3437,7 +3875,7 @@ static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid)
                return -EINVAL;
        }
 
-       if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) {
+       if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
                DP_VERBOSE(cdev, QED_MSG_IOV,
                           "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
                return -EINVAL;
@@ -3471,7 +3909,7 @@ static int qed_get_vf_config(struct qed_dev *cdev,
        if (IS_VF(cdev))
                return -EINVAL;
 
-       if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) {
+       if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, false)) {
                DP_VERBOSE(cdev, QED_MSG_IOV,
                           "VF index [%d] isn't active\n", vf_id);
                return -EINVAL;
@@ -3501,6 +3939,7 @@ static int qed_get_vf_config(struct qed_dev *cdev,
 
 void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
 {
+       struct qed_hwfn *lead_hwfn = QED_LEADING_HWFN(hwfn->cdev);
        struct qed_mcp_link_capabilities caps;
        struct qed_mcp_link_params params;
        struct qed_mcp_link_state link;
@@ -3517,9 +3956,15 @@ void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
                if (!vf_info)
                        continue;
 
-               memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
-               memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
-               memcpy(&caps, qed_mcp_get_link_capabilities(hwfn),
+               /* Only hwfn0 is actually interested in the link speed,
+                * and only it receives the MFW link indication, so the
+                * configuration must be taken from it - otherwise things
+                * like rate limiting for hwfn1 VFs would not work.
+                */
+               memcpy(&params, qed_mcp_get_link_params(lead_hwfn),
+                      sizeof(params));
+               memcpy(&link, qed_mcp_get_link_state(lead_hwfn), sizeof(link));
+               memcpy(&caps, qed_mcp_get_link_capabilities(lead_hwfn),
                       sizeof(caps));
 
                /* Modify link according to the VF's configured link state */
@@ -3575,7 +4020,7 @@ static int qed_set_vf_link_state(struct qed_dev *cdev,
        if (IS_VF(cdev))
                return -EINVAL;
 
-       if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) {
+       if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, true)) {
                DP_VERBOSE(cdev, QED_MSG_IOV,
                           "VF index [%d] isn't active\n", vf_id);
                return -EINVAL;
@@ -3656,6 +4101,32 @@ static int qed_set_vf_rate(struct qed_dev *cdev,
        return 0;
 }
 
+static int qed_set_vf_trust(struct qed_dev *cdev, int vfid, bool trust)
+{
+       int i;
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *hwfn = &cdev->hwfns[i];
+               struct qed_public_vf_info *vf;
+
+               if (!qed_iov_pf_sanity_check(hwfn, vfid)) {
+                       DP_NOTICE(hwfn,
+                                 "SR-IOV sanity check failed, can't set trust\n");
+                       return -EINVAL;
+               }
+
+               vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
+
+               if (vf->is_trusted_request == trust)
+                       return 0;
+               vf->is_trusted_request = trust;
+
+               qed_schedule_iov(hwfn, QED_IOV_WQ_TRUST_FLAG);
+       }
+
+       return 0;
+}
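
This op is exported through qed_iov_hv_ops (see the .set_trust entry below), so an upper driver can wire it into the standard ndo_set_vf_trust callback. A qede-style sketch, shown only as an assumption of how a consumer would call it:

static int qede_set_vf_trust(struct net_device *dev, int vfid, bool trust)
{
	struct qede_dev *edev = netdev_priv(dev);	/* hypothetical wiring */

	return edev->ops->iov->set_trust(edev->cdev, vfid, trust);
}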
+
 static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
 {
        u64 events[QED_VF_ARRAY_LENGTH];
@@ -3670,7 +4141,7 @@ static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
                return;
        }
 
-       qed_iov_pf_get_and_clear_pending_events(hwfn, events);
+       qed_iov_pf_get_pending_events(hwfn, events);
 
        DP_VERBOSE(hwfn, QED_MSG_IOV,
                   "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
@@ -3760,7 +4231,63 @@ static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
        qed_ptt_release(hwfn, ptt);
 }
 
-void qed_iov_pf_task(struct work_struct *work)
+static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
+{
+       struct qed_sp_vport_update_params params;
+       struct qed_filter_accept_flags *flags;
+       struct qed_public_vf_info *vf_info;
+       struct qed_vf_info *vf;
+       u8 mask;
+       int i;
+
+       mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
+       flags = &params.accept_flags;
+
+       qed_for_each_vf(hwfn, i) {
+               /* Make sure the currently requested configuration didn't
+                * flip, so that we don't end up configuring something
+                * that isn't needed.
+                */
+               vf_info = qed_iov_get_public_vf_info(hwfn, i, true);
+               if (vf_info->is_trusted_configured ==
+                   vf_info->is_trusted_request)
+                       continue;
+               vf_info->is_trusted_configured = vf_info->is_trusted_request;
+
+               /* Validate that the VF has a configured vport */
+               vf = qed_iov_get_vf_info(hwfn, i, true);
+               if (!vf->vport_instance)
+                       continue;
+
+               memset(&params, 0, sizeof(params));
+               params.opaque_fid = vf->opaque_fid;
+               params.vport_id = vf->vport_id;
+
+               if (vf_info->rx_accept_mode & mask) {
+                       flags->update_rx_mode_config = 1;
+                       flags->rx_accept_filter = vf_info->rx_accept_mode;
+               }
+
+               if (vf_info->tx_accept_mode & mask) {
+                       flags->update_tx_mode_config = 1;
+                       flags->tx_accept_filter = vf_info->tx_accept_mode;
+               }
+
+               /* Remove if needed; Otherwise this would set the mask */
+               if (!vf_info->is_trusted_configured) {
+                       flags->rx_accept_filter &= ~mask;
+                       flags->tx_accept_filter &= ~mask;
+               }
+
+               if (flags->update_rx_mode_config ||
+                   flags->update_tx_mode_config)
+                       qed_sp_vport_update(hwfn, &params,
+                                           QED_SPQ_MODE_EBLOCK, NULL);
+       }
+}
+
+static void qed_iov_pf_task(struct work_struct *work)
+
 {
        struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
                                             iov_task.work);
@@ -3794,6 +4321,9 @@ void qed_iov_pf_task(struct work_struct *work)
        if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
                               &hwfn->iov_task_flags))
                qed_handle_bulletin_post(hwfn);
+
+       if (test_and_clear_bit(QED_IOV_WQ_TRUST_FLAG, &hwfn->iov_task_flags))
+               qed_iov_handle_trust_change(hwfn);
 }
 
 void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
@@ -3856,4 +4386,5 @@ const struct qed_iov_hv_ops qed_iov_ops_pass = {
        .set_link_state = &qed_set_vf_link_state,
        .set_spoof = &qed_spoof_configure,
        .set_rate = &qed_set_vf_rate,
+       .set_trust = &qed_set_vf_trust,
 };
index 0dd23e409b3ff500bab9da2c311555e3dcd1b557..8e96b1d1930847fc3b03e1518da1a205f09b6291 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef _QED_SRIOV_H
@@ -56,6 +80,31 @@ struct qed_public_vf_info {
 
        /* Currently configured Tx rate in MB/sec. 0 if unconfigured */
        int tx_rate;
+
+       /* Trusted VFs can configure promiscuous mode.
+        * Also store shadow promisc configuration if needed.
+        */
+       bool is_trusted_configured;
+       bool is_trusted_request;
+       u8 rx_accept_mode;
+       u8 tx_accept_mode;
+};
+
+struct qed_iov_vf_init_params {
+       u16 rel_vf_id;
+
+       /* Number of requested queues; currently we don't support
+        * different numbers of Rx and Tx queues.
+        */
+       u16 num_queues;
+
+       /* Allow the client to choose which qzones to use for Rx/Tx,
+        * and which queue_base to use for Tx queues on a per-queue basis.
+        * Note that values should be relative to the PF's resources.
+        */
+       u16 req_rx_queue[QED_MAX_VF_CHAINS_PER_PF];
+       u16 req_tx_queue[QED_MAX_VF_CHAINS_PER_PF];
 };
 
 /* This struct is part of qed_dev and contains data relevant to all hwfns;
@@ -91,6 +140,9 @@ struct qed_iov_vf_mbx {
        /* Address in VF where a pending message is located */
        dma_addr_t pending_req;
 
+       /* Message from VF awaits handling */
+       bool b_pending_msg;
+
        u8 *offset;
 
        /* saved VF request header */
@@ -99,10 +151,10 @@ struct qed_iov_vf_mbx {
 
 struct qed_vf_q_info {
        u16 fw_rx_qid;
+       struct qed_queue_cid *p_rx_cid;
        u16 fw_tx_qid;
+       struct qed_queue_cid *p_tx_cid;
        u8 fw_cid;
-       u8 rxq_active;
-       u8 txq_active;
 };
 
 enum vf_state {
@@ -132,6 +184,7 @@ struct qed_vf_info {
        struct qed_iov_vf_mbx vf_mbx;
        enum vf_state state;
        bool b_init;
+       bool b_malicious;
        u8 to_disable;
 
        struct qed_bulletin bulletin;
@@ -182,7 +235,6 @@ struct qed_vf_info {
  */
 struct qed_pf_iov {
        struct qed_vf_info vfs_array[MAX_NUM_VFS];
-       u64 pending_events[QED_VF_ARRAY_LENGTH];
        u64 pending_flr[QED_VF_ARRAY_LENGTH];
 
        /* Allocate message address continuously and split to each VF */
@@ -203,6 +255,8 @@ enum qed_iov_wq_flag {
        QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
        QED_IOV_WQ_STOP_WQ_FLAG,
        QED_IOV_WQ_FLR_FLAG,
+       QED_IOV_WQ_TRUST_FLAG,
+       QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG,
 };
 
 #ifdef CONFIG_QED_SRIOV
@@ -294,9 +348,9 @@ int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
  * @param p_hwfn
  * @param disabled_vfs - bitmask of all VFs on path that were FLRed
  *
- * @return 1 iff one of the PF's vfs got FLRed. 0 otherwise.
+ * @return true iff one of the PF's vfs got FLRed. false otherwise.
  */
-int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs);
+bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs);
 
 /**
  * @brief Search extended TLVs in request/reply buffer.
@@ -353,10 +407,10 @@ static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
        return -EINVAL;
 }
 
-static inline int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn,
-                                     u32 *disabled_vfs)
+static inline bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn,
+                                      u32 *disabled_vfs)
 {
-       return 0;
+       return false;
 }
 
 static inline void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
index 9b780b31b15c84c92c754d58e9a7ebd7d6eead53..798786562b1bbc266f9da05132cd37f39a5b2cdf 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #include <linux/crc32.h>
@@ -46,6 +70,17 @@ static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length)
        return p_tlv;
 }
 
+static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status)
+{
+       union pfvf_tlvs *resp = p_hwfn->vf_iov_info->pf2vf_reply;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                  "VF request status = 0x%x, PF reply status = 0x%x\n",
+                  req_status, resp->default_resp.hdr.status);
+
+       mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
+}
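
All VF-channel requests in this file now funnel their exit path through this helper, which logs both statuses and releases the channel mutex taken by qed_vf_pf_prep(). A condensed sketch of the caller pattern (simplified: real callers also append a CHANNEL_TLV_LIST_END TLV before sending):

	struct vfpf_first_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP, sizeof(*req));
	resp = &p_hwfn->vf_iov_info->pf2vf_reply->default_resp;

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
		rc = -EINVAL;

	qed_vf_pf_req_end(p_hwfn, rc);	/* logs statuses, drops the mutex */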
+
 static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
 {
        union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
@@ -99,20 +134,22 @@ static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
        }
 
        if (!*done) {
-               DP_VERBOSE(p_hwfn, QED_MSG_IOV,
-                          "VF <-- PF Timeout [Type %d]\n",
-                          p_req->first_tlv.tl.type);
+               DP_NOTICE(p_hwfn,
+                         "VF <-- PF Timeout [Type %d]\n",
+                         p_req->first_tlv.tl.type);
                rc = -EBUSY;
-               goto exit;
        } else {
-               DP_VERBOSE(p_hwfn, QED_MSG_IOV,
-                          "PF response: %d [Type %d]\n",
-                          *done, p_req->first_tlv.tl.type);
+               if ((*done != PFVF_STATUS_SUCCESS) &&
+                   (*done != PFVF_STATUS_NO_RESOURCE))
+                       DP_NOTICE(p_hwfn,
+                                 "PF response: %d [Type %d]\n",
+                                 *done, p_req->first_tlv.tl.type);
+               else
+                       DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                                  "PF response: %d [Type %d]\n",
+                                  *done, p_req->first_tlv.tl.type);
        }
 
-exit:
-       mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
-
        return rc;
 }
 
@@ -191,6 +228,9 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
                DP_VERBOSE(p_hwfn,
                           QED_MSG_IOV, "attempting to acquire resources\n");
 
+               /* Clear response buffer, as this might be a re-send */
+               memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
+
                /* send acquire request */
                rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
                if (rc)
@@ -205,9 +245,12 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
                        /* PF agrees to allocate our resources */
                        if (!(resp->pfdev_info.capabilities &
                              PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) {
-                               DP_INFO(p_hwfn,
-                                       "PF is using old incompatible driver; Either downgrade driver or request provider to update hypervisor version\n");
-                               return -EINVAL;
+                               /* It's possible a legacy PF mistakenly
+                                * accepted; we don't care - simply mark it
+                                * as legacy and continue.
+                                */
+                               req->vfdev_info.capabilities |=
+                                   VFPF_ACQUIRE_CAP_PRE_FP_HSI;
                        }
                        DP_VERBOSE(p_hwfn, QED_MSG_IOV, "resources acquired\n");
                        resources_acquired = true;
@@ -215,27 +258,55 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
                           attempts < VF_ACQUIRE_THRESH) {
                        qed_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
                                                      &resp->resc);
+               } else if (resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) {
+                       if (pfdev_info->major_fp_hsi &&
+                           (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
+                               DP_NOTICE(p_hwfn,
+                                         "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n",
+                                         pfdev_info->major_fp_hsi,
+                                         pfdev_info->minor_fp_hsi,
+                                         ETH_HSI_VER_MAJOR,
+                                         ETH_HSI_VER_MINOR,
+                                         pfdev_info->major_fp_hsi);
+                               rc = -EINVAL;
+                               goto exit;
+                       }
 
-                       /* Clear response buffer */
-                       memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
-               } else if ((resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) &&
-                          pfdev_info->major_fp_hsi &&
-                          (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
-                       DP_NOTICE(p_hwfn,
-                                 "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n",
-                                 pfdev_info->major_fp_hsi,
-                                 pfdev_info->minor_fp_hsi,
-                                 ETH_HSI_VER_MAJOR,
-                                 ETH_HSI_VER_MINOR, pfdev_info->major_fp_hsi);
-                       return -EINVAL;
+                       if (!pfdev_info->major_fp_hsi) {
+                               if (req->vfdev_info.capabilities &
+                                   VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
+                                       DP_NOTICE(p_hwfn,
+                                                 "PF uses very old drivers. Please change to a VF driver using no later than 8.8.x.x.\n");
+                                       rc = -EINVAL;
+                                       goto exit;
+                               } else {
+                                       DP_INFO(p_hwfn,
+                                               "PF is old - try re-acquire to see if it supports FW-version override\n");
+                                       req->vfdev_info.capabilities |=
+                                           VFPF_ACQUIRE_CAP_PRE_FP_HSI;
+                                       continue;
+                               }
+                       }
+
+                       /* If PF/VF are using the same major version, the PF
+                        * must have had its reasons. Simply fail.
+                        */
+                       DP_NOTICE(p_hwfn, "PF rejected acquisition by VF\n");
+                       rc = -EINVAL;
+                       goto exit;
                } else {
                        DP_ERR(p_hwfn,
                               "PF returned error %d to VF acquisition request\n",
                               resp->hdr.status);
-                       return -EAGAIN;
+                       rc = -EAGAIN;
+                       goto exit;
                }
        }
 
+       /* Mark the PF as legacy, if needed */
+       if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_PRE_FP_HSI)
+               p_iov->b_pre_fp_hsi = true;
+
        /* Update bulletin board size with response from PF */
        p_iov->bulletin.size = resp->bulletin_size;
 
@@ -253,14 +324,18 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
                }
        }
 
-       if (ETH_HSI_VER_MINOR &&
+       if (!p_iov->b_pre_fp_hsi &&
+           ETH_HSI_VER_MINOR &&
            (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
                DP_INFO(p_hwfn,
                        "PF is using older fastpath HSI; %02x.%02x is configured\n",
                        ETH_HSI_VER_MAJOR, resp->pfdev_info.minor_fp_hsi);
        }
 
-       return 0;
+exit:
+       qed_vf_pf_req_end(p_hwfn, rc);
+
+       return rc;
 }
 
 int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
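Taken together, the hunks above change qed_vf_pf_acquire() so that every exit path runs qed_vf_pf_req_end(), and so that a PF which predates the fastpath-HSI versioning gets one retry with VFPF_ACQUIRE_CAP_PRE_FP_HSI set. A minimal, self-contained C model of that control flow (the stubbed names below are placeholders, not qed APIs):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

enum pfvf_status { PFVF_SUCCESS, PFVF_NO_RESOURCE, PFVF_NOT_SUPPORTED };

/* Stub mailbox exchange: pretend an old PF rejects the first,
 * modern-HSI attempt and accepts the legacy-flagged retry.
 */
static enum pfvf_status send_acquire(bool legacy, int attempt)
{
	return (attempt == 0 && !legacy) ? PFVF_NOT_SUPPORTED : PFVF_SUCCESS;
}

static int acquire_flow(void)
{
	bool legacy = false;
	int attempt, rc = 0;

	for (attempt = 0; attempt < 8; attempt++) {	/* cf. VF_ACQUIRE_THRESH */
		enum pfvf_status st = send_acquire(legacy, attempt);

		if (st == PFVF_SUCCESS)
			break;			/* resources acquired */
		if (st == PFVF_NO_RESOURCE)
			continue;		/* retry with a shrunk request */
		if (st == PFVF_NOT_SUPPORTED && !legacy) {
			legacy = true;		/* VFPF_ACQUIRE_CAP_PRE_FP_HSI */
			continue;		/* re-acquire as a pre-fp-hsi VF */
		}
		rc = -EINVAL;			/* incompatible PF - give up */
		break;
	}

	/* Mirrors the new 'exit' label: the channel is always released */
	printf("req_end(rc=%d), legacy PF: %s\n", rc, legacy ? "yes" : "no");
	return rc;
}

int main(void)
{
	return acquire_flow() ? 1 : 0;
}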
@@ -286,31 +361,23 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
 
        /* Allocate vf sriov info */
        p_iov = kzalloc(sizeof(*p_iov), GFP_KERNEL);
-       if (!p_iov) {
-               DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n");
+       if (!p_iov)
                return -ENOMEM;
-       }
 
        /* Allocate vf2pf msg */
        p_iov->vf2pf_request = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                                  sizeof(union vfpf_tlvs),
                                                  &p_iov->vf2pf_request_phys,
                                                  GFP_KERNEL);
-       if (!p_iov->vf2pf_request) {
-               DP_NOTICE(p_hwfn,
-                         "Failed to allocate `vf2pf_request' DMA memory\n");
+       if (!p_iov->vf2pf_request)
                goto free_p_iov;
-       }
 
        p_iov->pf2vf_reply = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                                sizeof(union pfvf_tlvs),
                                                &p_iov->pf2vf_reply_phys,
                                                GFP_KERNEL);
-       if (!p_iov->pf2vf_reply) {
-               DP_NOTICE(p_hwfn,
-                         "Failed to allocate `pf2vf_reply' DMA memory\n");
+       if (!p_iov->pf2vf_reply)
                goto free_vf2pf_request;
-       }
 
        DP_VERBOSE(p_hwfn,
                   QED_MSG_IOV,
@@ -347,19 +414,22 @@ free_p_iov:
 
        return -ENOMEM;
 }
-
-int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
-                       u8 rx_qid,
-                       u16 sb,
-                       u8 sb_index,
-                       u16 bd_max_bytes,
-                       dma_addr_t bd_chain_phys_addr,
-                       dma_addr_t cqe_pbl_addr,
-                       u16 cqe_pbl_size, void __iomem **pp_prod)
+#define TSTORM_QZONE_START   PXP_VF_BAR0_START_SDM_ZONE_A
+#define MSTORM_QZONE_START(dev)   (TSTORM_QZONE_START +        \
+                                  (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
+
+int
+qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
+                   struct qed_queue_cid *p_cid,
+                   u16 bd_max_bytes,
+                   dma_addr_t bd_chain_phys_addr,
+                   dma_addr_t cqe_pbl_addr,
+                   u16 cqe_pbl_size, void __iomem **pp_prod)
 {
        struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
        struct pfvf_start_queue_resp_tlv *resp;
        struct vfpf_start_rxq_tlv *req;
+       u8 rx_qid = p_cid->rel.queue_id;
        int rc;
 
        /* clear mailbox and prep first tlv */
@@ -369,11 +439,27 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
        req->cqe_pbl_addr = cqe_pbl_addr;
        req->cqe_pbl_size = cqe_pbl_size;
        req->rxq_addr = bd_chain_phys_addr;
-       req->hw_sb = sb;
-       req->sb_index = sb_index;
+       req->hw_sb = p_cid->rel.sb;
+       req->sb_index = p_cid->rel.sb_idx;
        req->bd_max_bytes = bd_max_bytes;
        req->stat_id = -1;
 
+       /* If PF is legacy, we'll need to calculate producers ourselves
+        * as well as clean them.
+        */
+       if (p_iov->b_pre_fp_hsi) {
+               u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
+               u32 init_prod_val = 0;
+
+               *pp_prod = (u8 __iomem *)
+                   p_hwfn->regview +
+                   MSTORM_QZONE_START(p_hwfn->cdev) +
+                   hw_qid * MSTORM_QZONE_SIZE;
+
+               /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
+               __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
+                                 (u32 *)(&init_prod_val));
+       }
        /* add list termination tlv */
        qed_add_tlv(p_hwfn, &p_iov->offset,
                    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
@@ -381,13 +467,15 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
        resp = &p_iov->pf2vf_reply->queue_start;
        rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
        if (rc)
-               return rc;
+               goto exit;
 
-       if (resp->hdr.status != PFVF_STATUS_SUCCESS)
-               return -EINVAL;
+       if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+               rc = -EINVAL;
+               goto exit;
+       }
 
        /* Learn the address of the producer from the response */
-       if (pp_prod) {
+       if (!p_iov->b_pre_fp_hsi) {
                u32 init_prod_val = 0;
 
                *pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset;
@@ -399,11 +487,14 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
                __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
                                  (u32 *)&init_prod_val);
        }
+exit:
+       qed_vf_pf_req_end(p_hwfn, rc);
 
        return rc;
 }
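For the legacy case above, the VF cannot rely on resp->offset; it derives the Rx producer address from the MSTORM queue-zone layout and then zeroes the producers itself. A small standalone model of the arithmetic (QZONE_BASE/QZONE_SIZE are illustrative constants standing in for MSTORM_QZONE_START()/MSTORM_QZONE_SIZE):

#include <stdint.h>

#define QZONE_BASE 0x1000u	/* stand-in for MSTORM_QZONE_START(dev) */
#define QZONE_SIZE 0x80u	/* stand-in for MSTORM_QZONE_SIZE */

/* Producer address under a legacy PF: the base of the MSTORM queue
 * zones plus one fixed-size zone per hardware queue id.
 */
static uintptr_t legacy_rx_prod(uintptr_t regview, uint8_t hw_qid)
{
	return regview + QZONE_BASE + (uintptr_t)hw_qid * QZONE_SIZE;
}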
 
-int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, u16 rx_qid, bool cqe_completion)
+int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
+                      struct qed_queue_cid *p_cid, bool cqe_completion)
 {
        struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
        struct vfpf_stop_rxqs_tlv *req;
@@ -413,7 +504,7 @@ int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, u16 rx_qid, bool cqe_completion)
        /* clear mailbox and prep first tlv */
        req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));
 
-       req->rx_qid = rx_qid;
+       req->rx_qid = p_cid->rel.queue_id;
        req->num_rxqs = 1;
        req->cqe_completion = cqe_completion;
 
@@ -424,36 +515,41 @@ int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, u16 rx_qid, bool cqe_completion)
        resp = &p_iov->pf2vf_reply->default_resp;
        rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
        if (rc)
-               return rc;
+               goto exit;
+
+       if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+               rc = -EINVAL;
+               goto exit;
+       }
 
-       if (resp->hdr.status != PFVF_STATUS_SUCCESS)
-               return -EINVAL;
+exit:
+       qed_vf_pf_req_end(p_hwfn, rc);
 
        return rc;
 }
 
-int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
-                       u16 tx_queue_id,
-                       u16 sb,
-                       u8 sb_index,
-                       dma_addr_t pbl_addr,
-                       u16 pbl_size, void __iomem **pp_doorbell)
+int
+qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
+                   struct qed_queue_cid *p_cid,
+                   dma_addr_t pbl_addr,
+                   u16 pbl_size, void __iomem **pp_doorbell)
 {
        struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
        struct pfvf_start_queue_resp_tlv *resp;
        struct vfpf_start_txq_tlv *req;
+       u16 qid = p_cid->rel.queue_id;
        int rc;
 
        /* clear mailbox and prep first tlv */
        req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));
 
-       req->tx_qid = tx_queue_id;
+       req->tx_qid = qid;
 
        /* Tx */
        req->pbl_addr = pbl_addr;
        req->pbl_size = pbl_size;
-       req->hw_sb = sb;
-       req->sb_index = sb_index;
+       req->hw_sb = p_cid->rel.sb;
+       req->sb_index = p_cid->rel.sb_idx;
 
        /* add list termination tlv */
        qed_add_tlv(p_hwfn, &p_iov->offset,
@@ -469,19 +565,29 @@ int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
                goto exit;
        }
 
-       if (pp_doorbell) {
+       /* Modern PFs provide the actual offsets, while legacy PFs
+        * provided only the queue id.
+        */
+       if (!p_iov->b_pre_fp_hsi) {
                *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + resp->offset;
+       } else {
+               u8 cid = p_iov->acquire_resp.resc.cid[qid];
 
-               DP_VERBOSE(p_hwfn, QED_MSG_IOV,
-                          "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
-                          tx_queue_id, *pp_doorbell, resp->offset);
+               *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
+                                            qed_db_addr_vf(cid,
+                                                           DQ_DEMS_LEGACY);
        }
+
+       DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                  "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
+                  qid, *pp_doorbell, resp->offset);
 exit:
+       qed_vf_pf_req_end(p_hwfn, rc);
 
        return rc;
 }
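The same legacy/modern split governs the Tx doorbell: a modern PF returns the doorbell offset in the response, while with a legacy PF the VF derives it from the CID saved in the acquire response via qed_db_addr_vf(cid, DQ_DEMS_LEGACY). Sketched standalone, with db_addr_vf() as an illustrative stand-in for the real, hardware-defined encoding:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-in for qed_db_addr_vf(cid, DQ_DEMS_LEGACY) */
static uintptr_t db_addr_vf(uint8_t cid)
{
	return (uintptr_t)cid * 8u;
}

static uintptr_t txq_doorbell(uintptr_t doorbells, bool pre_fp_hsi,
			      uint32_t resp_offset, uint8_t cid)
{
	if (!pre_fp_hsi)
		return doorbells + resp_offset;	/* modern PF: trust reply */
	return doorbells + db_addr_vf(cid);	/* legacy PF: derive locally */
}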
 
-int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid)
+int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
 {
        struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
        struct vfpf_stop_txqs_tlv *req;
@@ -491,7 +597,7 @@ int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid)
        /* clear mailbox and prep first tlv */
        req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));
 
-       req->tx_qid = tx_qid;
+       req->tx_qid = p_cid->rel.queue_id;
        req->num_txqs = 1;
 
        /* add list termination tlv */
@@ -501,10 +607,15 @@ int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid)
        resp = &p_iov->pf2vf_reply->default_resp;
        rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
        if (rc)
-               return rc;
+               goto exit;
 
-       if (resp->hdr.status != PFVF_STATUS_SUCCESS)
-               return -EINVAL;
+       if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+               rc = -EINVAL;
+               goto exit;
+       }
+
+exit:
+       qed_vf_pf_req_end(p_hwfn, rc);
 
        return rc;
 }
@@ -543,10 +654,15 @@ int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
        resp = &p_iov->pf2vf_reply->default_resp;
        rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
        if (rc)
-               return rc;
+               goto exit;
+
+       if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+               rc = -EINVAL;
+               goto exit;
+       }
 
-       if (resp->hdr.status != PFVF_STATUS_SUCCESS)
-               return -EINVAL;
+exit:
+       qed_vf_pf_req_end(p_hwfn, rc);
 
        return rc;
 }
@@ -567,10 +683,15 @@ int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
 
        rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
        if (rc)
-               return rc;
+               goto exit;
+
+       if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+               rc = -EINVAL;
+               goto exit;
+       }
 
-       if (resp->hdr.status != PFVF_STATUS_SUCCESS)
-               return -EINVAL;
+exit:
+       qed_vf_pf_req_end(p_hwfn, rc);
 
        return rc;
 }
@@ -723,6 +844,7 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
        if (p_params->rss_params) {
                struct qed_rss_params *rss_params = p_params->rss_params;
                struct vfpf_vport_update_rss_tlv *p_rss_tlv;
+               int i, table_size;
 
                size = sizeof(struct vfpf_vport_update_rss_tlv);
                p_rss_tlv = qed_add_tlv(p_hwfn,
@@ -745,8 +867,15 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
                p_rss_tlv->rss_enable = rss_params->rss_enable;
                p_rss_tlv->rss_caps = rss_params->rss_caps;
                p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;
-               memcpy(p_rss_tlv->rss_ind_table, rss_params->rss_ind_table,
-                      sizeof(rss_params->rss_ind_table));
+
+               table_size = min_t(int, T_ETH_INDIRECTION_TABLE_SIZE,
+                                  1 << p_rss_tlv->rss_table_size_log);
+               for (i = 0; i < table_size; i++) {
+                       struct qed_queue_cid *p_queue;
+
+                       p_queue = rss_params->rss_ind_table[i];
+                       p_rss_tlv->rss_ind_table[i] = p_queue->rel.queue_id;
+               }
                memcpy(p_rss_tlv->rss_key, rss_params->rss_key,
                       sizeof(rss_params->rss_key));
        }
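Since the RSS indirection table now holds struct qed_queue_cid pointers, the TLV path translates each entry back to a relative queue id, clamping at T_ETH_INDIRECTION_TABLE_SIZE so a large rss_table_size_log cannot overrun the TLV array. A minimal standalone model of that loop:

#include <stdint.h>

#define T_ETH_INDIRECTION_TABLE_SIZE 128

struct queue_cid { uint16_t queue_id; };	/* reduced qed_queue_cid */

static void fill_ind_table(uint16_t *tlv_tbl, struct queue_cid **cids,
			   uint8_t table_size_log)
{
	int i, n = 1 << table_size_log;

	if (n > T_ETH_INDIRECTION_TABLE_SIZE)	/* the min_t() clamp */
		n = T_ETH_INDIRECTION_TABLE_SIZE;
	for (i = 0; i < n; i++)
		tlv_tbl[i] = cids[i]->queue_id;	/* cid -> relative queue id */
}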
@@ -770,13 +899,18 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
 
        rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
        if (rc)
-               return rc;
+               goto exit;
 
-       if (resp->hdr.status != PFVF_STATUS_SUCCESS)
-               return -EINVAL;
+       if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+               rc = -EINVAL;
+               goto exit;
+       }
 
        qed_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);
 
+exit:
+       qed_vf_pf_req_end(p_hwfn, rc);
+
        return rc;
 }
 
@@ -797,14 +931,19 @@ int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
        resp = &p_iov->pf2vf_reply->default_resp;
        rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
        if (rc)
-               return rc;
+               goto exit;
 
-       if (resp->hdr.status != PFVF_STATUS_SUCCESS)
-               return -EAGAIN;
+       if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+               rc = -EAGAIN;
+               goto exit;
+       }
 
        p_hwfn->b_int_enabled = 0;
 
-       return 0;
+exit:
+       qed_vf_pf_req_end(p_hwfn, rc);
+
+       return rc;
 }
 
 int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
@@ -828,6 +967,8 @@ int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
        if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
                rc = -EAGAIN;
 
+       qed_vf_pf_req_end(p_hwfn, rc);
+
        p_hwfn->b_int_enabled = 0;
 
        if (p_iov->vf2pf_request)
@@ -896,12 +1037,17 @@ int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
        resp = &p_iov->pf2vf_reply->default_resp;
        rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
        if (rc)
-               return rc;
+               goto exit;
 
-       if (resp->hdr.status != PFVF_STATUS_SUCCESS)
-               return -EAGAIN;
+       if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+               rc = -EAGAIN;
+               goto exit;
+       }
 
-       return 0;
+exit:
+       qed_vf_pf_req_end(p_hwfn, rc);
+
+       return rc;
 }
 
 int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
@@ -920,12 +1066,17 @@ int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
 
        rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
        if (rc)
-               return rc;
+               goto exit;
+
+       if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+               rc = -EINVAL;
+               goto exit;
+       }
 
-       if (resp->hdr.status != PFVF_STATUS_SUCCESS)
-               return -EINVAL;
+exit:
+       qed_vf_pf_req_end(p_hwfn, rc);
 
-       return 0;
+       return rc;
 }
 
 u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
@@ -1056,6 +1207,13 @@ void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters)
        *num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters;
 }
 
+void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters)
+{
+       struct qed_vf_iov *p_vf = p_hwfn->vf_iov_info;
+
+       *num_mac_filters = p_vf->acquire_resp.resc.num_mac_filters;
+}
+
 bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
 {
        struct qed_bulletin_content *bulletin;
@@ -1071,8 +1229,8 @@ bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
        return false;
 }
 
-bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn,
-                                   u8 *dst_mac, u8 *p_is_forced)
+static bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn,
+                                          u8 *dst_mac, u8 *p_is_forced)
 {
        struct qed_bulletin_content *bulletin;
 
@@ -1115,8 +1273,8 @@ static void qed_handle_bulletin_change(struct qed_hwfn *hwfn)
 
        is_mac_exist = qed_vf_bulletin_get_forced_mac(hwfn, mac,
                                                      &is_mac_forced);
-       if (is_mac_exist && is_mac_forced && cookie)
-               ops->force_mac(cookie, mac);
+       if (is_mac_exist && cookie)
+               ops->force_mac(cookie, mac, !!is_mac_forced);
 
        /* Always update link configuration according to bulletin */
        qed_link_update(hwfn);
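With this change the VF forwards any bulletin MAC and lets the callee decide, passing !!is_mac_forced as a flag; the matching prototype, qede_force_mac(void *dev, u8 *mac, bool forced), is declared in qede.h later in this commit. One plausible (hypothetical) callee policy, modelled standalone:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static uint8_t primary_mac[6];
static bool user_set_mac;	/* true once the user configured a MAC */

static void force_mac_sketch(const uint8_t *mac, bool forced)
{
	/* A forced MAC always wins; an unforced bulletin MAC is only
	 * adopted when the user hasn't explicitly set one.
	 */
	if (!forced && user_set_mac)
		return;
	memcpy(primary_mac, mac, sizeof(primary_mac));
}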
@@ -1133,6 +1291,9 @@ void qed_iov_vf_task(struct work_struct *work)
 
        /* Handle bulletin board changes */
        qed_vf_read_bulletin(hwfn, &change);
+       if (test_and_clear_bit(QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG,
+                              &hwfn->iov_task_flags))
+               change = 1;
        if (change)
                qed_handle_bulletin_change(hwfn);
 
index b23ce58e932fbc73fef1df1e754f0ac73b111e3c..105c0edd2a01eec6a2f6af0547551265838bd53f 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef _QED_VF_H
@@ -40,6 +64,7 @@ enum {
        PFVF_STATUS_NOT_SUPPORTED,
        PFVF_STATUS_NO_RESOURCE,
        PFVF_STATUS_FORCED,
+       PFVF_STATUS_MALICIOUS,
 };
 
 /* vf pf channel tlvs */
@@ -86,7 +111,7 @@ struct vfpf_acquire_tlv {
        struct vfpf_first_tlv first_tlv;
 
        struct vf_pf_vfdev_info {
-#define VFPF_ACQUIRE_CAP_OBSOLETE      (1 << 0)
+#define VFPF_ACQUIRE_CAP_PRE_FP_HSI     (1 << 0) /* VF pre-FP hsi version */
 #define VFPF_ACQUIRE_CAP_100G          (1 << 1) /* VF can support 100g */
                u64 capabilities;
                u8 fw_major;
@@ -250,6 +275,8 @@ struct vfpf_stop_rxqs_tlv {
        struct vfpf_first_tlv first_tlv;
 
        u16 rx_qid;
+
+       /* this field is deprecated and should *always* be set to '1' */
        u8 num_rxqs;
        u8 cqe_completion;
        u8 padding[4];
@@ -260,6 +287,8 @@ struct vfpf_stop_txqs_tlv {
        struct vfpf_first_tlv first_tlv;
 
        u16 tx_qid;
+
+       /* this field is deprecated and should *always* be set to '1' */
        u8 num_txqs;
        u8 padding[5];
 };
@@ -551,6 +580,11 @@ struct qed_vf_iov {
 
        /* we set aside a copy of the acquire response */
        struct pfvf_acquire_resp_tlv acquire_resp;
+
+       /* In case the PF predates the fp-hsi version comparison,
+        * this has to be propagated, as it affects the fastpath.
+        */
+       bool b_pre_fp_hsi;
 };
 
 #ifdef CONFIG_QED_SRIOV
@@ -616,6 +650,14 @@ void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac);
 void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
                                 u8 *num_vlan_filters);
 
+/**
+ * @brief Get number of MAC filters allocated for VF by qed
+ *
+ *  @param p_hwfn
+ *  @param num_mac_filters - allocated MAC filters
+ */
+void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters);
+
 /**
  * @brief Check if VF can set a MAC address
  *
@@ -652,10 +694,7 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn);
 /**
  * @brief VF - start the RX Queue by sending a message to the PF
  * @param p_hwfn
- * @param cid                   - zero based within the VF
- * @param rx_queue_id           - zero based within the VF
- * @param sb                    - VF status block for this queue
- * @param sb_index              - Index within the status block
+ * @param p_cid                        - Only relative fields are relevant
  * @param bd_max_bytes          - maximum number of bytes per bd
  * @param bd_chain_phys_addr    - physical address of bd chain
  * @param cqe_pbl_addr          - physical address of pbl
@@ -666,9 +705,7 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn);
  * @return int
  */
 int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
-                       u8 rx_queue_id,
-                       u16 sb,
-                       u8 sb_index,
+                       struct qed_queue_cid *p_cid,
                        u16 bd_max_bytes,
                        dma_addr_t bd_chain_phys_addr,
                        dma_addr_t cqe_pbl_addr,
@@ -688,24 +725,23 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
  *
  * @return int
  */
-int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
-                       u16 tx_queue_id,
-                       u16 sb,
-                       u8 sb_index,
-                       dma_addr_t pbl_addr,
-                       u16 pbl_size, void __iomem **pp_doorbell);
+int
+qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
+                   struct qed_queue_cid *p_cid,
+                   dma_addr_t pbl_addr,
+                   u16 pbl_size, void __iomem **pp_doorbell);
 
 /**
  * @brief VF - stop the RX queue by sending a message to the PF
  *
  * @param p_hwfn
- * @param rx_qid
+ * @param p_cid
  * @param cqe_completion
  *
  * @return int
  */
 int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
-                      u16 rx_qid, bool cqe_completion);
+                      struct qed_queue_cid *p_cid, bool cqe_completion);
 
 /**
  * @brief VF - stop the TX queue by sending a message to the PF
@@ -715,7 +751,7 @@ int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
  *
  * @return int
  */
-int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid);
+int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid);
 
 /**
  * @brief VF - send a vport update command
@@ -866,6 +902,11 @@ static inline void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
 {
 }
 
+static inline void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn,
+                                             u8 *num_mac_filters)
+{
+}
+
 static inline bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
 {
        return false;
@@ -883,9 +924,7 @@ static inline int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
 }
 
 static inline int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
-                                     u8 rx_queue_id,
-                                     u16 sb,
-                                     u8 sb_index,
+                                     struct qed_queue_cid *p_cid,
                                      u16 bd_max_bytes,
                                      dma_addr_t bd_chain_phys_adr,
                                      dma_addr_t cqe_pbl_addr,
@@ -895,9 +934,7 @@ static inline int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
 }
 
 static inline int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
-                                     u16 tx_queue_id,
-                                     u16 sb,
-                                     u8 sb_index,
+                                     struct qed_queue_cid *p_cid,
                                      dma_addr_t pbl_addr,
                                      u16 pbl_size, void __iomem **pp_doorbell)
 {
@@ -905,12 +942,14 @@ static inline int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
 }
 
 static inline int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
-                                    u16 rx_qid, bool cqe_completion)
+                                    struct qed_queue_cid *p_cid,
+                                    bool cqe_completion)
 {
        return -EINVAL;
 }
 
-static inline int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid)
+static inline int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn,
+                                    struct qed_queue_cid *p_cid)
 {
        return -EINVAL;
 }
index 74a49850d74d9bc73392a46285647df11681f486..bc5f7c3b277de27a7f682170f26946070d45ca9d 100644 (file)
@@ -1,4 +1,5 @@
 obj-$(CONFIG_QEDE) := qede.o
 
-qede-y := qede_main.o qede_ethtool.o
+qede-y := qede_main.o qede_fp.o qede_filter.o qede_ethtool.o qede_ptp.o
 qede-$(CONFIG_DCB) += qede_dcbnl.o
+qede-$(CONFIG_QED_RDMA) += qede_roce.o
index 02b06d4e40ae17db233ef807ebf37df1b29f1e00..940778634e5b0faa54d8825badedc794122ed2e2 100644 (file)
@@ -1,11 +1,34 @@
 /* QLogic qede NIC Driver
-* Copyright (c) 2015 QLogic Corporation
-*
-* This software is available under the terms of the GNU General Public License
-* (GPL) Version 2, available from the file COPYING in the main directory of
-* this source tree.
-*/
-
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
 #ifndef _QEDE_H_
 #define _QEDE_H_
 #include <linux/compiler.h>
 #include <linux/bitmap.h>
 #include <linux/kernel.h>
 #include <linux/mutex.h>
+#include <linux/bpf.h>
 #include <linux/io.h>
+#ifdef CONFIG_RFS_ACCEL
+#include <linux/cpu_rmap.h>
+#endif
 #include <linux/qed/common_hsi.h>
 #include <linux/qed/eth_common.h>
 #include <linux/qed/qed_if.h>
@@ -25,8 +52,8 @@
 
 #define QEDE_MAJOR_VERSION             8
 #define QEDE_MINOR_VERSION             10
-#define QEDE_REVISION_VERSION          1
-#define QEDE_ENGINEERING_VERSION       20
+#define QEDE_REVISION_VERSION          10
+#define QEDE_ENGINEERING_VERSION       21
 #define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
                __stringify(QEDE_MINOR_VERSION) "."             \
                __stringify(QEDE_REVISION_VERSION) "."          \
 
 #define DRV_MODULE_SYM         qede
 
-struct qede_stats {
+struct qede_stats_common {
        u64 no_buff_discards;
+       u64 packet_too_big_discard;
+       u64 ttl0_discard;
        u64 rx_ucast_bytes;
        u64 rx_mcast_bytes;
        u64 rx_bcast_bytes;
@@ -64,11 +93,6 @@ struct qede_stats {
        u64 rx_256_to_511_byte_packets;
        u64 rx_512_to_1023_byte_packets;
        u64 rx_1024_to_1518_byte_packets;
-       u64 rx_1519_to_1522_byte_packets;
-       u64 rx_1519_to_2047_byte_packets;
-       u64 rx_2048_to_4095_byte_packets;
-       u64 rx_4096_to_9216_byte_packets;
-       u64 rx_9217_to_16383_byte_packets;
        u64 rx_crc_errors;
        u64 rx_mac_crtl_frames;
        u64 rx_pause_frames;
@@ -85,17 +109,39 @@ struct qede_stats {
        u64 tx_256_to_511_byte_packets;
        u64 tx_512_to_1023_byte_packets;
        u64 tx_1024_to_1518_byte_packets;
+       u64 tx_pause_frames;
+       u64 tx_pfc_frames;
+       u64 brb_truncates;
+       u64 brb_discards;
+       u64 tx_mac_ctrl_frames;
+};
+
+struct qede_stats_bb {
+       u64 rx_1519_to_1522_byte_packets;
+       u64 rx_1519_to_2047_byte_packets;
+       u64 rx_2048_to_4095_byte_packets;
+       u64 rx_4096_to_9216_byte_packets;
+       u64 rx_9217_to_16383_byte_packets;
        u64 tx_1519_to_2047_byte_packets;
        u64 tx_2048_to_4095_byte_packets;
        u64 tx_4096_to_9216_byte_packets;
        u64 tx_9217_to_16383_byte_packets;
-       u64 tx_pause_frames;
-       u64 tx_pfc_frames;
        u64 tx_lpi_entry_count;
        u64 tx_total_collisions;
-       u64 brb_truncates;
-       u64 brb_discards;
-       u64 tx_mac_ctrl_frames;
+};
+
+struct qede_stats_ah {
+       u64 rx_1519_to_max_byte_packets;
+       u64 tx_1519_to_max_byte_packets;
+};
+
+struct qede_stats {
+       struct qede_stats_common common;
+
+       union {
+               struct qede_stats_bb bb;
+               struct qede_stats_ah ah;
+       };
 };
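The stats split keeps family-specific counters in a union because an adapter is only ever one of BB or AH; readers must pick the member matching dev_type, which is what the QEDE_IS_BB()/QEDE_IS_AH() helpers added below are for. A reduced standalone illustration:

#include <stdint.h>

enum dev_type { DEV_BB, DEV_AH };

struct stats_bb { uint64_t tx_lpi_entry_count; };
struct stats_ah { uint64_t tx_1519_to_max_byte_packets; };

struct stats {
	uint64_t rx_ucast_bytes;		/* common to both families */
	union {
		struct stats_bb bb;
		struct stats_ah ah;
	};
};

static uint64_t oversize_tx_pkts(const struct stats *s, enum dev_type t)
{
	/* Only the member matching the device family holds valid data */
	return t == DEV_AH ? s->ah.tx_1519_to_max_byte_packets : 0;
}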
 
 struct qede_vlan {
@@ -104,6 +150,15 @@ struct qede_vlan {
        bool configured;
 };
 
+struct qede_rdma_dev {
+       struct qedr_dev *qedr_dev;
+       struct list_head entry;
+       struct list_head roce_event_list;
+       struct workqueue_struct *roce_wq;
+};
+
+struct qede_ptp;
+
 struct qede_dev {
        struct qed_dev                  *cdev;
        struct net_device               *ndev;
@@ -115,26 +170,30 @@ struct qede_dev {
        u32 flags;
 #define QEDE_FLAG_IS_VF        BIT(0)
 #define IS_VF(edev)    (!!((edev)->flags & QEDE_FLAG_IS_VF))
+#define QEDE_TX_TIMESTAMPING_EN                BIT(1)
 
        const struct qed_eth_ops        *ops;
+       struct qede_ptp                 *ptp;
 
-       struct qed_dev_eth_info dev_info;
+       struct qed_dev_eth_info dev_info;
 #define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues)
-#define QEDE_MAX_TSS_CNT(edev) ((edev)->dev_info.num_queues * \
-                                (edev)->dev_info.num_tc)
+#define QEDE_MAX_TSS_CNT(edev) ((edev)->dev_info.num_queues)
+#define QEDE_IS_BB(edev) \
+       ((edev)->dev_info.common.dev_type == QED_DEV_TYPE_BB)
+#define QEDE_IS_AH(edev) \
+       ((edev)->dev_info.common.dev_type == QED_DEV_TYPE_AH)
 
        struct qede_fastpath            *fp_array;
-       u16                             req_rss;
-       u16                             num_rss;
-       u8                              num_tc;
-#define QEDE_RSS_CNT(edev)             ((edev)->num_rss)
-#define QEDE_TSS_CNT(edev)             ((edev)->num_rss *      \
-                                        (edev)->num_tc)
-#define QEDE_TSS_IDX(edev, txqidx)     ((txqidx) % (edev)->num_rss)
-#define QEDE_TC_IDX(edev, txqidx)      ((txqidx) / (edev)->num_rss)
-#define QEDE_TX_QUEUE(edev, txqidx)    \
-       (&(edev)->fp_array[QEDE_TSS_IDX((edev), (txqidx))].txqs[QEDE_TC_IDX( \
-                                                       (edev), (txqidx))])
+       u8                              req_num_tx;
+       u8                              fp_num_tx;
+       u8                              req_num_rx;
+       u8                              fp_num_rx;
+       u16                             req_queues;
+       u16                             num_queues;
+#define QEDE_QUEUE_CNT(edev)   ((edev)->num_queues)
+#define QEDE_RSS_COUNT(edev)   ((edev)->num_queues - (edev)->fp_num_tx)
+#define QEDE_RX_QUEUE_IDX(edev, i)     (i)
+#define QEDE_TSS_COUNT(edev)   ((edev)->num_queues - (edev)->fp_num_rx)
 
        struct qed_int_info             int_info;
        unsigned char                   primary_mac[ETH_ALEN];
@@ -164,7 +223,10 @@ struct qede_dev {
 #define QEDE_RSS_KEY_INITED    BIT(1)
 #define QEDE_RSS_CAPS_INITED   BIT(2)
        u32 rss_params_inited; /* bit-field to track initialized rss params */
-       struct qed_update_vport_rss_params      rss_params;
+       u16 rss_ind_table[128];
+       u32 rss_key[10];
+       u8 rss_caps;
+
        u16                     q_num_rx_buffers; /* Must be a power of two */
        u16                     q_num_tx_buffers; /* Must be a power of two */
 
@@ -177,6 +239,15 @@ struct qede_dev {
        unsigned long                   sp_flags;
        u16                             vxlan_dst_port;
        u16                             geneve_dst_port;
+
+#ifdef CONFIG_RFS_ACCEL
+       struct qede_arfs                *arfs;
+#endif
+       bool                            wol_enabled;
+
+       struct qede_rdma_dev            rdma_info;
+
+       struct bpf_prog *xdp_prog;
 };
 
 enum QEDE_STATE {
@@ -206,38 +277,73 @@ enum qede_agg_state {
 };
 
 struct qede_agg_info {
-       struct sw_rx_data replace_buf;
-       dma_addr_t replace_buf_mapping;
-       struct sw_rx_data start_buf;
-       dma_addr_t start_buf_mapping;
-       struct eth_fast_path_rx_tpa_start_cqe start_cqe;
-       enum qede_agg_state agg_state;
+       /* buffer is a data buffer that can be placed on / consumed from the
+        * rx bd chain. It has two purposes: we preallocate a data buffer
+        * for each aggregation when we open the interface, and place that
+        * buffer on the rx-bd-ring when we receive TPA_START. We don't want
+        * to be in a state where allocation fails, as we can't reuse the
+        * consumed buffer in the rx-chain - FW may still be writing to it
+        * (the header needs to be modified for TPA).
+        * The second purpose is to keep a pointer to the bd buffer during
+        * aggregation.
+        */
+       struct sw_rx_data buffer;
+       dma_addr_t buffer_mapping;
+
        struct sk_buff *skb;
-       int frag_id;
+
+       /* We need to keep some fields from the start CQE until termination */
        u16 vlan_tag;
+       u16 start_cqe_bd_len;
+       u8 start_cqe_placement_offset;
+
+       u8 state;
+       u8 frag_id;
+
+       u8 tunnel_type;
 };
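A hypothetical sketch of the recycling the comment above describes: at TPA_START the spare, preallocated buffer replaces the consumed one on the bd ring, while the consumed buffer (which FW may still touch) stays with the aggregation, so no allocation happens on this path:

struct sw_rx_data_min { void *data; };	/* reduced sw_rx_data */

struct agg_min {
	struct sw_rx_data_min buffer;	/* preallocated at ifup */
};

static void tpa_start_sketch(struct agg_min *tpa,
			     struct sw_rx_data_min *cons_bd)
{
	struct sw_rx_data_min tmp = *cons_bd;

	*cons_bd = tpa->buffer;		/* spare buffer goes on the bd ring */
	tpa->buffer = tmp;		/* consumed buffer rides the agg */
}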
 
 struct qede_rx_queue {
-       __le16                  *hw_cons_ptr;
-       struct sw_rx_data       *sw_rx_ring;
-       u16                     sw_rx_cons;
-       u16                     sw_rx_prod;
-       struct qed_chain        rx_bd_ring;
-       struct qed_chain        rx_comp_ring;
-       void __iomem            *hw_rxq_prod_addr;
+       __le16 *hw_cons_ptr;
+       void __iomem *hw_rxq_prod_addr;
+
+       /* Required for the allocation of replacement buffers */
+       struct device *dev;
+
+       struct bpf_prog *xdp_prog;
+
+       u16 sw_rx_cons;
+       u16 sw_rx_prod;
+
+       u16 filled_buffers;
+       u8 data_direction;
+       u8 rxq_id;
+
+       /* Used once per NAPI run */
+       u16 num_rx_buffers;
+
+       u16 rx_headroom;
+
+       u32 rx_buf_size;
+       u32 rx_buf_seg_size;
+
+       struct sw_rx_data *sw_rx_ring;
+       struct qed_chain rx_bd_ring;
+       struct qed_chain rx_comp_ring ____cacheline_aligned;
 
        /* GRO */
-       struct qede_agg_info    tpa_info[ETH_TPA_MAX_AGGS_NUM];
+       struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];
+
+       /* Used once per NAPI run */
+       u64 rcv_pkts;
 
-       int                     rx_buf_size;
-       unsigned int            rx_buf_seg_size;
+       u64 rx_hw_errors;
+       u64 rx_alloc_errors;
+       u64 rx_ip_frags;
 
-       u16                     num_rx_buffers;
-       u16                     rxq_id;
+       u64 xdp_no_pass;
 
-       u64                     rx_hw_errors;
-       u64                     rx_alloc_errors;
-       u64                     rx_ip_frags;
+       void *handle;
 };
 
 union db_prod {
@@ -252,17 +358,45 @@ struct sw_tx_bd {
 #define QEDE_TSO_SPLIT_BD              BIT(0)
 };
 
+struct sw_tx_xdp {
+       struct page *page;
+       dma_addr_t mapping;
+};
+
 struct qede_tx_queue {
-       int                     index; /* Queue index */
-       __le16                  *hw_cons_ptr;
-       struct sw_tx_bd         *sw_tx_ring;
-       u16                     sw_tx_cons;
-       u16                     sw_tx_prod;
-       struct qed_chain        tx_pbl;
-       void __iomem            *doorbell_addr;
-       union db_prod           tx_db;
-
-       u16                     num_tx_buffers;
+       u8 is_xdp;
+       bool is_legacy;
+       u16 sw_tx_cons;
+       u16 sw_tx_prod;
+       u16 num_tx_buffers; /* Slowpath only */
+
+       u64 xmit_pkts;
+       u64 stopped_cnt;
+
+       __le16 *hw_cons_ptr;
+
+       /* Needed for the mapping of packets */
+       struct device *dev;
+
+       void __iomem *doorbell_addr;
+       union db_prod tx_db;
+       int index; /* Slowpath only */
+#define QEDE_TXQ_XDP_TO_IDX(edev, txq) ((txq)->index - \
+                                        QEDE_MAX_TSS_CNT(edev))
+#define QEDE_TXQ_IDX_TO_XDP(edev, idx) ((idx) + QEDE_MAX_TSS_CNT(edev))
+
+       /* Regular Tx requires skb + metadata for release purposes,
+        * while XDP requires the pages and the mapped address.
+        */
+       union {
+               struct sw_tx_bd *skbs;
+               struct sw_tx_xdp *xdp;
+       } sw_tx_ring;
+
+       struct qed_chain tx_pbl;
+
+       /* Slowpath; should be kept at the end [unless it fills padding] */
+       void *handle;
 };
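Because a Tx queue is either a regular one (skbs plus release metadata) or an XDP one (pages plus DMA mappings), never both, a single union serves both completion paths, selected by is_xdp. A reduced standalone model:

#include <stdbool.h>

struct tx_bd_min { void *skb; };			/* reduced sw_tx_bd */
struct tx_xdp_min { void *page; unsigned long map; };	/* reduced sw_tx_xdp */

struct txq_min {
	bool is_xdp;
	union {
		struct tx_bd_min *skbs;
		struct tx_xdp_min *xdp;
	} sw_tx_ring;
};

static void *completion_entry(struct txq_min *q, unsigned int idx)
{
	/* Only the member matching is_xdp is ever valid for this queue */
	return q->is_xdp ? (void *)&q->sw_tx_ring.xdp[idx]
			 : (void *)&q->sw_tx_ring.skbs[idx];
}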
 
 #define BD_UNMAP_ADDR(bd)              HILO_U64(le32_to_cpu((bd)->addr.hi), \
@@ -277,11 +411,18 @@ struct qede_tx_queue {
 
 struct qede_fastpath {
        struct qede_dev *edev;
-       u8                      rss_id;
+#define QEDE_FASTPATH_TX       BIT(0)
+#define QEDE_FASTPATH_RX       BIT(1)
+#define QEDE_FASTPATH_XDP      BIT(2)
+#define QEDE_FASTPATH_COMBINED (QEDE_FASTPATH_TX | QEDE_FASTPATH_RX)
+       u8                      type;
+       u8                      id;
+       u8                      xdp_xmit;
        struct napi_struct      napi;
        struct qed_sb_info      *sb_info;
        struct qede_rx_queue    *rxq;
-       struct qede_tx_queue    *txqs;
+       struct qede_tx_queue    *txq;
+       struct qede_tx_queue    *xdp_tx;
 
 #define VEC_NAME_SIZE  (sizeof(((struct net_device *)0)->name) + 8)
        char    name[VEC_NAME_SIZE];
@@ -294,6 +435,7 @@ struct qede_fastpath {
 #define XMIT_L4_CSUM           BIT(0)
 #define XMIT_LSO               BIT(1)
 #define XMIT_ENC               BIT(2)
+#define XMIT_ENC_GSO_L4_CSUM   BIT(3)
 
 #define QEDE_CSUM_ERROR                        BIT(0)
 #define QEDE_CSUM_UNNECESSARY          BIT(1)
@@ -303,31 +445,83 @@ struct qede_fastpath {
 #define QEDE_SP_VXLAN_PORT_CONFIG      2
 #define QEDE_SP_GENEVE_PORT_CONFIG     3
 
-union qede_reload_args {
-       u16 mtu;
+#ifdef CONFIG_RFS_ACCEL
+int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+                      u16 rxq_index, u32 flow_id);
+void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr);
+void qede_poll_for_freeing_arfs_filters(struct qede_dev *edev);
+void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc);
+void qede_free_arfs(struct qede_dev *edev);
+int qede_alloc_arfs(struct qede_dev *edev);
+
+#define QEDE_SP_ARFS_CONFIG    4
+#define QEDE_SP_TASK_POLL_DELAY        (5 * HZ)
+#define QEDE_RFS_MAX_FLTR      256
+#endif
+
+struct qede_reload_args {
+       void (*func)(struct qede_dev *edev, struct qede_reload_args *args);
+       union {
+               netdev_features_t features;
+               struct bpf_prog *new_prog;
+               u16 mtu;
+       } u;
 };
 
+/* Datapath functions definition */
+netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+netdev_features_t qede_features_check(struct sk_buff *skb,
+                                     struct net_device *dev,
+                                     netdev_features_t features);
+void qede_tx_log_print(struct qede_dev *edev, struct qede_fastpath *fp);
+int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy);
+int qede_free_tx_pkt(struct qede_dev *edev,
+                    struct qede_tx_queue *txq, int *len);
+int qede_poll(struct napi_struct *napi, int budget);
+irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie);
+
+/* Filtering function definitions */
+void qede_force_mac(void *dev, u8 *mac, bool forced);
+int qede_set_mac_addr(struct net_device *ndev, void *p);
+
+int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid);
+int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid);
+void qede_vlan_mark_nonconfigured(struct qede_dev *edev);
+int qede_configure_vlan_filters(struct qede_dev *edev);
+
+int qede_set_features(struct net_device *dev, netdev_features_t features);
+void qede_set_rx_mode(struct net_device *ndev);
+void qede_config_rx_mode(struct net_device *ndev);
+void qede_fill_rss_params(struct qede_dev *edev,
+                         struct qed_update_vport_rss_params *rss, u8 *update);
+
+void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti);
+void qede_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti);
+
+int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp);
+
 #ifdef CONFIG_DCB
 void qede_set_dcbnl_ops(struct net_device *ndev);
 #endif
+
 void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level);
 void qede_set_ethtool_ops(struct net_device *netdev);
 void qede_reload(struct qede_dev *edev,
-                void (*func)(struct qede_dev *edev,
-                             union qede_reload_args *args),
-                union qede_reload_args *args);
+                struct qede_reload_args *args, bool is_locked);
 int qede_change_mtu(struct net_device *dev, int new_mtu);
 void qede_fill_by_demand_stats(struct qede_dev *edev);
+void __qede_lock(struct qede_dev *edev);
+void __qede_unlock(struct qede_dev *edev);
 bool qede_has_rx_work(struct qede_rx_queue *rxq);
 int qede_txq_has_work(struct qede_tx_queue *txq);
-void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev,
-                            u8 count);
+void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count);
+void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);
 
 #define RX_RING_SIZE_POW       13
 #define RX_RING_SIZE           ((u16)BIT(RX_RING_SIZE_POW))
 #define NUM_RX_BDS_MAX         (RX_RING_SIZE - 1)
 #define NUM_RX_BDS_MIN         128
-#define NUM_RX_BDS_DEF         NUM_RX_BDS_MAX
+#define NUM_RX_BDS_DEF         ((u16)BIT(10) - 1)
 
 #define TX_RING_SIZE_POW       13
 #define TX_RING_SIZE           ((u16)BIT(TX_RING_SIZE_POW))
@@ -337,6 +531,6 @@ void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev,
 
 #define QEDE_MIN_PKT_LEN       64
 #define QEDE_RX_HDR_SIZE       256
-#define        for_each_rss(i) for (i = 0; i < edev->num_rss; i++)
+#define        for_each_queue(i) for (i = 0; i < edev->num_queues; i++)
 
 #endif /* _QEDE_H_ */
index f8492cac9290902c15d1c1481aa66f968b6b6af5..3abbf916ebf67c51d0780128ccc6cb97693672aa 100644 (file)
@@ -1,11 +1,34 @@
 /* QLogic qede NIC Driver
-* Copyright (c) 2015 QLogic Corporation
-*
-* This software is available under the terms of the GNU General Public License
-* (GPL) Version 2, available from the file COPYING in the main directory of
-* this source tree.
-*/
-
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
 #include <linux/version.h>
 #include <linux/types.h>
 #include <linux/netdevice.h>
 #include <linux/string.h>
 #include <linux/pci.h>
 #include <linux/capability.h>
+#include <linux/vmalloc.h>
 #include "qede.h"
-
-#define QEDE_STAT_OFFSET(stat_name) (offsetof(struct qede_stats, stat_name))
-#define QEDE_STAT_STRING(stat_name) (#stat_name)
-#define _QEDE_STAT(stat_name, pf_only) \
-        {QEDE_STAT_OFFSET(stat_name), QEDE_STAT_STRING(stat_name), pf_only}
-#define QEDE_PF_STAT(stat_name)                _QEDE_STAT(stat_name, true)
-#define QEDE_STAT(stat_name)           _QEDE_STAT(stat_name, false)
+#include "qede_ptp.h"
 
 #define QEDE_RQSTAT_OFFSET(stat_name) \
         (offsetof(struct qede_rx_queue, stat_name))
@@ -35,19 +53,55 @@ static const struct {
        u64 offset;
        char string[ETH_GSTRING_LEN];
 } qede_rqstats_arr[] = {
+       QEDE_RQSTAT(rcv_pkts),
        QEDE_RQSTAT(rx_hw_errors),
        QEDE_RQSTAT(rx_alloc_errors),
        QEDE_RQSTAT(rx_ip_frags),
+       QEDE_RQSTAT(xdp_no_pass),
 };
 
 #define QEDE_NUM_RQSTATS ARRAY_SIZE(qede_rqstats_arr)
-#define QEDE_RQSTATS_DATA(dev, sindex, rqindex) \
-       (*((u64 *)(((char *)(dev->fp_array[(rqindex)].rxq)) +\
-                   qede_rqstats_arr[(sindex)].offset)))
+#define QEDE_TQSTAT_OFFSET(stat_name) \
+       (offsetof(struct qede_tx_queue, stat_name))
+#define QEDE_TQSTAT_STRING(stat_name) (#stat_name)
+#define QEDE_TQSTAT(stat_name) \
+       {QEDE_TQSTAT_OFFSET(stat_name), QEDE_TQSTAT_STRING(stat_name)}
+#define QEDE_NUM_TQSTATS ARRAY_SIZE(qede_tqstats_arr)
+static const struct {
+       u64 offset;
+       char string[ETH_GSTRING_LEN];
+} qede_tqstats_arr[] = {
+       QEDE_TQSTAT(xmit_pkts),
+       QEDE_TQSTAT(stopped_cnt),
+};
+
+#define QEDE_STAT_OFFSET(stat_name, type, base) \
+       (offsetof(type, stat_name) + (base))
+#define QEDE_STAT_STRING(stat_name)    (#stat_name)
+#define _QEDE_STAT(stat_name, type, base, attr) \
+       {QEDE_STAT_OFFSET(stat_name, type, base), \
+        QEDE_STAT_STRING(stat_name), \
+        attr}
+#define QEDE_STAT(stat_name) \
+       _QEDE_STAT(stat_name, struct qede_stats_common, 0, 0x0)
+#define QEDE_PF_STAT(stat_name) \
+       _QEDE_STAT(stat_name, struct qede_stats_common, 0, \
+                  BIT(QEDE_STAT_PF_ONLY))
+#define QEDE_PF_BB_STAT(stat_name) \
+       _QEDE_STAT(stat_name, struct qede_stats_bb, \
+                  offsetof(struct qede_stats, bb), \
+                  BIT(QEDE_STAT_PF_ONLY) | BIT(QEDE_STAT_BB_ONLY))
+#define QEDE_PF_AH_STAT(stat_name) \
+       _QEDE_STAT(stat_name, struct qede_stats_ah, \
+                  offsetof(struct qede_stats, ah), \
+                  BIT(QEDE_STAT_PF_ONLY) | BIT(QEDE_STAT_AH_ONLY))
 static const struct {
        u64 offset;
        char string[ETH_GSTRING_LEN];
-       bool pf_only;
+       unsigned long attr;
+#define QEDE_STAT_PF_ONLY      0
+#define QEDE_STAT_BB_ONLY      1
+#define QEDE_STAT_AH_ONLY      2
 } qede_stats_arr[] = {
        QEDE_STAT(rx_ucast_bytes),
        QEDE_STAT(rx_mcast_bytes),
@@ -69,22 +123,23 @@ static const struct {
        QEDE_PF_STAT(rx_256_to_511_byte_packets),
        QEDE_PF_STAT(rx_512_to_1023_byte_packets),
        QEDE_PF_STAT(rx_1024_to_1518_byte_packets),
-       QEDE_PF_STAT(rx_1519_to_1522_byte_packets),
-       QEDE_PF_STAT(rx_1519_to_2047_byte_packets),
-       QEDE_PF_STAT(rx_2048_to_4095_byte_packets),
-       QEDE_PF_STAT(rx_4096_to_9216_byte_packets),
-       QEDE_PF_STAT(rx_9217_to_16383_byte_packets),
+       QEDE_PF_BB_STAT(rx_1519_to_1522_byte_packets),
+       QEDE_PF_BB_STAT(rx_1519_to_2047_byte_packets),
+       QEDE_PF_BB_STAT(rx_2048_to_4095_byte_packets),
+       QEDE_PF_BB_STAT(rx_4096_to_9216_byte_packets),
+       QEDE_PF_BB_STAT(rx_9217_to_16383_byte_packets),
+       QEDE_PF_AH_STAT(rx_1519_to_max_byte_packets),
        QEDE_PF_STAT(tx_64_byte_packets),
        QEDE_PF_STAT(tx_65_to_127_byte_packets),
        QEDE_PF_STAT(tx_128_to_255_byte_packets),
        QEDE_PF_STAT(tx_256_to_511_byte_packets),
        QEDE_PF_STAT(tx_512_to_1023_byte_packets),
        QEDE_PF_STAT(tx_1024_to_1518_byte_packets),
-       QEDE_PF_STAT(tx_1519_to_2047_byte_packets),
-       QEDE_PF_STAT(tx_2048_to_4095_byte_packets),
-       QEDE_PF_STAT(tx_4096_to_9216_byte_packets),
-       QEDE_PF_STAT(tx_9217_to_16383_byte_packets),
-
+       QEDE_PF_BB_STAT(tx_1519_to_2047_byte_packets),
+       QEDE_PF_BB_STAT(tx_2048_to_4095_byte_packets),
+       QEDE_PF_BB_STAT(tx_4096_to_9216_byte_packets),
+       QEDE_PF_BB_STAT(tx_9217_to_16383_byte_packets),
+       QEDE_PF_AH_STAT(tx_1519_to_max_byte_packets),
        QEDE_PF_STAT(rx_mac_crtl_frames),
        QEDE_PF_STAT(tx_mac_ctrl_frames),
        QEDE_PF_STAT(rx_pause_frames),
@@ -99,14 +154,16 @@ static const struct {
        QEDE_PF_STAT(rx_jabbers),
        QEDE_PF_STAT(rx_undersize_packets),
        QEDE_PF_STAT(rx_fragments),
-       QEDE_PF_STAT(tx_lpi_entry_count),
-       QEDE_PF_STAT(tx_total_collisions),
+       QEDE_PF_BB_STAT(tx_lpi_entry_count),
+       QEDE_PF_BB_STAT(tx_total_collisions),
        QEDE_PF_STAT(brb_truncates),
        QEDE_PF_STAT(brb_discards),
        QEDE_STAT(no_buff_discards),
        QEDE_PF_STAT(mftag_filter_discards),
        QEDE_PF_STAT(mac_filter_discards),
        QEDE_STAT(tx_err_drop_pkts),
+       QEDE_STAT(ttl0_discard),
+       QEDE_STAT(packet_too_big_discard),
 
        QEDE_STAT(coalesced_pkts),
        QEDE_STAT(coalesced_events),
@@ -115,11 +172,13 @@ static const struct {
        QEDE_STAT(coalesced_bytes),
 };
 
-#define QEDE_STATS_DATA(dev, index) \
-       (*((u64 *)(((char *)(dev)) + offsetof(struct qede_dev, stats) \
-                       + qede_stats_arr[(index)].offset)))
-
 #define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr)
+#define QEDE_STAT_IS_PF_ONLY(i) \
+       test_bit(QEDE_STAT_PF_ONLY, &qede_stats_arr[i].attr)
+#define QEDE_STAT_IS_BB_ONLY(i) \
+       test_bit(QEDE_STAT_BB_ONLY, &qede_stats_arr[i].attr)
+#define QEDE_STAT_IS_AH_ONLY(i) \
+       test_bit(QEDE_STAT_AH_ONLY, &qede_stats_arr[i].attr)
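Each table entry folds the union member's base offset in at build time, so one generic byte-offset read serves common, BB-only and AH-only stats alike; e.g. QEDE_PF_BB_STAT(tx_lpi_entry_count) stores offsetof(struct qede_stats, bb) + offsetof(struct qede_stats_bb, tx_lpi_entry_count). A reduced standalone model:

#include <stddef.h>
#include <stdint.h>

struct bb_min { uint64_t tx_lpi_entry_count; };
struct stats_min { uint64_t rx_ucast_bytes; struct bb_min bb; };

#define STAT_OFF(type, base, name) (offsetof(type, name) + (base))

static uint64_t read_stat(const struct stats_min *s, size_t off)
{
	/* One flat read works regardless of which sub-struct owns the stat */
	return *(const uint64_t *)((const char *)s + off);
}

/* Usage: read_stat(s, STAT_OFF(struct bb_min,
 *                              offsetof(struct stats_min, bb),
 *                              tx_lpi_entry_count));
 */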
 
 enum {
        QEDE_PRI_FLAG_CMT,
@@ -136,6 +195,7 @@ enum qede_ethtool_tests {
        QEDE_ETHTOOL_MEMORY_TEST,
        QEDE_ETHTOOL_REGISTER_TEST,
        QEDE_ETHTOOL_CLOCK_TEST,
+       QEDE_ETHTOOL_NVRAM_TEST,
        QEDE_ETHTOOL_TEST_MAX
 };
 
@@ -145,23 +205,71 @@ static const char qede_tests_str_arr[QEDE_ETHTOOL_TEST_MAX][ETH_GSTRING_LEN] = {
        "Memory (online)\t\t",
        "Register (online)\t",
        "Clock (online)\t\t",
+       "Nvram (online)\t\t",
 };
 
+static void qede_get_strings_stats_txq(struct qede_dev *edev,
+                                      struct qede_tx_queue *txq, u8 **buf)
+{
+       int i;
+
+       for (i = 0; i < QEDE_NUM_TQSTATS; i++) {
+               if (txq->is_xdp)
+                       sprintf(*buf, "%d [XDP]: %s",
+                               QEDE_TXQ_XDP_TO_IDX(edev, txq),
+                               qede_tqstats_arr[i].string);
+               else
+                       sprintf(*buf, "%d: %s", txq->index,
+                               qede_tqstats_arr[i].string);
+               *buf += ETH_GSTRING_LEN;
+       }
+}
+
+static void qede_get_strings_stats_rxq(struct qede_dev *edev,
+                                      struct qede_rx_queue *rxq, u8 **buf)
+{
+       int i;
+
+       for (i = 0; i < QEDE_NUM_RQSTATS; i++) {
+               sprintf(*buf, "%d: %s", rxq->rxq_id,
+                       qede_rqstats_arr[i].string);
+               *buf += ETH_GSTRING_LEN;
+       }
+}
+
+static bool qede_is_irrelevant_stat(struct qede_dev *edev, int stat_index)
+{
+       return (IS_VF(edev) && QEDE_STAT_IS_PF_ONLY(stat_index)) ||
+              (QEDE_IS_BB(edev) && QEDE_STAT_IS_AH_ONLY(stat_index)) ||
+              (QEDE_IS_AH(edev) && QEDE_STAT_IS_BB_ONLY(stat_index));
+}
+
 static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
 {
-       int i, j, k;
+       struct qede_fastpath *fp;
+       int i;
 
-       for (i = 0, j = 0; i < QEDE_NUM_STATS; i++) {
-               if (IS_VF(edev) && qede_stats_arr[i].pf_only)
-                       continue;
-               strcpy(buf + j * ETH_GSTRING_LEN,
-                      qede_stats_arr[i].string);
-               j++;
+       /* Account for queue statistics */
+       for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
+               fp = &edev->fp_array[i];
+
+               if (fp->type & QEDE_FASTPATH_RX)
+                       qede_get_strings_stats_rxq(edev, fp->rxq, &buf);
+
+               if (fp->type & QEDE_FASTPATH_XDP)
+                       qede_get_strings_stats_txq(edev, fp->xdp_tx, &buf);
+
+               if (fp->type & QEDE_FASTPATH_TX)
+                       qede_get_strings_stats_txq(edev, fp->txq, &buf);
        }
 
-       for (k = 0; k < QEDE_NUM_RQSTATS; k++, j++)
-               strcpy(buf + j * ETH_GSTRING_LEN,
-                      qede_rqstats_arr[k].string);
+       /* Account for non-queue statistics */
+       for (i = 0; i < QEDE_NUM_STATS; i++) {
+               if (qede_is_irrelevant_stat(edev, i))
+                       continue;
+               strcpy(buf, qede_stats_arr[i].string);
+               buf += ETH_GSTRING_LEN;
+       }
 }
 
 static void qede_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
@@ -186,48 +294,85 @@ static void qede_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
        }
 }
 
+static void qede_get_ethtool_stats_txq(struct qede_tx_queue *txq, u64 **buf)
+{
+       int i;
+
+       for (i = 0; i < QEDE_NUM_TQSTATS; i++) {
+               **buf = *((u64 *)(((void *)txq) + qede_tqstats_arr[i].offset));
+               (*buf)++;
+       }
+}
+
+static void qede_get_ethtool_stats_rxq(struct qede_rx_queue *rxq, u64 **buf)
+{
+       int i;
+
+       for (i = 0; i < QEDE_NUM_RQSTATS; i++) {
+               **buf = *((u64 *)(((void *)rxq) + qede_rqstats_arr[i].offset));
+               (*buf)++;
+       }
+}
+
 static void qede_get_ethtool_stats(struct net_device *dev,
                                   struct ethtool_stats *stats, u64 *buf)
 {
        struct qede_dev *edev = netdev_priv(dev);
-       int sidx, cnt = 0;
-       int qid;
+       struct qede_fastpath *fp;
+       int i;
 
        qede_fill_by_demand_stats(edev);
 
-       mutex_lock(&edev->qede_lock);
+       /* Need to protect the access to the fastpath array */
+       __qede_lock(edev);
 
-       for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++) {
-               if (IS_VF(edev) && qede_stats_arr[sidx].pf_only)
-                       continue;
-               buf[cnt++] = QEDE_STATS_DATA(edev, sidx);
+       for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
+               fp = &edev->fp_array[i];
+
+               if (fp->type & QEDE_FASTPATH_RX)
+                       qede_get_ethtool_stats_rxq(fp->rxq, &buf);
+
+               if (fp->type & QEDE_FASTPATH_XDP)
+                       qede_get_ethtool_stats_txq(fp->xdp_tx, &buf);
+
+               if (fp->type & QEDE_FASTPATH_TX)
+                       qede_get_ethtool_stats_txq(fp->txq, &buf);
        }
 
-       for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++) {
-               buf[cnt] = 0;
-               for (qid = 0; qid < edev->num_rss; qid++)
-                       buf[cnt] += QEDE_RQSTATS_DATA(edev, sidx, qid);
-               cnt++;
+       for (i = 0; i < QEDE_NUM_STATS; i++) {
+               if (qede_is_irrelevant_stat(edev, i))
+                       continue;
+               *buf = *((u64 *)(((void *)&edev->stats) +
+                                qede_stats_arr[i].offset));
+
+               buf++;
        }
 
-       mutex_unlock(&edev->qede_lock);
+       __qede_unlock(edev);
 }
 
 static int qede_get_sset_count(struct net_device *dev, int stringset)
 {
        struct qede_dev *edev = netdev_priv(dev);
-       int num_stats = QEDE_NUM_STATS;
+       int num_stats = QEDE_NUM_STATS, i;
 
        switch (stringset) {
        case ETH_SS_STATS:
-               if (IS_VF(edev)) {
-                       int i;
+               for (i = 0; i < QEDE_NUM_STATS; i++)
+                       if (qede_is_irrelevant_stat(edev, i))
+                               num_stats--;
+
+               /* Account for the Regular Tx statistics */
+               num_stats += QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS;
+
+               /* Account for the Regular Rx statistics */
+               num_stats += QEDE_RSS_COUNT(edev) * QEDE_NUM_RQSTATS;
+
+               /* Account for XDP statistics [if needed] */
+               if (edev->xdp_prog)
+                       num_stats += QEDE_RSS_COUNT(edev) * QEDE_NUM_TQSTATS;
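+               /* Example: 8 Rx + 8 Tx queues with an XDP program
+                * attached yields 8 * QEDE_NUM_RQSTATS +
+                * (8 + 8) * QEDE_NUM_TQSTATS queue entries on top of
+                * the device-wide stats that pass the filter above.
+                */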
+               return num_stats;
 
-                       for (i = 0; i < QEDE_NUM_STATS; i++)
-                               if (qede_stats_arr[i].pf_only)
-                                       num_stats--;
-               }
-               return num_stats + QEDE_NUM_RQSTATS;
        case ETH_SS_PRIV_FLAGS:
                return QEDE_PRI_FLAG_LEN;
        case ETH_SS_TEST:
@@ -254,6 +399,8 @@ static int qede_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
        struct qede_dev *edev = netdev_priv(dev);
        struct qed_link_output current_link;
 
+       __qede_lock(edev);
+
        memset(&current_link, 0, sizeof(current_link));
        edev->ops->common->get_link(edev->cdev, &current_link);
 
@@ -266,6 +413,9 @@ static int qede_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                cmd->duplex = DUPLEX_UNKNOWN;
                ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
        }
+
+       __qede_unlock(edev);
+
        cmd->port = current_link.port;
        cmd->autoneg = (current_link.autoneg) ? AUTONEG_ENABLE :
                                                AUTONEG_DISABLE;
@@ -364,12 +514,50 @@ static void qede_get_drvinfo(struct net_device *ndev,
        strlcpy(info->bus_info, pci_name(edev->pdev), sizeof(info->bus_info));
 }
 
+static void qede_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+       struct qede_dev *edev = netdev_priv(ndev);
+
+       if (edev->dev_info.common.wol_support) {
+               wol->supported = WAKE_MAGIC;
+               wol->wolopts = edev->wol_enabled ? WAKE_MAGIC : 0;
+       }
+}
+
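+/* Only magic-packet wake is exposed; from userspace this corresponds to
+ * `ethtool -s <if> wol g` to enable and `ethtool -s <if> wol d` to
+ * disable it.
+ */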
+static int qede_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+       struct qede_dev *edev = netdev_priv(ndev);
+       bool wol_requested;
+       int rc;
+
+       if (wol->wolopts & ~WAKE_MAGIC) {
+               DP_INFO(edev,
+                       "Can't support WoL options other than magic-packet\n");
+               return -EINVAL;
+       }
+
+       wol_requested = !!(wol->wolopts & WAKE_MAGIC);
+       if (wol_requested == edev->wol_enabled)
+               return 0;
+
+       /* Need to actually change configuration */
+       if (!edev->dev_info.common.wol_support) {
+               DP_INFO(edev, "Device doesn't support WoL\n");
+               return -EINVAL;
+       }
+
+       rc = edev->ops->common->update_wol(edev->cdev, wol_requested);
+       if (!rc)
+               edev->wol_enabled = wol_requested;
+
+       return rc;
+}
+
 static u32 qede_get_msglevel(struct net_device *ndev)
 {
        struct qede_dev *edev = netdev_priv(ndev);
 
-       return ((u32)edev->dp_level << QED_LOG_LEVEL_SHIFT) |
-              edev->dp_module;
+       return ((u32)edev->dp_level << QED_LOG_LEVEL_SHIFT) | edev->dp_module;
 }
 
 static void qede_set_msglevel(struct net_device *ndev, u32 level)
@@ -393,8 +581,7 @@ static int qede_nway_reset(struct net_device *dev)
        struct qed_link_params link_params;
 
        if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
-               DP_INFO(edev,
-                       "Link settings are not allowed to be changed\n");
+               DP_INFO(edev, "Link settings are not allowed to be changed\n");
                return -EOPNOTSUPP;
        }
 
@@ -467,7 +654,7 @@ static int qede_set_coalesce(struct net_device *dev,
 
        rxc = (u16)coal->rx_coalesce_usecs;
        txc = (u16)coal->tx_coalesce_usecs;
-       for_each_rss(i) {
+       for_each_queue(i) {
                sb_id = edev->fp_array[i].sb_info->igu_sb_id;
                rc = edev->ops->common->set_coalesce(edev->cdev, rxc, txc,
                                                     (u8)i, sb_id);
@@ -516,8 +703,7 @@ static int qede_set_ringparam(struct net_device *dev,
        edev->q_num_rx_buffers = ering->rx_pending;
        edev->q_num_tx_buffers = ering->tx_pending;
 
-       if (netif_running(edev->ndev))
-               qede_reload(edev, NULL, NULL);
+       qede_reload(edev, NULL, false);
 
        return 0;
 }
@@ -563,7 +749,7 @@ static int qede_set_pauseparam(struct net_device *dev,
        memset(&params, 0, sizeof(params));
        params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
        if (epause->autoneg) {
-               if (!(current_link.supported_caps & SUPPORTED_Autoneg)) {
+               if (!(current_link.supported_caps & QED_LM_Autoneg_BIT)) {
                        DP_INFO(edev, "autoneg not supported\n");
                        return -EINVAL;
                }
@@ -580,9 +766,32 @@ static int qede_set_pauseparam(struct net_device *dev,
        return 0;
 }
 
-static void qede_update_mtu(struct qede_dev *edev, union qede_reload_args *args)
+static void qede_get_regs(struct net_device *ndev,
+                         struct ethtool_regs *regs, void *buffer)
+{
+       struct qede_dev *edev = netdev_priv(ndev);
+
+       regs->version = 0;
+       memset(buffer, 0, regs->len);
+
+       if (edev->ops && edev->ops->common)
+               edev->ops->common->dbg_all_data(edev->cdev, buffer);
+}
+
+static int qede_get_regs_len(struct net_device *ndev)
 {
-       edev->ndev->mtu = args->mtu;
+       struct qede_dev *edev = netdev_priv(ndev);
+
+       if (edev->ops && edev->ops->common)
+               return edev->ops->common->dbg_all_data_size(edev->cdev);
+       else
+               return -EINVAL;
+}
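+
+/* The ethtool core queries get_regs_len() first and allocates the buffer
+ * it then hands to get_regs(), so dbg_all_data() always receives a
+ * buffer of dbg_all_data_size() bytes.
+ */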
+
+static void qede_update_mtu(struct qede_dev *edev,
+                           struct qede_reload_args *args)
+{
+       edev->ndev->mtu = args->u.mtu;
 }
 
 /* Netdevice NDOs */
@@ -591,7 +800,7 @@ static void qede_update_mtu(struct qede_dev *edev, union qede_reload_args *args)
 int qede_change_mtu(struct net_device *ndev, int new_mtu)
 {
        struct qede_dev *edev = netdev_priv(ndev);
-       union qede_reload_args args;
+       struct qede_reload_args args;
 
        if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
            ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
@@ -602,14 +811,13 @@ int qede_change_mtu(struct net_device *ndev, int new_mtu)
        DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
                   "Configuring MTU size of %d\n", new_mtu);
 
-       /* Set the mtu field and re-start the interface if needed*/
-       args.mtu = new_mtu;
-
-       if (netif_running(edev->ndev))
-               qede_reload(edev, &qede_update_mtu, &args);
-
-       qede_update_mtu(edev, &args);
+       /* Set the mtu field and re-start the interface if needed */
+       args.u.mtu = new_mtu;
+       args.func = &qede_update_mtu;
+       qede_reload(edev, &args, false);
 
+       edev->ops->common->update_mtu(edev->cdev, new_mtu);
        return 0;
 }
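+
+/* qede_reload() presumably quiesces the device, invokes args->func()
+ * (qede_update_mtu() here) in between, and restarts it; the explicit
+ * update_mtu() call afterwards also hands the new value to the qed core.
+ */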
 
@@ -619,51 +827,91 @@ static void qede_get_channels(struct net_device *dev,
        struct qede_dev *edev = netdev_priv(dev);
 
        channels->max_combined = QEDE_MAX_RSS_CNT(edev);
-       channels->combined_count = QEDE_RSS_CNT(edev);
+       channels->max_rx = QEDE_MAX_RSS_CNT(edev);
+       channels->max_tx = QEDE_MAX_RSS_CNT(edev);
+       channels->combined_count = QEDE_QUEUE_CNT(edev) - edev->fp_num_tx -
+                                       edev->fp_num_rx;
+       channels->tx_count = edev->fp_num_tx;
+       channels->rx_count = edev->fp_num_rx;
 }
 
 static int qede_set_channels(struct net_device *dev,
                             struct ethtool_channels *channels)
 {
        struct qede_dev *edev = netdev_priv(dev);
+       u32 count;
 
        DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
                   "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n",
                   channels->rx_count, channels->tx_count,
                   channels->other_count, channels->combined_count);
 
-       /* We don't support separate rx / tx, nor `other' channels. */
-       if (channels->rx_count || channels->tx_count ||
-           channels->other_count || (channels->combined_count == 0) ||
-           (channels->combined_count > QEDE_MAX_RSS_CNT(edev))) {
+       count = channels->rx_count + channels->tx_count +
+                       channels->combined_count;
+
+       /* We don't support `other' channels */
+       if (channels->other_count) {
                DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
                           "command parameters not supported\n");
                return -EINVAL;
        }
 
+       if (!(channels->combined_count || (channels->rx_count &&
+                                          channels->tx_count))) {
+               DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+                          "need to request at least one transmit and one receive channel\n");
+               return -EINVAL;
+       }
+
+       if (count > QEDE_MAX_RSS_CNT(edev)) {
+               DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+                          "requested channels = %d max supported channels = %d\n",
+                          count, QEDE_MAX_RSS_CNT(edev));
+               return -EINVAL;
+       }
+
        /* Check if there was a change in the active parameters */
-       if (channels->combined_count == QEDE_RSS_CNT(edev)) {
+       if ((count == QEDE_QUEUE_CNT(edev)) &&
+           (channels->tx_count == edev->fp_num_tx) &&
+           (channels->rx_count == edev->fp_num_rx)) {
                DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
                           "No change in active parameters\n");
                return 0;
        }
 
        /* We need the number of queues to be divisible between the hwfns */
-       if (channels->combined_count % edev->dev_info.common.num_hwfns) {
+       if ((count % edev->dev_info.common.num_hwfns) ||
+           (channels->tx_count % edev->dev_info.common.num_hwfns) ||
+           (channels->rx_count % edev->dev_info.common.num_hwfns)) {
                DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
-                          "Number of channels must be divisable by %04x\n",
+                          "Number of channels must be divisible by %04x\n",
                           edev->dev_info.common.num_hwfns);
                return -EINVAL;
        }
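+       /* Example: a 100G device runs two hwfns (CMT), so a request for
+        * 3 combined channels is rejected here while 4 (two per engine)
+        * is accepted.
+        */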
 
        /* Set number of queues and reload if necessary */
-       edev->req_rss = channels->combined_count;
-       if (netif_running(dev))
-               qede_reload(edev, NULL, NULL);
+       edev->req_queues = count;
+       edev->req_num_tx = channels->tx_count;
+       edev->req_num_rx = channels->rx_count;
+       /* Reset the indirection table if rx queue count is updated */
+       if ((edev->req_queues - edev->req_num_tx) != QEDE_RSS_COUNT(edev)) {
+               edev->rss_params_inited &= ~QEDE_RSS_INDIR_INITED;
+               memset(edev->rss_ind_table, 0, sizeof(edev->rss_ind_table));
+       }
+
+       qede_reload(edev, NULL, false);
 
        return 0;
 }
 
+static int qede_get_ts_info(struct net_device *dev,
+                           struct ethtool_ts_info *info)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+
+       return qede_ptp_get_ts_info(edev, info);
+}
+
 static int qede_set_phys_id(struct net_device *dev,
                            enum ethtool_phys_id_state state)
 {
@@ -702,11 +950,11 @@ static int qede_get_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
                info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
                break;
        case UDP_V4_FLOW:
-               if (edev->rss_params.rss_caps & QED_RSS_IPV4_UDP)
+               if (edev->rss_caps & QED_RSS_IPV4_UDP)
                        info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
                break;
        case UDP_V6_FLOW:
-               if (edev->rss_params.rss_caps & QED_RSS_IPV6_UDP)
+               if (edev->rss_caps & QED_RSS_IPV6_UDP)
                        info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
                break;
        case IPV4_FLOW:
@@ -727,7 +975,7 @@ static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
 
        switch (info->cmd) {
        case ETHTOOL_GRXRINGS:
-               info->data = edev->num_rss;
+               info->data = QEDE_RSS_COUNT(edev);
                return 0;
        case ETHTOOL_GRXFH:
                return qede_get_rss_flags(edev, info);
@@ -739,8 +987,9 @@ static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
 
 static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
 {
-       struct qed_update_vport_params vport_update_params;
+       struct qed_update_vport_params *vport_update_params;
        u8 set_caps = 0, clr_caps = 0;
+       int rc = 0;
 
        DP_VERBOSE(edev, QED_MSG_DEBUG,
                   "Set rss flags command parameters: flow type = %d, data = %llu\n",
@@ -815,27 +1064,29 @@ static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
        }
 
        /* No action is needed if there is no change in the rss capability */
-       if (edev->rss_params.rss_caps == ((edev->rss_params.rss_caps &
-                                          ~clr_caps) | set_caps))
+       if (edev->rss_caps == ((edev->rss_caps & ~clr_caps) | set_caps))
                return 0;
 
        /* Update internal configuration */
-       edev->rss_params.rss_caps = (edev->rss_params.rss_caps & ~clr_caps) |
-                                   set_caps;
+       edev->rss_caps = ((edev->rss_caps & ~clr_caps) | set_caps);
        edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
 
        /* Re-configure if possible */
-       if (netif_running(edev->ndev)) {
-               memset(&vport_update_params, 0, sizeof(vport_update_params));
-               vport_update_params.update_rss_flg = 1;
-               vport_update_params.vport_id = 0;
-               memcpy(&vport_update_params.rss_params, &edev->rss_params,
-                      sizeof(vport_update_params.rss_params));
-               return edev->ops->vport_update(edev->cdev,
-                                              &vport_update_params);
+       __qede_lock(edev);
+       if (edev->state == QEDE_STATE_OPEN) {
+               vport_update_params = vzalloc(sizeof(*vport_update_params));
+               if (!vport_update_params) {
+                       __qede_unlock(edev);
+                       return -ENOMEM;
+               }
+               qede_fill_rss_params(edev, &vport_update_params->rss_params,
+                                    &vport_update_params->update_rss_flg);
+               rc = edev->ops->vport_update(edev->cdev, vport_update_params);
+               vfree(vport_update_params);
        }
+       __qede_unlock(edev);
 
-       return 0;
+       return rc;
 }
 
 static int qede_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
@@ -860,7 +1111,7 @@ static u32 qede_get_rxfh_key_size(struct net_device *dev)
 {
        struct qede_dev *edev = netdev_priv(dev);
 
-       return sizeof(edev->rss_params.rss_key);
+       return sizeof(edev->rss_key);
 }
 
 static int qede_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
@@ -875,11 +1126,10 @@ static int qede_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
                return 0;
 
        for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
-               indir[i] = edev->rss_params.rss_ind_table[i];
+               indir[i] = edev->rss_ind_table[i];
 
        if (key)
-               memcpy(key, edev->rss_params.rss_key,
-                      qede_get_rxfh_key_size(dev));
+               memcpy(key, edev->rss_key, qede_get_rxfh_key_size(dev));
 
        return 0;
 }
@@ -887,9 +1137,15 @@ static int qede_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
 static int qede_set_rxfh(struct net_device *dev, const u32 *indir,
                         const u8 *key, const u8 hfunc)
 {
-       struct qed_update_vport_params vport_update_params;
+       struct qed_update_vport_params *vport_update_params;
        struct qede_dev *edev = netdev_priv(dev);
-       int i;
+       int i, rc = 0;
+
+       if (edev->dev_info.common.num_hwfns > 1) {
+               DP_INFO(edev,
+                       "RSS configuration is not supported for 100G devices\n");
+               return -EOPNOTSUPP;
+       }
 
        if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
                return -EOPNOTSUPP;
@@ -899,27 +1155,30 @@ static int qede_set_rxfh(struct net_device *dev, const u32 *indir,
 
        if (indir) {
                for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
-                       edev->rss_params.rss_ind_table[i] = indir[i];
+                       edev->rss_ind_table[i] = indir[i];
                edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
        }
 
        if (key) {
-               memcpy(&edev->rss_params.rss_key, key,
-                      qede_get_rxfh_key_size(dev));
+               memcpy(&edev->rss_key, key, qede_get_rxfh_key_size(dev));
                edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
        }
 
-       if (netif_running(edev->ndev)) {
-               memset(&vport_update_params, 0, sizeof(vport_update_params));
-               vport_update_params.update_rss_flg = 1;
-               vport_update_params.vport_id = 0;
-               memcpy(&vport_update_params.rss_params, &edev->rss_params,
-                      sizeof(vport_update_params.rss_params));
-               return edev->ops->vport_update(edev->cdev,
-                                              &vport_update_params);
+       __qede_lock(edev);
+       if (edev->state == QEDE_STATE_OPEN) {
+               vport_update_params = vzalloc(sizeof(*vport_update_params));
+               if (!vport_update_params) {
+                       __qede_unlock(edev);
+                       return -ENOMEM;
+               }
+               qede_fill_rss_params(edev, &vport_update_params->rss_params,
+                                    &vport_update_params->update_rss_flg);
+               rc = edev->ops->vport_update(edev->cdev, vport_update_params);
+               vfree(vport_update_params);
        }
+       __qede_unlock(edev);
 
-       return 0;
+       return rc;
 }
 
 /* This function enables the interrupt generation and the NAPI on the device */
@@ -930,7 +1189,7 @@ static void qede_netif_start(struct qede_dev *edev)
        if (!netif_running(edev->ndev))
                return;
 
-       for_each_rss(i) {
+       for_each_queue(i) {
                /* Update and reenable interrupts */
                qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_ENABLE, 1);
                napi_enable(&edev->fp_array[i].napi);
@@ -942,7 +1201,7 @@ static void qede_netif_stop(struct qede_dev *edev)
 {
        int i;
 
-       for_each_rss(i) {
+       for_each_queue(i) {
                napi_disable(&edev->fp_array[i].napi);
                /* Disable interrupts */
                qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_DISABLE, 0);
@@ -952,14 +1211,26 @@ static void qede_netif_stop(struct qede_dev *edev)
 static int qede_selftest_transmit_traffic(struct qede_dev *edev,
                                          struct sk_buff *skb)
 {
-       struct qede_tx_queue *txq = &edev->fp_array[0].txqs[0];
+       struct qede_tx_queue *txq = NULL;
        struct eth_tx_1st_bd *first_bd;
        dma_addr_t mapping;
        int i, idx, val;
 
+       for_each_queue(i) {
+               if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
+                       txq = edev->fp_array[i].txq;
+                       break;
+               }
+       }
+
+       if (!txq) {
+               DP_NOTICE(edev, "Tx path is not available\n");
+               return -1;
+       }
+
        /* Fill the entry in the SW ring and the BDs in the FW ring */
        idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
-       txq->sw_tx_ring[idx].skb = skb;
+       txq->sw_tx_ring.skbs[idx].skb = skb;
        first_bd = qed_chain_produce(&txq->tx_pbl);
        memset(first_bd, 0, sizeof(*first_bd));
        val = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
@@ -1010,69 +1281,95 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
        }
 
        first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
-       dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
-                      BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
+       dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+                        BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
        txq->sw_tx_cons++;
-       txq->sw_tx_ring[idx].skb = NULL;
+       txq->sw_tx_ring.skbs[idx].skb = NULL;
 
        return 0;
 }
 
 static int qede_selftest_receive_traffic(struct qede_dev *edev)
 {
-       struct qede_rx_queue *rxq = edev->fp_array[0].rxq;
        u16 hw_comp_cons, sw_comp_cons, sw_rx_index, len;
        struct eth_fast_path_rx_reg_cqe *fp_cqe;
+       struct qede_rx_queue *rxq = NULL;
        struct sw_rx_data *sw_rx_data;
        union eth_rx_cqe *cqe;
+       int i, iter, rc = 0;
        u8 *data_ptr;
-       int i;
+
+       for_each_queue(i) {
+               if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
+                       rxq = edev->fp_array[i].rxq;
+                       break;
+               }
+       }
+
+       if (!rxq) {
+               DP_NOTICE(edev, "Rx path is not available\n");
+               return -1;
+       }
 
        /* The packet is expected to be received on rx-queue 0 even though
         * RSS is enabled. This is because queue 0 is configured as the
         * default queue and the loopback traffic is not IP.
         */
-       for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
-               if (qede_has_rx_work(rxq))
+       for (iter = 0; iter < QEDE_SELFTEST_POLL_COUNT; iter++) {
+               if (!qede_has_rx_work(rxq)) {
+                       usleep_range(100, 200);
+                       continue;
+               }
+
+               hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
+               sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+               /* Memory barrier to prevent the CPU from speculatively
+                * reading the CQE/BD before reading hw_comp_cons. If the
+                * CQE were read early, and the FW then wrote the CQE and SB
+                * before the CPU read hw_comp_cons, the CPU would operate
+                * on a stale CQE.
+                */
+               rmb();
+
+               /* Get the CQE from the completion ring */
+               cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
+
+               /* Get the data from the SW ring */
+               sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
+               sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
+               fp_cqe = &cqe->fast_path_regular;
+               len = le16_to_cpu(fp_cqe->len_on_first_bd);
+               data_ptr = (u8 *)(page_address(sw_rx_data->data) +
+                                 fp_cqe->placement_offset +
+                                 sw_rx_data->page_offset);
+               if (ether_addr_equal(data_ptr, edev->ndev->dev_addr) &&
+                   ether_addr_equal(data_ptr + ETH_ALEN,
+                                    edev->ndev->dev_addr)) {
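+                       /* Check the payload; the loopback transmitter is
+                        * expected to have filled it with the repeating
+                        * byte pattern (i & 0xff).
+                        */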
+                       for (i = ETH_HLEN; i < len; i++)
+                               if (data_ptr[i] != (unsigned char)(i & 0xff)) {
+                                       rc = -1;
+                                       break;
+                               }
+
+                       qede_recycle_rx_bd_ring(rxq, 1);
+                       qed_chain_recycle_consumed(&rxq->rx_comp_ring);
                        break;
-               usleep_range(100, 200);
+               }
+
+               DP_INFO(edev, "Not the transmitted packet\n");
+               qede_recycle_rx_bd_ring(rxq, 1);
+               qed_chain_recycle_consumed(&rxq->rx_comp_ring);
        }
 
-       if (!qede_has_rx_work(rxq)) {
+       if (iter == QEDE_SELFTEST_POLL_COUNT) {
                DP_NOTICE(edev, "Failed to receive the traffic\n");
                return -1;
        }
 
-       hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
-       sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+       qede_update_rx_prod(edev, rxq);
 
-       /* Memory barrier to prevent the CPU from doing speculative reads of CQE
-        * / BD before reading hw_comp_cons. If the CQE is read before it is
-        * written by FW, then FW writes CQE and SB, and then the CPU reads the
-        * hw_comp_cons, it will use an old CQE.
-        */
-       rmb();
-
-       /* Get the CQE from the completion ring */
-       cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
-
-       /* Get the data from the SW ring */
-       sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
-       sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
-       fp_cqe = &cqe->fast_path_regular;
-       len =  le16_to_cpu(fp_cqe->len_on_first_bd);
-       data_ptr = (u8 *)(page_address(sw_rx_data->data) +
-                    fp_cqe->placement_offset + sw_rx_data->page_offset);
-       for (i = ETH_HLEN; i < len; i++)
-               if (data_ptr[i] != (unsigned char)(i & 0xff)) {
-                       DP_NOTICE(edev, "Loopback test failed\n");
-                       qede_recycle_rx_bd_ring(rxq, edev, 1);
-                       return -1;
-               }
-
-       qede_recycle_rx_bd_ring(rxq, edev, 1);
-
-       return 0;
+       return rc;
 }
 
 static int qede_selftest_run_loopback(struct qede_dev *edev, u32 loopback_mode)
@@ -1183,6 +1480,11 @@ static void qede_self_test(struct net_device *dev,
                buf[QEDE_ETHTOOL_CLOCK_TEST] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }
+
+       if (edev->ops->common->selftest->selftest_nvram(edev->cdev)) {
+               buf[QEDE_ETHTOOL_NVRAM_TEST] = 1;
+               etest->flags |= ETH_TEST_FL_FAILED;
+       }
 }
 
 static int qede_set_tunable(struct net_device *dev,
@@ -1231,6 +1533,10 @@ static const struct ethtool_ops qede_ethtool_ops = {
        .get_settings = qede_get_settings,
        .set_settings = qede_set_settings,
        .get_drvinfo = qede_get_drvinfo,
+       .get_regs_len = qede_get_regs_len,
+       .get_regs = qede_get_regs,
+       .get_wol = qede_get_wol,
+       .set_wol = qede_set_wol,
        .get_msglevel = qede_get_msglevel,
        .set_msglevel = qede_set_msglevel,
        .nway_reset = qede_nway_reset,
@@ -1252,6 +1558,7 @@ static const struct ethtool_ops qede_ethtool_ops = {
        .get_rxfh_key_size = qede_get_rxfh_key_size,
        .get_rxfh = qede_get_rxfh,
        .set_rxfh = qede_set_rxfh,
+       .get_ts_info = qede_get_ts_info,
        .get_channels = qede_get_channels,
        .set_channels = qede_set_channels,
        .self_test = qede_self_test,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
new file mode 100644 (file)
index 0000000..8c594a3
--- /dev/null
@@ -0,0 +1,1200 @@
+/* QLogic qede NIC Driver
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <net/udp_tunnel.h>
+#include <linux/bitops.h>
+#include <linux/vmalloc.h>
+
+#include <linux/qed/qed_if.h>
+#include "qede.h"
+
+#ifdef CONFIG_RFS_ACCEL
+struct qede_arfs_tuple {
+       union {
+               __be32 src_ipv4;
+               struct in6_addr src_ipv6;
+       };
+       union {
+               __be32 dst_ipv4;
+               struct in6_addr dst_ipv6;
+       };
+       __be16  src_port;
+       __be16  dst_port;
+       __be16  eth_proto;
+       u8      ip_proto;
+};
+
+struct qede_arfs_fltr_node {
+#define QEDE_FLTR_VALID         0
+       unsigned long state;
+
+       /* pointer to aRFS packet buffer */
+       void *data;
+
+       /* dma map address of aRFS packet buffer */
+       dma_addr_t mapping;
+
+       /* length of aRFS packet buffer */
+       int buf_len;
+
+       /* tuples to hold from aRFS packet buffer */
+       struct qede_arfs_tuple tuple;
+
+       u32 flow_id;
+       u16 sw_id;
+       u16 rxq_id;
+       u16 next_rxq_id;
+       bool filter_op;
+       bool used;
+       struct hlist_node node;
+};
+
+struct qede_arfs {
+#define QEDE_ARFS_POLL_COUNT   100
+#define QEDE_RFS_FLW_BITSHIFT  (4)
+#define QEDE_RFS_FLW_MASK      ((1 << QEDE_RFS_FLW_BITSHIFT) - 1)
+       struct hlist_head       arfs_hl_head[1 << QEDE_RFS_FLW_BITSHIFT];
+
+       /* lock for filter list access */
+       spinlock_t              arfs_list_lock;
+       unsigned long           *arfs_fltr_bmap;
+       int                     filter_count;
+       bool                    enable;
+};
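+
+/* Filters are hashed into 1 << QEDE_RFS_FLW_BITSHIFT (16) buckets by the
+ * low bits of the skb flow hash; e.g. skb_get_hash_raw() == 0x1234ab
+ * lands in bucket 0xb (see qede_rx_flow_steer() below).
+ */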
+
+static void qede_configure_arfs_fltr(struct qede_dev *edev,
+                                    struct qede_arfs_fltr_node *n,
+                                    u16 rxq_id, bool add_fltr)
+{
+       const struct qed_eth_ops *op = edev->ops;
+
+       if (n->used)
+               return;
+
+       DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
+                  "%s arfs filter flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
+                  add_fltr ? "Adding" : "Deleting",
+                  n->flow_id, n->sw_id, ntohs(n->tuple.src_port),
+                  ntohs(n->tuple.dst_port), rxq_id);
+
+       n->used = true;
+       n->filter_op = add_fltr;
+       op->ntuple_filter_config(edev->cdev, n, n->mapping, n->buf_len, 0,
+                                rxq_id, add_fltr);
+}
+
+static void
+qede_free_arfs_filter(struct qede_dev *edev, struct qede_arfs_fltr_node *fltr)
+{
+       kfree(fltr->data);
+       clear_bit(fltr->sw_id, edev->arfs->arfs_fltr_bmap);
+       kfree(fltr);
+}
+
+void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc)
+{
+       struct qede_arfs_fltr_node *fltr = filter;
+       struct qede_dev *edev = dev;
+
+       if (fw_rc) {
+               DP_NOTICE(edev,
+                         "Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
+                         fw_rc, fltr->flow_id, fltr->sw_id,
+                         ntohs(fltr->tuple.src_port),
+                         ntohs(fltr->tuple.dst_port), fltr->rxq_id);
+
+               spin_lock_bh(&edev->arfs->arfs_list_lock);
+
+               fltr->used = false;
+               clear_bit(QEDE_FLTR_VALID, &fltr->state);
+
+               spin_unlock_bh(&edev->arfs->arfs_list_lock);
+               return;
+       }
+
+       spin_lock_bh(&edev->arfs->arfs_list_lock);
+
+       fltr->used = false;
+
+       if (fltr->filter_op) {
+               set_bit(QEDE_FLTR_VALID, &fltr->state);
+               if (fltr->rxq_id != fltr->next_rxq_id)
+                       qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id,
+                                                false);
+       } else {
+               clear_bit(QEDE_FLTR_VALID, &fltr->state);
+               if (fltr->rxq_id != fltr->next_rxq_id) {
+                       fltr->rxq_id = fltr->next_rxq_id;
+                       qede_configure_arfs_fltr(edev, fltr,
+                                                fltr->rxq_id, true);
+               }
+       }
+
+       spin_unlock_bh(&edev->arfs->arfs_list_lock);
+}
+
+/* Should be called while qede_lock is held */
+void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr)
+{
+       int i;
+
+       for (i = 0; i <= QEDE_RFS_FLW_MASK; i++) {
+               struct hlist_node *temp;
+               struct hlist_head *head;
+               struct qede_arfs_fltr_node *fltr;
+
+               head = &edev->arfs->arfs_hl_head[i];
+
+               hlist_for_each_entry_safe(fltr, temp, head, node) {
+                       bool del = false;
+
+                       if (edev->state != QEDE_STATE_OPEN)
+                               del = true;
+
+                       spin_lock_bh(&edev->arfs->arfs_list_lock);
+
+                       if ((!test_bit(QEDE_FLTR_VALID, &fltr->state) &&
+                            !fltr->used) || free_fltr) {
+                               hlist_del(&fltr->node);
+                               dma_unmap_single(&edev->pdev->dev,
+                                                fltr->mapping,
+                                                fltr->buf_len, DMA_TO_DEVICE);
+                               qede_free_arfs_filter(edev, fltr);
+                               edev->arfs->filter_count--;
+                       } else {
+                               if ((rps_may_expire_flow(edev->ndev,
+                                                        fltr->rxq_id,
+                                                        fltr->flow_id,
+                                                        fltr->sw_id) || del) &&
+                                   !free_fltr)
+                                       qede_configure_arfs_fltr(edev, fltr,
+                                                                fltr->rxq_id,
+                                                                false);
+                       }
+
+                       spin_unlock_bh(&edev->arfs->arfs_list_lock);
+               }
+       }
+
+       spin_lock_bh(&edev->arfs->arfs_list_lock);
+
+       if (!edev->arfs->filter_count) {
+               if (edev->arfs->enable) {
+                       edev->arfs->enable = false;
+                       edev->ops->configure_arfs_searcher(edev->cdev, false);
+               }
+       } else {
+               set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
+               schedule_delayed_work(&edev->sp_task,
+                                     QEDE_SP_TASK_POLL_DELAY);
+       }
+
+       spin_unlock_bh(&edev->arfs->arfs_list_lock);
+}
+
+/* This function waits until all aRFS filters get deleted and freed.
+ * On timeout it frees all filters forcefully.
+ */
+void qede_poll_for_freeing_arfs_filters(struct qede_dev *edev)
+{
+       int count = QEDE_ARFS_POLL_COUNT;
+
+       while (count) {
+               qede_process_arfs_filters(edev, false);
+
+               if (!edev->arfs->filter_count)
+                       break;
+
+               msleep(100);
+               count--;
+       }
+
+       if (!count) {
+               DP_NOTICE(edev, "Timeout in polling for arfs filter free\n");
+
+               /* Something is terribly wrong, free forcefully */
+               qede_process_arfs_filters(edev, true);
+       }
+}
+
+int qede_alloc_arfs(struct qede_dev *edev)
+{
+       int i;
+
+       edev->arfs = vzalloc(sizeof(*edev->arfs));
+       if (!edev->arfs)
+               return -ENOMEM;
+
+       spin_lock_init(&edev->arfs->arfs_list_lock);
+
+       for (i = 0; i <= QEDE_RFS_FLW_MASK; i++)
+               INIT_HLIST_HEAD(&edev->arfs->arfs_hl_head[i]);
+
+       edev->ndev->rx_cpu_rmap = alloc_irq_cpu_rmap(QEDE_RSS_COUNT(edev));
+       if (!edev->ndev->rx_cpu_rmap) {
+               vfree(edev->arfs);
+               edev->arfs = NULL;
+               return -ENOMEM;
+       }
+
+       edev->arfs->arfs_fltr_bmap = vzalloc(BITS_TO_LONGS(QEDE_RFS_MAX_FLTR) *
+                                            sizeof(long));
+       if (!edev->arfs->arfs_fltr_bmap) {
+               free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);
+               edev->ndev->rx_cpu_rmap = NULL;
+               vfree(edev->arfs);
+               edev->arfs = NULL;
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+void qede_free_arfs(struct qede_dev *edev)
+{
+       if (!edev->arfs)
+               return;
+
+       if (edev->ndev->rx_cpu_rmap)
+               free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);
+
+       edev->ndev->rx_cpu_rmap = NULL;
+       vfree(edev->arfs->arfs_fltr_bmap);
+       edev->arfs->arfs_fltr_bmap = NULL;
+       vfree(edev->arfs);
+       edev->arfs = NULL;
+}
+
+static bool qede_compare_ip_addr(struct qede_arfs_fltr_node *tpos,
+                                const struct sk_buff *skb)
+{
+       if (skb->protocol == htons(ETH_P_IP))
+               return tpos->tuple.src_ipv4 == ip_hdr(skb)->saddr &&
+                      tpos->tuple.dst_ipv4 == ip_hdr(skb)->daddr;
+
+       return !memcmp(&tpos->tuple.src_ipv6, &ipv6_hdr(skb)->saddr,
+                      sizeof(struct in6_addr)) &&
+              !memcmp(&tpos->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr,
+                      sizeof(struct in6_addr));
+}
+
+static struct qede_arfs_fltr_node *
+qede_arfs_htbl_key_search(struct hlist_head *h, const struct sk_buff *skb,
+                         __be16 src_port, __be16 dst_port, u8 ip_proto)
+{
+       struct qede_arfs_fltr_node *tpos;
+
+       hlist_for_each_entry(tpos, h, node)
+               if (tpos->tuple.ip_proto == ip_proto &&
+                   tpos->tuple.eth_proto == skb->protocol &&
+                   qede_compare_ip_addr(tpos, skb) &&
+                   tpos->tuple.src_port == src_port &&
+                   tpos->tuple.dst_port == dst_port)
+                       return tpos;
+
+       return NULL;
+}
+
+static struct qede_arfs_fltr_node *
+qede_alloc_filter(struct qede_dev *edev, int min_hlen)
+{
+       struct qede_arfs_fltr_node *n;
+       int bit_id;
+
+       bit_id = find_first_zero_bit(edev->arfs->arfs_fltr_bmap,
+                                    QEDE_RFS_MAX_FLTR);
+
+       if (bit_id >= QEDE_RFS_MAX_FLTR)
+               return NULL;
+
+       n = kzalloc(sizeof(*n), GFP_ATOMIC);
+       if (!n)
+               return NULL;
+
+       n->data = kzalloc(min_hlen, GFP_ATOMIC);
+       if (!n->data) {
+               kfree(n);
+               return NULL;
+       }
+
+       n->sw_id = (u16)bit_id;
+       set_bit(bit_id, edev->arfs->arfs_fltr_bmap);
+       return n;
+}
+
+int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+                      u16 rxq_index, u32 flow_id)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       struct qede_arfs_fltr_node *n;
+       int min_hlen, rc, tp_offset;
+       struct ethhdr *eth;
+       __be16 *ports;
+       u16 tbl_idx;
+       u8 ip_proto;
+
+       if (skb->encapsulation)
+               return -EPROTONOSUPPORT;
+
+       if (skb->protocol != htons(ETH_P_IP) &&
+           skb->protocol != htons(ETH_P_IPV6))
+               return -EPROTONOSUPPORT;
+
+       if (skb->protocol == htons(ETH_P_IP)) {
+               ip_proto = ip_hdr(skb)->protocol;
+               tp_offset = sizeof(struct iphdr);
+       } else {
+               ip_proto = ipv6_hdr(skb)->nexthdr;
+               tp_offset = sizeof(struct ipv6hdr);
+       }
+
+       if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP)
+               return -EPROTONOSUPPORT;
+
+       ports = (__be16 *)(skb->data + tp_offset);
+       tbl_idx = skb_get_hash_raw(skb) & QEDE_RFS_FLW_MASK;
+
+       spin_lock_bh(&edev->arfs->arfs_list_lock);
+
+       n = qede_arfs_htbl_key_search(&edev->arfs->arfs_hl_head[tbl_idx],
+                                     skb, ports[0], ports[1], ip_proto);
+
+       if (n) {
+               /* Filter match */
+               n->next_rxq_id = rxq_index;
+
+               if (test_bit(QEDE_FLTR_VALID, &n->state)) {
+                       if (n->rxq_id != rxq_index)
+                               qede_configure_arfs_fltr(edev, n, n->rxq_id,
+                                                        false);
+               } else {
+                       if (!n->used) {
+                               n->rxq_id = rxq_index;
+                               qede_configure_arfs_fltr(edev, n, n->rxq_id,
+                                                        true);
+                       }
+               }
+
+               rc = n->sw_id;
+               goto ret_unlock;
+       }
+
+       min_hlen = ETH_HLEN + skb_headlen(skb);
+
+       n = qede_alloc_filter(edev, min_hlen);
+       if (!n) {
+               rc = -ENOMEM;
+               goto ret_unlock;
+       }
+
+       n->buf_len = min_hlen;
+       n->rxq_id = rxq_index;
+       n->next_rxq_id = rxq_index;
+       n->tuple.src_port = ports[0];
+       n->tuple.dst_port = ports[1];
+       n->flow_id = flow_id;
+
+       if (skb->protocol == htons(ETH_P_IP)) {
+               n->tuple.src_ipv4 = ip_hdr(skb)->saddr;
+               n->tuple.dst_ipv4 = ip_hdr(skb)->daddr;
+       } else {
+               memcpy(&n->tuple.src_ipv6, &ipv6_hdr(skb)->saddr,
+                      sizeof(struct in6_addr));
+               memcpy(&n->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr,
+                      sizeof(struct in6_addr));
+       }
+
+       eth = (struct ethhdr *)n->data;
+       eth->h_proto = skb->protocol;
+       n->tuple.eth_proto = skb->protocol;
+       n->tuple.ip_proto = ip_proto;
+       memcpy(n->data + ETH_HLEN, skb->data, skb_headlen(skb));
+
+       n->mapping = dma_map_single(&edev->pdev->dev, n->data,
+                                   n->buf_len, DMA_TO_DEVICE);
+       if (dma_mapping_error(&edev->pdev->dev, n->mapping)) {
+               DP_NOTICE(edev, "Failed to map DMA memory for arfs\n");
+               qede_free_arfs_filter(edev, n);
+               rc = -ENOMEM;
+               goto ret_unlock;
+       }
+
+       INIT_HLIST_NODE(&n->node);
+       hlist_add_head(&n->node, &edev->arfs->arfs_hl_head[tbl_idx]);
+       edev->arfs->filter_count++;
+
+       if (edev->arfs->filter_count == 1 && !edev->arfs->enable) {
+               edev->ops->configure_arfs_searcher(edev->cdev, true);
+               edev->arfs->enable = true;
+       }
+
+       qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
+
+       spin_unlock_bh(&edev->arfs->arfs_list_lock);
+
+       set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
+       schedule_delayed_work(&edev->sp_task, 0);
+       return n->sw_id;
+
+ret_unlock:
+       spin_unlock_bh(&edev->arfs->arfs_list_lock);
+       return rc;
+}
+#endif
+
+void qede_force_mac(void *dev, u8 *mac, bool forced)
+{
+       struct qede_dev *edev = dev;
+
+       /* MAC hints take effect only if we haven't set one already */
+       if (is_valid_ether_addr(edev->ndev->dev_addr) && !forced)
+               return;
+
+       ether_addr_copy(edev->ndev->dev_addr, mac);
+       ether_addr_copy(edev->primary_mac, mac);
+}
+
+void qede_fill_rss_params(struct qede_dev *edev,
+                         struct qed_update_vport_rss_params *rss, u8 *update)
+{
+       bool need_reset = false;
+       int i;
+
+       if (QEDE_RSS_COUNT(edev) <= 1) {
+               memset(rss, 0, sizeof(*rss));
+               *update = 0;
+               return;
+       }
+
+       /* Need to validate current RSS config uses valid entries */
+       for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+               if (edev->rss_ind_table[i] >= QEDE_RSS_COUNT(edev)) {
+                       need_reset = true;
+                       break;
+               }
+       }
+
+       if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) || need_reset) {
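+               /* ethtool_rxfh_indir_default(i, n) is simply i % n, so
+                * with e.g. four Rx queues the table becomes the
+                * repeating pattern 0, 1, 2, 3, ...
+                */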
+               for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+                       u16 indir_val, val;
+
+                       val = QEDE_RSS_COUNT(edev);
+                       indir_val = ethtool_rxfh_indir_default(i, val);
+                       edev->rss_ind_table[i] = indir_val;
+               }
+               edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
+       }
+
+       /* Now that we have the queue-indirection, prepare the handles */
+       for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+               u16 idx = QEDE_RX_QUEUE_IDX(edev, edev->rss_ind_table[i]);
+
+               rss->rss_ind_table[i] = edev->fp_array[idx].rxq->handle;
+       }
+
+       if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) {
+               netdev_rss_key_fill(edev->rss_key, sizeof(edev->rss_key));
+               edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
+       }
+       memcpy(rss->rss_key, edev->rss_key, sizeof(rss->rss_key));
+
+       if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) {
+               edev->rss_caps = QED_RSS_IPV4 | QED_RSS_IPV6 |
+                   QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
+               edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
+       }
+       rss->rss_caps = edev->rss_caps;
+
+       *update = 1;
+}
+
+static int qede_set_ucast_rx_mac(struct qede_dev *edev,
+                                enum qed_filter_xcast_params_type opcode,
+                                unsigned char mac[ETH_ALEN])
+{
+       struct qed_filter_params filter_cmd;
+
+       memset(&filter_cmd, 0, sizeof(filter_cmd));
+       filter_cmd.type = QED_FILTER_TYPE_UCAST;
+       filter_cmd.filter.ucast.type = opcode;
+       filter_cmd.filter.ucast.mac_valid = 1;
+       ether_addr_copy(filter_cmd.filter.ucast.mac, mac);
+
+       return edev->ops->filter_config(edev->cdev, &filter_cmd);
+}
+
+static int qede_set_ucast_rx_vlan(struct qede_dev *edev,
+                                 enum qed_filter_xcast_params_type opcode,
+                                 u16 vid)
+{
+       struct qed_filter_params filter_cmd;
+
+       memset(&filter_cmd, 0, sizeof(filter_cmd));
+       filter_cmd.type = QED_FILTER_TYPE_UCAST;
+       filter_cmd.filter.ucast.type = opcode;
+       filter_cmd.filter.ucast.vlan_valid = 1;
+       filter_cmd.filter.ucast.vlan = vid;
+
+       return edev->ops->filter_config(edev->cdev, &filter_cmd);
+}
+
+static int qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
+{
+       struct qed_update_vport_params *params;
+       int rc;
+
+       /* Proceed only if action actually needs to be performed */
+       if (edev->accept_any_vlan == action)
+               return 0;
+
+       params = vzalloc(sizeof(*params));
+       if (!params)
+               return -ENOMEM;
+
+       params->vport_id = 0;
+       params->accept_any_vlan = action;
+       params->update_accept_any_vlan_flg = 1;
+
+       rc = edev->ops->vport_update(edev->cdev, params);
+       if (rc) {
+               DP_ERR(edev, "Failed to %s accept-any-vlan\n",
+                      action ? "enable" : "disable");
+       } else {
+               DP_INFO(edev, "%s accept-any-vlan\n",
+                       action ? "enabled" : "disabled");
+               edev->accept_any_vlan = action;
+       }
+
+       vfree(params);
+       return rc;
+}
+
+int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       struct qede_vlan *vlan, *tmp;
+       int rc = 0;
+
+       DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid);
+
+       vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
+       if (!vlan) {
+               DP_INFO(edev, "Failed to allocate struct for vlan\n");
+               return -ENOMEM;
+       }
+       INIT_LIST_HEAD(&vlan->list);
+       vlan->vid = vid;
+       vlan->configured = false;
+
+       /* Verify vlan isn't already configured */
+       list_for_each_entry(tmp, &edev->vlan_list, list) {
+               if (tmp->vid == vlan->vid) {
+                       DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+                                  "vlan already configured\n");
+                       kfree(vlan);
+                       return -EEXIST;
+               }
+       }
+
+       /* If interface is down, cache this VLAN ID and return */
+       __qede_lock(edev);
+       if (edev->state != QEDE_STATE_OPEN) {
+               DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
+                          "Interface is down, VLAN %d will be configured when interface is up\n",
+                          vid);
+               if (vid != 0)
+                       edev->non_configured_vlans++;
+               list_add(&vlan->list, &edev->vlan_list);
+               goto out;
+       }
+
+       /* Check for the filter limit.
+        * Note - vlan0 has a reserved filter and can be added without
+        * worrying about quota
+        */
+       if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) ||
+           (vlan->vid == 0)) {
+               rc = qede_set_ucast_rx_vlan(edev,
+                                           QED_FILTER_XCAST_TYPE_ADD,
+                                           vlan->vid);
+               if (rc) {
+                       DP_ERR(edev, "Failed to configure VLAN %d\n",
+                              vlan->vid);
+                       kfree(vlan);
+                       goto out;
+               }
+               vlan->configured = true;
+
+               /* The vlan0 filter doesn't consume our quota */
+               if (vlan->vid != 0)
+                       edev->configured_vlans++;
+       } else {
+               /* Out of quota; Activate accept-any-VLAN mode */
+               if (!edev->non_configured_vlans) {
+                       rc = qede_config_accept_any_vlan(edev, true);
+                       if (rc) {
+                               kfree(vlan);
+                               goto out;
+                       }
+               }
+
+               edev->non_configured_vlans++;
+       }
+
+       list_add(&vlan->list, &edev->vlan_list);
+
+out:
+       __qede_unlock(edev);
+       return rc;
+}
+
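+/* Example: with dev_info.num_vlan_filters == 2, vids 10 and 20 consume
+ * both hardware filters; adding a third vid flips the vport into
+ * accept-any-vlan mode until enough filters are freed again.
+ */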
+static void qede_del_vlan_from_list(struct qede_dev *edev,
+                                   struct qede_vlan *vlan)
+{
+       /* The vlan0 filter doesn't consume our quota */
+       if (vlan->vid != 0) {
+               if (vlan->configured)
+                       edev->configured_vlans--;
+               else
+                       edev->non_configured_vlans--;
+       }
+
+       list_del(&vlan->list);
+       kfree(vlan);
+}
+
+int qede_configure_vlan_filters(struct qede_dev *edev)
+{
+       int rc = 0, real_rc = 0, accept_any_vlan = 0;
+       struct qed_dev_eth_info *dev_info;
+       struct qede_vlan *vlan = NULL;
+
+       if (list_empty(&edev->vlan_list))
+               return 0;
+
+       dev_info = &edev->dev_info;
+
+       /* Configure non-configured vlans */
+       list_for_each_entry(vlan, &edev->vlan_list, list) {
+               if (vlan->configured)
+                       continue;
+
+               /* We have used all our credits, now enable accept_any_vlan */
+               if ((vlan->vid != 0) &&
+                   (edev->configured_vlans == dev_info->num_vlan_filters)) {
+                       accept_any_vlan = 1;
+                       continue;
+               }
+
+               DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid);
+
+               rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD,
+                                           vlan->vid);
+               if (rc) {
+                       DP_ERR(edev, "Failed to configure VLAN %u\n",
+                              vlan->vid);
+                       real_rc = rc;
+                       continue;
+               }
+
+               vlan->configured = true;
+               /* vlan0 filter doesn't consume our VLAN filter's quota */
+               if (vlan->vid != 0) {
+                       edev->non_configured_vlans--;
+                       edev->configured_vlans++;
+               }
+       }
+
+       /* Enable accept_any_vlan mode if we have more VLANs than hardware
+        * credits, or disable it once a non-configured vlan has been
+        * removed and all remaining vlans are actually configured.
+        */
+
+       if (accept_any_vlan)
+               rc = qede_config_accept_any_vlan(edev, true);
+       else if (!edev->non_configured_vlans)
+               rc = qede_config_accept_any_vlan(edev, false);
+
+       if (rc && !real_rc)
+               real_rc = rc;
+
+       return real_rc;
+}
+
+int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       struct qede_vlan *vlan = NULL, *tmp;
+       int rc = 0;
+
+       DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);
+
+       /* Find whether entry exists */
+       __qede_lock(edev);
+       list_for_each_entry(tmp, &edev->vlan_list, list) {
+               if (tmp->vid == vid) {
+                       vlan = tmp;
+                       break;
+               }
+       }
+
+       if (!vlan) {
+               DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+                          "Vlan isn't configured\n");
+               goto out;
+       }
+
+       if (edev->state != QEDE_STATE_OPEN) {
+               /* The interface is already down, so there is no vport from
+                * which to remove the vlan filter; just update the vlan list.
+                */
+               DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
+                          "Interface is down, removing VLAN from list only\n");
+               qede_del_vlan_from_list(edev, vlan);
+               goto out;
+       }
+
+       /* Remove vlan */
+       if (vlan->configured) {
+               rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL,
+                                           vid);
+               if (rc) {
+                       DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
+                       goto out;
+               }
+       }
+
+       qede_del_vlan_from_list(edev, vlan);
+
+       /* We have removed a VLAN - try to see if we can
+        * configure non-configured VLAN from the list.
+        */
+       rc = qede_configure_vlan_filters(edev);
+
+out:
+       __qede_unlock(edev);
+       return rc;
+}
+
+void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
+{
+       struct qede_vlan *vlan = NULL;
+
+       if (list_empty(&edev->vlan_list))
+               return;
+
+       list_for_each_entry(vlan, &edev->vlan_list, list) {
+               if (!vlan->configured)
+                       continue;
+
+               vlan->configured = false;
+
+               /* The vlan0 filter doesn't consume from our quota */
+               if (vlan->vid != 0) {
+                       edev->non_configured_vlans++;
+                       edev->configured_vlans--;
+               }
+
+               DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
+                          "marked vlan %d as non-configured\n", vlan->vid);
+       }
+
+       edev->accept_any_vlan = false;
+}
+
+static void qede_set_features_reload(struct qede_dev *edev,
+                                    struct qede_reload_args *args)
+{
+       edev->ndev->features = args->u.features;
+}
+
+int qede_set_features(struct net_device *dev, netdev_features_t features)
+{
+       struct qede_dev *edev = netdev_priv(dev);
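+       /* Bits set in 'changes' are exactly the feature bits being toggled */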
+       netdev_features_t changes = features ^ dev->features;
+       bool need_reload = false;
+
+       /* No action needed if hardware GRO is disabled during driver load */
+       if (changes & NETIF_F_GRO) {
+               if (dev->features & NETIF_F_GRO)
+                       need_reload = !edev->gro_disable;
+               else
+                       need_reload = edev->gro_disable;
+       }
+
+       if (need_reload) {
+               struct qede_reload_args args;
+
+               args.u.features = features;
+               args.func = &qede_set_features_reload;
+
+               /* Make sure that we definitely need to reload.
+                * In case of an eBPF attached program, there will be no FW
+                * aggregations, so no need to actually reload.
+                */
+               __qede_lock(edev);
+               if (edev->xdp_prog)
+                       args.func(edev, &args);
+               else
+                       qede_reload(edev, &args, true);
+               __qede_unlock(edev);
+
+               return 1;
+       }
+
+       return 0;
+}
+
+void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       u16 t_port = ntohs(ti->port);
+
+       switch (ti->type) {
+       case UDP_TUNNEL_TYPE_VXLAN:
+               if (edev->vxlan_dst_port)
+                       return;
+
+               edev->vxlan_dst_port = t_port;
+
+               DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n",
+                          t_port);
+
+               set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
+               break;
+       case UDP_TUNNEL_TYPE_GENEVE:
+               if (edev->geneve_dst_port)
+                       return;
+
+               edev->geneve_dst_port = t_port;
+
+               DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d\n",
+                          t_port);
+               set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
+               break;
+       default:
+               return;
+       }
+
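+       /* The actual HW configuration is deferred to the slowpath task */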
+       schedule_delayed_work(&edev->sp_task, 0);
+}
+
+void qede_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       u16 t_port = ntohs(ti->port);
+
+       switch (ti->type) {
+       case UDP_TUNNEL_TYPE_VXLAN:
+               if (t_port != edev->vxlan_dst_port)
+                       return;
+
+               edev->vxlan_dst_port = 0;
+
+               DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n",
+                          t_port);
+
+               set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
+               break;
+       case UDP_TUNNEL_TYPE_GENEVE:
+               if (t_port != edev->geneve_dst_port)
+                       return;
+
+               edev->geneve_dst_port = 0;
+
+               DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n",
+                          t_port);
+               set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
+               break;
+       default:
+               return;
+       }
+
+       schedule_delayed_work(&edev->sp_task, 0);
+}
+
+static void qede_xdp_reload_func(struct qede_dev *edev,
+                                struct qede_reload_args *args)
+{
+       struct bpf_prog *old;
+
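+       /* Publish the new program atomically and release our reference on
+        * the program it replaces, if any.
+        */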
+       old = xchg(&edev->xdp_prog, args->u.new_prog);
+       if (old)
+               bpf_prog_put(old);
+}
+
+static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog)
+{
+       struct qede_reload_args args;
+
+       /* If we're called, there was already a bpf reference increment */
+       args.func = &qede_xdp_reload_func;
+       args.u.new_prog = prog;
+       qede_reload(edev, &args, false);
+
+       return 0;
+}
+
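+/* ndo_xdp entry point. As an illustrative call path (assuming standard
+ * iproute2 tooling), `ip link set dev <ifname> xdp obj prog.o` ends up
+ * here with XDP_SETUP_PROG and an already-referenced bpf_prog.
+ */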
+int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+
+       if (IS_VF(edev)) {
+               DP_NOTICE(edev, "VFs don't support XDP\n");
+               return -EOPNOTSUPP;
+       }
+
+       switch (xdp->command) {
+       case XDP_SETUP_PROG:
+               return qede_xdp_set(edev, xdp->prog);
+       case XDP_QUERY_PROG:
+               xdp->prog_attached = !!edev->xdp_prog;
+               return 0;
+       default:
+               return -EINVAL;
+       }
+}
+
+static int qede_set_mcast_rx_mac(struct qede_dev *edev,
+                                enum qed_filter_xcast_params_type opcode,
+                                unsigned char *mac, int num_macs)
+{
+       struct qed_filter_params filter_cmd;
+       int i;
+
+       memset(&filter_cmd, 0, sizeof(filter_cmd));
+       filter_cmd.type = QED_FILTER_TYPE_MCAST;
+       filter_cmd.filter.mcast.type = opcode;
+       filter_cmd.filter.mcast.num = num_macs;
+
+       for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
+               ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);
+
+       return edev->ops->filter_config(edev->cdev, &filter_cmd);
+}
+
+int qede_set_mac_addr(struct net_device *ndev, void *p)
+{
+       struct qede_dev *edev = netdev_priv(ndev);
+       struct sockaddr *addr = p;
+       int rc;
+
+       ASSERT_RTNL(); /* @@@TBD To be removed */
+
+       DP_INFO(edev, "Set_mac_addr called\n");
+
+       if (!is_valid_ether_addr(addr->sa_data)) {
+               DP_NOTICE(edev, "The MAC address is not valid\n");
+               return -EFAULT;
+       }
+
+       if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
+               DP_NOTICE(edev, "qed prevents setting MAC\n");
+               return -EINVAL;
+       }
+
+       ether_addr_copy(ndev->dev_addr, addr->sa_data);
+
+       if (!netif_running(ndev)) {
+               DP_NOTICE(edev, "The device is currently down\n");
+               return 0;
+       }
+
+       /* Remove the previous primary mac */
+       rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
+                                  edev->primary_mac);
+       if (rc)
+               return rc;
+
+       edev->ops->common->update_mac(edev->cdev, addr->sa_data);
+
+       /* Add MAC filter according to the new unicast HW MAC address */
+       ether_addr_copy(edev->primary_mac, ndev->dev_addr);
+       return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
+                                     edev->primary_mac);
+}
+
+static int
+qede_configure_mcast_filtering(struct net_device *ndev,
+                              enum qed_filter_rx_mode_type *accept_flags)
+{
+       struct qede_dev *edev = netdev_priv(ndev);
+       unsigned char *mc_macs, *temp;
+       struct netdev_hw_addr *ha;
+       int rc = 0, mc_count;
+       size_t size;
+
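+       /* Room for up to 64 multicast MACs; anything beyond that falls back
+        * to multicast-promiscuous mode below.
+        */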
+       size = 64 * ETH_ALEN;
+
+       mc_macs = kzalloc(size, GFP_KERNEL);
+       if (!mc_macs) {
+               DP_NOTICE(edev,
+                         "Failed to allocate memory for multicast MACs\n");
+               rc = -ENOMEM;
+               goto exit;
+       }
+
+       temp = mc_macs;
+
+       /* Remove all previously configured MAC filters */
+       rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
+                                  mc_macs, 1);
+       if (rc)
+               goto exit;
+
+       netif_addr_lock_bh(ndev);
+
+       mc_count = netdev_mc_count(ndev);
+       if (mc_count < 64) {
+               netdev_for_each_mc_addr(ha, ndev) {
+                       ether_addr_copy(temp, ha->addr);
+                       temp += ETH_ALEN;
+               }
+       }
+
+       netif_addr_unlock_bh(ndev);
+
+       /* Check for all multicast @@@TBD resource allocation */
+       if ((ndev->flags & IFF_ALLMULTI) || (mc_count > 64)) {
+               if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR)
+                       *accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
+       } else {
+               /* Add all multicast MAC filters */
+               rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
+                                          mc_macs, mc_count);
+       }
+
+exit:
+       kfree(mc_macs);
+       return rc;
+}
+
+void qede_set_rx_mode(struct net_device *ndev)
+{
+       struct qede_dev *edev = netdev_priv(ndev);
+
+       set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
+       schedule_delayed_work(&edev->sp_task, 0);
+}
+
+/* Must be called with qede_lock held */
+void qede_config_rx_mode(struct net_device *ndev)
+{
+       enum qed_filter_rx_mode_type accept_flags;
+       struct qede_dev *edev = netdev_priv(ndev);
+       struct qed_filter_params rx_mode;
+       unsigned char *uc_macs, *temp;
+       struct netdev_hw_addr *ha;
+       int rc, uc_count;
+       size_t size;
+
+       netif_addr_lock_bh(ndev);
+
+       uc_count = netdev_uc_count(ndev);
+       size = uc_count * ETH_ALEN;
+
+       uc_macs = kzalloc(size, GFP_ATOMIC);
+       if (!uc_macs) {
+               DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
+               netif_addr_unlock_bh(ndev);
+               return;
+       }
+
+       temp = uc_macs;
+       netdev_for_each_uc_addr(ha, ndev) {
+               ether_addr_copy(temp, ha->addr);
+               temp += ETH_ALEN;
+       }
+
+       netif_addr_unlock_bh(ndev);
+
+       /* Configure the struct for the Rx mode */
+       memset(&rx_mode, 0, sizeof(struct qed_filter_params));
+       rx_mode.type = QED_FILTER_TYPE_RX_MODE;
+
+       /* Remove all previous unicast secondary macs and multicast macs
+        * (configure / leave the primary mac)
+        */
+       rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
+                                  edev->primary_mac);
+       if (rc)
+               goto out;
+
+       /* Check for promiscuous */
+       if (ndev->flags & IFF_PROMISC)
+               accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
+       else
+               accept_flags = QED_FILTER_RX_MODE_TYPE_REGULAR;
+
+       /* Configure all filters regardless, in case promisc is rejected */
+       if (uc_count < edev->dev_info.num_mac_filters) {
+               int i;
+
+               temp = uc_macs;
+               for (i = 0; i < uc_count; i++) {
+                       rc = qede_set_ucast_rx_mac(edev,
+                                                  QED_FILTER_XCAST_TYPE_ADD,
+                                                  temp);
+                       if (rc)
+                               goto out;
+
+                       temp += ETH_ALEN;
+               }
+       } else {
+               accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
+       }
+
+       rc = qede_configure_mcast_filtering(ndev, &accept_flags);
+       if (rc)
+               goto out;
+
+       /* take care of VLAN mode */
+       if (ndev->flags & IFF_PROMISC) {
+               qede_config_accept_any_vlan(edev, true);
+       } else if (!edev->non_configured_vlans) {
+               /* It's possible that accept_any_vlan mode is set due to a
+                * previous setting of IFF_PROMISC. If vlan credits are
+                * sufficient, disable accept_any_vlan.
+                */
+               qede_config_accept_any_vlan(edev, false);
+       }
+
+       rx_mode.filter.accept_flags = accept_flags;
+       edev->ops->filter_config(edev->cdev, &rx_mode);
+out:
+       kfree(uc_macs);
+}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
new file mode 100644 (file)
index 0000000..e8a0cc0
--- /dev/null
@@ -0,0 +1,1706 @@
+/* QLogic qede NIC Driver
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <net/udp_tunnel.h>
+#include <linux/ip.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <net/ip6_checksum.h>
+#include "qede_ptp.h"
+
+#include <linux/qed/qed_if.h>
+#include "qede.h"
+/*********************************
+ * Content also used by slowpath *
+ *********************************/
+
+int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy)
+{
+       struct sw_rx_data *sw_rx_data;
+       struct eth_rx_bd *rx_bd;
+       dma_addr_t mapping;
+       struct page *data;
+
+       /* In case lazy-allocation is allowed, postpone allocation until the
+        * end of the NAPI run. We'd still need to make sure the Rx ring has
+        * sufficient buffers to guarantee an additional Rx interrupt.
+        */
+       if (allow_lazy && likely(rxq->filled_buffers > 12)) {
+               rxq->filled_buffers--;
+               return 0;
+       }
+
+       data = alloc_pages(GFP_ATOMIC, 0);
+       if (unlikely(!data))
+               return -ENOMEM;
+
+       /* Map the entire page, as it may be split into multiple
+        * Rx buffer segments of rx_buf_seg_size each.
+        */
+       mapping = dma_map_page(rxq->dev, data, 0,
+                              PAGE_SIZE, rxq->data_direction);
+       if (unlikely(dma_mapping_error(rxq->dev, mapping))) {
+               __free_page(data);
+               return -ENOMEM;
+       }
+
+       sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+       sw_rx_data->page_offset = 0;
+       sw_rx_data->data = data;
+       sw_rx_data->mapping = mapping;
+
+       /* Advance PROD and get BD pointer */
+       rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
+       WARN_ON(!rx_bd);
+       rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
+       rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping) +
+                                    rxq->rx_headroom);
+
+       rxq->sw_rx_prod++;
+       rxq->filled_buffers++;
+
+       return 0;
+}
+
+/* Unmap the data and free skb */
+int qede_free_tx_pkt(struct qede_dev *edev, struct qede_tx_queue *txq, int *len)
+{
+       u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
+       struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
+       struct eth_tx_1st_bd *first_bd;
+       struct eth_tx_bd *tx_data_bd;
+       int bds_consumed = 0;
+       int nbds;
+       bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD;
+       int i, split_bd_len = 0;
+
+       if (unlikely(!skb)) {
+               DP_ERR(edev,
+                      "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n",
+                      idx, txq->sw_tx_cons, txq->sw_tx_prod);
+               return -1;
+       }
+
+       *len = skb->len;
+
+       first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
+
+       bds_consumed++;
+
+       nbds = first_bd->data.nbds;
+
+       if (data_split) {
+               struct eth_tx_bd *split = (struct eth_tx_bd *)
+                       qed_chain_consume(&txq->tx_pbl);
+               split_bd_len = BD_UNMAP_LEN(split);
+               bds_consumed++;
+       }
+       dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+                        BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
+
+       /* Unmap the data of the skb frags */
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
+               tx_data_bd = (struct eth_tx_bd *)
+                       qed_chain_consume(&txq->tx_pbl);
+               dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
+                              BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
+       }
+
+       while (bds_consumed++ < nbds)
+               qed_chain_consume(&txq->tx_pbl);
+
+       /* Free skb */
+       dev_kfree_skb_any(skb);
+       txq->sw_tx_ring.skbs[idx].skb = NULL;
+       txq->sw_tx_ring.skbs[idx].flags = 0;
+
+       return 0;
+}
+
+/* Unmap the data and free skb when mapping failed during start_xmit */
+static void qede_free_failed_tx_pkt(struct qede_tx_queue *txq,
+                                   struct eth_tx_1st_bd *first_bd,
+                                   int nbd, bool data_split)
+{
+       u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+       struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
+       struct eth_tx_bd *tx_data_bd;
+       int i, split_bd_len = 0;
+
+       /* Return prod to its position before this skb was handled */
+       qed_chain_set_prod(&txq->tx_pbl,
+                          le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);
+
+       first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
+
+       if (data_split) {
+               struct eth_tx_bd *split = (struct eth_tx_bd *)
+                                         qed_chain_produce(&txq->tx_pbl);
+               split_bd_len = BD_UNMAP_LEN(split);
+               nbd--;
+       }
+
+       dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd),
+                        BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
+
+       /* Unmap the data of the skb frags */
+       for (i = 0; i < nbd; i++) {
+               tx_data_bd = (struct eth_tx_bd *)
+                       qed_chain_produce(&txq->tx_pbl);
+               if (tx_data_bd->nbytes)
+                       dma_unmap_page(txq->dev,
+                                      BD_UNMAP_ADDR(tx_data_bd),
+                                      BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
+       }
+
+       /* Once again, return prod to its position before this skb was handled */
+       qed_chain_set_prod(&txq->tx_pbl,
+                          le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);
+
+       /* Free skb */
+       dev_kfree_skb_any(skb);
+       txq->sw_tx_ring.skbs[idx].skb = NULL;
+       txq->sw_tx_ring.skbs[idx].flags = 0;
+}
+
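+/* Classify an skb for transmit: returns a bitmask of XMIT_* flags
+ * (plain, L4 csum, LSO, encapsulation) based on the checksum offload
+ * and GSO state, and flags IPv6 extension headers via *ipv6_ext.
+ */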
+static u32 qede_xmit_type(struct sk_buff *skb, int *ipv6_ext)
+{
+       u32 rc = XMIT_L4_CSUM;
+       __be16 l3_proto;
+
+       if (skb->ip_summed != CHECKSUM_PARTIAL)
+               return XMIT_PLAIN;
+
+       l3_proto = vlan_get_protocol(skb);
+       if (l3_proto == htons(ETH_P_IPV6) &&
+           (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
+               *ipv6_ext = 1;
+
+       if (skb->encapsulation) {
+               rc |= XMIT_ENC;
+               if (skb_is_gso(skb)) {
+                       unsigned short gso_type = skb_shinfo(skb)->gso_type;
+
+                       if ((gso_type & SKB_GSO_UDP_TUNNEL_CSUM) ||
+                           (gso_type & SKB_GSO_GRE_CSUM))
+                               rc |= XMIT_ENC_GSO_L4_CSUM;
+
+                       rc |= XMIT_LSO;
+                       return rc;
+               }
+       }
+
+       if (skb_is_gso(skb))
+               rc |= XMIT_LSO;
+
+       return rc;
+}
+
+static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
+                                        struct eth_tx_2nd_bd *second_bd,
+                                        struct eth_tx_3rd_bd *third_bd)
+{
+       u8 l4_proto;
+       u16 bd2_bits1 = 0, bd2_bits2 = 0;
+
+       bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);
+
+       bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
+                    ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
+                   << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
+
+       bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
+                     ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);
+
+       if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
+               l4_proto = ipv6_hdr(skb)->nexthdr;
+       else
+               l4_proto = ip_hdr(skb)->protocol;
+
+       if (l4_proto == IPPROTO_UDP)
+               bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
+
+       if (third_bd)
+               third_bd->data.bitfields |=
+                       cpu_to_le16(((tcp_hdrlen(skb) / 4) &
+                               ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
+                               ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT);
+
+       second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1);
+       second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
+}
+
+static int map_frag_to_bd(struct qede_tx_queue *txq,
+                         skb_frag_t *frag, struct eth_tx_bd *bd)
+{
+       dma_addr_t mapping;
+
+       /* Map skb non-linear frag data for DMA */
+       mapping = skb_frag_dma_map(txq->dev, frag, 0,
+                                  skb_frag_size(frag), DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(txq->dev, mapping)))
+               return -ENOMEM;
+
+       /* Setup the data pointer of the frag data */
+       BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));
+
+       return 0;
+}
+
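+/* Length of the packet headers, up to and including the (inner) TCP
+ * header, measured from the start of the linear data.
+ */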
+static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
+{
+       if (is_encap_pkt)
+               return (skb_inner_transport_header(skb) +
+                       inner_tcp_hdrlen(skb) - skb->data);
+       else
+               return (skb_transport_header(skb) +
+                       tcp_hdrlen(skb) - skb->data);
+}
+
+/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
+#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
+static bool qede_pkt_req_lin(struct sk_buff *skb, u8 xmit_type)
+{
+       int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1;
+
+       if (xmit_type & XMIT_LSO) {
+               int hlen;
+
+               hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC);
+
+               /* linear payload would require its own BD */
+               if (skb_headlen(skb) > hlen)
+                       allowed_frags--;
+       }
+
+       return (skb_shinfo(skb)->nr_frags > allowed_frags);
+}
+#endif
+
+static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
+{
+       /* wmb makes sure that the BDs data is updated before updating the
+        * producer, otherwise FW may read old data from the BDs.
+        */
+       wmb();
+       barrier();
+       writel(txq->tx_db.raw, txq->doorbell_addr);
+
+       /* mmiowb is needed to synchronize doorbell writes from more than one
+        * processor. It guarantees that the write arrives to the device before
+        * the queue lock is released and another start_xmit is called (possibly
+        * on another CPU). Without this barrier, the next doorbell can bypass
+        * this doorbell. This is applicable to IA64/Altix systems.
+        */
+       mmiowb();
+}
+
+static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
+                        struct sw_rx_data *metadata, u16 padding, u16 length)
+{
+       struct qede_tx_queue *txq = fp->xdp_tx;
+       u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+       struct eth_tx_1st_bd *first_bd;
+
+       if (!qed_chain_get_elem_left(&txq->tx_pbl)) {
+               txq->stopped_cnt++;
+               return -ENOMEM;
+       }
+
+       first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
+
+       memset(first_bd, 0, sizeof(*first_bd));
+       first_bd->data.bd_flags.bitfields =
+           BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT);
+       first_bd->data.bitfields |=
+           (length & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
+           ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
+       first_bd->data.nbds = 1;
+
+       /* We can safely ignore the offset, as it's 0 for XDP */
+       BD_SET_UNMAP_ADDR_LEN(first_bd, metadata->mapping + padding, length);
+
+       /* Synchronize the buffer back to device, as program [probably]
+        * has changed it.
+        */
+       dma_sync_single_for_device(&edev->pdev->dev,
+                                  metadata->mapping + padding,
+                                  length, PCI_DMA_TODEVICE);
+
+       txq->sw_tx_ring.xdp[idx].page = metadata->data;
+       txq->sw_tx_ring.xdp[idx].mapping = metadata->mapping;
+       txq->sw_tx_prod++;
+
+       /* Mark the fastpath for future XDP doorbell */
+       fp->xdp_xmit = 1;
+
+       return 0;
+}
+
+int qede_txq_has_work(struct qede_tx_queue *txq)
+{
+       u16 hw_bd_cons;
+
+       /* Tell compiler that consumer and producer can change */
+       barrier();
+       hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
+       if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1)
+               return 0;
+
+       return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
+}
+
+static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
+{
+       u16 hw_bd_cons, idx;
+
+       hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
+       barrier();
+
+       while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
+               qed_chain_consume(&txq->tx_pbl);
+               idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
+
+               dma_unmap_page(&edev->pdev->dev,
+                              txq->sw_tx_ring.xdp[idx].mapping,
+                              PAGE_SIZE, DMA_BIDIRECTIONAL);
+               __free_page(txq->sw_tx_ring.xdp[idx].page);
+
+               txq->sw_tx_cons++;
+               txq->xmit_pkts++;
+       }
+}
+
+static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
+{
+       struct netdev_queue *netdev_txq;
+       u16 hw_bd_cons;
+       unsigned int pkts_compl = 0, bytes_compl = 0;
+       int rc;
+
+       netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);
+
+       hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
+       barrier();
+
+       while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
+               int len = 0;
+
+               rc = qede_free_tx_pkt(edev, txq, &len);
+               if (rc) {
+                       DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n",
+                                 hw_bd_cons,
+                                 qed_chain_get_cons_idx(&txq->tx_pbl));
+                       break;
+               }
+
+               bytes_compl += len;
+               pkts_compl++;
+               txq->sw_tx_cons++;
+               txq->xmit_pkts++;
+       }
+
+       netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
+
+       /* Need to make the tx_bd_cons update visible to start_xmit()
+        * before checking for netif_tx_queue_stopped().  Without the
+        * memory barrier, there is a small possibility that
+        * start_xmit() will miss it and cause the queue to be stopped
+        * forever.
+        * On the other hand we need an rmb() here to ensure the proper
+        * ordering of bit testing in the following
+        * netif_tx_queue_stopped(txq) call.
+        */
+       smp_mb();
+
+       if (unlikely(netif_tx_queue_stopped(netdev_txq))) {
+               /* Taking tx_lock is needed to prevent re-enabling the queue
+                * while it's empty. This could have happened if rx_action() gets
+                * suspended in qede_tx_int() after the condition before
+                * netif_tx_wake_queue(), while tx_action (qede_start_xmit()):
+                *
+                * stops the queue->sees fresh tx_bd_cons->releases the queue->
+                * sends some packets consuming the whole queue again->
+                * stops the queue
+                */
+
+               __netif_tx_lock(netdev_txq, smp_processor_id());
+
+               if ((netif_tx_queue_stopped(netdev_txq)) &&
+                   (edev->state == QEDE_STATE_OPEN) &&
+                   (qed_chain_get_elem_left(&txq->tx_pbl)
+                     >= (MAX_SKB_FRAGS + 1))) {
+                       netif_tx_wake_queue(netdev_txq);
+                       DP_VERBOSE(edev, NETIF_MSG_TX_DONE,
+                                  "Wake queue was called\n");
+               }
+
+               __netif_tx_unlock(netdev_txq);
+       }
+
+       return 0;
+}
+
+bool qede_has_rx_work(struct qede_rx_queue *rxq)
+{
+       u16 hw_comp_cons, sw_comp_cons;
+
+       /* Tell compiler that status block fields can change */
+       barrier();
+
+       hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
+       sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+       return hw_comp_cons != sw_comp_cons;
+}
+
+static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
+{
+       qed_chain_consume(&rxq->rx_bd_ring);
+       rxq->sw_rx_cons++;
+}
+
+/* This function reuses the buffer (from an offset) from the
+ * consumer index to the producer index in the bd ring.
+ */
+static inline void qede_reuse_page(struct qede_rx_queue *rxq,
+                                  struct sw_rx_data *curr_cons)
+{
+       struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
+       struct sw_rx_data *curr_prod;
+       dma_addr_t new_mapping;
+
+       curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+       *curr_prod = *curr_cons;
+
+       new_mapping = curr_prod->mapping + curr_prod->page_offset;
+
+       rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping));
+       rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping) +
+                                         rxq->rx_headroom);
+
+       rxq->sw_rx_prod++;
+       curr_cons->data = NULL;
+}
+
+/* In case of allocation failures, reuse buffers from the consumer
+ * index to produce buffers for the firmware.
+ */
+void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count)
+{
+       struct sw_rx_data *curr_cons;
+
+       for (; count > 0; count--) {
+               curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
+               qede_reuse_page(rxq, curr_cons);
+               qede_rx_bd_ring_consume(rxq);
+       }
+}
+
+static inline int qede_realloc_rx_buffer(struct qede_rx_queue *rxq,
+                                        struct sw_rx_data *curr_cons)
+{
+       /* Move to the next segment in the page */
+       curr_cons->page_offset += rxq->rx_buf_seg_size;
+
+       if (curr_cons->page_offset == PAGE_SIZE) {
+               if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
+                       /* Since we failed to allocate new buffer
+                        * current buffer can be used again.
+                        */
+                       curr_cons->page_offset -= rxq->rx_buf_seg_size;
+
+                       return -ENOMEM;
+               }
+
+               dma_unmap_page(rxq->dev, curr_cons->mapping,
+                              PAGE_SIZE, rxq->data_direction);
+       } else {
+               /* Increment refcount of the page as we don't want
+                * network stack to take the ownership of the page
+                * which can be recycled multiple times by the driver.
+                */
+               page_ref_inc(curr_cons->data);
+               qede_reuse_page(rxq, curr_cons);
+       }
+
+       return 0;
+}
+
+void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
+{
+       u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
+       u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
+       struct eth_rx_prod_data rx_prods = {0};
+
+       /* Update producers */
+       rx_prods.bd_prod = cpu_to_le16(bd_prod);
+       rx_prods.cqe_prod = cpu_to_le16(cqe_prod);
+
+       /* Make sure that the BD and SGE data is updated before updating the
+        * producers since FW might read the BD/SGE right after the producer
+        * is updated.
+        */
+       wmb();
+
+       internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
+                       (u32 *)&rx_prods);
+
+       /* mmiowb is needed to synchronize doorbell writes from more than one
+        * processor. It guarantees that the write arrives to the device before
+        * the napi lock is released and another qede_poll is called (possibly
+        * on another CPU). Without this barrier, the next doorbell can bypass
+        * this doorbell. This is applicable to IA64/Altix systems.
+        */
+       mmiowb();
+}
+
+static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash)
+{
+       enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE;
+       enum rss_hash_type htype;
+       u32 hash = 0;
+
+       htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
+       if (htype) {
+               hash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
+                            (htype == RSS_HASH_TYPE_IPV6)) ?
+                           PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
+               hash = le32_to_cpu(rss_hash);
+       }
+       skb_set_hash(skb, hash, hash_type);
+}
+
+static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
+{
+       skb_checksum_none_assert(skb);
+
+       if (csum_flag & QEDE_CSUM_UNNECESSARY)
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+       if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY) {
+               skb->csum_level = 1;
+               skb->encapsulation = 1;
+       }
+}
+
+static inline void qede_skb_receive(struct qede_dev *edev,
+                                   struct qede_fastpath *fp,
+                                   struct qede_rx_queue *rxq,
+                                   struct sk_buff *skb, u16 vlan_tag)
+{
+       if (vlan_tag)
+               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+
+       napi_gro_receive(&fp->napi, skb);
+}
+
+static void qede_set_gro_params(struct qede_dev *edev,
+                               struct sk_buff *skb,
+                               struct eth_fast_path_rx_tpa_start_cqe *cqe)
+{
+       u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags);
+
+       if (((parsing_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) &
+           PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == 2)
+               skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+       else
+               skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+
+       skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) -
+                                   cqe->header_len;
+}
+
+static int qede_fill_frag_skb(struct qede_dev *edev,
+                             struct qede_rx_queue *rxq,
+                             u8 tpa_agg_index, u16 len_on_bd)
+{
+       struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons &
+                                                        NUM_RX_BDS_MAX];
+       struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index];
+       struct sk_buff *skb = tpa_info->skb;
+
+       if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
+               goto out;
+
+       /* Add one frag and update the appropriate fields in the skb */
+       skb_fill_page_desc(skb, tpa_info->frag_id++,
+                          current_bd->data, current_bd->page_offset,
+                          len_on_bd);
+
+       if (unlikely(qede_realloc_rx_buffer(rxq, current_bd))) {
+               /* Incr page ref count to reuse on allocation failure so
+                * that it doesn't get freed while freeing the SKB.
+                */
+               page_ref_inc(current_bd->data);
+               goto out;
+       }
+
+       qed_chain_consume(&rxq->rx_bd_ring);
+       rxq->sw_rx_cons++;
+
+       skb->data_len += len_on_bd;
+       skb->truesize += rxq->rx_buf_seg_size;
+       skb->len += len_on_bd;
+
+       return 0;
+
+out:
+       tpa_info->state = QEDE_AGG_STATE_ERROR;
+       qede_recycle_rx_bd_ring(rxq, 1);
+
+       return -ENOMEM;
+}
+
+static bool qede_tunn_exist(u16 flag)
+{
+       return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
+                         PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT));
+}
+
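+/* Derive the QEDE_CSUM_* verdict for a tunnelled packet from the CQE
+ * parsing flags: L4 checksum errors are considered only where the HW
+ * calculated a checksum, while IP header errors always yield
+ * QEDE_CSUM_ERROR.
+ */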
+static u8 qede_check_tunn_csum(u16 flag)
+{
+       u16 csum_flag = 0;
+       u8 tcsum = 0;
+
+       if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
+                   PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT))
+               csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
+                            PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;
+
+       if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+                   PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
+               csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
+                            PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
+               tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
+       }
+
+       csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
+                    PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
+                    PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
+                    PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
+
+       if (csum_flag & flag)
+               return QEDE_CSUM_ERROR;
+
+       return QEDE_CSUM_UNNECESSARY | tcsum;
+}
+
+static void qede_tpa_start(struct qede_dev *edev,
+                          struct qede_rx_queue *rxq,
+                          struct eth_fast_path_rx_tpa_start_cqe *cqe)
+{
+       struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
+       struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
+       struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
+       struct sw_rx_data *replace_buf = &tpa_info->buffer;
+       dma_addr_t mapping = tpa_info->buffer_mapping;
+       struct sw_rx_data *sw_rx_data_cons;
+       struct sw_rx_data *sw_rx_data_prod;
+
+       sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
+       sw_rx_data_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+
+       /* Use the pre-allocated replacement buffer - we can't release the
+        * agg. start until it's over and we don't want to risk allocation
+        * failing here, so re-allocate once the aggregation is over.
+        */
+       sw_rx_data_prod->mapping = replace_buf->mapping;
+
+       sw_rx_data_prod->data = replace_buf->data;
+       rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(mapping));
+       rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(mapping));
+       sw_rx_data_prod->page_offset = replace_buf->page_offset;
+
+       rxq->sw_rx_prod++;
+
+       /* Move the partial skb from cons to the pool (don't unmap yet) and
+        * save the mapping, in case we drop the packet later on.
+        */
+       tpa_info->buffer = *sw_rx_data_cons;
+       mapping = HILO_U64(le32_to_cpu(rx_bd_cons->addr.hi),
+                          le32_to_cpu(rx_bd_cons->addr.lo));
+
+       tpa_info->buffer_mapping = mapping;
+       rxq->sw_rx_cons++;
+
+       /* Set the tpa state to start only if we are able to allocate an
+        * skb for this aggregation; otherwise mark it as an error and the
+        * aggregation will be dropped.
+        */
+       tpa_info->skb = netdev_alloc_skb(edev->ndev,
+                                        le16_to_cpu(cqe->len_on_first_bd));
+       if (unlikely(!tpa_info->skb)) {
+               DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
+               tpa_info->state = QEDE_AGG_STATE_ERROR;
+               goto cons_buf;
+       }
+
+       /* Start filling in the aggregation info */
+       skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
+       tpa_info->frag_id = 0;
+       tpa_info->state = QEDE_AGG_STATE_START;
+
+       /* Store some information from first CQE */
+       tpa_info->start_cqe_placement_offset = cqe->placement_offset;
+       tpa_info->start_cqe_bd_len = le16_to_cpu(cqe->len_on_first_bd);
+       if ((le16_to_cpu(cqe->pars_flags.flags) >>
+            PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) &
+           PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK)
+               tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
+       else
+               tpa_info->vlan_tag = 0;
+
+       qede_get_rxhash(tpa_info->skb, cqe->bitfields, cqe->rss_hash);
+
+       /* This is needed in order to enable forwarding support */
+       qede_set_gro_params(edev, tpa_info->skb, cqe);
+
+cons_buf: /* We still need to handle bd_len_list to consume buffers */
+       if (likely(cqe->ext_bd_len_list[0]))
+               qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
+                                  le16_to_cpu(cqe->ext_bd_len_list[0]));
+
+       if (unlikely(cqe->ext_bd_len_list[1])) {
+               DP_ERR(edev,
+                      "Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n");
+               tpa_info->state = QEDE_AGG_STATE_ERROR;
+       }
+}
+
+#ifdef CONFIG_INET
+static void qede_gro_ip_csum(struct sk_buff *skb)
+{
+       const struct iphdr *iph = ip_hdr(skb);
+       struct tcphdr *th;
+
+       skb_set_transport_header(skb, sizeof(struct iphdr));
+       th = tcp_hdr(skb);
+
+       th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
+                                 iph->saddr, iph->daddr, 0);
+
+       tcp_gro_complete(skb);
+}
+
+static void qede_gro_ipv6_csum(struct sk_buff *skb)
+{
+       struct ipv6hdr *iph = ipv6_hdr(skb);
+       struct tcphdr *th;
+
+       skb_set_transport_header(skb, sizeof(struct ipv6hdr));
+       th = tcp_hdr(skb);
+
+       th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
+                                 &iph->saddr, &iph->daddr, 0);
+       tcp_gro_complete(skb);
+}
+#endif
+
+static void qede_gro_receive(struct qede_dev *edev,
+                            struct qede_fastpath *fp,
+                            struct sk_buff *skb,
+                            u16 vlan_tag)
+{
+       /* The FW can send a single MTU-sized packet from the gro flow due
+        * to an aggregation timeout, last segment, etc., which is not
+        * expected to be a gro packet. If an skb has zero frags then simply
+        * push it to the stack as a non-gso skb.
+        */
+       if (unlikely(!skb->data_len)) {
+               skb_shinfo(skb)->gso_type = 0;
+               skb_shinfo(skb)->gso_size = 0;
+               goto send_skb;
+       }
+
+#ifdef CONFIG_INET
+       if (skb_shinfo(skb)->gso_size) {
+               skb_reset_network_header(skb);
+
+               switch (skb->protocol) {
+               case htons(ETH_P_IP):
+                       qede_gro_ip_csum(skb);
+                       break;
+               case htons(ETH_P_IPV6):
+                       qede_gro_ipv6_csum(skb);
+                       break;
+               default:
+                       DP_ERR(edev,
+                              "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
+                              ntohs(skb->protocol));
+               }
+       }
+#endif
+
+send_skb:
+       skb_record_rx_queue(skb, fp->rxq->rxq_id);
+       qede_skb_receive(edev, fp, fp->rxq, skb, vlan_tag);
+}
+
+static inline void qede_tpa_cont(struct qede_dev *edev,
+                                struct qede_rx_queue *rxq,
+                                struct eth_fast_path_rx_tpa_cont_cqe *cqe)
+{
+       int i;
+
+       for (i = 0; cqe->len_list[i]; i++)
+               qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
+                                  le16_to_cpu(cqe->len_list[i]));
+
+       if (unlikely(i > 1))
+               DP_ERR(edev,
+                      "Strange - TPA cont with more than a single len_list entry\n");
+}
+
+static int qede_tpa_end(struct qede_dev *edev,
+                       struct qede_fastpath *fp,
+                       struct eth_fast_path_rx_tpa_end_cqe *cqe)
+{
+       struct qede_rx_queue *rxq = fp->rxq;
+       struct qede_agg_info *tpa_info;
+       struct sk_buff *skb;
+       int i;
+
+       tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
+       skb = tpa_info->skb;
+
+       for (i = 0; cqe->len_list[i]; i++)
+               qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
+                                  le16_to_cpu(cqe->len_list[i]));
+       if (unlikely(i > 1))
+               DP_ERR(edev,
+                      "Strange - TPA end with more than a single len_list entry\n");
+
+       if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
+               goto err;
+
+       /* Sanity */
+       if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1))
+               DP_ERR(edev,
+                      "Strange - TPA had %02x BDs, but SKB has only %d frags\n",
+                      cqe->num_of_bds, tpa_info->frag_id);
+       if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len)))
+               DP_ERR(edev,
+                      "Strange - total packet len [cqe] is %4x but SKB has len %04x\n",
+                      le16_to_cpu(cqe->total_packet_len), skb->len);
+
+       memcpy(skb->data,
+              page_address(tpa_info->buffer.data) +
+              tpa_info->start_cqe_placement_offset +
+              tpa_info->buffer.page_offset, tpa_info->start_cqe_bd_len);
+
+       /* Finalize the SKB */
+       skb->protocol = eth_type_trans(skb, edev->ndev);
+       skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+       /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
+        * to skb_shinfo(skb)->gso_segs
+        */
+       NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs);
+
+       qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag);
+
+       tpa_info->state = QEDE_AGG_STATE_NONE;
+
+       return 1;
+err:
+       tpa_info->state = QEDE_AGG_STATE_NONE;
+       dev_kfree_skb_any(tpa_info->skb);
+       tpa_info->skb = NULL;
+       return 0;
+}
+
+static u8 qede_check_notunn_csum(u16 flag)
+{
+       u16 csum_flag = 0;
+       u8 csum = 0;
+
+       if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+                   PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
+               csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
+                            PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
+               csum = QEDE_CSUM_UNNECESSARY;
+       }
+
+       csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
+                    PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
+
+       if (csum_flag & flag)
+               return QEDE_CSUM_ERROR;
+
+       return csum;
+}
+
+static u8 qede_check_csum(u16 flag)
+{
+       if (!qede_tunn_exist(flag))
+               return qede_check_notunn_csum(flag);
+       else
+               return qede_check_tunn_csum(flag);
+}
+
+static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe,
+                                     u16 flag)
+{
+       u8 tun_pars_flg = cqe->tunnel_pars_flags.flags;
+
+       if ((tun_pars_flg & (ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK <<
+                            ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT)) ||
+           (flag & (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
+                    PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT)))
+               return true;
+
+       return false;
+}
+
+/* Return true iff packet is to be passed to stack */
+static bool qede_rx_xdp(struct qede_dev *edev,
+                       struct qede_fastpath *fp,
+                       struct qede_rx_queue *rxq,
+                       struct bpf_prog *prog,
+                       struct sw_rx_data *bd,
+                       struct eth_fast_path_rx_reg_cqe *cqe,
+                       u16 *data_offset, u16 *len)
+{
+       struct xdp_buff xdp;
+       enum xdp_action act;
+
+       xdp.data_hard_start = page_address(bd->data);
+       xdp.data = xdp.data_hard_start + *data_offset;
+       xdp.data_end = xdp.data + *len;
+
+       /* Queues always undergo a full reset currently, so for the time
+        * being, until there's an atomic program replace, just mark the
+        * read side for the map helpers.
+        */
+       rcu_read_lock();
+       act = bpf_prog_run_xdp(prog, &xdp);
+       rcu_read_unlock();
+
+       /* Recalculate, as XDP might have changed the headers */
+       *data_offset = xdp.data - xdp.data_hard_start;
+       *len = xdp.data_end - xdp.data;
+
+       if (act == XDP_PASS)
+               return true;
+
+       /* Count number of packets not to be passed to stack */
+       rxq->xdp_no_pass++;
+
+       switch (act) {
+       case XDP_TX:
+               /* We need the replacement buffer before transmit. */
+               if (qede_alloc_rx_buffer(rxq, true)) {
+                       qede_recycle_rx_bd_ring(rxq, 1);
+                       return false;
+               }
+
+               /* Now, if there's a transmission problem we'd still have to
+                * throw away the current buffer, as the replacement was
+                * already allocated.
+                */
+               if (qede_xdp_xmit(edev, fp, bd, *data_offset, *len)) {
+                       dma_unmap_page(rxq->dev, bd->mapping,
+                                      PAGE_SIZE, DMA_BIDIRECTIONAL);
+                       __free_page(bd->data);
+               }
+
+               /* Regardless, we've consumed an Rx BD */
+               qede_rx_bd_ring_consume(rxq);
+               return false;
+
+       default:
+               bpf_warn_invalid_xdp_action(act);
+       case XDP_ABORTED:
+       case XDP_DROP:
+               qede_recycle_rx_bd_ring(rxq, cqe->bd_num);
+       }
+
+       return false;
+}
+
+static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev,
+                                           struct qede_rx_queue *rxq,
+                                           struct sw_rx_data *bd, u16 len,
+                                           u16 pad)
+{
+       unsigned int offset = bd->page_offset + pad;
+       struct skb_frag_struct *frag;
+       struct page *page = bd->data;
+       unsigned int pull_len;
+       struct sk_buff *skb;
+       unsigned char *va;
+
+       /* Allocate a new SKB with a sufficiently large header length */
+       skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
+       if (unlikely(!skb))
+               return NULL;
+
+       /* Copy data into the SKB - if it's small, we can simply copy it
+        * and re-use the already allocated & mapped memory.
+        */
+       if (len + pad <= edev->rx_copybreak) {
+               memcpy(skb_put(skb, len),
+                      page_address(page) + offset, len);
+               qede_reuse_page(rxq, bd);
+               goto out;
+       }
+
+       frag = &skb_shinfo(skb)->frags[0];
+
+       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+                       page, offset, len, rxq->rx_buf_seg_size);
+
+       va = skb_frag_address(frag);
+       pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE);
+
+       /* Align the pull_len to optimize memcpy */
+       memcpy(skb->data, va, ALIGN(pull_len, sizeof(long)));
+
+       /* Correct the skb & frag sizes offset after the pull */
+       skb_frag_size_sub(frag, pull_len);
+       frag->page_offset += pull_len;
+       skb->data_len -= pull_len;
+       skb->tail += pull_len;
+
+       if (unlikely(qede_realloc_rx_buffer(rxq, bd))) {
+               /* Incr page ref count to reuse on allocation failure so
+                * that it doesn't get freed while freeing the SKB [as it's
+                * already mapped there].
+                */
+               page_ref_inc(page);
+               dev_kfree_skb_any(skb);
+               return NULL;
+       }
+
+out:
+       /* We've consumed the first BD and prepared an SKB */
+       qede_rx_bd_ring_consume(rxq);
+       return skb;
+}
+
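+/* Map the remaining BDs of a multi-BD packet into the SKB frags.
+ * Returns 0 on success, or the number of BDs left unconsumed on
+ * failure - the caller recycles those.
+ */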
+static int qede_rx_build_jumbo(struct qede_dev *edev,
+                              struct qede_rx_queue *rxq,
+                              struct sk_buff *skb,
+                              struct eth_fast_path_rx_reg_cqe *cqe,
+                              u16 first_bd_len)
+{
+       u16 pkt_len = le16_to_cpu(cqe->pkt_len);
+       struct sw_rx_data *bd;
+       u16 bd_cons_idx;
+       u8 num_frags;
+
+       pkt_len -= first_bd_len;
+
+       /* We've already used one BD for the SKB. Now take care of the rest */
+       for (num_frags = cqe->bd_num - 1; num_frags > 0; num_frags--) {
+               u16 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :
+                   pkt_len;
+
+               if (unlikely(!cur_size)) {
+                       DP_ERR(edev,
+                              "Still got %d BDs for mapping jumbo, but length became 0\n",
+                              num_frags);
+                       goto out;
+               }
+
+               /* We need a replacement buffer for each BD */
+               if (unlikely(qede_alloc_rx_buffer(rxq, true)))
+                       goto out;
+
+               /* Now that we've allocated the replacement buffer,
+                * we can safely consume the next BD and map it to the SKB.
+                */
+               bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
+               bd = &rxq->sw_rx_ring[bd_cons_idx];
+               qede_rx_bd_ring_consume(rxq);
+
+               dma_unmap_page(rxq->dev, bd->mapping,
+                              PAGE_SIZE, DMA_FROM_DEVICE);
+
+               skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
+                                  bd->data, 0, cur_size);
+
+               skb->truesize += PAGE_SIZE;
+               skb->data_len += cur_size;
+               skb->len += cur_size;
+               pkt_len -= cur_size;
+       }
+
+       if (unlikely(pkt_len))
+               DP_ERR(edev,
+                      "Mapped all BDs of jumbo, but still have %d bytes\n",
+                      pkt_len);
+
+out:
+       return num_frags;
+}
+
+static int qede_rx_process_tpa_cqe(struct qede_dev *edev,
+                                  struct qede_fastpath *fp,
+                                  struct qede_rx_queue *rxq,
+                                  union eth_rx_cqe *cqe,
+                                  enum eth_rx_cqe_type type)
+{
+       switch (type) {
+       case ETH_RX_CQE_TYPE_TPA_START:
+               qede_tpa_start(edev, rxq, &cqe->fast_path_tpa_start);
+               return 0;
+       case ETH_RX_CQE_TYPE_TPA_CONT:
+               qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont);
+               return 0;
+       case ETH_RX_CQE_TYPE_TPA_END:
+               return qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end);
+       default:
+               return 0;
+       }
+}
+
+static int qede_rx_process_cqe(struct qede_dev *edev,
+                              struct qede_fastpath *fp,
+                              struct qede_rx_queue *rxq)
+{
+       struct bpf_prog *xdp_prog = READ_ONCE(rxq->xdp_prog);
+       struct eth_fast_path_rx_reg_cqe *fp_cqe;
+       u16 len, pad, bd_cons_idx, parse_flag;
+       enum eth_rx_cqe_type cqe_type;
+       union eth_rx_cqe *cqe;
+       struct sw_rx_data *bd;
+       struct sk_buff *skb;
+       __le16 flags;
+       u8 csum_flag;
+
+       /* Get the CQE from the completion ring */
+       cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
+       cqe_type = cqe->fast_path_regular.type;
+
+       /* Process an unlikely slowpath event */
+       if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
+               struct eth_slow_path_rx_cqe *sp_cqe;
+
+               sp_cqe = (struct eth_slow_path_rx_cqe *)cqe;
+               edev->ops->eth_cqe_completion(edev->cdev, fp->id, sp_cqe);
+               return 0;
+       }
+
+       /* Handle TPA CQEs */
+       if (cqe_type != ETH_RX_CQE_TYPE_REGULAR)
+               return qede_rx_process_tpa_cqe(edev, fp, rxq, cqe, cqe_type);
+
+       /* Get the data from the SW ring; consume it only once it's evident
+        * we won't recycle it.
+        */
+       bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
+       bd = &rxq->sw_rx_ring[bd_cons_idx];
+
+       fp_cqe = &cqe->fast_path_regular;
+       len = le16_to_cpu(fp_cqe->len_on_first_bd);
+       pad = fp_cqe->placement_offset + rxq->rx_headroom;
+
+       /* Run eBPF program if one is attached */
+       if (xdp_prog)
+               if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe,
+                                &pad, &len))
+                       return 0;
+
+       /* If this is an error packet then drop it */
+       flags = cqe->fast_path_regular.pars_flags.flags;
+       parse_flag = le16_to_cpu(flags);
+
+       csum_flag = qede_check_csum(parse_flag);
+       if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
+               if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag)) {
+                       rxq->rx_ip_frags++;
+               } else {
+                       DP_NOTICE(edev,
+                                 "CQE has error, flags = %x, dropping incoming packet\n",
+                                 parse_flag);
+                       rxq->rx_hw_errors++;
+                       qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
+                       return 0;
+               }
+       }
+
+       /* Basic validation passed; we need to prepare an SKB. This also
+        * guarantees that the first BD is finally consumed on success.
+        */
+       skb = qede_rx_allocate_skb(edev, rxq, bd, len, pad);
+       if (!skb) {
+               rxq->rx_alloc_errors++;
+               qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
+               return 0;
+       }
+
+       /* In case of a jumbo packet, several PAGE_SIZE'd buffers are pointed
+        * to by a single CQE.
+        */
+       if (fp_cqe->bd_num > 1) {
+               u16 unmapped_frags = qede_rx_build_jumbo(edev, rxq, skb,
+                                                        fp_cqe, len);
+
+               if (unlikely(unmapped_frags > 0)) {
+                       qede_recycle_rx_bd_ring(rxq, unmapped_frags);
+                       dev_kfree_skb_any(skb);
+                       return 0;
+               }
+       }
+
+       /* The SKB contains all the data. Now set the remaining metadata */
+       skb->protocol = eth_type_trans(skb, edev->ndev);
+       qede_get_rxhash(skb, fp_cqe->bitfields, fp_cqe->rss_hash);
+       qede_set_skb_csum(skb, csum_flag);
+       skb_record_rx_queue(skb, rxq->rxq_id);
+       qede_ptp_record_rx_ts(edev, cqe, skb);
+
+       /* SKB is prepared - pass it to stack */
+       qede_skb_receive(edev, fp, rxq, skb, le16_to_cpu(fp_cqe->vlan_tag));
+
+       return 1;
+}
+
+static int qede_rx_int(struct qede_fastpath *fp, int budget)
+{
+       struct qede_rx_queue *rxq = fp->rxq;
+       struct qede_dev *edev = fp->edev;
+       int work_done = 0, rcv_pkts = 0;
+       u16 hw_comp_cons, sw_comp_cons;
+
+       hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
+       sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+       /* Memory barrier to prevent the CPU from speculatively reading the
+        * CQE / BD in the while-loop before reading hw_comp_cons. If the CQE
+        * were read before FW wrote it, and FW then wrote the CQE and SB
+        * before the CPU read hw_comp_cons, the CPU would use a stale CQE.
+        */
+       rmb();
+
+       /* Loop to complete all indicated BDs */
+       while ((sw_comp_cons != hw_comp_cons) && (work_done < budget)) {
+               rcv_pkts += qede_rx_process_cqe(edev, fp, rxq);
+               qed_chain_recycle_consumed(&rxq->rx_comp_ring);
+               sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+               work_done++;
+       }
+
+       rxq->rcv_pkts += rcv_pkts;
+
+       /* Allocate replacement buffers */
+       while (rxq->num_rx_buffers - rxq->filled_buffers)
+               if (qede_alloc_rx_buffer(rxq, false))
+                       break;
+
+       /* Update producers */
+       qede_update_rx_prod(edev, rxq);
+
+       return work_done;
+}
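
qede_rx_int() is the usual budget-bounded NAPI consumer: walk CQEs from the
software consumer index toward the hardware one, stopping at the budget.
Note that work_done counts every CQE while rcv_pkts only counts skbs that
actually reached the stack (slowpath and TPA-start/continue CQEs return 0
from qede_rx_process_cqe()). A self-contained model of that bookkeeping
(the index values and budget are made up):

    #include <stdio.h>

    int main(void)
    {
            unsigned short sw_cons = 250, hw_cons = 260; /* assumed snapshot */
            int budget = 8, work_done = 0, rcv_pkts = 0;

            while (sw_cons != hw_cons && work_done < budget) {
                    /* pretend every CQE delivered a regular packet; in the
                     * driver this is qede_rx_process_cqe()'s return value
                     */
                    rcv_pkts += 1;
                    sw_cons++;
                    work_done++;
            }
            printf("work_done=%d rcv_pkts=%d\n", work_done, rcv_pkts);
            return 0;
    }
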
+
+static bool qede_poll_is_more_work(struct qede_fastpath *fp)
+{
+       qed_sb_update_sb_idx(fp->sb_info);
+
+       /* *_has_*_work() reads the status block, so we must ensure the
+        * status block indices have actually been read
+        * (qed_sb_update_sb_idx) prior to this check (*_has_*_work);
+        * otherwise we could write a "newer" status block value to HW.
+        * If a DMA arrived right after qede_has_rx_work() and there were
+        * no rmb, the memory read (qed_sb_update_sb_idx) could be postponed
+        * to right before *_ack_sb, in which case no further interrupt
+        * would arrive until the next status block update, despite
+        * unhandled work remaining.
+        */
+       rmb();
+
+       if (likely(fp->type & QEDE_FASTPATH_RX))
+               if (qede_has_rx_work(fp->rxq))
+                       return true;
+
+       if (fp->type & QEDE_FASTPATH_XDP)
+               if (qede_txq_has_work(fp->xdp_tx))
+                       return true;
+
+       if (likely(fp->type & QEDE_FASTPATH_TX))
+               if (qede_txq_has_work(fp->txq))
+                       return true;
+
+       return false;
+}
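
The ordering requirement above can be approximated in user space with C11
acquire semantics; this is only an analogy (the driver relies on rmb(), not
a C11 fence), with hw_prod standing in for a status-block index:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_ushort hw_prod;  /* advanced by the "device" */
    static unsigned short sw_cons; /* local to the polling thread */

    static bool has_more_work(void)
    {
            /* The acquire load pairs with the producer's release store,
             * the way the driver's rmb() keeps the status-block read
             * ahead of the *_has_*_work() checks.
             */
            return atomic_load_explicit(&hw_prod, memory_order_acquire)
                   != sw_cons;
    }

    int main(void)
    {
            atomic_store_explicit(&hw_prod, 1, memory_order_release);
            printf("more work: %d\n", has_more_work());
            return 0;
    }
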
+
+/*********************
+ * NDO & API related *
+ *********************/
+int qede_poll(struct napi_struct *napi, int budget)
+{
+       struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
+                                               napi);
+       struct qede_dev *edev = fp->edev;
+       int rx_work_done = 0;
+
+       if (likely(fp->type & QEDE_FASTPATH_TX) && qede_txq_has_work(fp->txq))
+               qede_tx_int(edev, fp->txq);
+
+       if ((fp->type & QEDE_FASTPATH_XDP) && qede_txq_has_work(fp->xdp_tx))
+               qede_xdp_tx_int(edev, fp->xdp_tx);
+
+       rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
+                       qede_has_rx_work(fp->rxq)) ?
+                       qede_rx_int(fp, budget) : 0;
+       if (rx_work_done < budget) {
+               if (!qede_poll_is_more_work(fp)) {
+                       napi_complete(napi);
+
+                       /* Update and reenable interrupts */
+                       qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
+               } else {
+                       rx_work_done = budget;
+               }
+       }
+
+       if (fp->xdp_xmit) {
+               u16 xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl);
+
+               fp->xdp_xmit = 0;
+               fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod);
+               qede_update_tx_producer(fp->xdp_tx);
+       }
+
+       return rx_work_done;
+}
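
Note how the XDP transmit doorbell is rung at most once per poll: frames
forwarded during RX processing only produce BDs and set fp->xdp_xmit, and
the producer is written out here after the whole batch. A toy model of that
batching (all names and counts invented for illustration):

    #include <stdio.h>

    struct txq_model {
            unsigned short prod; /* BDs produced so far */
            int doorbells;       /* MMIO writes issued */
    };

    int main(void)
    {
            struct txq_model txq = { 0, 0 };
            int xdp_xmit = 0, frame;

            for (frame = 0; frame < 16; frame++) { /* frames in one poll */
                    txq.prod++;  /* produce a BD, but no doorbell yet */
                    xdp_xmit = 1;
            }
            if (xdp_xmit) /* end of poll: one write covers all pending BDs */
                    txq.doorbells++;

            printf("%u BDs, %d doorbell(s)\n", txq.prod, txq.doorbells);
            return 0;
    }
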
+
+irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
+{
+       struct qede_fastpath *fp = fp_cookie;
+
+       qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);
+
+       napi_schedule_irqoff(&fp->napi);
+       return IRQ_HANDLED;
+}
+
+/* Main transmit function */
+netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+       struct qede_dev *edev = netdev_priv(ndev);
+       struct netdev_queue *netdev_txq;
+       struct qede_tx_queue *txq;
+       struct eth_tx_1st_bd *first_bd;
+       struct eth_tx_2nd_bd *second_bd = NULL;
+       struct eth_tx_3rd_bd *third_bd = NULL;
+       struct eth_tx_bd *tx_data_bd = NULL;
+       u16 txq_index;
+       u8 nbd = 0;
+       dma_addr_t mapping;
+       int rc, frag_idx = 0, ipv6_ext = 0;
+       u8 xmit_type;
+       u16 idx;
+       u16 hlen;
+       bool data_split = false;
+
+       /* Get tx-queue context and netdev index */
+       txq_index = skb_get_queue_mapping(skb);
+       WARN_ON(txq_index >= QEDE_TSS_COUNT(edev));
+       txq = edev->fp_array[edev->fp_num_rx + txq_index].txq;
+       netdev_txq = netdev_get_tx_queue(ndev, txq_index);
+
+       WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1));
+
+       xmit_type = qede_xmit_type(skb, &ipv6_ext);
+
+#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
+       if (qede_pkt_req_lin(skb, xmit_type)) {
+               if (skb_linearize(skb)) {
+                       DP_NOTICE(edev,
+                                 "SKB linearization failed - silently dropping this SKB\n");
+                       dev_kfree_skb_any(skb);
+                       return NETDEV_TX_OK;
+               }
+       }
+#endif
+
+       /* Fill the entry in the SW ring and the BDs in the FW ring */
+       idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+       txq->sw_tx_ring.skbs[idx].skb = skb;
+       first_bd = (struct eth_tx_1st_bd *)
+                  qed_chain_produce(&txq->tx_pbl);
+       memset(first_bd, 0, sizeof(*first_bd));
+       first_bd->data.bd_flags.bitfields =
+               1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
+
+       if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+               qede_ptp_tx_ts(edev, skb);
+
+       /* Map skb linear data for DMA and set in the first BD */
+       mapping = dma_map_single(txq->dev, skb->data,
+                                skb_headlen(skb), DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(txq->dev, mapping))) {
+               DP_NOTICE(edev, "SKB mapping failed\n");
+               qede_free_failed_tx_pkt(txq, first_bd, 0, false);
+               qede_update_tx_producer(txq);
+               return NETDEV_TX_OK;
+       }
+       nbd++;
+       BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
+
+       /* In case there is IPv6 with extension headers or LSO, we need the
+        * 2nd and 3rd BDs.
+        */
+       if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) {
+               second_bd = (struct eth_tx_2nd_bd *)
+                       qed_chain_produce(&txq->tx_pbl);
+               memset(second_bd, 0, sizeof(*second_bd));
+
+               nbd++;
+               third_bd = (struct eth_tx_3rd_bd *)
+                       qed_chain_produce(&txq->tx_pbl);
+               memset(third_bd, 0, sizeof(*third_bd));
+
+               nbd++;
+               /* We need to fill in additional data in second_bd... */
+               tx_data_bd = (struct eth_tx_bd *)second_bd;
+       }
+
+       if (skb_vlan_tag_present(skb)) {
+               first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
+               first_bd->data.bd_flags.bitfields |=
+                       1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
+       }
+
+       /* Fill the parsing flags & params according to the requested offload */
+       if (xmit_type & XMIT_L4_CSUM) {
+               /* We don't re-calculate IP checksum as it is already done by
+                * the upper stack
+                */
+               first_bd->data.bd_flags.bitfields |=
+                       1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
+
+               if (xmit_type & XMIT_ENC) {
+                       first_bd->data.bd_flags.bitfields |=
+                               1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+                       first_bd->data.bitfields |=
+                           1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
+               }
+
+               /* Legacy FW had flipped behavior with regard to this bit -
+                * i.e., it needed to be set in order to prevent FW from
+                * touching encapsulated packets when it didn't need to.
+                */
+               if (unlikely(txq->is_legacy))
+                       first_bd->data.bitfields ^=
+                           1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
+
+               /* If the packet is IPv6 with an extension header, indicate
+                * that to FW and pass a few params, since the device cracker
+                * doesn't support parsing IPv6 with extension header(s).
+                */
+               if (unlikely(ipv6_ext))
+                       qede_set_params_for_ipv6_ext(skb, second_bd, third_bd);
+       }
+
+       if (xmit_type & XMIT_LSO) {
+               first_bd->data.bd_flags.bitfields |=
+                       (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
+               third_bd->data.lso_mss =
+                       cpu_to_le16(skb_shinfo(skb)->gso_size);
+
+               if (unlikely(xmit_type & XMIT_ENC)) {
+                       first_bd->data.bd_flags.bitfields |=
+                               1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
+
+                       if (xmit_type & XMIT_ENC_GSO_L4_CSUM) {
+                               u8 tmp = ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
+
+                               first_bd->data.bd_flags.bitfields |= 1 << tmp;
+                       }
+                       hlen = qede_get_skb_hlen(skb, true);
+               } else {
+                       first_bd->data.bd_flags.bitfields |=
+                               1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+                       hlen = qede_get_skb_hlen(skb, false);
+               }
+
+               /* @@@TBD - if will not be removed need to check */
+               third_bd->data.bitfields |=
+                       cpu_to_le16(1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
+
+               /* Make life easier for FW, which can't handle header and
+                * data on the same BD. If we need to split, keep the header
+                * in the first BD and move the remainder to the second BD...
+                */
+               if (unlikely(skb_headlen(skb) > hlen)) {
+                       DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
+                                  "TSO split header size is %d (%x:%x)\n",
+                                  first_bd->nbytes, first_bd->addr.hi,
+                                  first_bd->addr.lo);
+
+                       mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi),
+                                          le32_to_cpu(first_bd->addr.lo)) +
+                                          hlen;
+
+                       BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping,
+                                             le16_to_cpu(first_bd->nbytes) -
+                                             hlen);
+
+                       /* this marks the BD as one that has no
+                        * individual mapping
+                        */
+                       txq->sw_tx_ring.skbs[idx].flags |= QEDE_TSO_SPLIT_BD;
+
+                       first_bd->nbytes = cpu_to_le16(hlen);
+
+                       tx_data_bd = (struct eth_tx_bd *)third_bd;
+                       data_split = true;
+               }
+       } else {
+               first_bd->data.bitfields |=
+                   (skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
+                   ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
+       }
+
+       /* Handle a fragmented skb: frags that land inside the 2nd and 3rd
+        * BDs need special handling.
+        */
+       while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
+               rc = map_frag_to_bd(txq,
+                                   &skb_shinfo(skb)->frags[frag_idx],
+                                   tx_data_bd);
+               if (rc) {
+                       qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
+                       qede_update_tx_producer(txq);
+                       return NETDEV_TX_OK;
+               }
+
+               if (tx_data_bd == (struct eth_tx_bd *)second_bd)
+                       tx_data_bd = (struct eth_tx_bd *)third_bd;
+               else
+                       tx_data_bd = NULL;
+
+               frag_idx++;
+       }
+
+       /* Map the remaining frags into the 4th, 5th, ... BDs */
+       for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) {
+               tx_data_bd = (struct eth_tx_bd *)
+                            qed_chain_produce(&txq->tx_pbl);
+
+               memset(tx_data_bd, 0, sizeof(*tx_data_bd));
+
+               rc = map_frag_to_bd(txq,
+                                   &skb_shinfo(skb)->frags[frag_idx],
+                                   tx_data_bd);
+               if (rc) {
+                       qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
+                       qede_update_tx_producer(txq);
+                       return NETDEV_TX_OK;
+               }
+       }
+
+       /* update the first BD with the actual num BDs */
+       first_bd->data.nbds = nbd;
+
+       netdev_tx_sent_queue(netdev_txq, skb->len);
+
+       skb_tx_timestamp(skb);
+
+       /* Advance the packet producer only now that the whole packet has
+        * been mapped, since mapping of pages may fail.
+        */
+       txq->sw_tx_prod++;
+
+       /* 'next page' entries are counted in the producer value */
+       txq->tx_db.data.bd_prod =
+               cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
+
+       if (!skb->xmit_more || netif_xmit_stopped(netdev_txq))
+               qede_update_tx_producer(txq);
+
+       if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
+                     < (MAX_SKB_FRAGS + 1))) {
+               if (skb->xmit_more)
+                       qede_update_tx_producer(txq);
+
+               netif_tx_stop_queue(netdev_txq);
+               txq->stopped_cnt++;
+               DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
+                          "Stop queue was called\n");
+               /* The paired memory barrier is in qede_tx_int(); we must
+                * keep the ordering between the set_bit() in
+                * netif_tx_stop_queue() and the read of fp->bd_tx_cons.
+                */
+               smp_mb();
+
+               if ((qed_chain_get_elem_left(&txq->tx_pbl) >=
+                    (MAX_SKB_FRAGS + 1)) &&
+                   (edev->state == QEDE_STATE_OPEN)) {
+                       netif_tx_wake_queue(netdev_txq);
+                       DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
+                                  "Wake queue was called\n");
+               }
+       }
+
+       return NETDEV_TX_OK;
+}
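
The MAX_SKB_FRAGS + 1 threshold used in both the WARN_ON() and the
stop/wake logic above is the worst-case BD count of a non-LSO packet: one
BD for the linear data plus one per page fragment. A trivial standalone
version of the check (MAX_SKB_FRAGS_MODEL = 17 is an assumption matching
common 4K-page kernel configs):

    #include <stdio.h>

    #define MAX_SKB_FRAGS_MODEL 17 /* assumed MAX_SKB_FRAGS */

    int main(void)
    {
            int elem_left = 20; /* free ring entries after this xmit */

            if (elem_left < MAX_SKB_FRAGS_MODEL + 1)
                    printf("stop queue (%d entries left)\n", elem_left);
            else
                    printf("queue stays awake (%d entries left)\n",
                           elem_left);
            return 0;
    }
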
+
+/* 8B udp header + 8B base tunnel header + 32B option length */
+#define QEDE_MAX_TUN_HDR_LEN 48
+
+netdev_features_t qede_features_check(struct sk_buff *skb,
+                                     struct net_device *dev,
+                                     netdev_features_t features)
+{
+       if (skb->encapsulation) {
+               u8 l4_proto = 0;
+
+               switch (vlan_get_protocol(skb)) {
+               case htons(ETH_P_IP):
+                       l4_proto = ip_hdr(skb)->protocol;
+                       break;
+               case htons(ETH_P_IPV6):
+                       l4_proto = ipv6_hdr(skb)->nexthdr;
+                       break;
+               default:
+                       return features;
+               }
+
+               /* Disable offloads for geneve tunnels, as HW can't parse
+                * a geneve header whose option length exceeds 32B.
+                */
+               if ((l4_proto == IPPROTO_UDP) &&
+                   ((skb_inner_mac_header(skb) -
+                     skb_transport_header(skb)) > QEDE_MAX_TUN_HDR_LEN))
+                       return features & ~(NETIF_F_CSUM_MASK |
+                                           NETIF_F_GSO_MASK);
+       }
+
+       return features;
+}
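
The 48-byte bound is simply 8 (UDP) + 8 (base tunnel header) + 32 (maximum
option length the HW can parse). A sketch of the same comparison, with
made-up header offsets standing in for the skb header pointers:

    #include <stdio.h>

    #define MAX_TUN_HDR_LEN 48 /* 8B UDP + 8B base header + 32B options */

    int main(void)
    {
            /* assumed offsets into a frame: outer L4 header at byte 34,
             * inner MAC header at byte 94 -> 60B of encapsulation headers
             */
            int transport_off = 34, inner_mac_off = 94;
            int tun_hdr_len = inner_mac_off - transport_off;

            if (tun_hdr_len > MAX_TUN_HDR_LEN)
                    printf("clear CSUM/GSO features (hdr %dB)\n",
                           tun_hdr_len);
            else
                    printf("keep offloads (hdr %dB)\n", tun_hdr_len);
            return 0;
    }
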
index 9544e4c4135901ee177393c6e2845a18e231fffb..56dfb51afdb04ad3d2ef5d833194bdafb56a01ee 100644 (file)
@@ -1,11 +1,34 @@
 /* QLogic qede NIC Driver
-* Copyright (c) 2015 QLogic Corporation
-*
-* This software is available under the terms of the GNU General Public License
-* (GPL) Version 2, available from the file COPYING in the main directory of
-* this source tree.
-*/
-
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/version.h>
 #include <linux/random.h>
 #include <net/ip6_checksum.h>
 #include <linux/bitops.h>
-
+#include <linux/vmalloc.h>
+#include <linux/qed/qede_roce.h>
 #include "qede.h"
+#include "qede_ptp.h"
 
 static char version[] =
        "QLogic FastLinQ 4xxxx Ethernet Driver qede " DRV_MODULE_VERSION "\n";
@@ -59,6 +84,8 @@ static const struct qed_eth_ops *qed_ops;
 #define CHIP_NUM_57980S_50             0x1654
 #define CHIP_NUM_57980S_25             0x1656
 #define CHIP_NUM_57980S_IOV            0x1664
+#define CHIP_NUM_AH                    0x8070
+#define CHIP_NUM_AH_IOV                        0x8090
 
 #ifndef PCI_DEVICE_ID_NX2_57980E
 #define PCI_DEVICE_ID_57980S_40                CHIP_NUM_57980S_40
@@ -68,6 +95,9 @@ static const struct qed_eth_ops *qed_ops;
 #define PCI_DEVICE_ID_57980S_50                CHIP_NUM_57980S_50
 #define PCI_DEVICE_ID_57980S_25                CHIP_NUM_57980S_25
 #define PCI_DEVICE_ID_57980S_IOV       CHIP_NUM_57980S_IOV
+#define PCI_DEVICE_ID_AH               CHIP_NUM_AH
+#define PCI_DEVICE_ID_AH_IOV           CHIP_NUM_AH_IOV
+
 #endif
 
 enum qede_pci_private {
@@ -84,6 +114,10 @@ static const struct pci_device_id qede_pci_tbl[] = {
        {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
 #ifdef CONFIG_QED_SRIOV
        {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
+#endif
+       {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH), QEDE_PRIVATE_PF},
+#ifdef CONFIG_QED_SRIOV
+       {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH_IOV), QEDE_PRIVATE_VF},
 #endif
        { 0 }
 };
@@ -94,11 +128,26 @@ static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
 
 #define TX_TIMEOUT             (5 * HZ)
 
+/* Utilize last protocol index for XDP */
+#define XDP_PI 11
+
 static void qede_remove(struct pci_dev *pdev);
-static int qede_alloc_rx_buffer(struct qede_dev *edev,
-                               struct qede_rx_queue *rxq);
+static void qede_shutdown(struct pci_dev *pdev);
 static void qede_link_update(void *dev, struct qed_link_output *link);
 
+/* The qede lock is used to protect driver state changes and driver flows
+ * that are not reentrant.
+ */
+void __qede_lock(struct qede_dev *edev)
+{
+       mutex_lock(&edev->qede_lock);
+}
+
+void __qede_unlock(struct qede_dev *edev)
+{
+       mutex_unlock(&edev->qede_lock);
+}
+
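
A user-space analogy of how these helpers are meant to be used; the reload
flow is invented for illustration, and a pthread mutex stands in for
edev->qede_lock:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t qede_lock = PTHREAD_MUTEX_INITIALIZER;
    static int state_open = 1; /* stands in for QEDE_STATE_OPEN */

    static void hypothetical_reload(void) /* invented, not a driver flow */
    {
            pthread_mutex_lock(&qede_lock);   /* __qede_lock() */
            if (state_open) {
                    /* a non-reentrant teardown/rebuild would run here,
                     * serialized against other state-changing flows
                     */
                    printf("reloading under the qede lock\n");
            }
            pthread_mutex_unlock(&qede_lock); /* __qede_unlock() */
    }

    int main(void)
    {
            hypothetical_reload();
            return 0;
    }
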
 #ifdef CONFIG_QED_SRIOV
 static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos)
 {
@@ -135,8 +184,12 @@ static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
 {
        struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
        struct qed_dev_info *qed_info = &edev->dev_info.common;
+       struct qed_update_vport_params *vport_params;
        int rc;
 
+       vport_params = vzalloc(sizeof(*vport_params));
+       if (!vport_params)
+               return -ENOMEM;
        DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);
 
        rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);
@@ -144,15 +197,13 @@ static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
        /* Enable/Disable Tx switching for PF */
        if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
            qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) {
-               struct qed_update_vport_params params;
-
-               memset(&params, 0, sizeof(params));
-               params.vport_id = 0;
-               params.update_tx_switching_flg = 1;
-               params.tx_switching_flg = num_vfs_param ? 1 : 0;
-               edev->ops->vport_update(edev->cdev, &params);
+               vport_params->vport_id = 0;
+               vport_params->update_tx_switching_flg = 1;
+               vport_params->tx_switching_flg = num_vfs_param ? 1 : 0;
+               edev->ops->vport_update(edev->cdev, vport_params);
        }
 
+       vfree(vport_params);
        return rc;
 }
 #endif
@@ -162,21 +213,17 @@ static struct pci_driver qede_pci_driver = {
        .id_table = qede_pci_tbl,
        .probe = qede_probe,
        .remove = qede_remove,
+       .shutdown = qede_shutdown,
 #ifdef CONFIG_QED_SRIOV
        .sriov_configure = qede_sriov_configure,
 #endif
 };
 
-static void qede_force_mac(void *dev, u8 *mac)
-{
-       struct qede_dev *edev = dev;
-
-       ether_addr_copy(edev->ndev->dev_addr, mac);
-       ether_addr_copy(edev->primary_mac, mac);
-}
-
 static struct qed_eth_cb_ops qede_ll_ops = {
        {
+#ifdef CONFIG_RFS_ACCEL
+               .arfs_filter_op = qede_arfs_filter_op,
+#endif
                .link_update = qede_link_update,
        },
        .force_mac = qede_force_mac,
@@ -189,8 +236,7 @@ static int qede_netdev_event(struct notifier_block *this, unsigned long event,
        struct ethtool_drvinfo drvinfo;
        struct qede_dev *edev;
 
-       /* Currently only support name change */
-       if (event != NETDEV_CHANGENAME)
+       if (event != NETDEV_CHANGENAME && event != NETDEV_CHANGEADDR)
                goto done;
 
        /* Check whether this is a qede device */
@@ -203,11 +249,18 @@ static int qede_netdev_event(struct notifier_block *this, unsigned long event,
                goto done;
        edev = netdev_priv(ndev);
 
-       /* Notify qed of the name change */
-       if (!edev->ops || !edev->ops->common)
-               goto done;
-       edev->ops->common->set_id(edev->cdev, edev->ndev->name,
-                                 "qede");
+       switch (event) {
+       case NETDEV_CHANGENAME:
+               /* Notify qed of the name change */
+               if (!edev->ops || !edev->ops->common)
+                       goto done;
+               edev->ops->common->set_id(edev->cdev, edev->ndev->name, "qede");
+               break;
+       case NETDEV_CHANGEADDR:
+               edev = netdev_priv(ndev);
+               qede_roce_event_changeaddr(edev);
+               break;
+       }
 
 done:
        return NOTIFY_DONE;
@@ -222,1606 +275,181 @@ int __init qede_init(void)
 {
        int ret;
 
-       pr_notice("qede_init: %s\n", version);
+       pr_info("qede_init: %s\n", version);
 
        qed_ops = qed_get_eth_ops();
-       if (!qed_ops) {
-               pr_notice("Failed to get qed ethtool operations\n");
-               return -EINVAL;
-       }
-
-       /* Must register notifier before pci ops, since we might miss
-        * interface rename after pci probe and netdev registeration.
-        */
-       ret = register_netdevice_notifier(&qede_netdev_notifier);
-       if (ret) {
-               pr_notice("Failed to register netdevice_notifier\n");
-               qed_put_eth_ops();
-               return -EINVAL;
-       }
-
-       ret = pci_register_driver(&qede_pci_driver);
-       if (ret) {
-               pr_notice("Failed to register driver\n");
-               unregister_netdevice_notifier(&qede_netdev_notifier);
-               qed_put_eth_ops();
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static void __exit qede_cleanup(void)
-{
-       pr_notice("qede_cleanup called\n");
-
-       unregister_netdevice_notifier(&qede_netdev_notifier);
-       pci_unregister_driver(&qede_pci_driver);
-       qed_put_eth_ops();
-}
-
-module_init(qede_init);
-module_exit(qede_cleanup);
-
-/* -------------------------------------------------------------------------
- * START OF FAST-PATH
- * -------------------------------------------------------------------------
- */
-
-/* Unmap the data and free skb */
-static int qede_free_tx_pkt(struct qede_dev *edev,
-                           struct qede_tx_queue *txq,
-                           int *len)
-{
-       u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
-       struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
-       struct eth_tx_1st_bd *first_bd;
-       struct eth_tx_bd *tx_data_bd;
-       int bds_consumed = 0;
-       int nbds;
-       bool data_split = txq->sw_tx_ring[idx].flags & QEDE_TSO_SPLIT_BD;
-       int i, split_bd_len = 0;
-
-       if (unlikely(!skb)) {
-               DP_ERR(edev,
-                      "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n",
-                      idx, txq->sw_tx_cons, txq->sw_tx_prod);
-               return -1;
-       }
-
-       *len = skb->len;
-
-       first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
-
-       bds_consumed++;
-
-       nbds = first_bd->data.nbds;
-
-       if (data_split) {
-               struct eth_tx_bd *split = (struct eth_tx_bd *)
-                       qed_chain_consume(&txq->tx_pbl);
-               split_bd_len = BD_UNMAP_LEN(split);
-               bds_consumed++;
-       }
-       dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
-                      BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
-
-       /* Unmap the data of the skb frags */
-       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
-               tx_data_bd = (struct eth_tx_bd *)
-                       qed_chain_consume(&txq->tx_pbl);
-               dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
-                              BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
-       }
-
-       while (bds_consumed++ < nbds)
-               qed_chain_consume(&txq->tx_pbl);
-
-       /* Free skb */
-       dev_kfree_skb_any(skb);
-       txq->sw_tx_ring[idx].skb = NULL;
-       txq->sw_tx_ring[idx].flags = 0;
-
-       return 0;
-}
-
-/* Unmap the data and free skb when mapping failed during start_xmit */
-static void qede_free_failed_tx_pkt(struct qede_dev *edev,
-                                   struct qede_tx_queue *txq,
-                                   struct eth_tx_1st_bd *first_bd,
-                                   int nbd,
-                                   bool data_split)
-{
-       u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
-       struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
-       struct eth_tx_bd *tx_data_bd;
-       int i, split_bd_len = 0;
-
-       /* Return prod to its position before this skb was handled */
-       qed_chain_set_prod(&txq->tx_pbl,
-                          le16_to_cpu(txq->tx_db.data.bd_prod),
-                          first_bd);
-
-       first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
-
-       if (data_split) {
-               struct eth_tx_bd *split = (struct eth_tx_bd *)
-                                         qed_chain_produce(&txq->tx_pbl);
-               split_bd_len = BD_UNMAP_LEN(split);
-               nbd--;
-       }
-
-       dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
-                      BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
-
-       /* Unmap the data of the skb frags */
-       for (i = 0; i < nbd; i++) {
-               tx_data_bd = (struct eth_tx_bd *)
-                       qed_chain_produce(&txq->tx_pbl);
-               if (tx_data_bd->nbytes)
-                       dma_unmap_page(&edev->pdev->dev,
-                                      BD_UNMAP_ADDR(tx_data_bd),
-                                      BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
-       }
-
-       /* Return again prod to its position before this skb was handled */
-       qed_chain_set_prod(&txq->tx_pbl,
-                          le16_to_cpu(txq->tx_db.data.bd_prod),
-                          first_bd);
-
-       /* Free skb */
-       dev_kfree_skb_any(skb);
-       txq->sw_tx_ring[idx].skb = NULL;
-       txq->sw_tx_ring[idx].flags = 0;
-}
-
-static u32 qede_xmit_type(struct qede_dev *edev,
-                         struct sk_buff *skb,
-                         int *ipv6_ext)
-{
-       u32 rc = XMIT_L4_CSUM;
-       __be16 l3_proto;
-
-       if (skb->ip_summed != CHECKSUM_PARTIAL)
-               return XMIT_PLAIN;
-
-       l3_proto = vlan_get_protocol(skb);
-       if (l3_proto == htons(ETH_P_IPV6) &&
-           (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
-               *ipv6_ext = 1;
-
-       if (skb->encapsulation)
-               rc |= XMIT_ENC;
-
-       if (skb_is_gso(skb))
-               rc |= XMIT_LSO;
-
-       return rc;
-}
-
-static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
-                                        struct eth_tx_2nd_bd *second_bd,
-                                        struct eth_tx_3rd_bd *third_bd)
-{
-       u8 l4_proto;
-       u16 bd2_bits1 = 0, bd2_bits2 = 0;
-
-       bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);
-
-       bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
-                    ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
-                   << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
-
-       bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
-                     ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);
-
-       if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
-               l4_proto = ipv6_hdr(skb)->nexthdr;
-       else
-               l4_proto = ip_hdr(skb)->protocol;
-
-       if (l4_proto == IPPROTO_UDP)
-               bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
-
-       if (third_bd)
-               third_bd->data.bitfields |=
-                       cpu_to_le16(((tcp_hdrlen(skb) / 4) &
-                               ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
-                               ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT);
-
-       second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1);
-       second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
-}
-
-static int map_frag_to_bd(struct qede_dev *edev,
-                         skb_frag_t *frag,
-                         struct eth_tx_bd *bd)
-{
-       dma_addr_t mapping;
-
-       /* Map skb non-linear frag data for DMA */
-       mapping = skb_frag_dma_map(&edev->pdev->dev, frag, 0,
-                                  skb_frag_size(frag),
-                                  DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
-               DP_NOTICE(edev, "Unable to map frag - dropping packet\n");
-               return -ENOMEM;
-       }
-
-       /* Setup the data pointer of the frag data */
-       BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));
-
-       return 0;
-}
-
-static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
-{
-       if (is_encap_pkt)
-               return (skb_inner_transport_header(skb) +
-                       inner_tcp_hdrlen(skb) - skb->data);
-       else
-               return (skb_transport_header(skb) +
-                       tcp_hdrlen(skb) - skb->data);
-}
-
-/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
-#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
-static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
-                            u8 xmit_type)
-{
-       int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1;
-
-       if (xmit_type & XMIT_LSO) {
-               int hlen;
-
-               hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC);
-
-               /* linear payload would require its own BD */
-               if (skb_headlen(skb) > hlen)
-                       allowed_frags--;
-       }
-
-       return (skb_shinfo(skb)->nr_frags > allowed_frags);
-}
-#endif
-
-static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
-{
-       /* wmb makes sure that the BDs data is updated before updating the
-        * producer, otherwise FW may read old data from the BDs.
-        */
-       wmb();
-       barrier();
-       writel(txq->tx_db.raw, txq->doorbell_addr);
-
-       /* mmiowb is needed to synchronize doorbell writes from more than one
-        * processor. It guarantees that the write arrives to the device before
-        * the queue lock is released and another start_xmit is called (possibly
-        * on another CPU). Without this barrier, the next doorbell can bypass
-        * this doorbell. This is applicable to IA64/Altix systems.
-        */
-       mmiowb();
-}
-
-/* Main transmit function */
-static
-netdev_tx_t qede_start_xmit(struct sk_buff *skb,
-                           struct net_device *ndev)
-{
-       struct qede_dev *edev = netdev_priv(ndev);
-       struct netdev_queue *netdev_txq;
-       struct qede_tx_queue *txq;
-       struct eth_tx_1st_bd *first_bd;
-       struct eth_tx_2nd_bd *second_bd = NULL;
-       struct eth_tx_3rd_bd *third_bd = NULL;
-       struct eth_tx_bd *tx_data_bd = NULL;
-       u16 txq_index;
-       u8 nbd = 0;
-       dma_addr_t mapping;
-       int rc, frag_idx = 0, ipv6_ext = 0;
-       u8 xmit_type;
-       u16 idx;
-       u16 hlen;
-       bool data_split = false;
-
-       /* Get tx-queue context and netdev index */
-       txq_index = skb_get_queue_mapping(skb);
-       WARN_ON(txq_index >= QEDE_TSS_CNT(edev));
-       txq = QEDE_TX_QUEUE(edev, txq_index);
-       netdev_txq = netdev_get_tx_queue(ndev, txq_index);
-
-       WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) <
-                              (MAX_SKB_FRAGS + 1));
-
-       xmit_type = qede_xmit_type(edev, skb, &ipv6_ext);
-
-#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
-       if (qede_pkt_req_lin(edev, skb, xmit_type)) {
-               if (skb_linearize(skb)) {
-                       DP_NOTICE(edev,
-                                 "SKB linearization failed - silently dropping this SKB\n");
-                       dev_kfree_skb_any(skb);
-                       return NETDEV_TX_OK;
-               }
-       }
-#endif
-
-       /* Fill the entry in the SW ring and the BDs in the FW ring */
-       idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
-       txq->sw_tx_ring[idx].skb = skb;
-       first_bd = (struct eth_tx_1st_bd *)
-                  qed_chain_produce(&txq->tx_pbl);
-       memset(first_bd, 0, sizeof(*first_bd));
-       first_bd->data.bd_flags.bitfields =
-               1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
-
-       /* Map skb linear data for DMA and set in the first BD */
-       mapping = dma_map_single(&edev->pdev->dev, skb->data,
-                                skb_headlen(skb), DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
-               DP_NOTICE(edev, "SKB mapping failed\n");
-               qede_free_failed_tx_pkt(edev, txq, first_bd, 0, false);
-               qede_update_tx_producer(txq);
-               return NETDEV_TX_OK;
-       }
-       nbd++;
-       BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
-
-       /* In case there is IPv6 with extension headers or LSO we need 2nd and
-        * 3rd BDs.
-        */
-       if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) {
-               second_bd = (struct eth_tx_2nd_bd *)
-                       qed_chain_produce(&txq->tx_pbl);
-               memset(second_bd, 0, sizeof(*second_bd));
-
-               nbd++;
-               third_bd = (struct eth_tx_3rd_bd *)
-                       qed_chain_produce(&txq->tx_pbl);
-               memset(third_bd, 0, sizeof(*third_bd));
-
-               nbd++;
-               /* We need to fill in additional data in second_bd... */
-               tx_data_bd = (struct eth_tx_bd *)second_bd;
-       }
-
-       if (skb_vlan_tag_present(skb)) {
-               first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
-               first_bd->data.bd_flags.bitfields |=
-                       1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
-       }
-
-       /* Fill the parsing flags & params according to the requested offload */
-       if (xmit_type & XMIT_L4_CSUM) {
-               /* We don't re-calculate IP checksum as it is already done by
-                * the upper stack
-                */
-               first_bd->data.bd_flags.bitfields |=
-                       1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
-
-               if (xmit_type & XMIT_ENC) {
-                       first_bd->data.bd_flags.bitfields |=
-                               1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
-                       first_bd->data.bitfields |=
-                           1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
-               }
-
-               /* If the packet is IPv6 with extension header, indicate that
-                * to FW and pass few params, since the device cracker doesn't
-                * support parsing IPv6 with extension header/s.
-                */
-               if (unlikely(ipv6_ext))
-                       qede_set_params_for_ipv6_ext(skb, second_bd, third_bd);
-       }
-
-       if (xmit_type & XMIT_LSO) {
-               first_bd->data.bd_flags.bitfields |=
-                       (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
-               third_bd->data.lso_mss =
-                       cpu_to_le16(skb_shinfo(skb)->gso_size);
-
-               if (unlikely(xmit_type & XMIT_ENC)) {
-                       first_bd->data.bd_flags.bitfields |=
-                               1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
-                       hlen = qede_get_skb_hlen(skb, true);
-               } else {
-                       first_bd->data.bd_flags.bitfields |=
-                               1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
-                       hlen = qede_get_skb_hlen(skb, false);
-               }
-
-               /* @@@TBD - if will not be removed need to check */
-               third_bd->data.bitfields |=
-                       cpu_to_le16((1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT));
-
-               /* Make life easier for FW guys who can't deal with header and
-                * data on same BD. If we need to split, use the second bd...
-                */
-               if (unlikely(skb_headlen(skb) > hlen)) {
-                       DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
-                                  "TSO split header size is %d (%x:%x)\n",
-                                  first_bd->nbytes, first_bd->addr.hi,
-                                  first_bd->addr.lo);
-
-                       mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi),
-                                          le32_to_cpu(first_bd->addr.lo)) +
-                                          hlen;
-
-                       BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping,
-                                             le16_to_cpu(first_bd->nbytes) -
-                                             hlen);
-
-                       /* this marks the BD as one that has no
-                        * individual mapping
-                        */
-                       txq->sw_tx_ring[idx].flags |= QEDE_TSO_SPLIT_BD;
-
-                       first_bd->nbytes = cpu_to_le16(hlen);
-
-                       tx_data_bd = (struct eth_tx_bd *)third_bd;
-                       data_split = true;
-               }
-       } else {
-               first_bd->data.bitfields |=
-                   (skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
-                   ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
-       }
-
-       /* Handle fragmented skb */
-       /* special handle for frags inside 2nd and 3rd bds.. */
-       while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
-               rc = map_frag_to_bd(edev,
-                                   &skb_shinfo(skb)->frags[frag_idx],
-                                   tx_data_bd);
-               if (rc) {
-                       qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
-                                               data_split);
-                       qede_update_tx_producer(txq);
-                       return NETDEV_TX_OK;
-               }
-
-               if (tx_data_bd == (struct eth_tx_bd *)second_bd)
-                       tx_data_bd = (struct eth_tx_bd *)third_bd;
-               else
-                       tx_data_bd = NULL;
-
-               frag_idx++;
-       }
-
-       /* map last frags into 4th, 5th .... */
-       for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) {
-               tx_data_bd = (struct eth_tx_bd *)
-                            qed_chain_produce(&txq->tx_pbl);
-
-               memset(tx_data_bd, 0, sizeof(*tx_data_bd));
-
-               rc = map_frag_to_bd(edev,
-                                   &skb_shinfo(skb)->frags[frag_idx],
-                                   tx_data_bd);
-               if (rc) {
-                       qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
-                                               data_split);
-                       qede_update_tx_producer(txq);
-                       return NETDEV_TX_OK;
-               }
-       }
-
-       /* update the first BD with the actual num BDs */
-       first_bd->data.nbds = nbd;
-
-       netdev_tx_sent_queue(netdev_txq, skb->len);
-
-       skb_tx_timestamp(skb);
-
-       /* Advance packet producer only before sending the packet since mapping
-        * of pages may fail.
-        */
-       txq->sw_tx_prod++;
-
-       /* 'next page' entries are counted in the producer value */
-       txq->tx_db.data.bd_prod =
-               cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
-
-       if (!skb->xmit_more || netif_xmit_stopped(netdev_txq))
-               qede_update_tx_producer(txq);
-
-       if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
-                     < (MAX_SKB_FRAGS + 1))) {
-               if (skb->xmit_more)
-                       qede_update_tx_producer(txq);
-
-               netif_tx_stop_queue(netdev_txq);
-               DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
-                          "Stop queue was called\n");
-               /* paired memory barrier is in qede_tx_int(), we have to keep
-                * ordering of set_bit() in netif_tx_stop_queue() and read of
-                * fp->bd_tx_cons
-                */
-               smp_mb();
-
-               if (qed_chain_get_elem_left(&txq->tx_pbl)
-                    >= (MAX_SKB_FRAGS + 1) &&
-                   (edev->state == QEDE_STATE_OPEN)) {
-                       netif_tx_wake_queue(netdev_txq);
-                       DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
-                                  "Wake queue was called\n");
-               }
-       }
-
-       return NETDEV_TX_OK;
-}
-
-int qede_txq_has_work(struct qede_tx_queue *txq)
-{
-       u16 hw_bd_cons;
-
-       /* Tell compiler that consumer and producer can change */
-       barrier();
-       hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
-       if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1)
-               return 0;
-
-       return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
-}
-
-static int qede_tx_int(struct qede_dev *edev,
-                      struct qede_tx_queue *txq)
-{
-       struct netdev_queue *netdev_txq;
-       u16 hw_bd_cons;
-       unsigned int pkts_compl = 0, bytes_compl = 0;
-       int rc;
-
-       netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);
-
-       hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
-       barrier();
-
-       while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
-               int len = 0;
-
-               rc = qede_free_tx_pkt(edev, txq, &len);
-               if (rc) {
-                       DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n",
-                                 hw_bd_cons,
-                                 qed_chain_get_cons_idx(&txq->tx_pbl));
-                       break;
-               }
-
-               bytes_compl += len;
-               pkts_compl++;
-               txq->sw_tx_cons++;
-       }
-
-       netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
-
-       /* Need to make the tx_bd_cons update visible to start_xmit()
-        * before checking for netif_tx_queue_stopped().  Without the
-        * memory barrier, there is a small possibility that
-        * start_xmit() will miss it and cause the queue to be stopped
-        * forever.
-        * On the other hand we need an rmb() here to ensure the proper
-        * ordering of bit testing in the following
-        * netif_tx_queue_stopped(txq) call.
-        */
-       smp_mb();
-
-       if (unlikely(netif_tx_queue_stopped(netdev_txq))) {
-               /* Taking tx_lock is needed to prevent reenabling the queue
-                * while it's empty. This could have happen if rx_action() gets
-                * suspended in qede_tx_int() after the condition before
-                * netif_tx_wake_queue(), while tx_action (qede_start_xmit()):
-                *
-                * stops the queue->sees fresh tx_bd_cons->releases the queue->
-                * sends some packets consuming the whole queue again->
-                * stops the queue
-                */
-
-               __netif_tx_lock(netdev_txq, smp_processor_id());
-
-               if ((netif_tx_queue_stopped(netdev_txq)) &&
-                   (edev->state == QEDE_STATE_OPEN) &&
-                   (qed_chain_get_elem_left(&txq->tx_pbl)
-                     >= (MAX_SKB_FRAGS + 1))) {
-                       netif_tx_wake_queue(netdev_txq);
-                       DP_VERBOSE(edev, NETIF_MSG_TX_DONE,
-                                  "Wake queue was called\n");
-               }
-
-               __netif_tx_unlock(netdev_txq);
-       }
-
-       return 0;
-}
-
-bool qede_has_rx_work(struct qede_rx_queue *rxq)
-{
-       u16 hw_comp_cons, sw_comp_cons;
-
-       /* Tell compiler that status block fields can change */
-       barrier();
-
-       hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
-       sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
-
-       return hw_comp_cons != sw_comp_cons;
-}
-
-static bool qede_has_tx_work(struct qede_fastpath *fp)
-{
-       u8 tc;
-
-       for (tc = 0; tc < fp->edev->num_tc; tc++)
-               if (qede_txq_has_work(&fp->txqs[tc]))
-                       return true;
-       return false;
-}
-
-static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
-{
-       qed_chain_consume(&rxq->rx_bd_ring);
-       rxq->sw_rx_cons++;
-}
-
-/* This function reuses the buffer(from an offset) from
- * consumer index to producer index in the bd ring
- */
-static inline void qede_reuse_page(struct qede_dev *edev,
-                                  struct qede_rx_queue *rxq,
-                                  struct sw_rx_data *curr_cons)
-{
-       struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
-       struct sw_rx_data *curr_prod;
-       dma_addr_t new_mapping;
-
-       curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
-       *curr_prod = *curr_cons;
-
-       new_mapping = curr_prod->mapping + curr_prod->page_offset;
-
-       rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping));
-       rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping));
-
-       rxq->sw_rx_prod++;
-       curr_cons->data = NULL;
-}
-
-/* In case of allocation failures reuse buffers
- * from consumer index to produce buffers for firmware
- */
-void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
-                            struct qede_dev *edev, u8 count)
-{
-       struct sw_rx_data *curr_cons;
-
-       for (; count > 0; count--) {
-               curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
-               qede_reuse_page(edev, rxq, curr_cons);
-               qede_rx_bd_ring_consume(rxq);
-       }
-}
-
-static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
-                                        struct qede_rx_queue *rxq,
-                                        struct sw_rx_data *curr_cons)
-{
-       /* Move to the next segment in the page */
-       curr_cons->page_offset += rxq->rx_buf_seg_size;
-
-       if (curr_cons->page_offset == PAGE_SIZE) {
-               if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
-                       /* Since we failed to allocate new buffer
-                        * current buffer can be used again.
-                        */
-                       curr_cons->page_offset -= rxq->rx_buf_seg_size;
-
-                       return -ENOMEM;
-               }
-
-               dma_unmap_page(&edev->pdev->dev, curr_cons->mapping,
-                              PAGE_SIZE, DMA_FROM_DEVICE);
-       } else {
-               /* Increment refcount of the page as we don't want
-                * network stack to take the ownership of the page
-                * which can be recycled multiple times by the driver.
-                */
-               page_ref_inc(curr_cons->data);
-               qede_reuse_page(edev, rxq, curr_cons);
-       }
-
-       return 0;
-}
-
-static inline void qede_update_rx_prod(struct qede_dev *edev,
-                                      struct qede_rx_queue *rxq)
-{
-       u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
-       u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
-       struct eth_rx_prod_data rx_prods = {0};
-
-       /* Update producers */
-       rx_prods.bd_prod = cpu_to_le16(bd_prod);
-       rx_prods.cqe_prod = cpu_to_le16(cqe_prod);
-
-       /* Make sure that the BD and SGE data is updated before updating the
-        * producers since FW might read the BD/SGE right after the producer
-        * is updated.
-        */
-       wmb();
-
-       internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
-                       (u32 *)&rx_prods);
-
-       /* mmiowb is needed to synchronize doorbell writes from more than one
-        * processor. It guarantees that the write arrives to the device before
-        * the napi lock is released and another qede_poll is called (possibly
-        * on another CPU). Without this barrier, the next doorbell can bypass
-        * this doorbell. This is applicable to IA64/Altix systems.
-        */
-       mmiowb();
-}
-
-static u32 qede_get_rxhash(struct qede_dev *edev,
-                          u8 bitfields,
-                          __le32 rss_hash,
-                          enum pkt_hash_types *rxhash_type)
-{
-       enum rss_hash_type htype;
-
-       htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
-
-       if ((edev->ndev->features & NETIF_F_RXHASH) && htype) {
-               *rxhash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
-                               (htype == RSS_HASH_TYPE_IPV6)) ?
-                               PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
-               return le32_to_cpu(rss_hash);
-       }
-       *rxhash_type = PKT_HASH_TYPE_NONE;
-       return 0;
-}
-
-static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
-{
-       skb_checksum_none_assert(skb);
-
-       if (csum_flag & QEDE_CSUM_UNNECESSARY)
-               skb->ip_summed = CHECKSUM_UNNECESSARY;
-
-       if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY)
-               skb->csum_level = 1;
-}
-
-static inline void qede_skb_receive(struct qede_dev *edev,
-                                   struct qede_fastpath *fp,
-                                   struct sk_buff *skb,
-                                   u16 vlan_tag)
-{
-       if (vlan_tag)
-               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
-                                      vlan_tag);
-
-       napi_gro_receive(&fp->napi, skb);
-}
-
-static void qede_set_gro_params(struct qede_dev *edev,
-                               struct sk_buff *skb,
-                               struct eth_fast_path_rx_tpa_start_cqe *cqe)
-{
-       u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags);
-
-       if (((parsing_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) &
-           PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == 2)
-               skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
-       else
-               skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
-
-       skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) -
-                                       cqe->header_len;
-}
-
-static int qede_fill_frag_skb(struct qede_dev *edev,
-                             struct qede_rx_queue *rxq,
-                             u8 tpa_agg_index,
-                             u16 len_on_bd)
-{
-       struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons &
-                                                        NUM_RX_BDS_MAX];
-       struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index];
-       struct sk_buff *skb = tpa_info->skb;
-
-       if (unlikely(tpa_info->agg_state != QEDE_AGG_STATE_START))
-               goto out;
-
-       /* Add one frag and update the appropriate fields in the skb */
-       skb_fill_page_desc(skb, tpa_info->frag_id++,
-                          current_bd->data, current_bd->page_offset,
-                          len_on_bd);
-
-       if (unlikely(qede_realloc_rx_buffer(edev, rxq, current_bd))) {
-               /* Increment the page ref count so the buffer can be reused
-                * after the allocation failure and isn't freed along with
-                * the SKB.
-                */
-               page_ref_inc(current_bd->data);
-               goto out;
-       }
-
-       qed_chain_consume(&rxq->rx_bd_ring);
-       rxq->sw_rx_cons++;
-
-       skb->data_len += len_on_bd;
-       skb->truesize += rxq->rx_buf_seg_size;
-       skb->len += len_on_bd;
-
-       return 0;
-
-out:
-       tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
-       qede_recycle_rx_bd_ring(rxq, edev, 1);
-       return -ENOMEM;
-}
-
-static void qede_tpa_start(struct qede_dev *edev,
-                          struct qede_rx_queue *rxq,
-                          struct eth_fast_path_rx_tpa_start_cqe *cqe)
-{
-       struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
-       struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
-       struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
-       struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
-       dma_addr_t mapping = tpa_info->replace_buf_mapping;
-       struct sw_rx_data *sw_rx_data_cons;
-       struct sw_rx_data *sw_rx_data_prod;
-       enum pkt_hash_types rxhash_type;
-       u32 rxhash;
-
-       sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
-       sw_rx_data_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
-
-       /* Use the pre-allocated replacement buffer - we can't release the
-        * aggregation's start buffer until the aggregation is over, and we
-        * don't want to risk an allocation failure here, so re-allocate once
-        * the aggregation completes.
-        */
-       sw_rx_data_prod->mapping = replace_buf->mapping;
-
-       sw_rx_data_prod->data = replace_buf->data;
-       rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(mapping));
-       rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(mapping));
-       sw_rx_data_prod->page_offset = replace_buf->page_offset;
-
-       rxq->sw_rx_prod++;
-
-       /* Move the partial skb from cons to the pool (don't unmap yet);
-        * save the mapping in case we drop the packet later on.
-        */
-       tpa_info->start_buf = *sw_rx_data_cons;
-       mapping = HILO_U64(le32_to_cpu(rx_bd_cons->addr.hi),
-                          le32_to_cpu(rx_bd_cons->addr.lo));
-
-       tpa_info->start_buf_mapping = mapping;
-       rxq->sw_rx_cons++;
-
-       /* Set the tpa state to start only if we are able to allocate an skb
-        * for this aggregation; otherwise mark it as error and the
-        * aggregation will be dropped.
-        */
-       tpa_info->skb = netdev_alloc_skb(edev->ndev,
-                                        le16_to_cpu(cqe->len_on_first_bd));
-       if (unlikely(!tpa_info->skb)) {
-               DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
-               tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
-               goto cons_buf;
-       }
-
-       skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
-       memcpy(&tpa_info->start_cqe, cqe, sizeof(tpa_info->start_cqe));
-
-       /* Start filling in the aggregation info */
-       tpa_info->frag_id = 0;
-       tpa_info->agg_state = QEDE_AGG_STATE_START;
-
-       rxhash = qede_get_rxhash(edev, cqe->bitfields,
-                                cqe->rss_hash, &rxhash_type);
-       skb_set_hash(tpa_info->skb, rxhash, rxhash_type);
-       if ((le16_to_cpu(cqe->pars_flags.flags) >>
-            PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) &
-                   PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK)
-               tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
-       else
-               tpa_info->vlan_tag = 0;
-
-       /* This is needed in order to enable forwarding support */
-       qede_set_gro_params(edev, tpa_info->skb, cqe);
-
-cons_buf: /* We still need to handle bd_len_list to consume buffers */
-       if (likely(cqe->ext_bd_len_list[0]))
-               qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
-                                  le16_to_cpu(cqe->ext_bd_len_list[0]));
-
-       if (unlikely(cqe->ext_bd_len_list[1])) {
-               DP_ERR(edev,
-                      "Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n");
-               tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
-       }
-}
-
-#ifdef CONFIG_INET
-static void qede_gro_ip_csum(struct sk_buff *skb)
-{
-       const struct iphdr *iph = ip_hdr(skb);
-       struct tcphdr *th;
-
-       skb_set_transport_header(skb, sizeof(struct iphdr));
-       th = tcp_hdr(skb);
-
-       th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
-                                 iph->saddr, iph->daddr, 0);
-
-       tcp_gro_complete(skb);
-}
-
-static void qede_gro_ipv6_csum(struct sk_buff *skb)
-{
-       struct ipv6hdr *iph = ipv6_hdr(skb);
-       struct tcphdr *th;
-
-       skb_set_transport_header(skb, sizeof(struct ipv6hdr));
-       th = tcp_hdr(skb);
-
-       th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
-                                 &iph->saddr, &iph->daddr, 0);
-       tcp_gro_complete(skb);
-}
-#endif
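
[Editor's note] Both helpers do the family-specific half of the same job: they seed th->check with the TCP pseudo-header checksum (the payload sum is passed as 0, hence the complement of tcp_v{4,6}_check()), which is the CHECKSUM_PARTIAL convention tcp_gro_complete() relies on when it finalizes the coalesced skb.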
-
-static void qede_gro_receive(struct qede_dev *edev,
-                            struct qede_fastpath *fp,
-                            struct sk_buff *skb,
-                            u16 vlan_tag)
-{
-       /* FW can send a single MTU-sized packet from the gro flow
-        * due to aggregation timeout/last segment etc., which is
-        * not expected to be a gro packet. If an skb has zero frags,
-        * simply push it into the stack as a non-gso skb.
-        */
-       if (unlikely(!skb->data_len)) {
-               skb_shinfo(skb)->gso_type = 0;
-               skb_shinfo(skb)->gso_size = 0;
-               goto send_skb;
-       }
-
-#ifdef CONFIG_INET
-       if (skb_shinfo(skb)->gso_size) {
-               skb_set_network_header(skb, 0);
-
-               switch (skb->protocol) {
-               case htons(ETH_P_IP):
-                       qede_gro_ip_csum(skb);
-                       break;
-               case htons(ETH_P_IPV6):
-                       qede_gro_ipv6_csum(skb);
-                       break;
-               default:
-                       DP_ERR(edev,
-                              "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
-                              ntohs(skb->protocol));
-               }
-       }
-#endif
-
-send_skb:
-       skb_record_rx_queue(skb, fp->rss_id);
-       qede_skb_receive(edev, fp, skb, vlan_tag);
-}
-
-static inline void qede_tpa_cont(struct qede_dev *edev,
-                                struct qede_rx_queue *rxq,
-                                struct eth_fast_path_rx_tpa_cont_cqe *cqe)
-{
-       int i;
-
-       for (i = 0; cqe->len_list[i]; i++)
-               qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
-                                  le16_to_cpu(cqe->len_list[i]));
-
-       if (unlikely(i > 1))
-               DP_ERR(edev,
-                      "Strange - TPA cont with more than a single len_list entry\n");
-}
-
-static void qede_tpa_end(struct qede_dev *edev,
-                        struct qede_fastpath *fp,
-                        struct eth_fast_path_rx_tpa_end_cqe *cqe)
-{
-       struct qede_rx_queue *rxq = fp->rxq;
-       struct qede_agg_info *tpa_info;
-       struct sk_buff *skb;
-       int i;
-
-       tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
-       skb = tpa_info->skb;
-
-       for (i = 0; cqe->len_list[i]; i++)
-               qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
-                                  le16_to_cpu(cqe->len_list[i]));
-       if (unlikely(i > 1))
-               DP_ERR(edev,
-                      "Strange - TPA emd with more than a single len_list entry\n");
-
-       if (unlikely(tpa_info->agg_state != QEDE_AGG_STATE_START))
-               goto err;
-
-       /* Sanity */
-       if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1))
-               DP_ERR(edev,
-                      "Strange - TPA had %02x BDs, but SKB has only %d frags\n",
-                      cqe->num_of_bds, tpa_info->frag_id);
-       if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len)))
-               DP_ERR(edev,
-                      "Strange - total packet len [cqe] is %4x but SKB has len %04x\n",
-                      le16_to_cpu(cqe->total_packet_len), skb->len);
-
-       memcpy(skb->data,
-              page_address(tpa_info->start_buf.data) +
-               tpa_info->start_cqe.placement_offset +
-               tpa_info->start_buf.page_offset,
-              le16_to_cpu(tpa_info->start_cqe.len_on_first_bd));
-
-       /* Recycle [mapped] start buffer for the next replacement */
-       tpa_info->replace_buf = tpa_info->start_buf;
-       tpa_info->replace_buf_mapping = tpa_info->start_buf_mapping;
-
-       /* Finalize the SKB */
-       skb->protocol = eth_type_trans(skb, edev->ndev);
-       skb->ip_summed = CHECKSUM_UNNECESSARY;
-
-       /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
-        * to skb_shinfo(skb)->gso_segs
-        */
-       NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs);
-
-       qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag);
-
-       tpa_info->agg_state = QEDE_AGG_STATE_NONE;
-
-       return;
-err:
-       /* The BD starting the aggregation is still mapped; Re-use it for
-        * future aggregations [as replacement buffer]
-        */
-       memcpy(&tpa_info->replace_buf, &tpa_info->start_buf,
-              sizeof(struct sw_rx_data));
-       tpa_info->replace_buf_mapping = tpa_info->start_buf_mapping;
-       tpa_info->start_buf.data = NULL;
-       tpa_info->agg_state = QEDE_AGG_STATE_NONE;
-       dev_kfree_skb_any(tpa_info->skb);
-       tpa_info->skb = NULL;
-}
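
[Editor's note] Seen together, qede_tpa_start(), qede_tpa_cont() and qede_tpa_end() drive a small per-aggregation state machine, indexed by tpa_agg_index. A condensed view (state names taken from the code above; the ERROR transitions are taken on allocation or consistency failures):

        /*
         * NONE --TPA_START--> START --TPA_CONT ...--> START --TPA_END--> NONE
         *                       |                        |
         *                       +-------- failure -------+--> ERROR --> NONE
         *
         * QEDE_AGG_STATE_NONE:  slot idle, no skb attached
         * QEDE_AGG_STATE_START: skb allocated, frags being appended
         * QEDE_AGG_STATE_ERROR: consume remaining CQEs, then drop the skb
         */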
-
-static bool qede_tunn_exist(u16 flag)
-{
-       return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
-                         PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT));
-}
-
-static u8 qede_check_tunn_csum(u16 flag)
-{
-       u16 csum_flag = 0;
-       u8 tcsum = 0;
-
-       if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
-                   PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT))
-               csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
-                            PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;
-
-       if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
-                   PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
-               csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
-                            PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
-               tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
-       }
-
-       csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
-                    PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
-                    PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
-                    PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
-
-       if (csum_flag & flag)
-               return QEDE_CSUM_ERROR;
-
-       return QEDE_CSUM_UNNECESSARY | tcsum;
-}
-
-static u8 qede_check_notunn_csum(u16 flag)
-{
-       u16 csum_flag = 0;
-       u8 csum = 0;
-
-       if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
-                   PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
-               csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
-                            PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
-               csum = QEDE_CSUM_UNNECESSARY;
-       }
-
-       csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
-                    PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
-
-       if (csum_flag & flag)
-               return QEDE_CSUM_ERROR;
-
-       return csum;
-}
-
-static u8 qede_check_csum(u16 flag)
-{
-       if (!qede_tunn_exist(flag))
-               return qede_check_notunn_csum(flag);
-       else
-               return qede_check_tunn_csum(flag);
-}
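
[Editor's note] All of these checksum checks use the idiom the firmware headers are written for: every field in the parsing-flags word comes as a _MASK/_SHIFT pair, and membership is tested by ANDing the flags with mask << shift. A hypothetical one-liner that captures it (the driver itself uses GET_FIELD for extraction elsewhere):

        /* Sketch: test whether a MASK/SHIFT-described field is non-zero. */
        #define FLAG_SET(flags, name) \
                (!!((flags) & ((name ## _MASK) << (name ## _SHIFT))))

        /* e.g. FLAG_SET(flag, PARSING_AND_ERR_FLAGS_TUNNELEXIST) is
         * equivalent to qede_tunn_exist() above.
         */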
-
-static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe,
-                                     u16 flag)
-{
-       u8 tun_pars_flg = cqe->tunnel_pars_flags.flags;
-
-       if ((tun_pars_flg & (ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK <<
-                            ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT)) ||
-           (flag & (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
-                    PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT)))
-               return true;
-
-       return false;
-}
-
-static int qede_rx_int(struct qede_fastpath *fp, int budget)
-{
-       struct qede_dev *edev = fp->edev;
-       struct qede_rx_queue *rxq = fp->rxq;
-
-       u16 hw_comp_cons, sw_comp_cons, sw_rx_index, parse_flag;
-       int rx_pkt = 0;
-       u8 csum_flag;
-
-       hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
-       sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
-
-       /* Memory barrier to prevent the CPU from speculatively reading the
-        * CQE/BD in the while-loop before reading hw_comp_cons. Without it,
-        * the CQE could be read before FW writes it: FW would then write the
-        * CQE and SB, the CPU would read hw_comp_cons, and the stale CQE
-        * would be used.
-        */
-       rmb();
-
-       /* Loop to complete all indicated BDs */
-       while (sw_comp_cons != hw_comp_cons) {
-               struct eth_fast_path_rx_reg_cqe *fp_cqe;
-               enum pkt_hash_types rxhash_type;
-               enum eth_rx_cqe_type cqe_type;
-               struct sw_rx_data *sw_rx_data;
-               union eth_rx_cqe *cqe;
-               struct sk_buff *skb;
-               struct page *data;
-               __le16 flags;
-               u16 len, pad;
-               u32 rx_hash;
-
-               /* Get the CQE from the completion ring */
-               cqe = (union eth_rx_cqe *)
-                       qed_chain_consume(&rxq->rx_comp_ring);
-               cqe_type = cqe->fast_path_regular.type;
-
-               if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
-                       edev->ops->eth_cqe_completion(
-                                       edev->cdev, fp->rss_id,
-                                       (struct eth_slow_path_rx_cqe *)cqe);
-                       goto next_cqe;
-               }
-
-               if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) {
-                       switch (cqe_type) {
-                       case ETH_RX_CQE_TYPE_TPA_START:
-                               qede_tpa_start(edev, rxq,
-                                              &cqe->fast_path_tpa_start);
-                               goto next_cqe;
-                       case ETH_RX_CQE_TYPE_TPA_CONT:
-                               qede_tpa_cont(edev, rxq,
-                                             &cqe->fast_path_tpa_cont);
-                               goto next_cqe;
-                       case ETH_RX_CQE_TYPE_TPA_END:
-                               qede_tpa_end(edev, fp,
-                                            &cqe->fast_path_tpa_end);
-                               goto next_rx_only;
-                       default:
-                               break;
-                       }
-               }
-
-               /* Get the data from the SW ring */
-               sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
-               sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
-               data = sw_rx_data->data;
-
-               fp_cqe = &cqe->fast_path_regular;
-               len =  le16_to_cpu(fp_cqe->len_on_first_bd);
-               pad = fp_cqe->placement_offset;
-               flags = cqe->fast_path_regular.pars_flags.flags;
-
-               /* If this is an error packet then drop it */
-               parse_flag = le16_to_cpu(flags);
-
-               csum_flag = qede_check_csum(parse_flag);
-               if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
-                       if (qede_pkt_is_ip_fragmented(&cqe->fast_path_regular,
-                                                     parse_flag)) {
-                               rxq->rx_ip_frags++;
-                               goto alloc_skb;
-                       }
-
-                       DP_NOTICE(edev,
-                                 "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
-                                 sw_comp_cons, parse_flag);
-                       rxq->rx_hw_errors++;
-                       qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
-                       goto next_cqe;
-               }
-
-alloc_skb:
-               skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
-               if (unlikely(!skb)) {
-                       DP_NOTICE(edev,
-                                 "Build_skb failed, dropping incoming packet\n");
-                       qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
-                       rxq->rx_alloc_errors++;
-                       goto next_cqe;
-               }
-
-               /* Copy data into SKB */
-               if (len + pad <= edev->rx_copybreak) {
-                       memcpy(skb_put(skb, len),
-                              page_address(data) + pad +
-                               sw_rx_data->page_offset, len);
-                       qede_reuse_page(edev, rxq, sw_rx_data);
-               } else {
-                       struct skb_frag_struct *frag;
-                       unsigned int pull_len;
-                       unsigned char *va;
-
-                       frag = &skb_shinfo(skb)->frags[0];
-
-                       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, data,
-                                       pad + sw_rx_data->page_offset,
-                                       len, rxq->rx_buf_seg_size);
-
-                       va = skb_frag_address(frag);
-                       pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE);
-
-                       /* Align the pull_len to optimize memcpy */
-                       memcpy(skb->data, va, ALIGN(pull_len, sizeof(long)));
-
-                       skb_frag_size_sub(frag, pull_len);
-                       frag->page_offset += pull_len;
-                       skb->data_len -= pull_len;
-                       skb->tail += pull_len;
-
-                       if (unlikely(qede_realloc_rx_buffer(edev, rxq,
-                                                           sw_rx_data))) {
-                               DP_ERR(edev, "Failed to allocate rx buffer\n");
-                               /* Increment the page ref count so the
-                                * buffer can be reused after the allocation
-                                * failure and isn't freed along with the SKB.
-                                */
-
-                               page_ref_inc(sw_rx_data->data);
-                               rxq->rx_alloc_errors++;
-                               qede_recycle_rx_bd_ring(rxq, edev,
-                                                       fp_cqe->bd_num);
-                               dev_kfree_skb_any(skb);
-                               goto next_cqe;
-                       }
-               }
-
-               qede_rx_bd_ring_consume(rxq);
-
-               if (fp_cqe->bd_num != 1) {
-                       u16 pkt_len = le16_to_cpu(fp_cqe->pkt_len);
-                       u8 num_frags;
-
-                       pkt_len -= len;
-
-                       for (num_frags = fp_cqe->bd_num - 1; num_frags > 0;
-                            num_frags--) {
-                               u16 cur_size = pkt_len > rxq->rx_buf_size ?
-                                               rxq->rx_buf_size : pkt_len;
-                               if (unlikely(!cur_size)) {
-                                       DP_ERR(edev,
-                                              "Still got %d BDs for mapping jumbo, but length became 0\n",
-                                              num_frags);
-                                       qede_recycle_rx_bd_ring(rxq, edev,
-                                                               num_frags);
-                                       dev_kfree_skb_any(skb);
-                                       goto next_cqe;
-                               }
-
-                               if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
-                                       qede_recycle_rx_bd_ring(rxq, edev,
-                                                               num_frags);
-                                       dev_kfree_skb_any(skb);
-                                       goto next_cqe;
-                               }
-
-                               sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
-                               sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
-                               qede_rx_bd_ring_consume(rxq);
-
-                               dma_unmap_page(&edev->pdev->dev,
-                                              sw_rx_data->mapping,
-                                              PAGE_SIZE, DMA_FROM_DEVICE);
-
-                               skb_fill_page_desc(skb,
-                                                  skb_shinfo(skb)->nr_frags++,
-                                                  sw_rx_data->data, 0,
-                                                  cur_size);
-
-                               skb->truesize += PAGE_SIZE;
-                               skb->data_len += cur_size;
-                               skb->len += cur_size;
-                               pkt_len -= cur_size;
-                       }
-
-                       if (unlikely(pkt_len))
-                               DP_ERR(edev,
-                                      "Mapped all BDs of jumbo, but still have %d bytes\n",
-                                      pkt_len);
-               }
-
-               skb->protocol = eth_type_trans(skb, edev->ndev);
-
-               rx_hash = qede_get_rxhash(edev, fp_cqe->bitfields,
-                                         fp_cqe->rss_hash,
-                                         &rxhash_type);
-
-               skb_set_hash(skb, rx_hash, rxhash_type);
-
-               qede_set_skb_csum(skb, csum_flag);
-
-               skb_record_rx_queue(skb, fp->rss_id);
-
-               qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));
-next_rx_only:
-               rx_pkt++;
-
-next_cqe: /* don't consume bd rx buffer */
-               qed_chain_recycle_consumed(&rxq->rx_comp_ring);
-               sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
-               /* CR TPA - revisit how to handle budget with TPA; perhaps
-                * increase it on "end".
-                */
-               if (rx_pkt == budget)
-                       break;
-       } /* repeat while sw_comp_cons != hw_comp_cons... */
-
-       /* Update producers */
-       qede_update_rx_prod(edev, rxq);
+       if (!qed_ops) {
+               pr_notice("Failed to get qed ethtool operations\n");
+               return -EINVAL;
+       }
 
-       return rx_pkt;
-}
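
[Editor's note] qede_rx_int() follows the usual NAPI rx contract: read the hardware consumer index once, fence with rmb(), process at most `budget` completions, and republish the RX producers with a single MMIO burst at the end. Stripped to its shape (hypothetical structure and helper names):

        /* Sketch of the completion-loop shape used above. */
        static int rx_int(struct my_fp *fp, int budget)
        {
                u16 hw_cons = le16_to_cpu(*fp->hw_cons_ptr);
                int done = 0;

                rmb();  /* no speculative CQE reads before hw_cons */

                while (fp->sw_cons != hw_cons && done < budget) {
                        /* ... consume one CQE, possibly build an skb ... */
                        fp->sw_cons++;
                        done++;
                }

                update_rx_producers(fp);        /* hypothetical helper */
                return done;
        }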
+       /* Must register notifier before pci ops, since we might miss
+        * interface rename after pci probe and netdev registration.
+        */
+       ret = register_netdevice_notifier(&qede_netdev_notifier);
+       if (ret) {
+               pr_notice("Failed to register netdevice_notifier\n");
+               qed_put_eth_ops();
+               return -EINVAL;
+       }
 
-static int qede_poll(struct napi_struct *napi, int budget)
-{
-       struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
-                                               napi);
-       struct qede_dev *edev = fp->edev;
-       int rx_work_done = 0;
-       u8 tc;
-
-       for (tc = 0; tc < edev->num_tc; tc++)
-               if (qede_txq_has_work(&fp->txqs[tc]))
-                       qede_tx_int(edev, &fp->txqs[tc]);
-
-       rx_work_done = qede_has_rx_work(fp->rxq) ?
-                       qede_rx_int(fp, budget) : 0;
-       if (rx_work_done < budget) {
-               qed_sb_update_sb_idx(fp->sb_info);
-               /* *_has_*_work() reads the status block, so we must ensure
-                * the status block indices have actually been read
-                * (qed_sb_update_sb_idx) before that check. Otherwise we
-                * could write the "newer" status block value to HW: if a DMA
-                * arrived right after qede_has_rx_work and there were no rmb,
-                * the memory read (qed_sb_update_sb_idx) could be postponed
-                * until just before *_ack_sb, and no further interrupt would
-                * arrive until the status block is updated again, even though
-                * work is still pending.
-                */
-               rmb();
-
-               /* Fall out from the NAPI loop if needed */
-               if (!(qede_has_rx_work(fp->rxq) ||
-                     qede_has_tx_work(fp))) {
-                       napi_complete(napi);
-
-                       /* Update and reenable interrupts */
-                       qed_sb_ack(fp->sb_info, IGU_INT_ENABLE,
-                                  1 /*update*/);
-               } else {
-                       rx_work_done = budget;
-               }
+       ret = pci_register_driver(&qede_pci_driver);
+       if (ret) {
+               pr_notice("Failed to register driver\n");
+               unregister_netdevice_notifier(&qede_netdev_notifier);
+               qed_put_eth_ops();
+               return -EINVAL;
        }
 
-       return rx_work_done;
+       return 0;
 }
 
-static irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
+static void __exit qede_cleanup(void)
 {
-       struct qede_fastpath *fp = fp_cookie;
-
-       qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);
+       if (debug & QED_LOG_INFO_MASK)
+               pr_info("qede_cleanup called\n");
 
-       napi_schedule_irqoff(&fp->napi);
-       return IRQ_HANDLED;
+       unregister_netdevice_notifier(&qede_netdev_notifier);
+       pci_unregister_driver(&qede_pci_driver);
+       qed_put_eth_ops();
 }
 
-/* -------------------------------------------------------------------------
- * END OF FAST-PATH
- * -------------------------------------------------------------------------
- */
+module_init(qede_init);
+module_exit(qede_cleanup);
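
[Editor's note] The ordering in qede_init() is deliberate, as the comment above spells out: the notifier has to be live before pci_register_driver() can probe devices and register netdevs, and qede_cleanup() tears the two down in the same order. The notifier callback itself is outside this hunk; a hedged sketch of the standard shape such a callback takes (hypothetical names, stock notifier API):

        static int my_netdev_event(struct notifier_block *nb,
                                   unsigned long event, void *ptr)
        {
                struct net_device *ndev = netdev_notifier_info_to_dev(ptr);

                if (event == NETDEV_CHANGENAME)
                        pr_debug("netdev renamed to %s\n", ndev->name);

                return NOTIFY_DONE;
        }

        static struct notifier_block my_netdev_notifier = {
                .notifier_call = my_netdev_event,
        };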
 
 static int qede_open(struct net_device *ndev);
 static int qede_close(struct net_device *ndev);
-static int qede_set_mac_addr(struct net_device *ndev, void *p);
-static void qede_set_rx_mode(struct net_device *ndev);
-static void qede_config_rx_mode(struct net_device *ndev);
-
-static int qede_set_ucast_rx_mac(struct qede_dev *edev,
-                                enum qed_filter_xcast_params_type opcode,
-                                unsigned char mac[ETH_ALEN])
-{
-       struct qed_filter_params filter_cmd;
-
-       memset(&filter_cmd, 0, sizeof(filter_cmd));
-       filter_cmd.type = QED_FILTER_TYPE_UCAST;
-       filter_cmd.filter.ucast.type = opcode;
-       filter_cmd.filter.ucast.mac_valid = 1;
-       ether_addr_copy(filter_cmd.filter.ucast.mac, mac);
-
-       return edev->ops->filter_config(edev->cdev, &filter_cmd);
-}
-
-static int qede_set_ucast_rx_vlan(struct qede_dev *edev,
-                                 enum qed_filter_xcast_params_type opcode,
-                                 u16 vid)
-{
-       struct qed_filter_params filter_cmd;
-
-       memset(&filter_cmd, 0, sizeof(filter_cmd));
-       filter_cmd.type = QED_FILTER_TYPE_UCAST;
-       filter_cmd.filter.ucast.type = opcode;
-       filter_cmd.filter.ucast.vlan_valid = 1;
-       filter_cmd.filter.ucast.vlan = vid;
-
-       return edev->ops->filter_config(edev->cdev, &filter_cmd);
-}
 
 void qede_fill_by_demand_stats(struct qede_dev *edev)
 {
+       struct qede_stats_common *p_common = &edev->stats.common;
        struct qed_eth_stats stats;
 
-       edev->ops->get_vport_stats(edev->cdev, &stats);
-       edev->stats.no_buff_discards = stats.no_buff_discards;
-       edev->stats.rx_ucast_bytes = stats.rx_ucast_bytes;
-       edev->stats.rx_mcast_bytes = stats.rx_mcast_bytes;
-       edev->stats.rx_bcast_bytes = stats.rx_bcast_bytes;
-       edev->stats.rx_ucast_pkts = stats.rx_ucast_pkts;
-       edev->stats.rx_mcast_pkts = stats.rx_mcast_pkts;
-       edev->stats.rx_bcast_pkts = stats.rx_bcast_pkts;
-       edev->stats.mftag_filter_discards = stats.mftag_filter_discards;
-       edev->stats.mac_filter_discards = stats.mac_filter_discards;
-
-       edev->stats.tx_ucast_bytes = stats.tx_ucast_bytes;
-       edev->stats.tx_mcast_bytes = stats.tx_mcast_bytes;
-       edev->stats.tx_bcast_bytes = stats.tx_bcast_bytes;
-       edev->stats.tx_ucast_pkts = stats.tx_ucast_pkts;
-       edev->stats.tx_mcast_pkts = stats.tx_mcast_pkts;
-       edev->stats.tx_bcast_pkts = stats.tx_bcast_pkts;
-       edev->stats.tx_err_drop_pkts = stats.tx_err_drop_pkts;
-       edev->stats.coalesced_pkts = stats.tpa_coalesced_pkts;
-       edev->stats.coalesced_events = stats.tpa_coalesced_events;
-       edev->stats.coalesced_aborts_num = stats.tpa_aborts_num;
-       edev->stats.non_coalesced_pkts = stats.tpa_not_coalesced_pkts;
-       edev->stats.coalesced_bytes = stats.tpa_coalesced_bytes;
-
-       edev->stats.rx_64_byte_packets = stats.rx_64_byte_packets;
-       edev->stats.rx_65_to_127_byte_packets = stats.rx_65_to_127_byte_packets;
-       edev->stats.rx_128_to_255_byte_packets =
-                               stats.rx_128_to_255_byte_packets;
-       edev->stats.rx_256_to_511_byte_packets =
-                               stats.rx_256_to_511_byte_packets;
-       edev->stats.rx_512_to_1023_byte_packets =
-                               stats.rx_512_to_1023_byte_packets;
-       edev->stats.rx_1024_to_1518_byte_packets =
-                               stats.rx_1024_to_1518_byte_packets;
-       edev->stats.rx_1519_to_1522_byte_packets =
-                               stats.rx_1519_to_1522_byte_packets;
-       edev->stats.rx_1519_to_2047_byte_packets =
-                               stats.rx_1519_to_2047_byte_packets;
-       edev->stats.rx_2048_to_4095_byte_packets =
-                               stats.rx_2048_to_4095_byte_packets;
-       edev->stats.rx_4096_to_9216_byte_packets =
-                               stats.rx_4096_to_9216_byte_packets;
-       edev->stats.rx_9217_to_16383_byte_packets =
-                               stats.rx_9217_to_16383_byte_packets;
-       edev->stats.rx_crc_errors = stats.rx_crc_errors;
-       edev->stats.rx_mac_crtl_frames = stats.rx_mac_crtl_frames;
-       edev->stats.rx_pause_frames = stats.rx_pause_frames;
-       edev->stats.rx_pfc_frames = stats.rx_pfc_frames;
-       edev->stats.rx_align_errors = stats.rx_align_errors;
-       edev->stats.rx_carrier_errors = stats.rx_carrier_errors;
-       edev->stats.rx_oversize_packets = stats.rx_oversize_packets;
-       edev->stats.rx_jabbers = stats.rx_jabbers;
-       edev->stats.rx_undersize_packets = stats.rx_undersize_packets;
-       edev->stats.rx_fragments = stats.rx_fragments;
-       edev->stats.tx_64_byte_packets = stats.tx_64_byte_packets;
-       edev->stats.tx_65_to_127_byte_packets = stats.tx_65_to_127_byte_packets;
-       edev->stats.tx_128_to_255_byte_packets =
-                               stats.tx_128_to_255_byte_packets;
-       edev->stats.tx_256_to_511_byte_packets =
-                               stats.tx_256_to_511_byte_packets;
-       edev->stats.tx_512_to_1023_byte_packets =
-                               stats.tx_512_to_1023_byte_packets;
-       edev->stats.tx_1024_to_1518_byte_packets =
-                               stats.tx_1024_to_1518_byte_packets;
-       edev->stats.tx_1519_to_2047_byte_packets =
-                               stats.tx_1519_to_2047_byte_packets;
-       edev->stats.tx_2048_to_4095_byte_packets =
-                               stats.tx_2048_to_4095_byte_packets;
-       edev->stats.tx_4096_to_9216_byte_packets =
-                               stats.tx_4096_to_9216_byte_packets;
-       edev->stats.tx_9217_to_16383_byte_packets =
-                               stats.tx_9217_to_16383_byte_packets;
-       edev->stats.tx_pause_frames = stats.tx_pause_frames;
-       edev->stats.tx_pfc_frames = stats.tx_pfc_frames;
-       edev->stats.tx_lpi_entry_count = stats.tx_lpi_entry_count;
-       edev->stats.tx_total_collisions = stats.tx_total_collisions;
-       edev->stats.brb_truncates = stats.brb_truncates;
-       edev->stats.brb_discards = stats.brb_discards;
-       edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames;
+       p_common->no_buff_discards = stats.common.no_buff_discards;
+       p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
+       p_common->ttl0_discard = stats.common.ttl0_discard;
+       p_common->rx_ucast_bytes = stats.common.rx_ucast_bytes;
+       p_common->rx_mcast_bytes = stats.common.rx_mcast_bytes;
+       p_common->rx_bcast_bytes = stats.common.rx_bcast_bytes;
+       p_common->rx_ucast_pkts = stats.common.rx_ucast_pkts;
+       p_common->rx_mcast_pkts = stats.common.rx_mcast_pkts;
+       p_common->rx_bcast_pkts = stats.common.rx_bcast_pkts;
+       p_common->mftag_filter_discards = stats.common.mftag_filter_discards;
+       p_common->mac_filter_discards = stats.common.mac_filter_discards;
+
+       p_common->tx_ucast_bytes = stats.common.tx_ucast_bytes;
+       p_common->tx_mcast_bytes = stats.common.tx_mcast_bytes;
+       p_common->tx_bcast_bytes = stats.common.tx_bcast_bytes;
+       p_common->tx_ucast_pkts = stats.common.tx_ucast_pkts;
+       p_common->tx_mcast_pkts = stats.common.tx_mcast_pkts;
+       p_common->tx_bcast_pkts = stats.common.tx_bcast_pkts;
+       p_common->tx_err_drop_pkts = stats.common.tx_err_drop_pkts;
+       p_common->coalesced_pkts = stats.common.tpa_coalesced_pkts;
+       p_common->coalesced_events = stats.common.tpa_coalesced_events;
+       p_common->coalesced_aborts_num = stats.common.tpa_aborts_num;
+       p_common->non_coalesced_pkts = stats.common.tpa_not_coalesced_pkts;
+       p_common->coalesced_bytes = stats.common.tpa_coalesced_bytes;
+
+       p_common->rx_64_byte_packets = stats.common.rx_64_byte_packets;
+       p_common->rx_65_to_127_byte_packets =
+           stats.common.rx_65_to_127_byte_packets;
+       p_common->rx_128_to_255_byte_packets =
+           stats.common.rx_128_to_255_byte_packets;
+       p_common->rx_256_to_511_byte_packets =
+           stats.common.rx_256_to_511_byte_packets;
+       p_common->rx_512_to_1023_byte_packets =
+           stats.common.rx_512_to_1023_byte_packets;
+       p_common->rx_1024_to_1518_byte_packets =
+           stats.common.rx_1024_to_1518_byte_packets;
+       p_common->rx_crc_errors = stats.common.rx_crc_errors;
+       p_common->rx_mac_crtl_frames = stats.common.rx_mac_crtl_frames;
+       p_common->rx_pause_frames = stats.common.rx_pause_frames;
+       p_common->rx_pfc_frames = stats.common.rx_pfc_frames;
+       p_common->rx_align_errors = stats.common.rx_align_errors;
+       p_common->rx_carrier_errors = stats.common.rx_carrier_errors;
+       p_common->rx_oversize_packets = stats.common.rx_oversize_packets;
+       p_common->rx_jabbers = stats.common.rx_jabbers;
+       p_common->rx_undersize_packets = stats.common.rx_undersize_packets;
+       p_common->rx_fragments = stats.common.rx_fragments;
+       p_common->tx_64_byte_packets = stats.common.tx_64_byte_packets;
+       p_common->tx_65_to_127_byte_packets =
+           stats.common.tx_65_to_127_byte_packets;
+       p_common->tx_128_to_255_byte_packets =
+           stats.common.tx_128_to_255_byte_packets;
+       p_common->tx_256_to_511_byte_packets =
+           stats.common.tx_256_to_511_byte_packets;
+       p_common->tx_512_to_1023_byte_packets =
+           stats.common.tx_512_to_1023_byte_packets;
+       p_common->tx_1024_to_1518_byte_packets =
+           stats.common.tx_1024_to_1518_byte_packets;
+       p_common->tx_pause_frames = stats.common.tx_pause_frames;
+       p_common->tx_pfc_frames = stats.common.tx_pfc_frames;
+       p_common->brb_truncates = stats.common.brb_truncates;
+       p_common->brb_discards = stats.common.brb_discards;
+       p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames;
+
+       if (QEDE_IS_BB(edev)) {
+               struct qede_stats_bb *p_bb = &edev->stats.bb;
+
+               p_bb->rx_1519_to_1522_byte_packets =
+                   stats.bb.rx_1519_to_1522_byte_packets;
+               p_bb->rx_1519_to_2047_byte_packets =
+                   stats.bb.rx_1519_to_2047_byte_packets;
+               p_bb->rx_2048_to_4095_byte_packets =
+                   stats.bb.rx_2048_to_4095_byte_packets;
+               p_bb->rx_4096_to_9216_byte_packets =
+                   stats.bb.rx_4096_to_9216_byte_packets;
+               p_bb->rx_9217_to_16383_byte_packets =
+                   stats.bb.rx_9217_to_16383_byte_packets;
+               p_bb->tx_1519_to_2047_byte_packets =
+                   stats.bb.tx_1519_to_2047_byte_packets;
+               p_bb->tx_2048_to_4095_byte_packets =
+                   stats.bb.tx_2048_to_4095_byte_packets;
+               p_bb->tx_4096_to_9216_byte_packets =
+                   stats.bb.tx_4096_to_9216_byte_packets;
+               p_bb->tx_9217_to_16383_byte_packets =
+                   stats.bb.tx_9217_to_16383_byte_packets;
+               p_bb->tx_lpi_entry_count = stats.bb.tx_lpi_entry_count;
+               p_bb->tx_total_collisions = stats.bb.tx_total_collisions;
+       } else {
+               struct qede_stats_ah *p_ah = &edev->stats.ah;
+
+               p_ah->rx_1519_to_max_byte_packets =
+                   stats.ah.rx_1519_to_max_byte_packets;
+               p_ah->tx_1519_to_max_byte_packets =
+                   stats.ah.tx_1519_to_max_byte_packets;
+       }
 }
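
[Editor's note] The branch above reflects the two ASIC families the stats structure now distinguishes: BB devices expose fine-grained 1519..16383-byte histogram buckets plus LPI and collision counters, while AH devices collapse everything above 1518 bytes into single rx/tx "1519_to_max" counters. Purely as an illustration (these counters are filled by firmware and only copied here), a 2000-byte RX frame would be accounted as:

        /* Illustration only -- not driver code. */
        if (QEDE_IS_BB(edev))
                edev->stats.bb.rx_1519_to_2047_byte_packets++;
        else
                edev->stats.ah.rx_1519_to_max_byte_packets++;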
 
-static struct rtnl_link_stats64 *qede_get_stats64(
-                           struct net_device *dev,
-                           struct rtnl_link_stats64 *stats)
+static
+struct rtnl_link_stats64 *qede_get_stats64(struct net_device *dev,
+                                          struct rtnl_link_stats64 *stats)
 {
        struct qede_dev *edev = netdev_priv(dev);
+       struct qede_stats_common *p_common;
 
        qede_fill_by_demand_stats(edev);
+       p_common = &edev->stats.common;
 
-       stats->rx_packets = edev->stats.rx_ucast_pkts +
-                           edev->stats.rx_mcast_pkts +
-                           edev->stats.rx_bcast_pkts;
-       stats->tx_packets = edev->stats.tx_ucast_pkts +
-                           edev->stats.tx_mcast_pkts +
-                           edev->stats.tx_bcast_pkts;
-
-       stats->rx_bytes = edev->stats.rx_ucast_bytes +
-                         edev->stats.rx_mcast_bytes +
-                         edev->stats.rx_bcast_bytes;
+       stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
+               p_common->rx_bcast_pkts;
+       stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
+               p_common->tx_bcast_pkts;
 
-       stats->tx_bytes = edev->stats.tx_ucast_bytes +
-                         edev->stats.tx_mcast_bytes +
-                         edev->stats.tx_bcast_bytes;
+       stats->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
+               p_common->rx_bcast_bytes;
+       stats->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
+               p_common->tx_bcast_bytes;
 
-       stats->tx_errors = edev->stats.tx_err_drop_pkts;
-       stats->multicast = edev->stats.rx_mcast_pkts +
-                          edev->stats.rx_bcast_pkts;
+       stats->tx_errors = p_common->tx_err_drop_pkts;
+       stats->multicast = p_common->rx_mcast_pkts + p_common->rx_bcast_pkts;
 
-       stats->rx_fifo_errors = edev->stats.no_buff_discards;
+       stats->rx_fifo_errors = p_common->no_buff_discards;
 
-       stats->collisions = edev->stats.tx_total_collisions;
-       stats->rx_crc_errors = edev->stats.rx_crc_errors;
-       stats->rx_frame_errors = edev->stats.rx_align_errors;
+       if (QEDE_IS_BB(edev))
+               stats->collisions = edev->stats.bb.tx_total_collisions;
+       stats->rx_crc_errors = p_common->rx_crc_errors;
+       stats->rx_frame_errors = p_common->rx_align_errors;
 
        return stats;
 }
@@ -1867,343 +495,35 @@ static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
 
        return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
 }
-#endif
-
-static void qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
-{
-       struct qed_update_vport_params params;
-       int rc;
-
-       /* Proceed only if action actually needs to be performed */
-       if (edev->accept_any_vlan == action)
-               return;
-
-       memset(&params, 0, sizeof(params));
-
-       params.vport_id = 0;
-       params.accept_any_vlan = action;
-       params.update_accept_any_vlan_flg = 1;
-
-       rc = edev->ops->vport_update(edev->cdev, &params);
-       if (rc) {
-               DP_ERR(edev, "Failed to %s accept-any-vlan\n",
-                      action ? "enable" : "disable");
-       } else {
-               DP_INFO(edev, "%s accept-any-vlan\n",
-                       action ? "enabled" : "disabled");
-               edev->accept_any_vlan = action;
-       }
-}
-
-static int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
-{
-       struct qede_dev *edev = netdev_priv(dev);
-       struct qede_vlan *vlan, *tmp;
-       int rc;
-
-       DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid);
-
-       vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
-       if (!vlan) {
-               DP_INFO(edev, "Failed to allocate struct for vlan\n");
-               return -ENOMEM;
-       }
-       INIT_LIST_HEAD(&vlan->list);
-       vlan->vid = vid;
-       vlan->configured = false;
-
-       /* Verify vlan isn't already configured */
-       list_for_each_entry(tmp, &edev->vlan_list, list) {
-               if (tmp->vid == vlan->vid) {
-                       DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
-                                  "vlan already configured\n");
-                       kfree(vlan);
-                       return -EEXIST;
-               }
-       }
-
-       /* If interface is down, cache this VLAN ID and return */
-       if (edev->state != QEDE_STATE_OPEN) {
-               DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
-                          "Interface is down, VLAN %d will be configured when interface is up\n",
-                          vid);
-               if (vid != 0)
-                       edev->non_configured_vlans++;
-               list_add(&vlan->list, &edev->vlan_list);
-
-               return 0;
-       }
-
-       /* Check for the filter limit.
-        * Note - vlan0 has a reserved filter and can be added without
-        * worrying about quota
-        */
-       if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) ||
-           (vlan->vid == 0)) {
-               rc = qede_set_ucast_rx_vlan(edev,
-                                           QED_FILTER_XCAST_TYPE_ADD,
-                                           vlan->vid);
-               if (rc) {
-                       DP_ERR(edev, "Failed to configure VLAN %d\n",
-                              vlan->vid);
-                       kfree(vlan);
-                       return -EINVAL;
-               }
-               vlan->configured = true;
-
-               /* vlan0 filter doesn't consume any of our quota */
-               if (vlan->vid != 0)
-                       edev->configured_vlans++;
-       } else {
-               /* Out of quota; Activate accept-any-VLAN mode */
-               if (!edev->non_configured_vlans)
-                       qede_config_accept_any_vlan(edev, true);
-
-               edev->non_configured_vlans++;
-       }
-
-       list_add(&vlan->list, &edev->vlan_list);
-
-       return 0;
-}
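
[Editor's note] The quota handling above is easiest to follow with numbers: assume num_vlan_filters is 2 and the interface is up. Adding VLANs 10 and 20 consumes both hardware filters (configured_vlans = 2); a subsequent VLAN 30 finds no credit, so the vport is switched into accept-any-VLAN mode and non_configured_vlans becomes 1. VLAN 0 never counts against the quota because it has a reserved filter.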
-
-static void qede_del_vlan_from_list(struct qede_dev *edev,
-                                   struct qede_vlan *vlan)
-{
-       /* vlan0 filter doesn't consume any of our quota */
-       if (vlan->vid != 0) {
-               if (vlan->configured)
-                       edev->configured_vlans--;
-               else
-                       edev->non_configured_vlans--;
-       }
-
-       list_del(&vlan->list);
-       kfree(vlan);
-}
-
-static int qede_configure_vlan_filters(struct qede_dev *edev)
-{
-       int rc = 0, real_rc = 0, accept_any_vlan = 0;
-       struct qed_dev_eth_info *dev_info;
-       struct qede_vlan *vlan = NULL;
-
-       if (list_empty(&edev->vlan_list))
-               return 0;
-
-       dev_info = &edev->dev_info;
-
-       /* Configure non-configured vlans */
-       list_for_each_entry(vlan, &edev->vlan_list, list) {
-               if (vlan->configured)
-                       continue;
-
-               /* We have used all our credits, now enable accept_any_vlan */
-               if ((vlan->vid != 0) &&
-                   (edev->configured_vlans == dev_info->num_vlan_filters)) {
-                       accept_any_vlan = 1;
-                       continue;
-               }
-
-               DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid);
-
-               rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD,
-                                           vlan->vid);
-               if (rc) {
-                       DP_ERR(edev, "Failed to configure VLAN %u\n",
-                              vlan->vid);
-                       real_rc = rc;
-                       continue;
-               }
-
-               vlan->configured = true;
-               /* vlan0 filter doesn't consume our VLAN filter's quota */
-               if (vlan->vid != 0) {
-                       edev->non_configured_vlans--;
-                       edev->configured_vlans++;
-               }
-       }
-
-       /* enable accept_any_vlan mode if we have more VLANs than credits,
-        * or remove accept_any_vlan mode if we've actually removed
-        * a non-configured vlan, and all remaining vlans are truly configured.
-        */
-
-       if (accept_any_vlan)
-               qede_config_accept_any_vlan(edev, true);
-       else if (!edev->non_configured_vlans)
-               qede_config_accept_any_vlan(edev, false);
-
-       return real_rc;
-}
-
-static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
-{
-       struct qede_dev *edev = netdev_priv(dev);
-       struct qede_vlan *vlan = NULL;
-       int rc;
-
-       DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);
-
-       /* Find whether entry exists */
-       list_for_each_entry(vlan, &edev->vlan_list, list)
-               if (vlan->vid == vid)
-                       break;
-
-       if (!vlan || (vlan->vid != vid)) {
-               DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
-                          "Vlan isn't configured\n");
-               return 0;
-       }
-
-       if (edev->state != QEDE_STATE_OPEN) {
-               /* As the interface is already down, there is no VPORT
-                * instance from which to remove the vlan filter, so just
-                * update the vlan list.
-                */
-               DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
-                          "Interface is down, removing VLAN from list only\n");
-               qede_del_vlan_from_list(edev, vlan);
-               return 0;
-       }
-
-       /* Remove vlan */
-       if (vlan->configured) {
-               rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL,
-                                           vid);
-               if (rc) {
-                       DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
-                       return -EINVAL;
-               }
-       }
-
-       qede_del_vlan_from_list(edev, vlan);
-
-       /* We have removed a VLAN - try to see if we can
-        * configure non-configured VLAN from the list.
-        */
-       rc = qede_configure_vlan_filters(edev);
-
-       return rc;
-}
-
-static void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
-{
-       struct qede_vlan *vlan = NULL;
-
-       if (list_empty(&edev->vlan_list))
-               return;
-
-       list_for_each_entry(vlan, &edev->vlan_list, list) {
-               if (!vlan->configured)
-                       continue;
-
-               vlan->configured = false;
-
-               /* vlan0 filter doesn't consume any of our quota */
-               if (vlan->vid != 0) {
-                       edev->non_configured_vlans++;
-                       edev->configured_vlans--;
-               }
-
-               DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
-                          "marked vlan %d as non-configured\n",
-                          vlan->vid);
-       }
-
-       edev->accept_any_vlan = false;
-}
-
-int qede_set_features(struct net_device *dev, netdev_features_t features)
-{
-       struct qede_dev *edev = netdev_priv(dev);
-       netdev_features_t changes = features ^ dev->features;
-       bool need_reload = false;
-
-       /* No action needed if hardware GRO is disabled during driver load */
-       if (changes & NETIF_F_GRO) {
-               if (dev->features & NETIF_F_GRO)
-                       need_reload = !edev->gro_disable;
-               else
-                       need_reload = edev->gro_disable;
-       }
-
-       if (need_reload && netif_running(edev->ndev)) {
-               dev->features = features;
-               qede_reload(edev, NULL, NULL);
-               return 1;
-       }
-
-       return 0;
-}
 
-static void qede_udp_tunnel_add(struct net_device *dev,
-                               struct udp_tunnel_info *ti)
+static int qede_set_vf_trust(struct net_device *dev, int vfidx, bool setting)
 {
        struct qede_dev *edev = netdev_priv(dev);
-       u16 t_port = ntohs(ti->port);
-
-       switch (ti->type) {
-       case UDP_TUNNEL_TYPE_VXLAN:
-               if (edev->vxlan_dst_port)
-                       return;
-
-               edev->vxlan_dst_port = t_port;
 
-               DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d",
-                          t_port);
-
-               set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
-               break;
-       case UDP_TUNNEL_TYPE_GENEVE:
-               if (edev->geneve_dst_port)
-                       return;
-
-               edev->geneve_dst_port = t_port;
-
-               DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d",
-                          t_port);
-               set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
-               break;
-       default:
-               return;
-       }
+       if (!edev->ops)
+               return -EINVAL;
 
-       schedule_delayed_work(&edev->sp_task, 0);
+       return edev->ops->iov->set_trust(edev->cdev, vfidx, setting);
 }
+#endif
 
-static void qede_udp_tunnel_del(struct net_device *dev,
-                               struct udp_tunnel_info *ti)
+static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
        struct qede_dev *edev = netdev_priv(dev);
-       u16 t_port = ntohs(ti->port);
-
-       switch (ti->type) {
-       case UDP_TUNNEL_TYPE_VXLAN:
-               if (t_port != edev->vxlan_dst_port)
-                       return;
-
-               edev->vxlan_dst_port = 0;
-
-               DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d",
-                          t_port);
-
-               set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
-               break;
-       case UDP_TUNNEL_TYPE_GENEVE:
-               if (t_port != edev->geneve_dst_port)
-                       return;
 
-               edev->geneve_dst_port = 0;
+       if (!netif_running(dev))
+               return -EAGAIN;
 
-               DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d",
-                          t_port);
-               set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
-               break;
+       switch (cmd) {
+       case SIOCSHWTSTAMP:
+               return qede_ptp_hw_ts(edev, ifr);
        default:
-               return;
+               DP_VERBOSE(edev, QED_MSG_DEBUG,
+                          "default IOCTL cmd 0x%x\n", cmd);
+               return -EOPNOTSUPP;
        }
 
-       schedule_delayed_work(&edev->sp_task, 0);
+       return 0;
 }
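
[Editor's note] qede_ioctl() ties the new PTP code into the standard hardware-timestamping control path: userspace issues SIOCSHWTSTAMP with a struct hwtstamp_config hanging off the ifreq, and the driver hands it to qede_ptp_hw_ts(). A minimal userspace sketch against the generic kernel ABI (the interface name and filter choice are arbitrary):

        #include <string.h>
        #include <sys/ioctl.h>
        #include <net/if.h>
        #include <linux/net_tstamp.h>
        #include <linux/sockios.h>

        /* Enable TX+RX hardware timestamping on "eth0"; returns the ioctl rc. */
        int enable_hw_timestamps(int sock)
        {
                struct hwtstamp_config cfg = {
                        .tx_type   = HWTSTAMP_TX_ON,
                        .rx_filter = HWTSTAMP_FILTER_ALL,
                };
                struct ifreq ifr;

                memset(&ifr, 0, sizeof(ifr));
                strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
                ifr.ifr_data = (char *)&cfg;

                return ioctl(sock, SIOCSHWTSTAMP, &ifr);
        }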
 
 static const struct net_device_ops qede_netdev_ops = {
@@ -2214,9 +534,11 @@ static const struct net_device_ops qede_netdev_ops = {
        .ndo_set_mac_address = qede_set_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
        .ndo_change_mtu = qede_change_mtu,
+       .ndo_do_ioctl = qede_ioctl,
 #ifdef CONFIG_QED_SRIOV
        .ndo_set_vf_mac = qede_set_vf_mac,
        .ndo_set_vf_vlan = qede_set_vf_vlan,
+       .ndo_set_vf_trust = qede_set_vf_trust,
 #endif
        .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
@@ -2230,6 +552,11 @@ static const struct net_device_ops qede_netdev_ops = {
 #endif
        .ndo_udp_tunnel_add = qede_udp_tunnel_add,
        .ndo_udp_tunnel_del = qede_udp_tunnel_del,
+       .ndo_features_check = qede_features_check,
+       .ndo_xdp = qede_xdp,
+#ifdef CONFIG_RFS_ACCEL
+       .ndo_rx_flow_steer = qede_rx_flow_steer,
+#endif
 };
 
 /* -------------------------------------------------------------------------
@@ -2240,15 +567,13 @@ static const struct net_device_ops qede_netdev_ops = {
 static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
                                            struct pci_dev *pdev,
                                            struct qed_dev_eth_info *info,
-                                           u32 dp_module,
-                                           u8 dp_level)
+                                           u32 dp_module, u8 dp_level)
 {
        struct net_device *ndev;
        struct qede_dev *edev;
 
        ndev = alloc_etherdev_mqs(sizeof(*edev),
-                                 info->num_queues,
-                                 info->num_queues);
+                                 info->num_queues, info->num_queues);
        if (!ndev) {
                pr_err("etherdev allocation failed\n");
                return NULL;
@@ -2264,13 +589,14 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
        edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
        edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
 
+       DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
+               info->num_queues, info->num_queues);
+
        SET_NETDEV_DEV(ndev, &pdev->dev);
 
        memset(&edev->stats, 0, sizeof(edev->stats));
        memcpy(&edev->dev_info, info, sizeof(*info));
 
-       edev->num_tc = edev->dev_info.num_tc;
-
        INIT_LIST_HEAD(&edev->vlan_list);
 
        return edev;
@@ -2280,7 +606,7 @@ static void qede_init_ndev(struct qede_dev *edev)
 {
        struct net_device *ndev = edev->ndev;
        struct pci_dev *pdev = edev->pdev;
-       u32 hw_features;
+       netdev_features_t hw_features;
 
        pci_set_drvdata(pdev, ndev);
 
@@ -2295,6 +621,8 @@ static void qede_init_ndev(struct qede_dev *edev)
 
        qede_set_ethtool_ops(ndev);
 
+       ndev->priv_flags |= IFF_UNICAST_FLT;
+
        /* user-changeable features */
        hw_features = NETIF_F_GRO | NETIF_F_SG |
                      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -2302,11 +630,18 @@ static void qede_init_ndev(struct qede_dev *edev)
 
        /* Encap features*/
        hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
-                      NETIF_F_TSO_ECN;
+                      NETIF_F_TSO_ECN | NETIF_F_GSO_UDP_TUNNEL_CSUM |
+                      NETIF_F_GSO_GRE_CSUM;
+
+       if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1)
+               hw_features |= NETIF_F_NTUPLE;
+
        ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                                NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN |
                                NETIF_F_TSO6 | NETIF_F_GSO_GRE |
-                               NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM;
+                               NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM |
+                               NETIF_F_GSO_UDP_TUNNEL_CSUM |
+                               NETIF_F_GSO_GRE_CSUM;
 
        ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
                              NETIF_F_HIGHDMA;
@@ -2318,6 +653,8 @@ static void qede_init_ndev(struct qede_dev *edev)
 
        /* Set network device HW mac */
        ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);
+
+       ndev->mtu = edev->dev_info.common.mtu;
 }
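
The hw_features type change above (u32 to netdev_features_t) is not cosmetic:
by this kernel's netdev_features.h the feature bit space already extends past
bit 31 (NETIF_F_NTUPLE, added to hw_features here, is one such bit), so a u32
holder would silently drop bits. A standalone illustration; bit 41 below is
made up for the demo.

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t feature = 1ULL << 41;  /* illustrative high feature bit */
            uint32_t narrow = feature;      /* truncates: the bit is lost */
            uint64_t wide = feature;        /* netdev_features_t is 64-bit */

            printf("narrow=%#x wide=%#llx\n",
                   (unsigned)narrow, (unsigned long long)wide);
            return 0;
    }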
 
 /* This function converts from 32b param to two params of level and module
@@ -2352,49 +689,79 @@ static void qede_free_fp_array(struct qede_dev *edev)
                struct qede_fastpath *fp;
                int i;
 
-               for_each_rss(i) {
+               for_each_queue(i) {
                        fp = &edev->fp_array[i];
 
                        kfree(fp->sb_info);
                        kfree(fp->rxq);
-                       kfree(fp->txqs);
+                       kfree(fp->xdp_tx);
+                       kfree(fp->txq);
                }
                kfree(edev->fp_array);
        }
-       edev->num_rss = 0;
+
+       edev->num_queues = 0;
+       edev->fp_num_tx = 0;
+       edev->fp_num_rx = 0;
 }
 
 static int qede_alloc_fp_array(struct qede_dev *edev)
 {
+       u8 fp_combined, fp_rx = edev->fp_num_rx;
        struct qede_fastpath *fp;
        int i;
 
-       edev->fp_array = kcalloc(QEDE_RSS_CNT(edev),
+       edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev),
                                 sizeof(*edev->fp_array), GFP_KERNEL);
        if (!edev->fp_array) {
                DP_NOTICE(edev, "fp array allocation failed\n");
                goto err;
        }
 
-       for_each_rss(i) {
+       fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx;
+
+       /* Allocate the FP elements for Rx queues followed by combined and then
+        * the Tx. This ordering should be maintained so that the respective
+        * queues (Rx or Tx) will be together in the fastpath array and the
+        * associated ids will be sequential.
+        */
+       for_each_queue(i) {
                fp = &edev->fp_array[i];
 
-               fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL);
+               fp->sb_info = kzalloc(sizeof(*fp->sb_info), GFP_KERNEL);
                if (!fp->sb_info) {
                        DP_NOTICE(edev, "sb info struct allocation failed\n");
                        goto err;
                }
 
-               fp->rxq = kcalloc(1, sizeof(*fp->rxq), GFP_KERNEL);
-               if (!fp->rxq) {
-                       DP_NOTICE(edev, "RXQ struct allocation failed\n");
-                       goto err;
+               if (fp_rx) {
+                       fp->type = QEDE_FASTPATH_RX;
+                       fp_rx--;
+               } else if (fp_combined) {
+                       fp->type = QEDE_FASTPATH_COMBINED;
+                       fp_combined--;
+               } else {
+                       fp->type = QEDE_FASTPATH_TX;
                }
 
-               fp->txqs = kcalloc(edev->num_tc, sizeof(*fp->txqs), GFP_KERNEL);
-               if (!fp->txqs) {
-                       DP_NOTICE(edev, "TXQ array allocation failed\n");
-                       goto err;
+               if (fp->type & QEDE_FASTPATH_TX) {
+                       fp->txq = kzalloc(sizeof(*fp->txq), GFP_KERNEL);
+                       if (!fp->txq)
+                               goto err;
+               }
+
+               if (fp->type & QEDE_FASTPATH_RX) {
+                       fp->rxq = kzalloc(sizeof(*fp->rxq), GFP_KERNEL);
+                       if (!fp->rxq)
+                               goto err;
+
+                       if (edev->xdp_prog) {
+                               fp->xdp_tx = kzalloc(sizeof(*fp->xdp_tx),
+                                                    GFP_KERNEL);
+                               if (!fp->xdp_tx)
+                                       goto err;
+                               fp->type |= QEDE_FASTPATH_XDP;
+                       }
                }
        }
 
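The loop above carves the QEDE_QUEUE_CNT() entries into Rx-only, combined,
and Tx-only fastpaths, in that order, so ids of each kind stay sequential.
A standalone illustration of just that partitioning arithmetic; the input
values are made up.

    #include <stdio.h>

    int main(void)
    {
            int total = 8, fp_num_rx = 2, fp_num_tx = 2;  /* example inputs */
            int fp_rx = fp_num_rx;
            int fp_combined = total - fp_num_rx - fp_num_tx;

            for (int i = 0; i < total; i++) {
                    const char *type;

                    if (fp_rx) {
                            type = "RX";
                            fp_rx--;
                    } else if (fp_combined) {
                            type = "COMBINED";
                            fp_combined--;
                    } else {
                            type = "TX";
                    }
                    printf("fp[%d] = %s\n", i, type);
            }
            return 0;
    }
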
@@ -2410,12 +777,11 @@ static void qede_sp_task(struct work_struct *work)
                                             sp_task.work);
        struct qed_dev *cdev = edev->cdev;
 
-       mutex_lock(&edev->qede_lock);
+       __qede_lock(edev);
 
-       if (edev->state == QEDE_STATE_OPEN) {
-               if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
+       if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
+               if (edev->state == QEDE_STATE_OPEN)
                        qede_config_rx_mode(edev->ndev);
-       }
 
        if (test_and_clear_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags)) {
                struct qed_tunn_params tunn_params;
@@ -2435,16 +801,25 @@ static void qede_sp_task(struct work_struct *work)
                qed_ops->tunn_config(cdev, &tunn_params);
        }
 
-       mutex_unlock(&edev->qede_lock);
+#ifdef CONFIG_RFS_ACCEL
+       if (test_and_clear_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags)) {
+               if (edev->state == QEDE_STATE_OPEN)
+                       qede_process_arfs_filters(edev, false);
+       }
+#endif
+       __qede_unlock(edev);
 }
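
Callers feed qede_sp_task() by raising a flag bit and kicking the delayed
work; the task then handles all pending flags under __qede_lock(). The shape
below mirrors the driver's rx-mode request path (see the removed
qede_set_rx_mode() near the end of this diff); the wrapper name is
hypothetical, as the driver open-codes this at each call site.

    static void qede_request_sp_work(struct qede_dev *edev, int flag)
    {
            set_bit(flag, &edev->sp_flags);
            schedule_delayed_work(&edev->sp_task, 0);
    }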
 
 static void qede_update_pf_params(struct qed_dev *cdev)
 {
        struct qed_pf_params pf_params;
 
-       /* 64 rx + 64 tx */
+       /* 64 rx + 64 tx + 64 XDP */
        memset(&pf_params, 0, sizeof(struct qed_pf_params));
-       pf_params.eth_pf_params.num_cons = 128;
+       pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * 3;
+#ifdef CONFIG_RFS_ACCEL
+       pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
+#endif
        qed_ops->common->update_pf_params(cdev, &pf_params);
 }
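
The num_cons value above reserves one connection per status block for each
of the three queue types (Rx, Tx, XDP). Assuming, for illustration, that
MAX_SB_PER_PF_MIMD is 65 (check qed_hsi.h for the real constant), the math
lines up with the "64 rx + 64 tx + 64 XDP" comment.

    #include <stdio.h>

    #define MAX_SB_PER_PF_MIMD 65   /* assumed for illustration */

    int main(void)
    {
            int num_cons = (MAX_SB_PER_PF_MIMD - 1) * 3;

            printf("num_cons = %d (%d per queue type)\n",
                   num_cons, num_cons / 3);
            return 0;
    }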
 
@@ -2456,7 +831,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
                        bool is_vf, enum qede_probe_mode mode)
 {
        struct qed_probe_params probe_params;
-       struct qed_slowpath_params params;
+       struct qed_slowpath_params sp_params;
        struct qed_dev_eth_info dev_info;
        struct qede_dev *edev;
        struct qed_dev *cdev;
@@ -2479,14 +854,14 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
        qede_update_pf_params(cdev);
 
        /* Start the Slowpath-process */
-       memset(&params, 0, sizeof(struct qed_slowpath_params));
-       params.int_mode = QED_INT_MODE_MSIX;
-       params.drv_major = QEDE_MAJOR_VERSION;
-       params.drv_minor = QEDE_MINOR_VERSION;
-       params.drv_rev = QEDE_REVISION_VERSION;
-       params.drv_eng = QEDE_ENGINEERING_VERSION;
-       strlcpy(params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
-       rc = qed_ops->common->slowpath_start(cdev, &params);
+       memset(&sp_params, 0, sizeof(sp_params));
+       sp_params.int_mode = QED_INT_MODE_MSIX;
+       sp_params.drv_major = QEDE_MAJOR_VERSION;
+       sp_params.drv_minor = QEDE_MINOR_VERSION;
+       sp_params.drv_rev = QEDE_REVISION_VERSION;
+       sp_params.drv_eng = QEDE_ENGINEERING_VERSION;
+       strlcpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
+       rc = qed_ops->common->slowpath_start(cdev, &sp_params);
        if (rc) {
                pr_notice("Cannot start slowpath\n");
                goto err1;
@@ -2509,14 +884,34 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
 
        qede_init_ndev(edev);
 
+       rc = qede_roce_dev_add(edev);
+       if (rc)
+               goto err3;
+
+       /* Prepare the lock prior to the registration of the netdev,
+        * as once it's registered we might reach flows requiring it
+        * [it's even possible to reach a flow needing it directly
+        * from there, although it's unlikely].
+        */
+       INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
+       mutex_init(&edev->qede_lock);
        rc = register_netdev(edev->ndev);
        if (rc) {
                DP_NOTICE(edev, "Cannot register net-device\n");
-               goto err3;
+               goto err4;
        }
 
        edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION);
 
+       /* PTP not supported on VFs */
+       if (!is_vf) {
+               rc = qede_ptp_register_phc(edev);
+               if (rc) {
+                       DP_NOTICE(edev, "Cannot register PHC\n");
+                       goto err5;
+               }
+       }
+
        edev->ops->register_ops(cdev, &qede_ll_ops, edev);
 
 #ifdef CONFIG_DCB
@@ -2524,14 +919,16 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
                qede_set_dcbnl_ops(edev->ndev);
 #endif
 
-       INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
-       mutex_init(&edev->qede_lock);
        edev->rx_copybreak = QEDE_RX_HDR_SIZE;
 
        DP_INFO(edev, "Ending successfully qede probe\n");
 
        return 0;
 
+err5:
+       unregister_netdev(edev->ndev);
+err4:
+       qede_roce_dev_remove(edev);
 err3:
        free_netdev(edev->ndev);
 err2:
@@ -2577,20 +974,36 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
 
        DP_INFO(edev, "Starting qede_remove\n");
 
-       cancel_delayed_work_sync(&edev->sp_task);
        unregister_netdev(ndev);
+       cancel_delayed_work_sync(&edev->sp_task);
+
+       qede_ptp_remove(edev);
+
+       qede_roce_dev_remove(edev);
 
        edev->ops->common->set_power_state(cdev, PCI_D0);
 
        pci_set_drvdata(pdev, NULL);
 
-       free_netdev(ndev);
+       /* Release edev's reference to XDP's bpf if such exist */
+       if (edev->xdp_prog)
+               bpf_prog_put(edev->xdp_prog);
 
        /* Use global ops since we've freed edev */
        qed_ops->common->slowpath_stop(cdev);
+       if (system_state == SYSTEM_POWER_OFF)
+               return;
        qed_ops->common->remove(cdev);
 
-       pr_notice("Ending successfully qede_remove\n");
+       /* Since this can happen out-of-sync with other flows,
+        * don't release the netdevice until after slowpath stop
+        * has been called to guarantee various other contexts
+        * [e.g., QED register callbacks] won't break anything when
+        * accessing the netdevice.
+        */
+       free_netdev(ndev);
+
+       dev_info(&pdev->dev, "Ending qede_remove successfully\n");
 }
 
 static void qede_remove(struct pci_dev *pdev)
@@ -2598,6 +1011,11 @@ static void qede_remove(struct pci_dev *pdev)
        __qede_remove(pdev, QEDE_REMOVE_NORMAL);
 }
 
+static void qede_shutdown(struct pci_dev *pdev)
+{
+       __qede_remove(pdev, QEDE_REMOVE_NORMAL);
+}
+
 /* -------------------------------------------------------------------------
  * START OF LOAD / UNLOAD
  * -------------------------------------------------------------------------
@@ -2609,8 +1027,8 @@ static int qede_set_num_queues(struct qede_dev *edev)
        u16 rss_num;
 
        /* Setup queues according to possible resources*/
-       if (edev->req_rss)
-               rss_num = edev->req_rss;
+       if (edev->req_queues)
+               rss_num = edev->req_queues;
        else
                rss_num = netif_get_num_default_rss_queues() *
                          edev->dev_info.common.num_hwfns;
@@ -2620,11 +1038,15 @@ static int qede_set_num_queues(struct qede_dev *edev)
        rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
        if (rc > 0) {
                /* Managed to request interrupts for our queues */
-               edev->num_rss = rc;
+               edev->num_queues = rc;
                DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
-                       QEDE_RSS_CNT(edev), rss_num);
+                       QEDE_QUEUE_CNT(edev), rss_num);
                rc = 0;
        }
+
+       edev->fp_num_tx = edev->req_num_tx;
+       edev->fp_num_rx = edev->req_num_rx;
+
        return rc;
 }
 
@@ -2638,16 +1060,14 @@ static void qede_free_mem_sb(struct qede_dev *edev,
 
 /* This function allocates fast-path status block memory */
 static int qede_alloc_mem_sb(struct qede_dev *edev,
-                            struct qed_sb_info *sb_info,
-                            u16 sb_id)
+                            struct qed_sb_info *sb_info, u16 sb_id)
 {
        struct status_block *sb_virt;
        dma_addr_t sb_phys;
        int rc;
 
        sb_virt = dma_alloc_coherent(&edev->pdev->dev,
-                                    sizeof(*sb_virt),
-                                    &sb_phys, GFP_KERNEL);
+                                    sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
        if (!sb_virt) {
                DP_ERR(edev, "Status block allocation failed\n");
                return -ENOMEM;
@@ -2679,16 +1099,15 @@ static void qede_free_rx_buffers(struct qede_dev *edev,
                data = rx_buf->data;
 
                dma_unmap_page(&edev->pdev->dev,
-                              rx_buf->mapping,
-                              PAGE_SIZE, DMA_FROM_DEVICE);
+                              rx_buf->mapping, PAGE_SIZE, rxq->data_direction);
 
                rx_buf->data = NULL;
                __free_page(data);
        }
 }
 
-static void qede_free_sge_mem(struct qede_dev *edev,
-                             struct qede_rx_queue *rxq) {
+static void qede_free_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
+{
        int i;
 
        if (edev->gro_disable)
@@ -2696,7 +1115,7 @@ static void qede_free_sge_mem(struct qede_dev *edev,
 
        for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
                struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
-               struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
+               struct sw_rx_data *replace_buf = &tpa_info->buffer;
 
                if (replace_buf->data) {
                        dma_unmap_page(&edev->pdev->dev,
@@ -2707,72 +1126,30 @@ static void qede_free_sge_mem(struct qede_dev *edev,
        }
 }
 
-static void qede_free_mem_rxq(struct qede_dev *edev,
-                             struct qede_rx_queue *rxq)
-{
-       qede_free_sge_mem(edev, rxq);
-
-       /* Free rx buffers */
-       qede_free_rx_buffers(edev, rxq);
-
-       /* Free the parallel SW ring */
-       kfree(rxq->sw_rx_ring);
-
-       /* Free the real RQ ring used by FW */
-       edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
-       edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
-}
-
-static int qede_alloc_rx_buffer(struct qede_dev *edev,
-                               struct qede_rx_queue *rxq)
+static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
 {
-       struct sw_rx_data *sw_rx_data;
-       struct eth_rx_bd *rx_bd;
-       dma_addr_t mapping;
-       struct page *data;
-       u16 rx_buf_size;
-
-       rx_buf_size = rxq->rx_buf_size;
-
-       data = alloc_pages(GFP_ATOMIC, 0);
-       if (unlikely(!data)) {
-               DP_NOTICE(edev, "Failed to allocate Rx data [page]\n");
-               return -ENOMEM;
-       }
-
-       /* Map the entire page as it would be used
-        * for multiple RX buffer segment size mapping.
-        */
-       mapping = dma_map_page(&edev->pdev->dev, data, 0,
-                              PAGE_SIZE, DMA_FROM_DEVICE);
-       if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
-               __free_page(data);
-               DP_NOTICE(edev, "Failed to map Rx buffer\n");
-               return -ENOMEM;
-       }
-
-       sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
-       sw_rx_data->page_offset = 0;
-       sw_rx_data->data = data;
-       sw_rx_data->mapping = mapping;
+       qede_free_sge_mem(edev, rxq);
 
-       /* Advance PROD and get BD pointer */
-       rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
-       WARN_ON(!rx_bd);
-       rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
-       rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping));
+       /* Free rx buffers */
+       qede_free_rx_buffers(edev, rxq);
 
-       rxq->sw_rx_prod++;
+       /* Free the parallel SW ring */
+       kfree(rxq->sw_rx_ring);
 
-       return 0;
+       /* Free the real RQ ring used by FW */
+       edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
+       edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
 }
 
-static int qede_alloc_sge_mem(struct qede_dev *edev,
-                             struct qede_rx_queue *rxq)
+static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
 {
        dma_addr_t mapping;
        int i;
 
+       /* Don't perform FW aggregations in case of XDP */
+       if (edev->xdp_prog)
+               edev->gro_disable = 1;
+
        if (edev->gro_disable)
                return 0;
 
@@ -2783,7 +1160,7 @@ static int qede_alloc_sge_mem(struct qede_dev *edev,
 
        for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
                struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
-               struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
+               struct sw_rx_data *replace_buf = &tpa_info->buffer;
 
                replace_buf->data = alloc_pages(GFP_ATOMIC, 0);
                if (unlikely(!replace_buf->data)) {
@@ -2793,7 +1170,7 @@ static int qede_alloc_sge_mem(struct qede_dev *edev,
                }
 
                mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0,
-                                      rxq->rx_buf_size, DMA_FROM_DEVICE);
+                                      PAGE_SIZE, DMA_FROM_DEVICE);
                if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
                        DP_NOTICE(edev,
                                  "Failed to map TPA replacement buffer\n");
@@ -2801,10 +1178,9 @@ static int qede_alloc_sge_mem(struct qede_dev *edev,
                }
 
                replace_buf->mapping = mapping;
-               tpa_info->replace_buf.page_offset = 0;
-
-               tpa_info->replace_buf_mapping = mapping;
-               tpa_info->agg_state = QEDE_AGG_STATE_NONE;
+               tpa_info->buffer.page_offset = 0;
+               tpa_info->buffer_mapping = mapping;
+               tpa_info->state = QEDE_AGG_STATE_NONE;
        }
 
        return 0;
@@ -2815,20 +1191,26 @@ err:
 }
 
 /* This function allocates all memory needed per Rx queue */
-static int qede_alloc_mem_rxq(struct qede_dev *edev,
-                             struct qede_rx_queue *rxq)
+static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
 {
        int i, rc, size;
 
        rxq->num_rx_buffers = edev->q_num_rx_buffers;
 
-       rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD +
-                          edev->ndev->mtu;
-       if (rxq->rx_buf_size > PAGE_SIZE)
-               rxq->rx_buf_size = PAGE_SIZE;
+       rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;
+       rxq->rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : 0;
+
+       /* Make sure that the headroom and payload fit in a single page */
+       if (rxq->rx_buf_size + rxq->rx_headroom > PAGE_SIZE)
+               rxq->rx_buf_size = PAGE_SIZE - rxq->rx_headroom;
 
-       /* Segment size to spilt a page in multiple equal parts */
-       rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size);
+       /* Segment size to split a page into multiple equal parts,
+        * unless XDP is used in which case we'd use the entire page.
+        */
+       if (!edev->xdp_prog)
+               rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size);
+       else
+               rxq->rx_buf_seg_size = PAGE_SIZE;
 
        /* Allocate the parallel driver ring for Rx buffers */
        size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
@@ -2863,8 +1245,9 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
                goto err;
 
        /* Allocate buffers for the Rx ring */
+       rxq->filled_buffers = 0;
        for (i = 0; i < rxq->num_rx_buffers; i++) {
-               rc = qede_alloc_rx_buffer(edev, rxq);
+               rc = qede_alloc_rx_buffer(rxq, false);
                if (rc) {
                        DP_ERR(edev,
                               "Rx buffers allocation failed at index %d\n", i);
@@ -2877,38 +1260,44 @@ err:
        return rc;
 }
 
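The Rx sizing rules above can be checked in isolation: without XDP a page is
split into power-of-two segments, with XDP the whole page backs one buffer
behind the headroom. A standalone sketch; the ETH_OVERHEAD and
XDP_PACKET_HEADROOM values are assumptions, not taken from the headers.

    #include <stdio.h>

    #define PAGE_SIZE 4096
    #define NET_IP_ALIGN 2
    #define ETH_OVERHEAD 18                 /* assumed: Ethernet hdr + FCS */
    #define XDP_PACKET_HEADROOM 256         /* assumed */

    static unsigned int roundup_pow_of_two(unsigned int v)
    {
            unsigned int r = 1;

            while (r < v)
                    r <<= 1;
            return r;
    }

    int main(void)
    {
            int mtu = 1500, xdp = 0;        /* flip xdp to 1 for the XDP case */
            unsigned int headroom = xdp ? XDP_PACKET_HEADROOM : 0;
            unsigned int buf = NET_IP_ALIGN + ETH_OVERHEAD + mtu;
            unsigned int seg;

            if (buf + headroom > PAGE_SIZE)
                    buf = PAGE_SIZE - headroom;
            seg = xdp ? PAGE_SIZE : roundup_pow_of_two(buf);

            printf("rx_buf_size=%u seg_size=%u buffers/page=%u\n",
                   buf, seg, PAGE_SIZE / seg);
            return 0;
    }
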
-static void qede_free_mem_txq(struct qede_dev *edev,
-                             struct qede_tx_queue *txq)
+static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
 {
        /* Free the parallel SW ring */
-       kfree(txq->sw_tx_ring);
+       if (txq->is_xdp)
+               kfree(txq->sw_tx_ring.xdp);
+       else
+               kfree(txq->sw_tx_ring.skbs);
 
        /* Free the real RQ ring used by FW */
        edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
 }
 
 /* This function allocates all memory needed per Tx queue */
-static int qede_alloc_mem_txq(struct qede_dev *edev,
-                             struct qede_tx_queue *txq)
+static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
 {
-       int size, rc;
        union eth_tx_bd_types *p_virt;
+       int size, rc;
 
        txq->num_tx_buffers = edev->q_num_tx_buffers;
 
        /* Allocate the parallel driver ring for Tx buffers */
-       size = sizeof(*txq->sw_tx_ring) * NUM_TX_BDS_MAX;
-       txq->sw_tx_ring = kzalloc(size, GFP_KERNEL);
-       if (!txq->sw_tx_ring) {
-               DP_NOTICE(edev, "Tx buffers ring allocation failed\n");
-               goto err;
+       if (txq->is_xdp) {
+               size = sizeof(*txq->sw_tx_ring.xdp) * TX_RING_SIZE;
+               txq->sw_tx_ring.xdp = kzalloc(size, GFP_KERNEL);
+               if (!txq->sw_tx_ring.xdp)
+                       goto err;
+       } else {
+               size = sizeof(*txq->sw_tx_ring.skbs) * TX_RING_SIZE;
+               txq->sw_tx_ring.skbs = kzalloc(size, GFP_KERNEL);
+               if (!txq->sw_tx_ring.skbs)
+                       goto err;
        }
 
        rc = edev->ops->common->chain_alloc(edev->cdev,
                                            QED_CHAIN_USE_TO_CONSUME_PRODUCE,
                                            QED_CHAIN_MODE_PBL,
                                            QED_CHAIN_CNT_TYPE_U16,
-                                           NUM_TX_BDS_MAX,
+                                           TX_RING_SIZE,
                                            sizeof(*p_virt), &txq->tx_pbl);
        if (rc)
                goto err;
@@ -2921,43 +1310,47 @@ err:
 }
 
 /* This function frees all memory of a single fp */
-static void qede_free_mem_fp(struct qede_dev *edev,
-                            struct qede_fastpath *fp)
+static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
 {
-       int tc;
-
        qede_free_mem_sb(edev, fp->sb_info);
 
-       qede_free_mem_rxq(edev, fp->rxq);
+       if (fp->type & QEDE_FASTPATH_RX)
+               qede_free_mem_rxq(edev, fp->rxq);
 
-       for (tc = 0; tc < edev->num_tc; tc++)
-               qede_free_mem_txq(edev, &fp->txqs[tc]);
+       if (fp->type & QEDE_FASTPATH_TX)
+               qede_free_mem_txq(edev, fp->txq);
 }
 
 /* This function allocates all memory needed for a single fp (i.e. an entity
- * which contains status block, one rx queue and multiple per-TC tx queues.
+ * which contains status block, one rx queue and/or multiple per-TC tx queues.
  */
-static int qede_alloc_mem_fp(struct qede_dev *edev,
-                            struct qede_fastpath *fp)
+static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
 {
-       int rc, tc;
+       int rc = 0;
 
-       rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->rss_id);
+       rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id);
        if (rc)
-               goto err;
+               goto out;
 
-       rc = qede_alloc_mem_rxq(edev, fp->rxq);
-       if (rc)
-               goto err;
+       if (fp->type & QEDE_FASTPATH_RX) {
+               rc = qede_alloc_mem_rxq(edev, fp->rxq);
+               if (rc)
+                       goto out;
+       }
 
-       for (tc = 0; tc < edev->num_tc; tc++) {
-               rc = qede_alloc_mem_txq(edev, &fp->txqs[tc]);
+       if (fp->type & QEDE_FASTPATH_XDP) {
+               rc = qede_alloc_mem_txq(edev, fp->xdp_tx);
                if (rc)
-                       goto err;
+                       goto out;
        }
 
-       return 0;
-err:
+       if (fp->type & QEDE_FASTPATH_TX) {
+               rc = qede_alloc_mem_txq(edev, fp->txq);
+               if (rc)
+                       goto out;
+       }
+
+out:
        return rc;
 }
 
@@ -2965,7 +1358,7 @@ static void qede_free_mem_load(struct qede_dev *edev)
 {
        int i;
 
-       for_each_rss(i) {
+       for_each_queue(i) {
                struct qede_fastpath *fp = &edev->fp_array[i];
 
                qede_free_mem_fp(edev, fp);
@@ -2975,16 +1368,16 @@ static void qede_free_mem_load(struct qede_dev *edev)
 /* This function allocates all qede memory at NIC load. */
 static int qede_alloc_mem_load(struct qede_dev *edev)
 {
-       int rc = 0, rss_id;
+       int rc = 0, queue_id;
 
-       for (rss_id = 0; rss_id < QEDE_RSS_CNT(edev); rss_id++) {
-               struct qede_fastpath *fp = &edev->fp_array[rss_id];
+       for (queue_id = 0; queue_id < QEDE_QUEUE_CNT(edev); queue_id++) {
+               struct qede_fastpath *fp = &edev->fp_array[queue_id];
 
                rc = qede_alloc_mem_fp(edev, fp);
                if (rc) {
                        DP_ERR(edev,
                               "Failed to allocate memory for fastpath - rss id = %d\n",
-                              rss_id);
+                              queue_id);
                        qede_free_mem_load(edev);
                        return rc;
                }
@@ -2996,30 +1389,41 @@ static int qede_alloc_mem_load(struct qede_dev *edev)
 /* This function inits fp content and resets the SB, RXQ and TXQ structures */
 static void qede_init_fp(struct qede_dev *edev)
 {
-       int rss_id, txq_index, tc;
+       int queue_id, rxq_index = 0, txq_index = 0;
        struct qede_fastpath *fp;
 
-       for_each_rss(rss_id) {
-               fp = &edev->fp_array[rss_id];
+       for_each_queue(queue_id) {
+               fp = &edev->fp_array[queue_id];
 
                fp->edev = edev;
-               fp->rss_id = rss_id;
+               fp->id = queue_id;
 
-               memset((void *)&fp->napi, 0, sizeof(fp->napi));
+               if (fp->type & QEDE_FASTPATH_XDP) {
+                       fp->xdp_tx->index = QEDE_TXQ_IDX_TO_XDP(edev,
+                                                               rxq_index);
+                       fp->xdp_tx->is_xdp = 1;
+               }
 
-               memset((void *)fp->sb_info, 0, sizeof(*fp->sb_info));
+               if (fp->type & QEDE_FASTPATH_RX) {
+                       fp->rxq->rxq_id = rxq_index++;
 
-               memset((void *)fp->rxq, 0, sizeof(*fp->rxq));
-               fp->rxq->rxq_id = rss_id;
+                       /* Determine how to map buffers for this queue */
+                       if (fp->type & QEDE_FASTPATH_XDP)
+                               fp->rxq->data_direction = DMA_BIDIRECTIONAL;
+                       else
+                               fp->rxq->data_direction = DMA_FROM_DEVICE;
+                       fp->rxq->dev = &edev->pdev->dev;
+               }
 
-               memset((void *)fp->txqs, 0, (edev->num_tc * sizeof(*fp->txqs)));
-               for (tc = 0; tc < edev->num_tc; tc++) {
-                       txq_index = tc * QEDE_RSS_CNT(edev) + rss_id;
-                       fp->txqs[tc].index = txq_index;
+               if (fp->type & QEDE_FASTPATH_TX) {
+                       fp->txq->index = txq_index++;
+                       if (edev->dev_info.is_legacy)
+                               fp->txq->is_legacy = 1;
+                       fp->txq->dev = &edev->pdev->dev;
                }
 
                snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
-                        edev->ndev->name, rss_id);
+                        edev->ndev->name, queue_id);
        }
 
        edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO);
@@ -3029,12 +1433,13 @@ static int qede_set_real_num_queues(struct qede_dev *edev)
 {
        int rc = 0;
 
-       rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_CNT(edev));
+       rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_COUNT(edev));
        if (rc) {
                DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
                return rc;
        }
-       rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_CNT(edev));
+
+       rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_COUNT(edev));
        if (rc) {
                DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
                return rc;
@@ -3047,7 +1452,7 @@ static void qede_napi_disable_remove(struct qede_dev *edev)
 {
        int i;
 
-       for_each_rss(i) {
+       for_each_queue(i) {
                napi_disable(&edev->fp_array[i].napi);
 
                netif_napi_del(&edev->fp_array[i].napi);
@@ -3059,7 +1464,7 @@ static void qede_napi_add_enable(struct qede_dev *edev)
        int i;
 
        /* Add NAPI objects */
-       for_each_rss(i) {
+       for_each_queue(i) {
                netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
                               qede_poll, NAPI_POLL_WEIGHT);
                napi_enable(&edev->fp_array[i].napi);
@@ -3088,14 +1493,26 @@ static int qede_req_msix_irqs(struct qede_dev *edev)
        int i, rc;
 
        /* Sanitize number of interrupts == number of prepared RSS queues */
-       if (QEDE_RSS_CNT(edev) > edev->int_info.msix_cnt) {
+       if (QEDE_QUEUE_CNT(edev) > edev->int_info.msix_cnt) {
                DP_ERR(edev,
                       "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
-                      QEDE_RSS_CNT(edev), edev->int_info.msix_cnt);
+                      QEDE_QUEUE_CNT(edev), edev->int_info.msix_cnt);
                return -EINVAL;
        }
 
-       for (i = 0; i < QEDE_RSS_CNT(edev); i++) {
+       for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
+#ifdef CONFIG_RFS_ACCEL
+               struct qede_fastpath *fp = &edev->fp_array[i];
+
+               if (edev->ndev->rx_cpu_rmap && (fp->type & QEDE_FASTPATH_RX)) {
+                       rc = irq_cpu_rmap_add(edev->ndev->rx_cpu_rmap,
+                                             edev->int_info.msix[i].vector);
+                       if (rc) {
+                               DP_ERR(edev, "Failed to add CPU rmap\n");
+                               qede_free_arfs(edev);
+                       }
+               }
+#endif
                rc = request_irq(edev->int_info.msix[i].vector,
                                 qede_msix_fp_int, 0, edev->fp_array[i].name,
                                 &edev->fp_array[i]);
@@ -3140,18 +1557,17 @@ static int qede_setup_irqs(struct qede_dev *edev)
 
                /* qed should learn to receive the RSS ids and callbacks */
                ops = edev->ops->common;
-               for (i = 0; i < QEDE_RSS_CNT(edev); i++)
+               for (i = 0; i < QEDE_QUEUE_CNT(edev); i++)
                        ops->simd_handler_config(edev->cdev,
                                                 &edev->fp_array[i], i,
                                                 qede_simd_fp_handler);
-               edev->int_info.used_cnt = QEDE_RSS_CNT(edev);
+               edev->int_info.used_cnt = QEDE_QUEUE_CNT(edev);
        }
        return 0;
 }
 
 static int qede_drain_txq(struct qede_dev *edev,
-                         struct qede_tx_queue *txq,
-                         bool allow_drain)
+                         struct qede_tx_queue *txq, bool allow_drain)
 {
        int rc, cnt = 1000;
 
@@ -3183,65 +1599,81 @@ static int qede_drain_txq(struct qede_dev *edev,
        return 0;
 }
 
+static int qede_stop_txq(struct qede_dev *edev,
+                        struct qede_tx_queue *txq, int rss_id)
+{
+       return edev->ops->q_tx_stop(edev->cdev, rss_id, txq->handle);
+}
+
 static int qede_stop_queues(struct qede_dev *edev)
 {
-       struct qed_update_vport_params vport_update_params;
+       struct qed_update_vport_params *vport_update_params;
        struct qed_dev *cdev = edev->cdev;
-       int rc, tc, i;
+       struct qede_fastpath *fp;
+       int rc, i;
 
        /* Disable the vport */
-       memset(&vport_update_params, 0, sizeof(vport_update_params));
-       vport_update_params.vport_id = 0;
-       vport_update_params.update_vport_active_flg = 1;
-       vport_update_params.vport_active_flg = 0;
-       vport_update_params.update_rss_flg = 0;
+       vport_update_params = vzalloc(sizeof(*vport_update_params));
+       if (!vport_update_params)
+               return -ENOMEM;
+
+       vport_update_params->vport_id = 0;
+       vport_update_params->update_vport_active_flg = 1;
+       vport_update_params->vport_active_flg = 0;
+       vport_update_params->update_rss_flg = 0;
+
+       rc = edev->ops->vport_update(cdev, vport_update_params);
+       vfree(vport_update_params);
 
-       rc = edev->ops->vport_update(cdev, &vport_update_params);
        if (rc) {
                DP_ERR(edev, "Failed to update vport\n");
                return rc;
        }
 
        /* Flush Tx queues. If needed, request drain from MCP */
-       for_each_rss(i) {
-               struct qede_fastpath *fp = &edev->fp_array[i];
+       for_each_queue(i) {
+               fp = &edev->fp_array[i];
 
-               for (tc = 0; tc < edev->num_tc; tc++) {
-                       struct qede_tx_queue *txq = &fp->txqs[tc];
+               if (fp->type & QEDE_FASTPATH_TX) {
+                       rc = qede_drain_txq(edev, fp->txq, true);
+                       if (rc)
+                               return rc;
+               }
 
-                       rc = qede_drain_txq(edev, txq, true);
+               if (fp->type & QEDE_FASTPATH_XDP) {
+                       rc = qede_drain_txq(edev, fp->xdp_tx, true);
                        if (rc)
                                return rc;
                }
        }
 
-       /* Stop all Queues in reverse order*/
-       for (i = QEDE_RSS_CNT(edev) - 1; i >= 0; i--) {
-               struct qed_stop_rxq_params rx_params;
+       /* Stop all Queues in reverse order */
+       for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) {
+               fp = &edev->fp_array[i];
 
-               /* Stop the Tx Queue(s)*/
-               for (tc = 0; tc < edev->num_tc; tc++) {
-                       struct qed_stop_txq_params tx_params;
+               /* Stop the Tx Queue(s) */
+               if (fp->type & QEDE_FASTPATH_TX) {
+                       rc = qede_stop_txq(edev, fp->txq, i);
+                       if (rc)
+                               return rc;
+               }
 
-                       tx_params.rss_id = i;
-                       tx_params.tx_queue_id = tc * QEDE_RSS_CNT(edev) + i;
-                       rc = edev->ops->q_tx_stop(cdev, &tx_params);
+               /* Stop the Rx Queue */
+               if (fp->type & QEDE_FASTPATH_RX) {
+                       rc = edev->ops->q_rx_stop(cdev, i, fp->rxq->handle);
                        if (rc) {
-                               DP_ERR(edev, "Failed to stop TXQ #%d\n",
-                                      tx_params.tx_queue_id);
+                               DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
                                return rc;
                        }
                }
 
-               /* Stop the Rx Queue*/
-               memset(&rx_params, 0, sizeof(rx_params));
-               rx_params.rss_id = i;
-               rx_params.rx_queue_id = i;
+               /* Stop the XDP forwarding queue */
+               if (fp->type & QEDE_FASTPATH_XDP) {
+                       rc = qede_stop_txq(edev, fp->xdp_tx, i);
+                       if (rc)
+                               return rc;
 
-               rc = edev->ops->q_rx_stop(cdev, &rx_params);
-               if (rc) {
-                       DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
-                       return rc;
+                       bpf_prog_put(fp->rxq->xdp_prog);
                }
        }
 
@@ -3253,23 +1685,74 @@ static int qede_stop_queues(struct qede_dev *edev)
        return rc;
 }
 
+static int qede_start_txq(struct qede_dev *edev,
+                         struct qede_fastpath *fp,
+                         struct qede_tx_queue *txq, u8 rss_id, u16 sb_idx)
+{
+       dma_addr_t phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl);
+       u32 page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);
+       struct qed_queue_start_common_params params;
+       struct qed_txq_start_ret_params ret_params;
+       int rc;
+
+       memset(&params, 0, sizeof(params));
+       memset(&ret_params, 0, sizeof(ret_params));
+
+       /* Let the XDP queue share the queue-zone with one of the regular txq.
+        * We don't really care about its coalescing.
+        */
+       if (txq->is_xdp)
+               params.queue_id = QEDE_TXQ_XDP_TO_IDX(edev, txq);
+       else
+               params.queue_id = txq->index;
+
+       params.sb = fp->sb_info->igu_sb_id;
+       params.sb_idx = sb_idx;
+
+       rc = edev->ops->q_tx_start(edev->cdev, rss_id, &params, phys_table,
+                                  page_cnt, &ret_params);
+       if (rc) {
+               DP_ERR(edev, "Start TXQ #%d failed %d\n", txq->index, rc);
+               return rc;
+       }
+
+       txq->doorbell_addr = ret_params.p_doorbell;
+       txq->handle = ret_params.p_handle;
+
+       /* Determine the FW consumer address associated */
+       txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[sb_idx];
+
+       /* Prepare the doorbell parameters */
+       SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST, DB_DEST_XCM);
+       SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
+       SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_VAL_SEL,
+                 DQ_XCM_ETH_TX_BD_PROD_CMD);
+       txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
+
+       return rc;
+}
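
The SET_FIELD() calls above pack the doorbell destination, aggregation
command and value-select into tx_db.data.params once at queue start, so the
fastpath only has to update the producer value. A standalone sketch of that
mask/shift packing; the MASK/SHIFT values below are invented, the real ones
come from the qed HSI headers.

    #include <stdio.h>
    #include <stdint.h>

    #define DEST_MASK       0x3
    #define DEST_SHIFT      0
    #define AGG_CMD_MASK    0x3
    #define AGG_CMD_SHIFT   2

    #define SET_FIELD(value, name, val)                                  \
            ((value) = ((value) & ~(name##_MASK << name##_SHIFT)) |      \
                       (((val) & name##_MASK) << name##_SHIFT))

    int main(void)
    {
            uint8_t params = 0;

            SET_FIELD(params, DEST, 1);     /* e.g. DB_DEST_XCM */
            SET_FIELD(params, AGG_CMD, 2);  /* e.g. DB_AGG_CMD_SET */
            printf("params=%#x\n", (unsigned)params);  /* prints 0x9 */
            return 0;
    }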
+
 static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
 {
-       int rc, tc, i;
        int vlan_removal_en = 1;
        struct qed_dev *cdev = edev->cdev;
-       struct qed_update_vport_params vport_update_params;
-       struct qed_queue_start_common_params q_params;
        struct qed_dev_info *qed_info = &edev->dev_info.common;
+       struct qed_update_vport_params *vport_update_params;
+       struct qed_queue_start_common_params q_params;
        struct qed_start_vport_params start = {0};
-       bool reset_rss_indir = false;
+       int rc, i;
 
-       if (!edev->num_rss) {
+       if (!edev->num_queues) {
                DP_ERR(edev,
                       "Cannot update V-VPORT as active as there are no Rx queues\n");
                return -EINVAL;
        }
 
+       vport_update_params = vzalloc(sizeof(*vport_update_params));
+       if (!vport_update_params)
+               return -ENOMEM;
+
+       start.handle_ptp_pkts = !!(edev->ptp);
        start.gro_enable = !edev->gro_disable;
        start.mtu = edev->ndev->mtu;
        start.vport_id = 0;
@@ -3281,175 +1764,118 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
 
        if (rc) {
                DP_ERR(edev, "Start V-PORT failed %d\n", rc);
-               return rc;
+               goto out;
        }
 
        DP_VERBOSE(edev, NETIF_MSG_IFUP,
                   "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
                   start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);
 
-       for_each_rss(i) {
+       for_each_queue(i) {
                struct qede_fastpath *fp = &edev->fp_array[i];
-               dma_addr_t phys_table = fp->rxq->rx_comp_ring.pbl.p_phys_table;
-
-               memset(&q_params, 0, sizeof(q_params));
-               q_params.rss_id = i;
-               q_params.queue_id = i;
-               q_params.vport_id = 0;
-               q_params.sb = fp->sb_info->igu_sb_id;
-               q_params.sb_idx = RX_PI;
-
-               rc = edev->ops->q_rx_start(cdev, &q_params,
-                                          fp->rxq->rx_buf_size,
-                                          fp->rxq->rx_bd_ring.p_phys_addr,
-                                          phys_table,
-                                          fp->rxq->rx_comp_ring.page_cnt,
-                                          &fp->rxq->hw_rxq_prod_addr);
-               if (rc) {
-                       DP_ERR(edev, "Start RXQ #%d failed %d\n", i, rc);
-                       return rc;
-               }
-
-               fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];
-
-               qede_update_rx_prod(edev, fp->rxq);
+               dma_addr_t p_phys_table;
+               u32 page_cnt;
 
-               for (tc = 0; tc < edev->num_tc; tc++) {
-                       struct qede_tx_queue *txq = &fp->txqs[tc];
-                       int txq_index = tc * QEDE_RSS_CNT(edev) + i;
+               if (fp->type & QEDE_FASTPATH_RX) {
+                       struct qed_rxq_start_ret_params ret_params;
+                       struct qede_rx_queue *rxq = fp->rxq;
+                       __le16 *val;
 
+                       memset(&ret_params, 0, sizeof(ret_params));
                        memset(&q_params, 0, sizeof(q_params));
-                       q_params.rss_id = i;
-                       q_params.queue_id = txq_index;
+                       q_params.queue_id = rxq->rxq_id;
                        q_params.vport_id = 0;
                        q_params.sb = fp->sb_info->igu_sb_id;
-                       q_params.sb_idx = TX_PI(tc);
+                       q_params.sb_idx = RX_PI;
 
-                       rc = edev->ops->q_tx_start(cdev, &q_params,
-                                                  txq->tx_pbl.pbl.p_phys_table,
-                                                  txq->tx_pbl.page_cnt,
-                                                  &txq->doorbell_addr);
+                       p_phys_table =
+                           qed_chain_get_pbl_phys(&rxq->rx_comp_ring);
+                       page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring);
+
+                       rc = edev->ops->q_rx_start(cdev, i, &q_params,
+                                                  rxq->rx_buf_size,
+                                                  rxq->rx_bd_ring.p_phys_addr,
+                                                  p_phys_table,
+                                                  page_cnt, &ret_params);
                        if (rc) {
-                               DP_ERR(edev, "Start TXQ #%d failed %d\n",
-                                      txq_index, rc);
-                               return rc;
+                               DP_ERR(edev, "Start RXQ #%d failed %d\n", i,
+                                      rc);
+                               goto out;
                        }
 
-                       txq->hw_cons_ptr =
-                               &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
-                       SET_FIELD(txq->tx_db.data.params,
-                                 ETH_DB_DATA_DEST, DB_DEST_XCM);
-                       SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
-                                 DB_AGG_CMD_SET);
-                       SET_FIELD(txq->tx_db.data.params,
-                                 ETH_DB_DATA_AGG_VAL_SEL,
-                                 DQ_XCM_ETH_TX_BD_PROD_CMD);
-
-                       txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
-               }
-       }
-
-       /* Prepare and send the vport enable */
-       memset(&vport_update_params, 0, sizeof(vport_update_params));
-       vport_update_params.vport_id = start.vport_id;
-       vport_update_params.update_vport_active_flg = 1;
-       vport_update_params.vport_active_flg = 1;
-
-       if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) &&
-           qed_info->tx_switching) {
-               vport_update_params.update_tx_switching_flg = 1;
-               vport_update_params.tx_switching_flg = 1;
-       }
+                       /* Use the return parameters */
+                       rxq->hw_rxq_prod_addr = ret_params.p_prod;
+                       rxq->handle = ret_params.p_handle;
 
-       /* Fill struct with RSS params */
-       if (QEDE_RSS_CNT(edev) > 1) {
-               vport_update_params.update_rss_flg = 1;
+                       val = &fp->sb_info->sb_virt->pi_array[RX_PI];
+                       rxq->hw_cons_ptr = val;
 
-               /* Need to validate current RSS config uses valid entries */
-               for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
-                       if (edev->rss_params.rss_ind_table[i] >=
-                           edev->num_rss) {
-                               reset_rss_indir = true;
-                               break;
-                       }
+                       qede_update_rx_prod(edev, rxq);
                }
 
-               if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) ||
-                   reset_rss_indir) {
-                       u16 val;
-
-                       for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
-                               u16 indir_val;
+               if (fp->type & QEDE_FASTPATH_XDP) {
+                       rc = qede_start_txq(edev, fp, fp->xdp_tx, i, XDP_PI);
+                       if (rc)
+                               goto out;
 
-                               val = QEDE_RSS_CNT(edev);
-                               indir_val = ethtool_rxfh_indir_default(i, val);
-                               edev->rss_params.rss_ind_table[i] = indir_val;
+                       fp->rxq->xdp_prog = bpf_prog_add(edev->xdp_prog, 1);
+                       if (IS_ERR(fp->rxq->xdp_prog)) {
+                               rc = PTR_ERR(fp->rxq->xdp_prog);
+                               fp->rxq->xdp_prog = NULL;
+                               goto out;
                        }
-                       edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
                }
 
-               if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) {
-                       netdev_rss_key_fill(edev->rss_params.rss_key,
-                                           sizeof(edev->rss_params.rss_key));
-                       edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
-               }
-
-               if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) {
-                       edev->rss_params.rss_caps = QED_RSS_IPV4 |
-                                                   QED_RSS_IPV6 |
-                                                   QED_RSS_IPV4_TCP |
-                                                   QED_RSS_IPV6_TCP;
-                       edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
+               if (fp->type & QEDE_FASTPATH_TX) {
+                       rc = qede_start_txq(edev, fp, fp->txq, i, TX_PI(0));
+                       if (rc)
+                               goto out;
                }
-
-               memcpy(&vport_update_params.rss_params, &edev->rss_params,
-                      sizeof(vport_update_params.rss_params));
-       } else {
-               memset(&vport_update_params.rss_params, 0,
-                      sizeof(vport_update_params.rss_params));
-       }
-
-       rc = edev->ops->vport_update(cdev, &vport_update_params);
-       if (rc) {
-               DP_ERR(edev, "Update V-PORT failed %d\n", rc);
-               return rc;
        }
 
-       return 0;
-}
+       /* Prepare and send the vport enable */
+       vport_update_params->vport_id = start.vport_id;
+       vport_update_params->update_vport_active_flg = 1;
+       vport_update_params->vport_active_flg = 1;
 
-static int qede_set_mcast_rx_mac(struct qede_dev *edev,
-                                enum qed_filter_xcast_params_type opcode,
-                                unsigned char *mac, int num_macs)
-{
-       struct qed_filter_params filter_cmd;
-       int i;
+       if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) &&
+           qed_info->tx_switching) {
+               vport_update_params->update_tx_switching_flg = 1;
+               vport_update_params->tx_switching_flg = 1;
+       }
 
-       memset(&filter_cmd, 0, sizeof(filter_cmd));
-       filter_cmd.type = QED_FILTER_TYPE_MCAST;
-       filter_cmd.filter.mcast.type = opcode;
-       filter_cmd.filter.mcast.num = num_macs;
+       qede_fill_rss_params(edev, &vport_update_params->rss_params,
+                            &vport_update_params->update_rss_flg);
 
-       for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
-               ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);
+       rc = edev->ops->vport_update(cdev, vport_update_params);
+       if (rc)
+               DP_ERR(edev, "Update V-PORT failed %d\n", rc);
 
-       return edev->ops->filter_config(edev->cdev, &filter_cmd);
+out:
+       vfree(vport_update_params);
+       return rc;
 }
 
 enum qede_unload_mode {
        QEDE_UNLOAD_NORMAL,
 };
 
-static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode)
+static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
+                       bool is_locked)
 {
        struct qed_link_params link_params;
        int rc;
 
        DP_INFO(edev, "Starting qede unload\n");
 
-       mutex_lock(&edev->qede_lock);
+       if (!is_locked)
+               __qede_lock(edev);
+
+       qede_roce_dev_event_close(edev);
        edev->state = QEDE_STATE_CLOSED;
 
+       qede_ptp_stop(edev);
+
        /* Close OS Tx */
        netif_tx_disable(edev->ndev);
        netif_carrier_off(edev->ndev);
@@ -3468,7 +1894,12 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode)
 
        qede_vlan_mark_nonconfigured(edev);
        edev->ops->fastpath_stop(edev->cdev);
-
+#ifdef CONFIG_RFS_ACCEL
+       if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) {
+               qede_poll_for_freeing_arfs_filters(edev);
+               qede_free_arfs(edev);
+       }
+#endif
        /* Release the interrupts */
        qede_sync_free_irqs(edev);
        edev->ops->common->set_fp_int(edev->cdev, 0);
@@ -3479,7 +1910,8 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode)
        qede_free_fp_array(edev);
 
 out:
-       mutex_unlock(&edev->qede_lock);
+       if (!is_locked)
+               __qede_unlock(edev);
        DP_INFO(edev, "Ending qede unload\n");
 }
 
@@ -3488,34 +1920,44 @@ enum qede_load_mode {
        QEDE_LOAD_RELOAD,
 };
 
-static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
+static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
+                    bool is_locked)
 {
        struct qed_link_params link_params;
-       struct qed_link_output link_output;
        int rc;
 
        DP_INFO(edev, "Starting qede load\n");
 
+       if (!is_locked)
+               __qede_lock(edev);
+
        rc = qede_set_num_queues(edev);
        if (rc)
-               goto err0;
+               goto out;
 
        rc = qede_alloc_fp_array(edev);
        if (rc)
-               goto err0;
+               goto out;
 
        qede_init_fp(edev);
 
        rc = qede_alloc_mem_load(edev);
        if (rc)
                goto err1;
-       DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
-               QEDE_RSS_CNT(edev), edev->num_tc);
+       DP_INFO(edev, "Allocated %d Rx, %d Tx queues\n",
+               QEDE_RSS_COUNT(edev), QEDE_TSS_COUNT(edev));
 
        rc = qede_set_real_num_queues(edev);
        if (rc)
                goto err2;
 
+#ifdef CONFIG_RFS_ACCEL
+       if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) {
+               rc = qede_alloc_arfs(edev);
+               if (rc)
+                       DP_NOTICE(edev, "aRFS memory allocation failed\n");
+       }
+#endif
        qede_napi_add_enable(edev);
        DP_INFO(edev, "Napi added and enabled\n");
 
@@ -3532,10 +1974,6 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
        /* Add primary mac and set Rx filters */
        ether_addr_copy(edev->primary_mac, edev->ndev->dev_addr);
 
-       mutex_lock(&edev->qede_lock);
-       edev->state = QEDE_STATE_OPEN;
-       mutex_unlock(&edev->qede_lock);
-
        /* Program un-configured VLANs */
        qede_configure_vlan_filters(edev);
 
@@ -3544,15 +1982,16 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
        link_params.link_up = true;
        edev->ops->common->set_link(edev->cdev, &link_params);
 
-       /* Query whether link is already-up */
-       memset(&link_output, 0, sizeof(link_output));
-       edev->ops->common->get_link(edev->cdev, &link_output);
-       qede_link_update(edev, &link_output);
+       qede_roce_dev_event_open(edev);
+
+       qede_ptp_start(edev, (mode == QEDE_LOAD_NORMAL));
+
+       edev->state = QEDE_STATE_OPEN;
 
        DP_INFO(edev, "Ending successfully qede load\n");
 
-       return 0;
 
+       goto out;
 err4:
        qede_sync_free_irqs(edev);
        memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info));
@@ -3563,27 +2002,43 @@ err2:
 err1:
        edev->ops->common->set_fp_int(edev->cdev, 0);
        qede_free_fp_array(edev);
-       edev->num_rss = 0;
-err0:
+       edev->num_queues = 0;
+       edev->fp_num_tx = 0;
+       edev->fp_num_rx = 0;
+out:
+       if (!is_locked)
+               __qede_unlock(edev);
+
        return rc;
 }
 
+/* 'func' should be able to run between unload and reload if the interface
+ * is actually running, or afterwards in case it's currently DOWN.
+ */
 void qede_reload(struct qede_dev *edev,
-                void (*func)(struct qede_dev *, union qede_reload_args *),
-                union qede_reload_args *args)
+                struct qede_reload_args *args, bool is_locked)
 {
-       qede_unload(edev, QEDE_UNLOAD_NORMAL);
-       /* Call function handler to update parameters
-        * needed for function load.
+       if (!is_locked)
+               __qede_lock(edev);
+
+       /* Since qede_lock is held, the internal state can't change even
+        * if the netdev state starts transitioning. Check whether the
+        * current internal configuration indicates the device is up, then reload.
         */
-       if (func)
-               func(edev, args);
+       if (edev->state == QEDE_STATE_OPEN) {
+               qede_unload(edev, QEDE_UNLOAD_NORMAL, true);
+               if (args)
+                       args->func(edev, args);
+               qede_load(edev, QEDE_LOAD_RELOAD, true);
 
-       qede_load(edev, QEDE_LOAD_RELOAD);
+               /* Since no one is going to do it for us, re-configure */
+               qede_config_rx_mode(edev->ndev);
+       } else if (args) {
+               args->func(edev, args);
+       }
 
-       mutex_lock(&edev->qede_lock);
-       qede_config_rx_mode(edev->ndev);
-       mutex_unlock(&edev->qede_lock);
+       if (!is_locked)
+               __qede_unlock(edev);
 }
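
A hypothetical caller of the reworked qede_reload() above, shaped like the
driver's MTU and ring-size flows: the callback mutates edev while the device
is unloaded (or simply down). The args->func contract comes from this patch;
the qede_update_mtu_cb() name and the u.mtu member are assumptions for
illustration.

    static void qede_update_mtu_cb(struct qede_dev *edev,
                                   struct qede_reload_args *args)
    {
            edev->ndev->mtu = args->u.mtu;  /* assumed union member */
    }

    static void qede_example_change_mtu(struct qede_dev *edev, u16 mtu)
    {
            struct qede_reload_args args;

            args.func = qede_update_mtu_cb;
            args.u.mtu = mtu;               /* assumed union member */
            qede_reload(edev, &args, false);
    }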
 
 /* called with rtnl_lock */
@@ -3596,13 +2051,14 @@ static int qede_open(struct net_device *ndev)
 
        edev->ops->common->set_power_state(edev->cdev, PCI_D0);
 
-       rc = qede_load(edev, QEDE_LOAD_NORMAL);
-
+       rc = qede_load(edev, QEDE_LOAD_NORMAL, false);
        if (rc)
                return rc;
 
        udp_tunnel_get_rx_info(ndev);
 
+       edev->ops->common->update_drv_state(edev->cdev, true);
+
        return 0;
 }
 
@@ -3610,7 +2066,9 @@ static int qede_close(struct net_device *ndev)
 {
        struct qede_dev *edev = netdev_priv(ndev);
 
-       qede_unload(edev, QEDE_UNLOAD_NORMAL);
+       qede_unload(edev, QEDE_UNLOAD_NORMAL, false);
+
+       edev->ops->common->update_drv_state(edev->cdev, false);
 
        return 0;
 }
@@ -3638,197 +2096,3 @@ static void qede_link_update(void *dev, struct qed_link_output *link)
                }
        }
 }
-
-static int qede_set_mac_addr(struct net_device *ndev, void *p)
-{
-       struct qede_dev *edev = netdev_priv(ndev);
-       struct sockaddr *addr = p;
-       int rc;
-
-       ASSERT_RTNL(); /* @@@TBD To be removed */
-
-       DP_INFO(edev, "Set_mac_addr called\n");
-
-       if (!is_valid_ether_addr(addr->sa_data)) {
-               DP_NOTICE(edev, "The MAC address is not valid\n");
-               return -EFAULT;
-       }
-
-       if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
-               DP_NOTICE(edev, "qed prevents setting MAC\n");
-               return -EINVAL;
-       }
-
-       ether_addr_copy(ndev->dev_addr, addr->sa_data);
-
-       if (!netif_running(ndev))  {
-               DP_NOTICE(edev, "The device is currently down\n");
-               return 0;
-       }
-
-       /* Remove the previous primary mac */
-       rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
-                                  edev->primary_mac);
-       if (rc)
-               return rc;
-
-       /* Add MAC filter according to the new unicast HW MAC address */
-       ether_addr_copy(edev->primary_mac, ndev->dev_addr);
-       return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
-                                     edev->primary_mac);
-}
-
-static int
-qede_configure_mcast_filtering(struct net_device *ndev,
-                              enum qed_filter_rx_mode_type *accept_flags)
-{
-       struct qede_dev *edev = netdev_priv(ndev);
-       unsigned char *mc_macs, *temp;
-       struct netdev_hw_addr *ha;
-       int rc = 0, mc_count;
-       size_t size;
-
-       size = 64 * ETH_ALEN;
-
-       mc_macs = kzalloc(size, GFP_KERNEL);
-       if (!mc_macs) {
-               DP_NOTICE(edev,
-                         "Failed to allocate memory for multicast MACs\n");
-               rc = -ENOMEM;
-               goto exit;
-       }
-
-       temp = mc_macs;
-
-       /* Remove all previously configured MAC filters */
-       rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
-                                  mc_macs, 1);
-       if (rc)
-               goto exit;
-
-       netif_addr_lock_bh(ndev);
-
-       mc_count = netdev_mc_count(ndev);
-       if (mc_count < 64) {
-               netdev_for_each_mc_addr(ha, ndev) {
-                       ether_addr_copy(temp, ha->addr);
-                       temp += ETH_ALEN;
-               }
-       }
-
-       netif_addr_unlock_bh(ndev);
-
-       /* Check for all multicast @@@TBD resource allocation */
-       if ((ndev->flags & IFF_ALLMULTI) ||
-           (mc_count > 64)) {
-               if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR)
-                       *accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
-       } else {
-               /* Add all multicast MAC filters */
-               rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
-                                          mc_macs, mc_count);
-       }
-
-exit:
-       kfree(mc_macs);
-       return rc;
-}
-
-static void qede_set_rx_mode(struct net_device *ndev)
-{
-       struct qede_dev *edev = netdev_priv(ndev);
-
-       DP_INFO(edev, "qede_set_rx_mode called\n");
-
-       if (edev->state != QEDE_STATE_OPEN) {
-               DP_INFO(edev,
-                       "qede_set_rx_mode called while interface is down\n");
-       } else {
-               set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
-               schedule_delayed_work(&edev->sp_task, 0);
-       }
-}
-
-/* Must be called with qede_lock held */
-static void qede_config_rx_mode(struct net_device *ndev)
-{
-       enum qed_filter_rx_mode_type accept_flags = QED_FILTER_TYPE_UCAST;
-       struct qede_dev *edev = netdev_priv(ndev);
-       struct qed_filter_params rx_mode;
-       unsigned char *uc_macs, *temp;
-       struct netdev_hw_addr *ha;
-       int rc, uc_count;
-       size_t size;
-
-       netif_addr_lock_bh(ndev);
-
-       uc_count = netdev_uc_count(ndev);
-       size = uc_count * ETH_ALEN;
-
-       uc_macs = kzalloc(size, GFP_ATOMIC);
-       if (!uc_macs) {
-               DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
-               netif_addr_unlock_bh(ndev);
-               return;
-       }
-
-       temp = uc_macs;
-       netdev_for_each_uc_addr(ha, ndev) {
-               ether_addr_copy(temp, ha->addr);
-               temp += ETH_ALEN;
-       }
-
-       netif_addr_unlock_bh(ndev);
-
-       /* Configure the struct for the Rx mode */
-       memset(&rx_mode, 0, sizeof(struct qed_filter_params));
-       rx_mode.type = QED_FILTER_TYPE_RX_MODE;
-
-       /* Remove all previous unicast secondary macs and multicast macs
-        * (configrue / leave the primary mac)
-        */
-       rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
-                                  edev->primary_mac);
-       if (rc)
-               goto out;
-
-       /* Check for promiscuous */
-       if ((ndev->flags & IFF_PROMISC) ||
-           (uc_count > 15)) { /* @@@TBD resource allocation - 1 */
-               accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
-       } else {
-               /* Add MAC filters according to the unicast secondary macs */
-               int i;
-
-               temp = uc_macs;
-               for (i = 0; i < uc_count; i++) {
-                       rc = qede_set_ucast_rx_mac(edev,
-                                                  QED_FILTER_XCAST_TYPE_ADD,
-                                                  temp);
-                       if (rc)
-                               goto out;
-
-                       temp += ETH_ALEN;
-               }
-
-               rc = qede_configure_mcast_filtering(ndev, &accept_flags);
-               if (rc)
-                       goto out;
-       }
-
-       /* take care of VLAN mode */
-       if (ndev->flags & IFF_PROMISC) {
-               qede_config_accept_any_vlan(edev, true);
-       } else if (!edev->non_configured_vlans) {
-               /* It's possible that accept_any_vlan mode is set due to a
-                * previous setting of IFF_PROMISC. If vlan credits are
-                * sufficient, disable accept_any_vlan.
-                */
-               qede_config_accept_any_vlan(edev, false);
-       }
-
-       rx_mode.filter.accept_flags = accept_flags;
-       edev->ops->filter_config(edev->cdev, &rx_mode);
-out:
-       kfree(uc_macs);
-}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
new file mode 100644 (file)
index 0000000..2e62dec
--- /dev/null
@@ -0,0 +1,536 @@
+/* QLogic qede NIC Driver
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "qede_ptp.h"
+
+struct qede_ptp {
+       const struct qed_eth_ptp_ops    *ops;
+       struct ptp_clock_info           clock_info;
+       struct cyclecounter             cc;
+       struct timecounter              tc;
+       struct ptp_clock                *clock;
+       struct work_struct              work;
+       struct qede_dev                 *edev;
+       struct sk_buff                  *tx_skb;
+
+       /* ptp spinlock is used for protecting the cycle/time counter fields
+        * and, also for serializing the qed PTP API invocations.
+        */
+       spinlock_t                      lock;
+       bool                            hw_ts_ioctl_called;
+       u16                             tx_type;
+       u16                             rx_filter;
+};
+
+/**
+ * qede_ptp_adjfreq
+ * @info: the ptp clock info structure
+ * @ppb: parts per billion adjustment from base
+ *
+ * Adjust the frequency of the ptp cycle counter by the
+ * indicated ppb from the base frequency.
+ */
+static int qede_ptp_adjfreq(struct ptp_clock_info *info, s32 ppb)
+{
+       struct qede_ptp *ptp = container_of(info, struct qede_ptp, clock_info);
+       struct qede_dev *edev = ptp->edev;
+       int rc;
+
+       __qede_lock(edev);
+       if (edev->state == QEDE_STATE_OPEN) {
+               spin_lock_bh(&ptp->lock);
+               rc = ptp->ops->adjfreq(edev->cdev, ppb);
+               spin_unlock_bh(&ptp->lock);
+       } else {
+               DP_ERR(edev, "PTP adjfreq called while interface is down\n");
+               rc = -EFAULT;
+       }
+       __qede_unlock(edev);
+
+       return rc;
+}
+
+static int qede_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
+{
+       struct qede_dev *edev;
+       struct qede_ptp *ptp;
+
+       ptp = container_of(info, struct qede_ptp, clock_info);
+       edev = ptp->edev;
+
+       DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP adjtime called, delta = %llx\n",
+                  delta);
+
+       spin_lock_bh(&ptp->lock);
+       timecounter_adjtime(&ptp->tc, delta);
+       spin_unlock_bh(&ptp->lock);
+
+       return 0;
+}
+
+static int qede_ptp_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
+{
+       struct qede_dev *edev;
+       struct qede_ptp *ptp;
+       u64 ns;
+
+       ptp = container_of(info, struct qede_ptp, clock_info);
+       edev = ptp->edev;
+
+       spin_lock_bh(&ptp->lock);
+       ns = timecounter_read(&ptp->tc);
+       spin_unlock_bh(&ptp->lock);
+
+       DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP gettime called, ns = %llu\n", ns);
+
+       *ts = ns_to_timespec64(ns);
+
+       return 0;
+}
+
+static int qede_ptp_settime(struct ptp_clock_info *info,
+                           const struct timespec64 *ts)
+{
+       struct qede_dev *edev;
+       struct qede_ptp *ptp;
+       u64 ns;
+
+       ptp = container_of(info, struct qede_ptp, clock_info);
+       edev = ptp->edev;
+
+       ns = timespec64_to_ns(ts);
+
+       DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP settime called, ns = %llu\n", ns);
+
+       /* Re-init the timecounter */
+       spin_lock_bh(&ptp->lock);
+       timecounter_init(&ptp->tc, &ptp->cc, ns);
+       spin_unlock_bh(&ptp->lock);
+
+       return 0;
+}
+
+/* Enable (or disable) ancillary features of the phc subsystem */
+static int qede_ptp_ancillary_feature_enable(struct ptp_clock_info *info,
+                                            struct ptp_clock_request *rq,
+                                            int on)
+{
+       struct qede_dev *edev;
+       struct qede_ptp *ptp;
+
+       ptp = container_of(info, struct qede_ptp, clock_info);
+       edev = ptp->edev;
+
+       DP_ERR(edev, "PHC ancillary features are not supported\n");
+
+       return -ENOTSUPP;
+}
+
+static void qede_ptp_task(struct work_struct *work)
+{
+       struct skb_shared_hwtstamps shhwtstamps;
+       struct qede_dev *edev;
+       struct qede_ptp *ptp;
+       u64 timestamp, ns;
+       int rc;
+
+       ptp = container_of(work, struct qede_ptp, work);
+       edev = ptp->edev;
+
+       /* Read Tx timestamp registers */
+       spin_lock_bh(&ptp->lock);
+       rc = ptp->ops->read_tx_ts(edev->cdev, &timestamp);
+       spin_unlock_bh(&ptp->lock);
+       if (rc) {
+               /* Reschedule to keep checking for a valid timestamp value */
+               schedule_work(&ptp->work);
+               return;
+       }
+
+       ns = timecounter_cyc2time(&ptp->tc, timestamp);
+       memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+       shhwtstamps.hwtstamp = ns_to_ktime(ns);
+       skb_tstamp_tx(ptp->tx_skb, &shhwtstamps);
+       dev_kfree_skb_any(ptp->tx_skb);
+       ptp->tx_skb = NULL;
+
+       DP_VERBOSE(edev, QED_MSG_DEBUG,
+                  "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
+                  timestamp, ns);
+}
+
+/* Read the PHC. This API is invoked with ptp_lock held. */
+static u64 qede_ptp_read_cc(const struct cyclecounter *cc)
+{
+       struct qede_dev *edev;
+       struct qede_ptp *ptp;
+       u64 phc_cycles;
+       int rc;
+
+       ptp = container_of(cc, struct qede_ptp, cc);
+       edev = ptp->edev;
+       rc = ptp->ops->read_cc(edev->cdev, &phc_cycles);
+       if (rc)
+               WARN_ONCE(1, "PHC read err %d\n", rc);
+
+       DP_VERBOSE(edev, QED_MSG_DEBUG, "PHC read cycles = %llu\n", phc_cycles);
+
+       return phc_cycles;
+}
+
+static void qede_ptp_init_cc(struct qede_dev *edev)
+{
+       struct qede_ptp *ptp;
+
+       ptp = edev->ptp;
+       if (!ptp)
+               return;
+
+       memset(&ptp->cc, 0, sizeof(ptp->cc));
+       ptp->cc.read = qede_ptp_read_cc;
+       ptp->cc.mask = CYCLECOUNTER_MASK(64);
+       ptp->cc.shift = 0;
+       ptp->cc.mult = 1;
+}
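+
+/* With .mult = 1 and .shift = 0, the generic timecounter conversion
+ *
+ *	ns = (delta_cycles * cc->mult) >> cc->shift
+ *
+ * is the identity: the qed PHC is assumed to already tick in nanoseconds,
+ * and the timecounter on top merely adds the time base established by
+ * timecounter_init() / timecounter_adjtime().
+ */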
+
+static int qede_ptp_cfg_filters(struct qede_dev *edev)
+{
+       struct qede_ptp *ptp = edev->ptp;
+
+       if (!ptp)
+               return -EIO;
+
+       if (!ptp->hw_ts_ioctl_called) {
+               DP_INFO(edev, "TS IOCTL not called\n");
+               return 0;
+       }
+
+       switch (ptp->tx_type) {
+       case HWTSTAMP_TX_ON:
+               edev->flags |= QEDE_TX_TIMESTAMPING_EN;
+               ptp->ops->hwtstamp_tx_on(edev->cdev);
+               break;
+
+       case HWTSTAMP_TX_ONESTEP_SYNC:
+               DP_ERR(edev, "One-step timestamping is not supported\n");
+               return -ERANGE;
+       }
+
+       spin_lock_bh(&ptp->lock);
+       switch (ptp->rx_filter) {
+       case HWTSTAMP_FILTER_NONE:
+               break;
+       case HWTSTAMP_FILTER_ALL:
+       case HWTSTAMP_FILTER_SOME:
+               ptp->rx_filter = HWTSTAMP_FILTER_NONE;
+               break;
+       case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+       case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+       case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+               ptp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+               /* Initialize PTP detection for UDP/IPv4 events */
+               ptp->ops->cfg_rx_filters(edev->cdev, QED_PTP_FILTER_IPV4);
+               break;
+       case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+               ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+               /* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */
+               ptp->ops->cfg_rx_filters(edev->cdev, QED_PTP_FILTER_IPV4_IPV6);
+               break;
+       case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+               ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+               /* Initialize PTP detection L2 events */
+               ptp->ops->cfg_rx_filters(edev->cdev, QED_PTP_FILTER_L2);
+               break;
+       case HWTSTAMP_FILTER_PTP_V2_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+               ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+               /* Initialize PTP detection L2, UDP/IPv4 or UDP/IPv6 events */
+               ptp->ops->cfg_rx_filters(edev->cdev,
+                                        QED_PTP_FILTER_L2_IPV4_IPV6);
+               break;
+       }
+
+       spin_unlock_bh(&ptp->lock);
+
+       return 0;
+}
+
+int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *ifr)
+{
+       struct hwtstamp_config config;
+       struct qede_ptp *ptp;
+       int rc;
+
+       ptp = edev->ptp;
+       if (!ptp)
+               return -EIO;
+
+       if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+               return -EFAULT;
+
+       DP_VERBOSE(edev, QED_MSG_DEBUG,
+                  "HWTSTAMP IOCTL: Requested tx_type = %d, requested rx_filters = %d\n",
+                  config.tx_type, config.rx_filter);
+
+       if (config.flags) {
+               DP_ERR(edev, "config.flags is reserved for future use\n");
+               return -EINVAL;
+       }
+
+       ptp->hw_ts_ioctl_called = 1;
+       ptp->tx_type = config.tx_type;
+       ptp->rx_filter = config.rx_filter;
+
+       rc = qede_ptp_cfg_filters(edev);
+       if (rc)
+               return rc;
+
+       config.rx_filter = ptp->rx_filter;
+
+       return copy_to_user(ifr->ifr_data, &config,
+                           sizeof(config)) ? -EFAULT : 0;
+}
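+
+/* Userspace reaches qede_ptp_hw_ts() through the standard SIOCSHWTSTAMP
+ * ioctl on the netdev; a minimal sketch ("eth0" and the filter choice are
+ * illustrative, error handling and some headers omitted):
+ *
+ *	#include <linux/net_tstamp.h>
+ *	#include <linux/sockios.h>
+ *	#include <net/if.h>
+ *	#include <sys/ioctl.h>
+ *
+ *	struct hwtstamp_config cfg = {
+ *		.tx_type = HWTSTAMP_TX_ON,
+ *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
+ *	};
+ *	struct ifreq ifr;
+ *
+ *	memset(&ifr, 0, sizeof(ifr));
+ *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
+ *	ifr.ifr_data = (void *)&cfg;
+ *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
+ *
+ * where sock_fd is any AF_INET datagram socket.
+ */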
+
+/* Called during load, to initialize PTP-related stuff */
+static void qede_ptp_init(struct qede_dev *edev, bool init_tc)
+{
+       struct qede_ptp *ptp;
+       int rc;
+
+       ptp = edev->ptp;
+       if (!ptp)
+               return;
+
+       spin_lock_init(&ptp->lock);
+
+       /* Configure PTP in HW */
+       rc = ptp->ops->enable(edev->cdev);
+       if (rc) {
+               DP_ERR(edev, "Stopping PTP initialization\n");
+               return;
+       }
+
+       /* Init work queue for Tx timestamping */
+       INIT_WORK(&ptp->work, qede_ptp_task);
+
+       /* Init cyclecounter and timecounter. This is done only in the first
+        * load. If done in every load, PTP application will fail when doing
+        * unload / load (e.g. MTU change) while it is running.
+        */
+       if (init_tc) {
+               qede_ptp_init_cc(edev);
+               timecounter_init(&ptp->tc, &ptp->cc,
+                                ktime_to_ns(ktime_get_real()));
+       }
+
+       DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP initialization is successful\n");
+}
+
+void qede_ptp_start(struct qede_dev *edev, bool init_tc)
+{
+       qede_ptp_init(edev, init_tc);
+       qede_ptp_cfg_filters(edev);
+}
+
+void qede_ptp_remove(struct qede_dev *edev)
+{
+       struct qede_ptp *ptp;
+
+       ptp = edev->ptp;
+       if (ptp && ptp->clock) {
+               ptp_clock_unregister(ptp->clock);
+               ptp->clock = NULL;
+       }
+
+       kfree(ptp);
+       edev->ptp = NULL;
+}
+
+int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info)
+{
+       struct qede_ptp *ptp = edev->ptp;
+
+       if (!ptp)
+               return -EIO;
+
+       info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+                               SOF_TIMESTAMPING_RX_SOFTWARE |
+                               SOF_TIMESTAMPING_SOFTWARE |
+                               SOF_TIMESTAMPING_TX_HARDWARE |
+                               SOF_TIMESTAMPING_RX_HARDWARE |
+                               SOF_TIMESTAMPING_RAW_HARDWARE;
+
+       if (ptp->clock)
+               info->phc_index = ptp_clock_index(ptp->clock);
+       else
+               info->phc_index = -1;
+
+       info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+                          BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+                          BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+                          BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+                          BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+                          BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+                          BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+                          BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+                          BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+                          BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+                          BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
+                          BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
+                          BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
+
+       info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+
+       return 0;
+}
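+
+/* This is what `ethtool -T <ifname>` reports; the rx_filters advertised
+ * here intentionally mirror the cases handled in qede_ptp_cfg_filters()
+ * above.
+ */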
+
+/* Called during unload, to stop PTP-related stuff */
+void qede_ptp_stop(struct qede_dev *edev)
+{
+       struct qede_ptp *ptp;
+
+       ptp = edev->ptp;
+       if (!ptp)
+               return;
+
+       /* Cancel PTP work queue. Should be done after the Tx queues are
+        * drained to prevent additional scheduling.
+        */
+       cancel_work_sync(&ptp->work);
+       if (ptp->tx_skb) {
+               dev_kfree_skb_any(ptp->tx_skb);
+               ptp->tx_skb = NULL;
+       }
+
+       /* Disable PTP in HW */
+       spin_lock_bh(&ptp->lock);
+       ptp->ops->disable(edev->cdev);
+       spin_unlock_bh(&ptp->lock);
+}
+
+int qede_ptp_register_phc(struct qede_dev *edev)
+{
+       struct qede_ptp *ptp;
+
+       ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
+       if (!ptp) {
+               DP_INFO(edev, "Failed to allocate struct for PTP\n");
+               return -ENOMEM;
+       }
+
+       ptp->edev = edev;
+       ptp->ops = edev->ops->ptp;
+       if (!ptp->ops) {
+               kfree(ptp);
+               edev->ptp = NULL;
+               DP_ERR(edev, "PTP clock registeration failed\n");
+               return -EIO;
+       }
+
+       edev->ptp = ptp;
+
+       /* Fill the ptp_clock_info struct and register PTP clock */
+       ptp->clock_info.owner = THIS_MODULE;
+       snprintf(ptp->clock_info.name, 16, "%s", edev->ndev->name);
+       ptp->clock_info.max_adj = QED_MAX_PHC_DRIFT_PPB;
+       ptp->clock_info.n_alarm = 0;
+       ptp->clock_info.n_ext_ts = 0;
+       ptp->clock_info.n_per_out = 0;
+       ptp->clock_info.pps = 0;
+       ptp->clock_info.adjfreq = qede_ptp_adjfreq;
+       ptp->clock_info.adjtime = qede_ptp_adjtime;
+       ptp->clock_info.gettime64 = qede_ptp_gettime;
+       ptp->clock_info.settime64 = qede_ptp_settime;
+       ptp->clock_info.enable = qede_ptp_ancillary_feature_enable;
+
+       ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev);
+       if (IS_ERR(ptp->clock)) {
+               ptp->clock = NULL;
+               kfree(ptp);
+               edev->ptp = NULL;
+               DP_ERR(edev, "PTP clock registeration failed\n");
+       }
+
+       return 0;
+}
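+
+/* Once registered, the PHC is exposed as /dev/ptpN, with N available from
+ * ptp_clock_index() (see qede_ptp_get_ts_info() above). A userspace read
+ * sketch, following the usual posix-clock idiom (device path illustrative,
+ * error handling omitted):
+ *
+ *	#define FD_TO_CLOCKID(fd)	((~(clockid_t)(fd) << 3) | 3)
+ *
+ *	int fd = open("/dev/ptp0", O_RDWR);
+ *	struct timespec ts;
+ *
+ *	clock_gettime(FD_TO_CLOCKID(fd), &ts);
+ */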
+
+void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb)
+{
+       struct qede_ptp *ptp;
+
+       ptp = edev->ptp;
+       if (!ptp)
+               return;
+
+       if (unlikely(!(edev->flags & QEDE_TX_TIMESTAMPING_EN))) {
+               DP_NOTICE(edev,
+                         "Tx timestamping was not enabled, this packet will not be timestamped\n");
+       } else if (unlikely(ptp->tx_skb)) {
+               DP_NOTICE(edev,
+                         "The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
+       } else {
+               skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+               /* schedule check for Tx timestamp */
+               ptp->tx_skb = skb_get(skb);
+               schedule_work(&ptp->work);
+       }
+}
+
+void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb)
+{
+       struct qede_ptp *ptp;
+       u64 timestamp, ns;
+       int rc;
+
+       ptp = edev->ptp;
+       if (!ptp)
+               return;
+
+       spin_lock_bh(&ptp->lock);
+       rc = ptp->ops->read_rx_ts(edev->cdev, &timestamp);
+       if (rc) {
+               spin_unlock_bh(&ptp->lock);
+               DP_INFO(edev, "Invalid Rx timestamp\n");
+               return;
+       }
+
+       ns = timecounter_cyc2time(&ptp->tc, timestamp);
+       spin_unlock_bh(&ptp->lock);
+       skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
+       DP_VERBOSE(edev, QED_MSG_DEBUG,
+                  "Rx timestamp, timestamp cycles = %llu, ns = %llu\n",
+                  timestamp, ns);
+}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.h b/drivers/net/ethernet/qlogic/qede/qede_ptp.h
new file mode 100644 (file)
index 0000000..f328f9b
--- /dev/null
@@ -0,0 +1,65 @@
+/* QLogic qede NIC Driver
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _QEDE_PTP_H_
+#define _QEDE_PTP_H_
+
+#include <linux/ptp_clock_kernel.h>
+#include <linux/net_tstamp.h>
+#include <linux/timecounter.h>
+#include "qede.h"
+
+void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb);
+void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb);
+int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *req);
+void qede_ptp_start(struct qede_dev *edev, bool init_tc);
+void qede_ptp_stop(struct qede_dev *edev);
+void qede_ptp_remove(struct qede_dev *edev);
+int qede_ptp_register_phc(struct qede_dev *edev);
+int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *ts);
+
+static inline void qede_ptp_record_rx_ts(struct qede_dev *edev,
+                                        union eth_rx_cqe *cqe,
+                                        struct sk_buff *skb)
+{
+       /* Check if this packet was timestamped */
+       if (unlikely(le16_to_cpu(cqe->fast_path_regular.pars_flags.flags) &
+                    (1 << PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT))) {
+               if (likely(le16_to_cpu(cqe->fast_path_regular.pars_flags.flags)
+                   & (1 << PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT))) {
+                       qede_ptp_rx_ts(edev, skb);
+               } else {
+                       DP_INFO(edev,
+                               "Timestamp recorded for non PTP packets\n");
+               }
+       }
+}
+#endif /* _QEDE_PTP_H_ */
diff --git a/drivers/net/ethernet/qlogic/qede/qede_roce.c b/drivers/net/ethernet/qlogic/qede/qede_roce.c
new file mode 100644 (file)
index 0000000..f00657c
--- /dev/null
@@ -0,0 +1,314 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/qed/qede_roce.h>
+#include "qede.h"
+
+static struct qedr_driver *qedr_drv;
+static LIST_HEAD(qedr_dev_list);
+static DEFINE_MUTEX(qedr_dev_list_lock);
+
+bool qede_roce_supported(struct qede_dev *dev)
+{
+       return dev->dev_info.common.rdma_supported;
+}
+
+static void _qede_roce_dev_add(struct qede_dev *edev)
+{
+       if (!qedr_drv)
+               return;
+
+       edev->rdma_info.qedr_dev = qedr_drv->add(edev->cdev, edev->pdev,
+                                                edev->ndev);
+}
+
+static int qede_roce_create_wq(struct qede_dev *edev)
+{
+       INIT_LIST_HEAD(&edev->rdma_info.roce_event_list);
+       edev->rdma_info.roce_wq = create_singlethread_workqueue("roce_wq");
+       if (!edev->rdma_info.roce_wq) {
+               DP_NOTICE(edev, "qedr: Could not create workqueue\n");
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static void qede_roce_cleanup_event(struct qede_dev *edev)
+{
+       struct list_head *head = &edev->rdma_info.roce_event_list;
+       struct qede_roce_event_work *event_node;
+
+       flush_workqueue(edev->rdma_info.roce_wq);
+       while (!list_empty(head)) {
+               event_node = list_entry(head->next, struct qede_roce_event_work,
+                                       list);
+               cancel_work_sync(&event_node->work);
+               list_del(&event_node->list);
+               kfree(event_node);
+       }
+}
+
+static void qede_roce_destroy_wq(struct qede_dev *edev)
+{
+       qede_roce_cleanup_event(edev);
+       destroy_workqueue(edev->rdma_info.roce_wq);
+}
+
+int qede_roce_dev_add(struct qede_dev *edev)
+{
+       int rc = 0;
+
+       if (qede_roce_supported(edev)) {
+               rc = qede_roce_create_wq(edev);
+               if (rc)
+                       return rc;
+
+               INIT_LIST_HEAD(&edev->rdma_info.entry);
+               mutex_lock(&qedr_dev_list_lock);
+               list_add_tail(&edev->rdma_info.entry, &qedr_dev_list);
+               _qede_roce_dev_add(edev);
+               mutex_unlock(&qedr_dev_list_lock);
+       }
+
+       return rc;
+}
+
+static void _qede_roce_dev_remove(struct qede_dev *edev)
+{
+       if (qedr_drv && qedr_drv->remove && edev->rdma_info.qedr_dev)
+               qedr_drv->remove(edev->rdma_info.qedr_dev);
+       edev->rdma_info.qedr_dev = NULL;
+}
+
+void qede_roce_dev_remove(struct qede_dev *edev)
+{
+       if (!qede_roce_supported(edev))
+               return;
+
+       qede_roce_destroy_wq(edev);
+       mutex_lock(&qedr_dev_list_lock);
+       _qede_roce_dev_remove(edev);
+       list_del(&edev->rdma_info.entry);
+       mutex_unlock(&qedr_dev_list_lock);
+}
+
+static void _qede_roce_dev_open(struct qede_dev *edev)
+{
+       if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
+               qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_UP);
+}
+
+static void qede_roce_dev_open(struct qede_dev *edev)
+{
+       if (!qede_roce_supported(edev))
+               return;
+
+       mutex_lock(&qedr_dev_list_lock);
+       _qede_roce_dev_open(edev);
+       mutex_unlock(&qedr_dev_list_lock);
+}
+
+static void _qede_roce_dev_close(struct qede_dev *edev)
+{
+       if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
+               qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_DOWN);
+}
+
+static void qede_roce_dev_close(struct qede_dev *edev)
+{
+       if (!qede_roce_supported(edev))
+               return;
+
+       mutex_lock(&qedr_dev_list_lock);
+       _qede_roce_dev_close(edev);
+       mutex_unlock(&qedr_dev_list_lock);
+}
+
+static void qede_roce_dev_shutdown(struct qede_dev *edev)
+{
+       if (!qede_roce_supported(edev))
+               return;
+
+       mutex_lock(&qedr_dev_list_lock);
+       if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
+               qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CLOSE);
+       mutex_unlock(&qedr_dev_list_lock);
+}
+
+int qede_roce_register_driver(struct qedr_driver *drv)
+{
+       struct qede_dev *edev;
+       u8 qedr_counter = 0;
+
+       mutex_lock(&qedr_dev_list_lock);
+       if (qedr_drv) {
+               mutex_unlock(&qedr_dev_list_lock);
+               return -EINVAL;
+       }
+       qedr_drv = drv;
+
+       list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
+               struct net_device *ndev;
+
+               qedr_counter++;
+               _qede_roce_dev_add(edev);
+               ndev = edev->ndev;
+               if (netif_running(ndev) && netif_oper_up(ndev))
+                       _qede_roce_dev_open(edev);
+       }
+       mutex_unlock(&qedr_dev_list_lock);
+
+       pr_notice("qedr: discovered and registered %d RoCE funcs\n",
+                 qedr_counter);
+
+       return 0;
+}
+EXPORT_SYMBOL(qede_roce_register_driver);
+
+void qede_roce_unregister_driver(struct qedr_driver *drv)
+{
+       struct qede_dev *edev;
+
+       mutex_lock(&qedr_dev_list_lock);
+       list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
+               if (edev->rdma_info.qedr_dev)
+                       _qede_roce_dev_remove(edev);
+       }
+       qedr_drv = NULL;
+       mutex_unlock(&qedr_dev_list_lock);
+}
+EXPORT_SYMBOL(qede_roce_unregister_driver);
+
+static void qede_roce_changeaddr(struct qede_dev *edev)
+{
+       if (!qede_roce_supported(edev))
+               return;
+
+       if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
+               qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CHANGE_ADDR);
+}
+
+struct qede_roce_event_work *
+qede_roce_get_free_event_node(struct qede_dev *edev)
+{
+       struct qede_roce_event_work *event_node = NULL;
+       struct list_head *list_node = NULL;
+       bool found = false;
+
+       list_for_each(list_node, &edev->rdma_info.roce_event_list) {
+               event_node = list_entry(list_node, struct qede_roce_event_work,
+                                       list);
+               if (!work_pending(&event_node->work)) {
+                       found = true;
+                       break;
+               }
+       }
+
+       if (!found) {
+               event_node = kzalloc(sizeof(*event_node), GFP_KERNEL);
+               if (!event_node) {
+                       DP_NOTICE(edev,
+                                 "qedr: Could not allocate memory for roce work\n");
+                       return NULL;
+               }
+               list_add_tail(&event_node->list,
+                             &edev->rdma_info.roce_event_list);
+       }
+
+       return event_node;
+}
+
+static void qede_roce_handle_event(struct work_struct *work)
+{
+       struct qede_roce_event_work *event_node;
+       enum qede_roce_event event;
+       struct qede_dev *edev;
+
+       event_node = container_of(work, struct qede_roce_event_work, work);
+       event = event_node->event;
+       edev = event_node->ptr;
+
+       switch (event) {
+       case QEDE_UP:
+               qede_roce_dev_open(edev);
+               break;
+       case QEDE_DOWN:
+               qede_roce_dev_close(edev);
+               break;
+       case QEDE_CLOSE:
+               qede_roce_dev_shutdown(edev);
+               break;
+       case QEDE_CHANGE_ADDR:
+               qede_roce_changeaddr(edev);
+               break;
+       default:
+               DP_NOTICE(edev, "Invalid roce event %d", event);
+       }
+}
+
+static void qede_roce_add_event(struct qede_dev *edev,
+                               enum qede_roce_event event)
+{
+       struct qede_roce_event_work *event_node;
+
+       if (!edev->rdma_info.qedr_dev)
+               return;
+
+       event_node = qede_roce_get_free_event_node(edev);
+       if (!event_node)
+               return;
+
+       event_node->event = event;
+       event_node->ptr = edev;
+
+       INIT_WORK(&event_node->work, qede_roce_handle_event);
+       queue_work(edev->rdma_info.roce_wq, &event_node->work);
+}
+
+void qede_roce_dev_event_open(struct qede_dev *edev)
+{
+       qede_roce_add_event(edev, QEDE_UP);
+}
+
+void qede_roce_dev_event_close(struct qede_dev *edev)
+{
+       qede_roce_add_event(edev, QEDE_DOWN);
+}
+
+void qede_roce_event_changeaddr(struct qede_dev *edev)
+{
+       qede_roce_add_event(edev, QEDE_CHANGE_ADDR);
+}
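+
+/* A registration sketch for the qedr side. The callback shapes are inferred
+ * from the invocations above (add returns the qedr_dev cookie, remove and
+ * notify consume it); qedr_add/qedr_remove/qedr_notify are placeholder
+ * names, and the authoritative struct lives in <linux/qed/qede_roce.h>:
+ *
+ *	static struct qedr_driver drv = {
+ *		.add	= qedr_add,
+ *		.remove	= qedr_remove,
+ *		.notify	= qedr_notify,
+ *	};
+ *
+ *	qede_roce_register_driver(&drv);
+ */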
diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.c b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
new file mode 100644 (file)
index 0000000..bb812db
--- /dev/null
@@ -0,0 +1,190 @@
+/* QLogic FCoE Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+#include "drv_fcoe_fw_funcs.h"
+#include "drv_scsi_fw_funcs.h"
+
+#define FCOE_RX_ID ((u32)0x0000FFFF)
+
+static inline void init_common_sqe(struct fcoe_task_params *task_params,
+                                  enum fcoe_sqe_request_type request_type)
+{
+       memset(task_params->sqe, 0, sizeof(*(task_params->sqe)));
+       SET_FIELD(task_params->sqe->flags, FCOE_WQE_REQ_TYPE,
+                 request_type);
+       task_params->sqe->task_id = task_params->itid;
+}
+
+int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
+                               struct scsi_sgl_task_params *sgl_task_params,
+                               struct regpair sense_data_buffer_phys_addr,
+                               u32 task_retry_id,
+                               u8 fcp_cmd_payload[32])
+{
+       struct fcoe_task_context *ctx = task_params->context;
+       struct ystorm_fcoe_task_st_ctx *y_st_ctx;
+       struct tstorm_fcoe_task_st_ctx *t_st_ctx;
+       struct ustorm_fcoe_task_ag_ctx *u_ag_ctx;
+       struct mstorm_fcoe_task_st_ctx *m_st_ctx;
+       u32 io_size, val;
+       bool slow_sgl;
+
+       memset(ctx, 0, sizeof(*(ctx)));
+       slow_sgl = scsi_is_slow_sgl(sgl_task_params->num_sges,
+                                   sgl_task_params->small_mid_sge);
+       io_size = (task_params->task_type == FCOE_TASK_TYPE_WRITE_INITIATOR ?
+                  task_params->tx_io_size : task_params->rx_io_size);
+
+       /* Ystorm ctx */
+       y_st_ctx = &ctx->ystorm_st_context;
+       y_st_ctx->data_2_trns_rem = cpu_to_le32(io_size);
+       y_st_ctx->task_rety_identifier = cpu_to_le32(task_retry_id);
+       y_st_ctx->task_type = task_params->task_type;
+       memcpy(&y_st_ctx->tx_info_union.fcp_cmd_payload,
+              fcp_cmd_payload, sizeof(struct fcoe_fcp_cmd_payload));
+
+       /* Tstorm ctx */
+       t_st_ctx = &ctx->tstorm_st_context;
+       t_st_ctx->read_only.dev_type = (task_params->is_tape_device == 1 ?
+                                       FCOE_TASK_DEV_TYPE_TAPE :
+                                       FCOE_TASK_DEV_TYPE_DISK);
+       t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid);
+       val = cpu_to_le32(task_params->cq_rss_number);
+       t_st_ctx->read_only.glbl_q_num = val;
+       t_st_ctx->read_only.fcp_cmd_trns_size = cpu_to_le32(io_size);
+       t_st_ctx->read_only.task_type = task_params->task_type;
+       SET_FIELD(t_st_ctx->read_write.flags,
+                 FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1);
+       t_st_ctx->read_write.rx_id = cpu_to_le32(FCOE_RX_ID);
+
+       /* Ustorm ctx */
+       u_ag_ctx = &ctx->ustorm_ag_context;
+       u_ag_ctx->global_cq_num = cpu_to_le32(task_params->cq_rss_number);
+
+       /* Mstorm buffer for sense/rsp data placement */
+       m_st_ctx = &ctx->mstorm_st_context;
+       val = cpu_to_le32(sense_data_buffer_phys_addr.hi);
+       m_st_ctx->rsp_buf_addr.hi = val;
+       val = cpu_to_le32(sense_data_buffer_phys_addr.lo);
+       m_st_ctx->rsp_buf_addr.lo = val;
+
+       if (task_params->task_type == FCOE_TASK_TYPE_WRITE_INITIATOR) {
+               /* Ystorm ctx */
+               y_st_ctx->expect_first_xfer = 1;
+
+               /* Set the amount of super SGEs. Can be up to 4. */
+               SET_FIELD(y_st_ctx->sgl_mode,
+                         YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
+                         (slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL));
+               init_scsi_sgl_context(&y_st_ctx->sgl_params,
+                                     &y_st_ctx->data_desc,
+                                     sgl_task_params);
+
+               /* Mstorm ctx */
+               SET_FIELD(m_st_ctx->flags,
+                         MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
+                         (slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL));
+       } else {
+               /* Tstorm ctx */
+               SET_FIELD(t_st_ctx->read_write.flags,
+                         FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE,
+                         (slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL));
+
+               /* Mstorm ctx */
+               m_st_ctx->data_2_trns_rem = cpu_to_le32(io_size);
+               init_scsi_sgl_context(&m_st_ctx->sgl_params,
+                                     &m_st_ctx->data_desc,
+                                     sgl_task_params);
+       }
+
+       init_common_sqe(task_params, SEND_FCOE_CMD);
+       return 0;
+}
+
+int init_initiator_midpath_unsolicited_fcoe_task(
+       struct fcoe_task_params *task_params,
+       struct fcoe_tx_mid_path_params *mid_path_fc_header,
+       struct scsi_sgl_task_params *tx_sgl_task_params,
+       struct scsi_sgl_task_params *rx_sgl_task_params,
+       u8 fw_to_place_fc_header)
+{
+       struct fcoe_task_context *ctx = task_params->context;
+       struct ystorm_fcoe_task_st_ctx *y_st_ctx;
+       struct tstorm_fcoe_task_st_ctx *t_st_ctx;
+       struct ustorm_fcoe_task_ag_ctx *u_ag_ctx;
+       struct mstorm_fcoe_task_st_ctx *m_st_ctx;
+       u32 val;
+
+       memset(ctx, 0, sizeof(*(ctx)));
+
+       /* Init Ystorm */
+       y_st_ctx = &ctx->ystorm_st_context;
+       init_scsi_sgl_context(&y_st_ctx->sgl_params,
+                             &y_st_ctx->data_desc,
+                             tx_sgl_task_params);
+       SET_FIELD(y_st_ctx->sgl_mode,
+                 YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE, SCSI_FAST_SGL);
+       y_st_ctx->data_2_trns_rem = cpu_to_le32(task_params->tx_io_size);
+       y_st_ctx->task_type = task_params->task_type;
+       memcpy(&y_st_ctx->tx_info_union.tx_params.mid_path,
+              mid_path_fc_header, sizeof(struct fcoe_tx_mid_path_params));
+
+       /* Init Mstorm */
+       m_st_ctx = &ctx->mstorm_st_context;
+       init_scsi_sgl_context(&m_st_ctx->sgl_params,
+                             &m_st_ctx->data_desc,
+                             rx_sgl_task_params);
+       SET_FIELD(m_st_ctx->flags,
+                 MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER,
+                 fw_to_place_fc_header);
+       m_st_ctx->data_2_trns_rem = cpu_to_le32(task_params->rx_io_size);
+
+       /* Init Tstorm */
+       t_st_ctx = &ctx->tstorm_st_context;
+       t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid);
+       val = cpu_to_le32(task_params->cq_rss_number);
+       t_st_ctx->read_only.glbl_q_num = val;
+       t_st_ctx->read_only.task_type = task_params->task_type;
+       SET_FIELD(t_st_ctx->read_write.flags,
+                 FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1);
+       t_st_ctx->read_write.rx_id = cpu_to_le32(FCOE_RX_ID);
+
+       /* Init Ustorm */
+       u_ag_ctx = &ctx->ustorm_ag_context;
+       u_ag_ctx->global_cq_num = cpu_to_le32(task_params->cq_rss_number);
+
+       /* Init SQE */
+       init_common_sqe(task_params, SEND_FCOE_MIDPATH);
+       task_params->sqe->additional_info_union.burst_length =
+                                   tx_sgl_task_params->total_buffer_size;
+       SET_FIELD(task_params->sqe->flags,
+                 FCOE_WQE_NUM_SGES, tx_sgl_task_params->num_sges);
+       SET_FIELD(task_params->sqe->flags, FCOE_WQE_SGL_MODE,
+                 SCSI_FAST_SGL);
+
+       return 0;
+}
+
+int init_initiator_abort_fcoe_task(struct fcoe_task_params *task_params)
+{
+       init_common_sqe(task_params, SEND_FCOE_ABTS_REQUEST);
+       return 0;
+}
+
+int init_initiator_cleanup_fcoe_task(struct fcoe_task_params *task_params)
+{
+       init_common_sqe(task_params, FCOE_EXCHANGE_CLEANUP);
+       return 0;
+}
+
+int init_initiator_sequence_recovery_fcoe_task(
+       struct fcoe_task_params *task_params, u32 off)
+{
+       init_common_sqe(task_params, FCOE_SEQUENCE_RECOVERY);
+       task_params->sqe->additional_info_union.seq_rec_updated_offset = off;
+       return 0;
+}
diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.h b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
new file mode 100644 (file)
index 0000000..617529b
--- /dev/null
@@ -0,0 +1,93 @@
+/* QLogic FCoE Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+#ifndef _FCOE_FW_FUNCS_H
+#define _FCOE_FW_FUNCS_H
+#include "drv_scsi_fw_funcs.h"
+#include "qedf_hsi.h"
+#include <linux/qed/qed_if.h>
+
+struct fcoe_task_params {
+       /* Pointer supplied by the caller; the pointed-to task context is
+        * filled by the HSI function.
+        */
+       struct fcoe_task_context *context;
+
+       /* Pointer supplied by the caller; the pointed-to SQE is filled by
+        * the HSI function.
+        */
+       struct fcoe_wqe *sqe;
+       enum fcoe_task_type task_type;
+       u32 tx_io_size; /* in bytes */
+       u32 rx_io_size; /* in bytes */
+       u32 conn_cid;
+       u16 itid;
+       u8 cq_rss_number;
+
+        /* Whether it's Tape device or not (0=Disk, 1=Tape) */
+       u8 is_tape_device;
+};
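+
+/* A filling sketch for a write task. Field names match the struct above;
+ * the value sources (task_ctx, sqe, sc_cmd, fcport, xid, cq_idx) are
+ * placeholders for whatever the caller tracks, not symbols defined here:
+ *
+ *	struct fcoe_task_params tp = {
+ *		.context	= task_ctx,
+ *		.sqe		= sqe,
+ *		.task_type	= FCOE_TASK_TYPE_WRITE_INITIATOR,
+ *		.tx_io_size	= scsi_bufflen(sc_cmd),
+ *		.conn_cid	= fcport->fw_cid,
+ *		.itid		= xid,
+ *		.cq_rss_number	= cq_idx,
+ *		.is_tape_device	= 0,
+ *	};
+ */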
+
+/**
+ * @brief init_initiator_rw_fcoe_task - Initializes the FCoE task context for
+ * read/write task types and initializes the fcoe_sqe
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param sgl_task_params - Pointer to SGL task params
+ * @param sense_data_buffer_phys_addr - Physical address of the sense data
+ * buffer
+ * @param task_retry_id - Retry identification - used only for tape devices
+ * @param fcp_cmd_payload - FCP CMD payload
+ */
+int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
+       struct scsi_sgl_task_params *sgl_task_params,
+       struct regpair sense_data_buffer_phys_addr,
+       u32 task_retry_id,
+       u8 fcp_cmd_payload[32]);
+
+/**
+ * @brief init_initiator_midpath_unsolicited_fcoe_task - Initializes the FCoE
+ * task context for midpath/unsolicited task types and initializes the fcoe_sqe
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param mid_path_fc_header - FC header
+ * @param tx_sgl_task_params - Pointer to Tx SGL task params
+ * @param rx_sgl_task_params - Pointer to Rx SGL task params
+ * @param fw_to_place_fc_header - Indication whether the FW should place the
+ * FC header in addition to the arriving data.
+ */
+int init_initiator_midpath_unsolicited_fcoe_task(
+       struct fcoe_task_params *task_params,
+       struct fcoe_tx_mid_path_params *mid_path_fc_header,
+       struct scsi_sgl_task_params *tx_sgl_task_params,
+       struct scsi_sgl_task_params *rx_sgl_task_params,
+       u8 fw_to_place_fc_header);
+
+/**
+ * @brief init_initiator_abort_fcoe_task - Initializes the FCoE task context
+ * for abort task types and initializes the fcoe_sqe
+ *
+ * @param task_params - Pointer to task parameters struct
+ */
+int init_initiator_abort_fcoe_task(struct fcoe_task_params *task_params);
+
+/**
+ * @brief init_initiator_cleanup_fcoe_task - Initializes the FCoE task context
+ * for cleanup task types and initializes the fcoe_sqe
+ *
+ * @param task_params - Pointer to task parameters struct
+ */
+int init_initiator_cleanup_fcoe_task(struct fcoe_task_params *task_params);
+
+/**
+ * @brief init_initiator_sequence_recovery_fcoe_task - Initializes the FCoE
+ * task context for sequence recovery task types and initializes the fcoe_sqe
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param desired_offset - The desired offset the task will be re-sent from
+ */
+int init_initiator_sequence_recovery_fcoe_task(
+       struct fcoe_task_params *task_params,
+       u32 desired_offset);
+#endif
diff --git a/drivers/scsi/qedf/drv_scsi_fw_funcs.c b/drivers/scsi/qedf/drv_scsi_fw_funcs.c
new file mode 100644 (file)
index 0000000..11e0cc0
--- /dev/null
@@ -0,0 +1,44 @@
+/* QLogic FCoE Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+#include "drv_scsi_fw_funcs.h"
+
+#define SCSI_NUM_SGES_IN_CACHE 0x4
+
+bool scsi_is_slow_sgl(u16 num_sges, bool small_mid_sge)
+{
+       return (num_sges > SCSI_NUM_SGES_SLOW_SGL_THR && small_mid_sge);
+}
+
+void init_scsi_sgl_context(struct scsi_sgl_params *ctx_sgl_params,
+                          struct scsi_cached_sges *ctx_data_desc,
+                          struct scsi_sgl_task_params *sgl_task_params)
+{
+       /* no need to check for sgl_task_params->sgl validity */
+       u8 num_sges_to_init = sgl_task_params->num_sges >
+                             SCSI_NUM_SGES_IN_CACHE ? SCSI_NUM_SGES_IN_CACHE :
+                             sgl_task_params->num_sges;
+       u8 sge_index;
+       u32 val;
+
+       val = cpu_to_le32(sgl_task_params->sgl_phys_addr.lo);
+       ctx_sgl_params->sgl_addr.lo = val;
+       val = cpu_to_le32(sgl_task_params->sgl_phys_addr.hi);
+       ctx_sgl_params->sgl_addr.hi = val;
+       val = cpu_to_le32(sgl_task_params->total_buffer_size);
+       ctx_sgl_params->sgl_total_length = val;
+       ctx_sgl_params->sgl_num_sges = cpu_to_le16(sgl_task_params->num_sges);
+
+       for (sge_index = 0; sge_index < num_sges_to_init; sge_index++) {
+               val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.lo);
+               ctx_data_desc->sge[sge_index].sge_addr.lo = val;
+               val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.hi);
+               ctx_data_desc->sge[sge_index].sge_addr.hi = val;
+               val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_len);
+               ctx_data_desc->sge[sge_index].sge_len = val;
+       }
+}
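+
+/* A caller sketch; sge_array, sgl_dma, io_len and nsge are placeholders for
+ * the caller's DMA-mapped SGL, and m_st_ctx stands for a storm context as
+ * used by the callers in drv_fcoe_fw_funcs.c (lower_32_bits/upper_32_bits
+ * are the standard kernel helpers):
+ *
+ *	struct scsi_sgl_task_params sgl = {
+ *		.sgl			= sge_array,
+ *		.sgl_phys_addr.lo	= lower_32_bits(sgl_dma),
+ *		.sgl_phys_addr.hi	= upper_32_bits(sgl_dma),
+ *		.total_buffer_size	= io_len,
+ *		.num_sges		= nsge,
+ *		.small_mid_sge		= false,
+ *	};
+ *
+ *	init_scsi_sgl_context(&m_st_ctx->sgl_params, &m_st_ctx->data_desc,
+ *			      &sgl);
+ */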
diff --git a/drivers/scsi/qedf/drv_scsi_fw_funcs.h b/drivers/scsi/qedf/drv_scsi_fw_funcs.h
new file mode 100644 (file)
index 0000000..9cb4541
--- /dev/null
@@ -0,0 +1,85 @@
+/* QLogic FCoE Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+#ifndef _SCSI_FW_FUNCS_H
+#define _SCSI_FW_FUNCS_H
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/storage_common.h>
+#include <linux/qed/fcoe_common.h>
+
+struct scsi_sgl_task_params {
+       struct scsi_sge *sgl;
+       struct regpair sgl_phys_addr;
+       u32 total_buffer_size;
+       u16 num_sges;
+
+        /* True if the SGL contains a small (< 4KB) SGE in the middle (not the
+         * 1st or last one) - relevant for Tx only
+         */
+       bool small_mid_sge;
+};
+
+struct scsi_dif_task_params {
+       u32 initial_ref_tag;
+       bool initial_ref_tag_is_valid;
+       u16 application_tag;
+       u16 application_tag_mask;
+       u16 dif_block_size_log;
+       bool dif_on_network;
+       bool dif_on_host;
+       u8 host_guard_type;
+       u8 protection_type;
+       u8 ref_tag_mask;
+       bool crc_seed;
+
+        /* Enable Connection error upon DIF error (segments with DIF errors are
+         * dropped)
+         */
+       bool tx_dif_conn_err_en;
+       bool ignore_app_tag;
+       bool keep_ref_tag_const;
+       bool validate_guard;
+       bool validate_app_tag;
+       bool validate_ref_tag;
+       bool forward_guard;
+       bool forward_app_tag;
+       bool forward_ref_tag;
+       bool forward_app_tag_with_mask;
+       bool forward_ref_tag_with_mask;
+};
+
+struct scsi_initiator_cmd_params {
+        /* for cdb_size > default CDB size (extended CDB > 16 bytes) ->
+         * pointer to the CDB buffer SGE
+         */
+       struct scsi_sge extended_cdb_sge;
+
+       /* Physical address of sense data buffer for sense data - 256B buffer */
+       struct regpair sense_data_buffer_phys_addr;
+};
+
+/**
+ * @brief scsi_is_slow_sgl - checks for slow SGL
+ *
+ * @param num_sges - number of sges in SGL
+ * @param small_mid_sge - True if the SGL contains an SGE which is smaller than
+ * 4KB and is not the 1st or last SGE in the SGL
+ */
+bool scsi_is_slow_sgl(u16 num_sges, bool small_mid_sge);
+
+/**
+ * @brief init_scsi_sgl_context - initializes SGL task context
+ *
+ * @param sgl_params - SGL context parameters to initialize (output parameter)
+ * @param data_desc - context struct containing SGEs array to set (output
+ * parameter)
+ * @param sgl_task_params - SGL parameters (input)
+ */
+void init_scsi_sgl_context(struct scsi_sgl_params *sgl_params,
+       struct scsi_cached_sges *ctx_data_desc,
+       struct scsi_sgl_task_params *sgl_task_params);
+#endif
diff --git a/drivers/scsi/qedi/qedi_fw_api.c b/drivers/scsi/qedi/qedi_fw_api.c
new file mode 100644 (file)
index 0000000..fd354d4
--- /dev/null
@@ -0,0 +1,781 @@
+/* QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include "qedi_hsi.h"
+#include <linux/qed/qed_if.h>
+
+#include "qedi_fw_iscsi.h"
+#include "qedi_fw_scsi.h"
+
+#define SCSI_NUM_SGES_IN_CACHE 0x4
+
+static bool scsi_is_slow_sgl(u16 num_sges, bool small_mid_sge)
+{
+       return (num_sges > SCSI_NUM_SGES_SLOW_SGL_THR && small_mid_sge);
+}
+
+static
+void init_scsi_sgl_context(struct scsi_sgl_params *ctx_sgl_params,
+                          struct scsi_cached_sges *ctx_data_desc,
+                          struct scsi_sgl_task_params *sgl_task_params)
+{
+       u8 sge_index;
+       u8 num_sges;
+       u32 val;
+
+       num_sges = (sgl_task_params->num_sges > SCSI_NUM_SGES_IN_CACHE) ?
+                            SCSI_NUM_SGES_IN_CACHE : sgl_task_params->num_sges;
+
+       /* sgl params */
+       val = cpu_to_le32(sgl_task_params->sgl_phys_addr.lo);
+       ctx_sgl_params->sgl_addr.lo = val;
+       val = cpu_to_le32(sgl_task_params->sgl_phys_addr.hi);
+       ctx_sgl_params->sgl_addr.hi = val;
+       val = cpu_to_le32(sgl_task_params->total_buffer_size);
+       ctx_sgl_params->sgl_total_length = val;
+       ctx_sgl_params->sgl_num_sges = cpu_to_le16(sgl_task_params->num_sges);
+
+       for (sge_index = 0; sge_index < num_sges; sge_index++) {
+               val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.lo);
+               ctx_data_desc->sge[sge_index].sge_addr.lo = val;
+               val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.hi);
+               ctx_data_desc->sge[sge_index].sge_addr.hi = val;
+               val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_len);
+               ctx_data_desc->sge[sge_index].sge_len = val;
+       }
+}
+
+static u32 calc_rw_task_size(struct iscsi_task_params *task_params,
+                            enum iscsi_task_type task_type,
+                            struct scsi_sgl_task_params *sgl_task_params,
+                            struct scsi_dif_task_params *dif_task_params)
+{
+       u32 io_size;
+
+       if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE ||
+           task_type == ISCSI_TASK_TYPE_TARGET_READ)
+               io_size = task_params->tx_io_size;
+       else
+               io_size = task_params->rx_io_size;
+
+       if (!io_size)
+               return 0;
+
+       if (!dif_task_params)
+               return io_size;
+
+       return !dif_task_params->dif_on_network ?
+              io_size : sgl_task_params->total_buffer_size;
+}
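+
+/* Worked example: for a 4KB initiator write with dif_on_network set and
+ * (assuming) 512B protection intervals carrying 8B DIF tuples, this returns
+ * total_buffer_size = 4096 + 8 * 8 = 4160 bytes; without network DIF it
+ * returns the plain 4096-byte io_size.
+ */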
+
+static void
+init_dif_context_flags(struct iscsi_dif_flags *ctx_dif_flags,
+                      struct scsi_dif_task_params *dif_task_params)
+{
+       if (!dif_task_params)
+               return;
+
+       SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG,
+                 dif_task_params->dif_block_size_log);
+       SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_DIF_TO_PEER,
+                 dif_task_params->dif_on_network ? 1 : 0);
+       SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_HOST_INTERFACE,
+                 dif_task_params->dif_on_host ? 1 : 0);
+}
+
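+/* Build the SQ WQE for the task. Cleanup WQEs carry only the task id
+ * and WQE type; normal, login and middle-path WQEs additionally encode,
+ * as applicable, the SGE count, the continuation length and the
+ * extended-CDB size for command PDUs that carry an AHS.
+ */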
+static void init_sqe(struct iscsi_task_params *task_params,
+                    struct scsi_sgl_task_params *sgl_task_params,
+                    struct scsi_dif_task_params *dif_task_params,
+                    struct iscsi_common_hdr *pdu_header,
+                    struct scsi_initiator_cmd_params *cmd_params,
+                    enum iscsi_task_type task_type,
+                    bool is_cleanup)
+{
+       if (!task_params->sqe)
+               return;
+
+       memset(task_params->sqe, 0, sizeof(*task_params->sqe));
+       task_params->sqe->task_id = cpu_to_le16(task_params->itid);
+       if (is_cleanup) {
+               SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
+                         ISCSI_WQE_TYPE_TASK_CLEANUP);
+               return;
+       }
+
+       switch (task_type) {
+       case ISCSI_TASK_TYPE_INITIATOR_WRITE:
+       {
+               u32 buf_size = 0;
+               u32 num_sges = 0;
+
+               init_dif_context_flags(&task_params->sqe->prot_flags,
+                                      dif_task_params);
+
+               SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
+                         ISCSI_WQE_TYPE_NORMAL);
+
+               if (task_params->tx_io_size) {
+                       buf_size = calc_rw_task_size(task_params, task_type,
+                                                    sgl_task_params,
+                                                    dif_task_params);
+
+                       if (scsi_is_slow_sgl(sgl_task_params->num_sges,
+                                            sgl_task_params->small_mid_sge))
+                               num_sges = ISCSI_WQE_NUM_SGES_SLOWIO;
+                       else
+                               num_sges = min(sgl_task_params->num_sges,
+                                              (u16)SCSI_NUM_SGES_SLOW_SGL_THR);
+               }
+
+               SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES,
+                         num_sges);
+               SET_FIELD(task_params->sqe->contlen_cdbsize, ISCSI_WQE_CONT_LEN,
+                         buf_size);
+
+               if (GET_FIELD(pdu_header->hdr_second_dword,
+                             ISCSI_CMD_HDR_TOTAL_AHS_LEN))
+                       SET_FIELD(task_params->sqe->contlen_cdbsize,
+                                 ISCSI_WQE_CDB_SIZE,
+                                 cmd_params->extended_cdb_sge.sge_len);
+       }
+               break;
+       case ISCSI_TASK_TYPE_INITIATOR_READ:
+               SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
+                         ISCSI_WQE_TYPE_NORMAL);
+
+               if (GET_FIELD(pdu_header->hdr_second_dword,
+                             ISCSI_CMD_HDR_TOTAL_AHS_LEN))
+                       SET_FIELD(task_params->sqe->contlen_cdbsize,
+                                 ISCSI_WQE_CDB_SIZE,
+                                 cmd_params->extended_cdb_sge.sge_len);
+               break;
+       case ISCSI_TASK_TYPE_LOGIN_RESPONSE:
+       case ISCSI_TASK_TYPE_MIDPATH:
+       {
+               bool advance_statsn = true;
+
+               if (task_type == ISCSI_TASK_TYPE_LOGIN_RESPONSE)
+                       SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
+                                 ISCSI_WQE_TYPE_LOGIN);
+               else
+                       SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
+                                 ISCSI_WQE_TYPE_MIDDLE_PATH);
+
+               if (task_type == ISCSI_TASK_TYPE_MIDPATH) {
+                       u8 opcode = GET_FIELD(pdu_header->hdr_first_byte,
+                                             ISCSI_COMMON_HDR_OPCODE);
+
+                       if (opcode != ISCSI_OPCODE_TEXT_RESPONSE &&
+                           (opcode != ISCSI_OPCODE_NOP_IN ||
+                           pdu_header->itt == ISCSI_TTT_ALL_ONES))
+                               advance_statsn = false;
+               }
+
+               SET_FIELD(task_params->sqe->flags, ISCSI_WQE_RESPONSE,
+                         advance_statsn ? 1 : 0);
+
+               if (task_params->tx_io_size) {
+                       SET_FIELD(task_params->sqe->contlen_cdbsize,
+                                 ISCSI_WQE_CONT_LEN,
+                                 task_params->tx_io_size);
+
+                       if (scsi_is_slow_sgl(sgl_task_params->num_sges,
+                                            sgl_task_params->small_mid_sge))
+                               SET_FIELD(task_params->sqe->flags,
+                                         ISCSI_WQE_NUM_SGES,
+                                         ISCSI_WQE_NUM_SGES_SLOWIO);
+                       else
+                               SET_FIELD(task_params->sqe->flags,
+                                         ISCSI_WQE_NUM_SGES,
+                                         min(sgl_task_params->num_sges,
+                                             (u16)SCSI_NUM_SGES_SLOW_SGL_THR));
+               }
+       }
+               break;
+       default:
+               break;
+       }
+}
+
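+/* Reset the task context and fill the fields common to all task types:
+ * the PDU header copy in the ystorm context, the task type, the
+ * connection icid and the CQ/RSS number.
+ */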
+static void init_default_iscsi_task(struct iscsi_task_params *task_params,
+                                   struct data_hdr *pdu_header,
+                                   enum iscsi_task_type task_type)
+{
+       struct iscsi_task_context *context;
+       u16 index;
+       u32 val;
+
+       context = task_params->context;
+       memset(context, 0, sizeof(*context));
+
+       for (index = 0; index <
+            ARRAY_SIZE(context->ystorm_st_context.pdu_hdr.data.data);
+            index++) {
+               val = cpu_to_le32(pdu_header->data[index]);
+               context->ystorm_st_context.pdu_hdr.data.data[index] = val;
+       }
+
+       context->mstorm_st_context.task_type = task_type;
+       context->mstorm_ag_context.task_cid =
+                                           cpu_to_le16(task_params->conn_icid);
+
+       SET_FIELD(context->ustorm_ag_context.flags1,
+                 USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+
+       context->ustorm_st_context.task_type = task_type;
+       context->ustorm_st_context.cq_rss_number = task_params->cq_rss_number;
+       context->ustorm_ag_context.icid = cpu_to_le16(task_params->conn_icid);
+}
+
+static
+void init_initiator_rw_cdb_ystorm_context(struct ystorm_iscsi_task_st_ctx *ystc,
+                                         struct scsi_initiator_cmd_params *cmd)
+{
+       union iscsi_task_hdr *ctx_pdu_hdr = &ystc->pdu_hdr;
+       u32 val;
+
+       if (!cmd->extended_cdb_sge.sge_len)
+               return;
+
+       SET_FIELD(ctx_pdu_hdr->ext_cdb_cmd.hdr_second_dword,
+                 ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE,
+                 cmd->extended_cdb_sge.sge_len);
+       val = cpu_to_le32(cmd->extended_cdb_sge.sge_addr.lo);
+       ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_addr.lo = val;
+       val = cpu_to_le32(cmd->extended_cdb_sge.sge_addr.hi);
+       ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_addr.hi = val;
+       val = cpu_to_le32(cmd->extended_cdb_sge.sge_len);
+       ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_len  = val;
+}
+
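+/* Program the ustorm storm and aggregation contexts: remaining receive
+ * length, expected data transfer length, SGE count and the Tx-DIF
+ * connection-error enable bit.
+ */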
+static
+void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt,
+                              struct ustorm_iscsi_task_ag_ctx *ustorm_ag_cxt,
+                              u32 remaining_recv_len,
+                              u32 expected_data_transfer_len,
+                              u8 num_sges, bool tx_dif_conn_err_en)
+{
+       u32 val;
+
+       ustorm_st_cxt->rem_rcv_len = cpu_to_le32(remaining_recv_len);
+       ustorm_ag_cxt->exp_data_acked = cpu_to_le32(expected_data_transfer_len);
+       val = cpu_to_le32(expected_data_transfer_len);
+       ustorm_st_cxt->exp_data_transfer_len = val;
+       SET_FIELD(ustorm_st_cxt->reg1.reg1_map, ISCSI_REG1_NUM_SGES, num_sges);
+       SET_FIELD(ustorm_ag_cxt->flags2,
+                 USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN,
+                 tx_dif_conn_err_en ? 1 : 0);
+}
+
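+/* Set exp_data_acked/exp_cont_len per task type. For initiator writes
+ * the pre-acked length is bounded by the unsolicited data the
+ * connection allows (first burst vs. max send PDU length, depending on
+ * InitialR2T/ImmediateData); when an AHS is present, only the AHS
+ * bytes are pre-acked.
+ */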
+static
+void set_rw_exp_data_acked_and_cont_len(struct iscsi_task_context *context,
+                                       struct iscsi_conn_params  *conn_params,
+                                       enum iscsi_task_type task_type,
+                                       u32 task_size,
+                                       u32 exp_data_transfer_len,
+                                       u8 total_ahs_length)
+{
+       u32 max_unsolicited_data = 0, val;
+
+       if (total_ahs_length &&
+           (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE ||
+            task_type == ISCSI_TASK_TYPE_INITIATOR_READ))
+               SET_FIELD(context->ustorm_st_context.flags2,
+                         USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST, 1);
+
+       switch (task_type) {
+       case ISCSI_TASK_TYPE_INITIATOR_WRITE:
+               if (!conn_params->initial_r2t)
+                       max_unsolicited_data = conn_params->first_burst_length;
+               else if (conn_params->immediate_data)
+                       max_unsolicited_data =
+                                         min(conn_params->first_burst_length,
+                                             conn_params->max_send_pdu_length);
+
+               context->ustorm_ag_context.exp_data_acked =
+                                  cpu_to_le32(total_ahs_length == 0 ?
+                                               min(exp_data_transfer_len,
+                                                   max_unsolicited_data) :
+                                               ((u32)(total_ahs_length +
+                                                      ISCSI_AHS_CNTL_SIZE)));
+               break;
+       case ISCSI_TASK_TYPE_TARGET_READ:
+               val = cpu_to_le32(exp_data_transfer_len);
+               context->ustorm_ag_context.exp_data_acked = val;
+               break;
+       case ISCSI_TASK_TYPE_INITIATOR_READ:
+               context->ustorm_ag_context.exp_data_acked =
+                                       cpu_to_le32((total_ahs_length == 0 ? 0 :
+                                                    total_ahs_length +
+                                                    ISCSI_AHS_CNTL_SIZE));
+               break;
+       case ISCSI_TASK_TYPE_TARGET_WRITE:
+               val = cpu_to_le32(task_size);
+               context->ustorm_ag_context.exp_cont_len = val;
+               break;
+       default:
+               break;
+       }
+}
+
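+/* Fill the Rx (RDIF) and/or Tx (TDIF) DIF contexts: RDIF applies to
+ * the receive direction (target write / initiator read), TDIF to the
+ * transmit direction (target read / initiator write). Nothing is
+ * programmed unless DIF is enabled on both the host and the network
+ * interfaces.
+ */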
+static
+void init_rtdif_task_context(struct rdif_task_context *rdif_context,
+                            struct tdif_task_context *tdif_context,
+                            struct scsi_dif_task_params *dif_task_params,
+                            enum iscsi_task_type task_type)
+{
+       u32 val;
+
+       if (!dif_task_params->dif_on_network || !dif_task_params->dif_on_host)
+               return;
+
+       if (task_type == ISCSI_TASK_TYPE_TARGET_WRITE ||
+           task_type == ISCSI_TASK_TYPE_INITIATOR_READ) {
+               rdif_context->app_tag_value =
+                                 cpu_to_le16(dif_task_params->application_tag);
+               rdif_context->partial_crc_value = cpu_to_le16(0xffff);
+               val = cpu_to_le32(dif_task_params->initial_ref_tag);
+               rdif_context->initial_ref_tag = val;
+               rdif_context->app_tag_mask =
+                            cpu_to_le16(dif_task_params->application_tag_mask);
+               SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_CRC_SEED,
+                         dif_task_params->crc_seed ? 1 : 0);
+               SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_HOSTGUARDTYPE,
+                         dif_task_params->host_guard_type);
+               SET_FIELD(rdif_context->flags0,
+                         RDIF_TASK_CONTEXT_PROTECTIONTYPE,
+                         dif_task_params->protection_type);
+               SET_FIELD(rdif_context->flags0,
+                         RDIF_TASK_CONTEXT_INITIALREFTAGVALID, 1);
+               SET_FIELD(rdif_context->flags0,
+                         RDIF_TASK_CONTEXT_KEEPREFTAGCONST,
+                         dif_task_params->keep_ref_tag_const ? 1 : 0);
+               SET_FIELD(rdif_context->flags1,
+                         RDIF_TASK_CONTEXT_VALIDATEAPPTAG,
+                         (dif_task_params->validate_app_tag &&
+                         dif_task_params->dif_on_network) ? 1 : 0);
+               SET_FIELD(rdif_context->flags1,
+                         RDIF_TASK_CONTEXT_VALIDATEGUARD,
+                         (dif_task_params->validate_guard &&
+                         dif_task_params->dif_on_network) ? 1 : 0);
+               SET_FIELD(rdif_context->flags1,
+                         RDIF_TASK_CONTEXT_VALIDATEREFTAG,
+                         (dif_task_params->validate_ref_tag &&
+                         dif_task_params->dif_on_network) ? 1 : 0);
+               SET_FIELD(rdif_context->flags1,
+                         RDIF_TASK_CONTEXT_HOSTINTERFACE,
+                         dif_task_params->dif_on_host ? 1 : 0);
+               SET_FIELD(rdif_context->flags1,
+                         RDIF_TASK_CONTEXT_NETWORKINTERFACE,
+                         dif_task_params->dif_on_network ? 1 : 0);
+               SET_FIELD(rdif_context->flags1,
+                         RDIF_TASK_CONTEXT_FORWARDGUARD,
+                         dif_task_params->forward_guard ? 1 : 0);
+               SET_FIELD(rdif_context->flags1,
+                         RDIF_TASK_CONTEXT_FORWARDAPPTAG,
+                         dif_task_params->forward_app_tag ? 1 : 0);
+               SET_FIELD(rdif_context->flags1,
+                         RDIF_TASK_CONTEXT_FORWARDREFTAG,
+                         dif_task_params->forward_ref_tag ? 1 : 0);
+               SET_FIELD(rdif_context->flags1,
+                         RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK,
+                         dif_task_params->forward_app_tag_with_mask ? 1 : 0);
+               SET_FIELD(rdif_context->flags1,
+                         RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK,
+                         dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
+               SET_FIELD(rdif_context->flags1,
+                         RDIF_TASK_CONTEXT_INTERVALSIZE,
+                         dif_task_params->dif_block_size_log - 9);
+               SET_FIELD(rdif_context->state,
+                         RDIF_TASK_CONTEXT_REFTAGMASK,
+                         dif_task_params->ref_tag_mask);
+               SET_FIELD(rdif_context->state, RDIF_TASK_CONTEXT_IGNOREAPPTAG,
+                         dif_task_params->ignore_app_tag);
+       }
+
+       if (task_type == ISCSI_TASK_TYPE_TARGET_READ ||
+           task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) {
+               tdif_context->app_tag_value =
+                                 cpu_to_le16(dif_task_params->application_tag);
+               tdif_context->partial_crc_valueB =
+                      cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
+               tdif_context->partial_crc_value_a =
+                      cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
+               SET_FIELD(tdif_context->flags0, TDIF_TASK_CONTEXT_CRC_SEED,
+                         dif_task_params->crc_seed ? 1 : 0);
+
+               SET_FIELD(tdif_context->flags0,
+                         TDIF_TASK_CONTEXT_SETERRORWITHEOP,
+                         dif_task_params->tx_dif_conn_err_en ? 1 : 0);
+               SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDGUARD,
+                         dif_task_params->forward_guard   ? 1 : 0);
+               SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDAPPTAG,
+                         dif_task_params->forward_app_tag ? 1 : 0);
+               SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDREFTAG,
+                         dif_task_params->forward_ref_tag ? 1 : 0);
+               SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_INTERVALSIZE,
+                         dif_task_params->dif_block_size_log - 9);
+               SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_HOSTINTERFACE,
+                         dif_task_params->dif_on_host    ? 1 : 0);
+               SET_FIELD(tdif_context->flags1,
+                         TDIF_TASK_CONTEXT_NETWORKINTERFACE,
+                         dif_task_params->dif_on_network ? 1 : 0);
+               val = cpu_to_le32(dif_task_params->initial_ref_tag);
+               tdif_context->initial_ref_tag = val;
+               tdif_context->app_tag_mask =
+                            cpu_to_le16(dif_task_params->application_tag_mask);
+               SET_FIELD(tdif_context->flags0,
+                         TDIF_TASK_CONTEXT_HOSTGUARDTYPE,
+                         dif_task_params->host_guard_type);
+               SET_FIELD(tdif_context->flags0,
+                         TDIF_TASK_CONTEXT_PROTECTIONTYPE,
+                         dif_task_params->protection_type);
+               SET_FIELD(tdif_context->flags0,
+                         TDIF_TASK_CONTEXT_INITIALREFTAGVALID,
+                         dif_task_params->initial_ref_tag_is_valid ? 1 : 0);
+               SET_FIELD(tdif_context->flags0,
+                         TDIF_TASK_CONTEXT_KEEPREFTAGCONST,
+                         dif_task_params->keep_ref_tag_const ? 1 : 0);
+               SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_VALIDATEGUARD,
+                         (dif_task_params->validate_guard &&
+                          dif_task_params->dif_on_host) ? 1 : 0);
+               SET_FIELD(tdif_context->flags1,
+                         TDIF_TASK_CONTEXT_VALIDATEAPPTAG,
+                         (dif_task_params->validate_app_tag &&
+                         dif_task_params->dif_on_host) ? 1 : 0);
+               SET_FIELD(tdif_context->flags1,
+                         TDIF_TASK_CONTEXT_VALIDATEREFTAG,
+                         (dif_task_params->validate_ref_tag &&
+                          dif_task_params->dif_on_host) ? 1 : 0);
+               SET_FIELD(tdif_context->flags1,
+                         TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK,
+                         dif_task_params->forward_app_tag_with_mask ? 1 : 0);
+               SET_FIELD(tdif_context->flags1,
+                         TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK,
+                         dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
+               SET_FIELD(tdif_context->flags1,
+                         TDIF_TASK_CONTEXT_REFTAGMASK,
+                         dif_task_params->ref_tag_mask);
+               SET_FIELD(tdif_context->flags0,
+                         TDIF_TASK_CONTEXT_IGNOREAPPTAG,
+                         dif_task_params->ignore_app_tag ? 1 : 0);
+       }
+}
+
+static void set_local_completion_context(struct iscsi_task_context *context)
+{
+       SET_FIELD(context->ystorm_st_context.state.flags,
+                 YSTORM_ISCSI_TASK_STATE_LOCAL_COMP, 1);
+       SET_FIELD(context->ustorm_st_context.flags,
+                 USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 1);
+}
+
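+/* Common initialization path for all R/W tasks: build the default
+ * context, program the ystorm (Tx) or mstorm (Rx) SGL, clamp the
+ * expected transfer length, set up DIF when requested and finally
+ * format the SQE.
+ */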
+static int init_rw_iscsi_task(struct iscsi_task_params *task_params,
+                             enum iscsi_task_type task_type,
+                             struct iscsi_conn_params *conn_params,
+                             struct iscsi_common_hdr *pdu_header,
+                             struct scsi_sgl_task_params *sgl_task_params,
+                             struct scsi_initiator_cmd_params *cmd_params,
+                             struct scsi_dif_task_params *dif_task_params)
+{
+       u32 exp_data_transfer_len = conn_params->max_burst_length;
+       struct iscsi_task_context *cxt;
+       bool slow_io = false;
+       u32 task_size, val;
+       u8 num_sges = 0;
+
+       task_size = calc_rw_task_size(task_params, task_type, sgl_task_params,
+                                     dif_task_params);
+
+       init_default_iscsi_task(task_params, (struct data_hdr *)pdu_header,
+                               task_type);
+
+       cxt = task_params->context;
+
+       val = cpu_to_le32(task_size);
+       cxt->ystorm_st_context.pdu_hdr.cmd.expected_transfer_length = val;
+       init_initiator_rw_cdb_ystorm_context(&cxt->ystorm_st_context,
+                                            cmd_params);
+       val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.lo);
+       cxt->mstorm_st_context.sense_db.lo = val;
+
+       val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.hi);
+       cxt->mstorm_st_context.sense_db.hi = val;
+
+       if (task_params->tx_io_size) {
+               init_dif_context_flags(&cxt->ystorm_st_context.state.dif_flags,
+                                      dif_task_params);
+               init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
+                                     &cxt->ystorm_st_context.state.data_desc,
+                                     sgl_task_params);
+
+               slow_io = scsi_is_slow_sgl(sgl_task_params->num_sges,
+                                          sgl_task_params->small_mid_sge);
+
+               num_sges = !slow_io ? min_t(u16, sgl_task_params->num_sges,
+                                           (u16)SCSI_NUM_SGES_SLOW_SGL_THR) :
+                                     ISCSI_WQE_NUM_SGES_SLOWIO;
+
+               if (slow_io) {
+                       SET_FIELD(cxt->ystorm_st_context.state.flags,
+                                 YSTORM_ISCSI_TASK_STATE_SLOW_IO, 1);
+               }
+       } else if (task_params->rx_io_size) {
+               init_dif_context_flags(&cxt->mstorm_st_context.dif_flags,
+                                      dif_task_params);
+               init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
+                                     &cxt->mstorm_st_context.data_desc,
+                                     sgl_task_params);
+               num_sges = !scsi_is_slow_sgl(sgl_task_params->num_sges,
+                               sgl_task_params->small_mid_sge) ?
+                               min_t(u16, sgl_task_params->num_sges,
+                                     (u16)SCSI_NUM_SGES_SLOW_SGL_THR) :
+                               ISCSI_WQE_NUM_SGES_SLOWIO;
+               cxt->mstorm_st_context.rem_task_size = cpu_to_le32(task_size);
+       }
+
+       if (exp_data_transfer_len > task_size  ||
+           task_type != ISCSI_TASK_TYPE_TARGET_WRITE)
+               exp_data_transfer_len = task_size;
+
+       init_ustorm_task_contexts(&task_params->context->ustorm_st_context,
+                                 &task_params->context->ustorm_ag_context,
+                                 task_size, exp_data_transfer_len, num_sges,
+                                 dif_task_params ?
+                                 dif_task_params->tx_dif_conn_err_en : false);
+
+       set_rw_exp_data_acked_and_cont_len(task_params->context, conn_params,
+                                          task_type, task_size,
+                                          exp_data_transfer_len,
+                                       GET_FIELD(pdu_header->hdr_second_dword,
+                                                 ISCSI_CMD_HDR_TOTAL_AHS_LEN));
+
+       if (dif_task_params)
+               init_rtdif_task_context(&task_params->context->rdif_context,
+                                       &task_params->context->tdif_context,
+                                       dif_task_params, task_type);
+
+       init_sqe(task_params, sgl_task_params, dif_task_params, pdu_header,
+                cmd_params, task_type, false);
+
+       return 0;
+}
+
+int init_initiator_rw_iscsi_task(struct iscsi_task_params *task_params,
+                                struct iscsi_conn_params *conn_params,
+                                struct scsi_initiator_cmd_params *cmd_params,
+                                struct iscsi_cmd_hdr *cmd_header,
+                                struct scsi_sgl_task_params *tx_sgl_params,
+                                struct scsi_sgl_task_params *rx_sgl_params,
+                                struct scsi_dif_task_params *dif_task_params)
+{
+       if (GET_FIELD(cmd_header->flags_attr, ISCSI_CMD_HDR_WRITE))
+               return init_rw_iscsi_task(task_params,
+                                         ISCSI_TASK_TYPE_INITIATOR_WRITE,
+                                         conn_params,
+                                         (struct iscsi_common_hdr *)cmd_header,
+                                         tx_sgl_params, cmd_params,
+                                         dif_task_params);
+       else if (GET_FIELD(cmd_header->flags_attr, ISCSI_CMD_HDR_READ))
+               return init_rw_iscsi_task(task_params,
+                                         ISCSI_TASK_TYPE_INITIATOR_READ,
+                                         conn_params,
+                                         (struct iscsi_common_hdr *)cmd_header,
+                                         rx_sgl_params, cmd_params,
+                                         dif_task_params);
+       else
+               return -1;
+}
+
+int init_initiator_login_request_task(struct iscsi_task_params *task_params,
+                                     struct iscsi_login_req_hdr  *login_header,
+                                     struct scsi_sgl_task_params *tx_params,
+                                     struct scsi_sgl_task_params *rx_params)
+{
+       struct iscsi_task_context *cxt;
+
+       cxt = task_params->context;
+
+       init_default_iscsi_task(task_params,
+                               (struct data_hdr *)login_header,
+                               ISCSI_TASK_TYPE_MIDPATH);
+
+       init_ustorm_task_contexts(&cxt->ustorm_st_context,
+                                 &cxt->ustorm_ag_context,
+                                 task_params->rx_io_size ?
+                                 rx_params->total_buffer_size : 0,
+                                 task_params->tx_io_size ?
+                                 tx_params->total_buffer_size : 0, 0,
+                                 0);
+
+       if (task_params->tx_io_size)
+               init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
+                                     &cxt->ystorm_st_context.state.data_desc,
+                                     tx_params);
+
+       if (task_params->rx_io_size)
+               init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
+                                     &cxt->mstorm_st_context.data_desc,
+                                     rx_params);
+
+       cxt->mstorm_st_context.rem_task_size =
+                       cpu_to_le32(task_params->rx_io_size ?
+                                   rx_params->total_buffer_size : 0);
+
+       init_sqe(task_params, tx_params, NULL,
+                (struct iscsi_common_hdr *)login_header, NULL,
+                ISCSI_TASK_TYPE_MIDPATH, false);
+
+       return 0;
+}
+
+int init_initiator_nop_out_task(struct iscsi_task_params *task_params,
+                               struct iscsi_nop_out_hdr *nop_out_pdu_header,
+                               struct scsi_sgl_task_params *tx_sgl_task_params,
+                               struct scsi_sgl_task_params *rx_sgl_task_params)
+{
+       struct iscsi_task_context *cxt;
+
+       cxt = task_params->context;
+
+       init_default_iscsi_task(task_params,
+                               (struct data_hdr *)nop_out_pdu_header,
+                               ISCSI_TASK_TYPE_MIDPATH);
+
+       if (nop_out_pdu_header->itt == ISCSI_ITT_ALL_ONES)
+               set_local_completion_context(task_params->context);
+
+       if (task_params->tx_io_size)
+               init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
+                                     &cxt->ystorm_st_context.state.data_desc,
+                                     tx_sgl_task_params);
+
+       if (task_params->rx_io_size)
+               init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
+                                     &cxt->mstorm_st_context.data_desc,
+                                     rx_sgl_task_params);
+
+       init_ustorm_task_contexts(&cxt->ustorm_st_context,
+                                 &cxt->ustorm_ag_context,
+                                 task_params->rx_io_size ?
+                                 rx_sgl_task_params->total_buffer_size : 0,
+                                 task_params->tx_io_size ?
+                                 tx_sgl_task_params->total_buffer_size : 0,
+                                 0, 0);
+
+       cxt->mstorm_st_context.rem_task_size =
+                               cpu_to_le32(task_params->rx_io_size ?
+                                       rx_sgl_task_params->total_buffer_size :
+                                       0);
+
+       init_sqe(task_params, tx_sgl_task_params, NULL,
+                (struct iscsi_common_hdr *)nop_out_pdu_header, NULL,
+                ISCSI_TASK_TYPE_MIDPATH, false);
+
+       return 0;
+}
+
+int init_initiator_logout_request_task(struct iscsi_task_params *task_params,
+                                      struct iscsi_logout_req_hdr *logout_hdr,
+                                      struct scsi_sgl_task_params *tx_params,
+                                      struct scsi_sgl_task_params *rx_params)
+{
+       struct iscsi_task_context *cxt;
+
+       cxt = task_params->context;
+
+       init_default_iscsi_task(task_params,
+                               (struct data_hdr *)logout_hdr,
+                               ISCSI_TASK_TYPE_MIDPATH);
+
+       if (task_params->tx_io_size)
+               init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
+                                     &cxt->ystorm_st_context.state.data_desc,
+                                     tx_params);
+
+       if (task_params->rx_io_size)
+               init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
+                                     &cxt->mstorm_st_context.data_desc,
+                                     rx_params);
+
+       init_ustorm_task_contexts(&cxt->ustorm_st_context,
+                                 &cxt->ustorm_ag_context,
+                                 task_params->rx_io_size ?
+                                 rx_params->total_buffer_size : 0,
+                                 task_params->tx_io_size ?
+                                 tx_params->total_buffer_size : 0,
+                                 0, 0);
+
+       cxt->mstorm_st_context.rem_task_size =
+                                       cpu_to_le32(task_params->rx_io_size ?
+                                       rx_params->total_buffer_size : 0);
+
+       init_sqe(task_params, tx_params, NULL,
+                (struct iscsi_common_hdr *)logout_hdr, NULL,
+                ISCSI_TASK_TYPE_MIDPATH, false);
+
+       return 0;
+}
+
+int init_initiator_tmf_request_task(struct iscsi_task_params *task_params,
+                                   struct iscsi_tmf_request_hdr *tmf_header)
+{
+       init_default_iscsi_task(task_params, (struct data_hdr *)tmf_header,
+                               ISCSI_TASK_TYPE_MIDPATH);
+
+       init_sqe(task_params, NULL, NULL,
+                (struct iscsi_common_hdr *)tmf_header, NULL,
+                ISCSI_TASK_TYPE_MIDPATH, false);
+
+       return 0;
+}
+
+int init_initiator_text_request_task(struct iscsi_task_params *task_params,
+                                    struct iscsi_text_request_hdr *text_header,
+                                    struct scsi_sgl_task_params *tx_params,
+                                    struct scsi_sgl_task_params *rx_params)
+{
+       struct iscsi_task_context *cxt;
+
+       cxt = task_params->context;
+
+       init_default_iscsi_task(task_params,
+                               (struct data_hdr *)text_header,
+                               ISCSI_TASK_TYPE_MIDPATH);
+
+       if (task_params->tx_io_size)
+               init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
+                                     &cxt->ystorm_st_context.state.data_desc,
+                                     tx_params);
+
+       if (task_params->rx_io_size)
+               init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
+                                     &cxt->mstorm_st_context.data_desc,
+                                     rx_params);
+
+       cxt->mstorm_st_context.rem_task_size =
+                               cpu_to_le32(task_params->rx_io_size ?
+                                       rx_params->total_buffer_size : 0);
+
+       init_ustorm_task_contexts(&cxt->ustorm_st_context,
+                                 &cxt->ustorm_ag_context,
+                                 task_params->rx_io_size ?
+                                 rx_params->total_buffer_size : 0,
+                                 task_params->tx_io_size ?
+                                 tx_params->total_buffer_size : 0, 0, 0);
+
+       init_sqe(task_params, tx_params, NULL,
+                (struct iscsi_common_hdr *)text_header, NULL,
+                ISCSI_TASK_TYPE_MIDPATH, false);
+
+       return 0;
+}
+
+int init_cleanup_task(struct iscsi_task_params *task_params)
+{
+       init_sqe(task_params, NULL, NULL, NULL, NULL, ISCSI_TASK_TYPE_MIDPATH,
+                true);
+       return 0;
+}
diff --git a/drivers/scsi/qedi/qedi_fw_iscsi.h b/drivers/scsi/qedi/qedi_fw_iscsi.h
new file mode 100644 (file)
index 0000000..b6f24f9
--- /dev/null
@@ -0,0 +1,117 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QEDI_FW_ISCSI_H_
+#define _QEDI_FW_ISCSI_H_
+
+#include "qedi_fw_scsi.h"
+
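+/* Per-task parameters handed to the init_* helpers: the task context
+ * and SQE to fill, the I/O size in each direction and the identifiers
+ * (connection icid, task itid, CQ/RSS number) the firmware needs.
+ */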
+struct iscsi_task_params {
+       struct iscsi_task_context *context;
+       struct iscsi_wqe          *sqe;
+       u32                       tx_io_size;
+       u32                       rx_io_size;
+       u16                       conn_icid;
+       u16                       itid;
+       u8                        cq_rss_number;
+};
+
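+/* Negotiated connection-level parameters that affect task layout:
+ * burst/PDU lengths plus the InitialR2T and ImmediateData settings.
+ */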
+struct iscsi_conn_params {
+       u32     first_burst_length;
+       u32     max_send_pdu_length;
+       u32     max_burst_length;
+       bool    initial_r2t;
+       bool    immediate_data;
+};
+
+/* @brief init_initiator_rw_iscsi_task - initializes an iSCSI Initiator
+ * Read/Write task context.
+ *
+ * @param task_params     - Pointer to task parameters struct
+ * @param conn_params     - Connection parameters
+ * @param cmd_params      - Command-specific parameters
+ * @param cmd_pdu_header  - PDU header parameters
+ * @param tx_sgl_params   - Pointer to the Tx SGL task params
+ * @param rx_sgl_params   - Pointer to the Rx SGL task params
+ * @param dif_task_params - Pointer to DIF parameters struct
+ */
+int init_initiator_rw_iscsi_task(struct iscsi_task_params *task_params,
+                                struct iscsi_conn_params *conn_params,
+                                struct scsi_initiator_cmd_params *cmd_params,
+                                struct iscsi_cmd_hdr *cmd_pdu_header,
+                                struct scsi_sgl_task_params *tx_sgl_params,
+                                struct scsi_sgl_task_params *rx_sgl_params,
+                                struct scsi_dif_task_params *dif_task_params);
+
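+/* Illustrative call pattern only; the actual wiring lives in the qedi
+ * driver proper, and the local variable names below are hypothetical:
+ *
+ *     struct iscsi_task_params tp = { .context = ctx, .sqe = sqe,
+ *                                     .tx_io_size = len, .itid = itid };
+ *
+ *     if (init_initiator_rw_iscsi_task(&tp, &conn, &cmd, cmd_hdr,
+ *                                      &tx_sgl, &rx_sgl, NULL))
+ *             return -EINVAL;
+ */
+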
+/* @brief init_initiator_login_request_task - initializes iSCSI Initiator Login
+ * Request task context.
+ *
+ * @param task_params  - Pointer to task parameters struct
+ * @param login_header - PDU header parameters
+ * @param tx_params    - Pointer to the Tx SGL task params
+ * @param rx_params    - Pointer to the Rx SGL task params
+ */
+int init_initiator_login_request_task(struct iscsi_task_params *task_params,
+                                     struct iscsi_login_req_hdr *login_header,
+                                     struct scsi_sgl_task_params *tx_params,
+                                     struct scsi_sgl_task_params *rx_params);
+
+/* @brief init_initiator_nop_out_task - initializes iSCSI Initiator NOP Out
+ * task context.
+ *
+ * @param task_params        - Pointer to task parameters struct
+ * @param nop_out_pdu_header - PDU header parameters
+ * @param tx_sgl_params      - Pointer to the Tx SGL task params
+ * @param rx_sgl_params      - Pointer to the Rx SGL task params
+ */
+int init_initiator_nop_out_task(struct iscsi_task_params *task_params,
+                               struct iscsi_nop_out_hdr *nop_out_pdu_header,
+                               struct scsi_sgl_task_params *tx_sgl_params,
+                               struct scsi_sgl_task_params *rx_sgl_params);
+
+/* @brief init_initiator_logout_request_task - initializes iSCSI Initiator
+ * Logout Request task context.
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param logout_hdr  - PDU header parameters
+ * @param tx_params   - Pointer to the Tx SGL task params
+ * @param rx_params   - Pointer to the Rx SGL task params
+ */
+int init_initiator_logout_request_task(struct iscsi_task_params *task_params,
+                                      struct iscsi_logout_req_hdr *logout_hdr,
+                                      struct scsi_sgl_task_params *tx_params,
+                                      struct scsi_sgl_task_params *rx_params);
+
+/* @brief init_initiator_tmf_request_task - initializes iSCSI Initiator TMF
+ * task context.
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param tmf_header  - PDU header parameters
+ */
+int init_initiator_tmf_request_task(struct iscsi_task_params *task_params,
+                                   struct iscsi_tmf_request_hdr *tmf_header);
+
+/* @brief init_initiator_text_request_task - initializes iSCSI Initiator Text
+ * Request task context.
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param text_header - PDU header parameters
+ * @param tx_params   - Pointer to the Tx SGL task params
+ * @param rx_params   - Pointer to the Rx SGL task params
+ */
+int init_initiator_text_request_task(struct iscsi_task_params *task_params,
+                                    struct iscsi_text_request_hdr *text_header,
+                                    struct scsi_sgl_task_params *tx_params,
+                                    struct scsi_sgl_task_params *rx_params);
+
+/* @brief init_cleanup_task - initializes a Cleanup task (SQE).
+ *
+ * @param task_params - Pointer to task parameters struct
+ */
+int init_cleanup_task(struct iscsi_task_params *task_params);
+#endif
diff --git a/drivers/scsi/qedi/qedi_fw_scsi.h b/drivers/scsi/qedi/qedi_fw_scsi.h
new file mode 100644 (file)
index 0000000..cdaf918
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QEDI_FW_SCSI_H_
+#define _QEDI_FW_SCSI_H_
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include "qedi_hsi.h"
+#include <linux/qed/qed_if.h>
+
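+/* Describes one SGL: the SGE array and its physical address, the total
+ * buffer size, the SGE count and the small-middle-SGE hint used for
+ * slow-SGL detection.
+ */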
+struct scsi_sgl_task_params {
+       struct scsi_sge *sgl;
+       struct regpair  sgl_phys_addr;
+       u32             total_buffer_size;
+       u16             num_sges;
+       bool            small_mid_sge;
+};
+
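+/* Per-task T10 DIF settings: tag values and masks, block size,
+ * host/network placement and the validate/forward controls.
+ */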
+struct scsi_dif_task_params {
+       u32     initial_ref_tag;
+       bool    initial_ref_tag_is_valid;
+       u16     application_tag;
+       u16     application_tag_mask;
+       u16     dif_block_size_log;
+       bool    dif_on_network;
+       bool    dif_on_host;
+       u8      host_guard_type;
+       u8      protection_type;
+       u8      ref_tag_mask;
+       bool    crc_seed;
+       bool    tx_dif_conn_err_en;
+       bool    ignore_app_tag;
+       bool    keep_ref_tag_const;
+       bool    validate_guard;
+       bool    validate_app_tag;
+       bool    validate_ref_tag;
+       bool    forward_guard;
+       bool    forward_app_tag;
+       bool    forward_ref_tag;
+       bool    forward_app_tag_with_mask;
+       bool    forward_ref_tag_with_mask;
+};
+
+struct scsi_initiator_cmd_params {
+       struct scsi_sge extended_cdb_sge;
+       struct regpair  sense_data_buffer_phys_addr;
+};
+#endif
index 40c0ada0180631b81ec0d2738da41bf474e2c933..fbab6e0514f07bf0f4a9ac481cb712c58d113b7f 100644 (file)
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2016  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
+#ifndef _COMMON_HSI_H
+#define _COMMON_HSI_H
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+
+/* dma_addr_t manip */
+#define DMA_LO_LE(x)           cpu_to_le32(lower_32_bits(x))
+#define DMA_HI_LE(x)           cpu_to_le32(upper_32_bits(x))
+#define DMA_REGPAIR_LE(x, val) do { \
+                                       (x).hi = DMA_HI_LE((val)); \
+                                       (x).lo = DMA_LO_LE((val)); \
+                               } while (0)
+
+#define HILO_GEN(hi, lo, type)  ((((type)(hi)) << 32) + (lo))
+#define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64)
+#define HILO_64_REGPAIR(regpair)        (HILO_64(regpair.hi, regpair.lo))
+#define HILO_DMA_REGPAIR(regpair)      ((dma_addr_t)HILO_64_REGPAIR(regpair))
+
 #ifndef __COMMON_HSI__
 #define __COMMON_HSI__
 
-#define CORE_SPQE_PAGE_SIZE_BYTES                       4096
 
 #define X_FINAL_CLEANUP_AGG_INT 1
+
+#define EVENT_RING_PAGE_SIZE_BYTES          4096
+
 #define NUM_OF_GLOBAL_QUEUES                            128
+#define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE        64
+
+#define ISCSI_CDU_TASK_SEG_TYPE       0
+#define FCOE_CDU_TASK_SEG_TYPE        0
+#define RDMA_CDU_TASK_SEG_TYPE        1
+
+#define FW_ASSERT_GENERAL_ATTN_IDX    32
+
+#define MAX_PINNED_CCFC                 32
 
 /* Queue Zone sizes in bytes */
 #define TSTORM_QZONE_SIZE 8
-#define MSTORM_QZONE_SIZE 0
+#define MSTORM_QZONE_SIZE 16
 #define USTORM_QZONE_SIZE 8
 #define XSTORM_QZONE_SIZE 8
 #define YSTORM_QZONE_SIZE 0
 #define PSTORM_QZONE_SIZE 0
 
-#define ETH_MAX_NUM_RX_QUEUES_PER_VF 16
+#define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG        7
+#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DEFAULT   16
+#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DOUBLE    48
+#define ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD      112
+
+/********************************/
+/* CORE (LIGHT L2) FW CONSTANTS */
+/********************************/
+
+#define CORE_LL2_MAX_RAMROD_PER_CON    8
+#define CORE_LL2_TX_BD_PAGE_SIZE_BYTES 4096
+#define CORE_LL2_RX_BD_PAGE_SIZE_BYTES 4096
+#define CORE_LL2_RX_CQE_PAGE_SIZE_BYTES        4096
+#define CORE_LL2_RX_NUM_NEXT_PAGE_BDS  1
+
+#define CORE_LL2_TX_MAX_BDS_PER_PACKET 12
+
+#define CORE_SPQE_PAGE_SIZE_BYTES      4096
+
+#define MAX_NUM_LL2_RX_QUEUES          32
+#define MAX_NUM_LL2_TX_STATS_COUNTERS  32
 
 #define FW_MAJOR_VERSION       8
-#define FW_MINOR_VERSION       10
-#define FW_REVISION_VERSION    5
+#define FW_MINOR_VERSION       15
+#define FW_REVISION_VERSION    3
 #define FW_ENGINEERING_VERSION 0
 
 /***********************/
 #define NUM_OF_LCIDS           (320)
 #define NUM_OF_LTIDS           (320)
 
+/* Clock values */
+#define MASTER_CLK_FREQ_E4     (375e6)
+#define STORM_CLK_FREQ_E4      (1000e6)
+#define CLK25M_CLK_FREQ_E4     (25e6)
+
+/* Global PXP windows (GTT) */
+#define NUM_OF_GTT             19
+#define GTT_DWORD_SIZE_BITS    10
+#define GTT_BYTE_SIZE_BITS     (GTT_DWORD_SIZE_BITS + 2)
+#define GTT_DWORD_SIZE         BIT(GTT_DWORD_SIZE_BITS)
+
+/* Tools Version */
+#define TOOLS_VERSION 10
+
 /*****************/
 /* CDU CONSTANTS */
 /*****************/
 #define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT              (17)
 #define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK             (0x1ffff)
 
+#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT       (12)
+#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK      (0xfff)
 /*****************/
 /* DQ CONSTANTS  */
 /*****************/
 
 /* DEMS */
 #define DQ_DEMS_LEGACY                 0
+#define DQ_DEMS_TOE_MORE_TO_SEND       3
+#define DQ_DEMS_TOE_LOCAL_ADV_WND      4
+#define DQ_DEMS_ROCE_CQ_CONS           7
 
 /* XCM agg val selection */
 #define DQ_XCM_AGG_VAL_SEL_WORD2  0
 #define        DQ_XCM_ETH_TX_BD_CONS_CMD       DQ_XCM_AGG_VAL_SEL_WORD3
 #define        DQ_XCM_ETH_TX_BD_PROD_CMD       DQ_XCM_AGG_VAL_SEL_WORD4
 #define        DQ_XCM_ETH_GO_TO_BD_CONS_CMD    DQ_XCM_AGG_VAL_SEL_WORD5
+#define DQ_XCM_FCOE_SQ_CONS_CMD             DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_FCOE_SQ_PROD_CMD             DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_FCOE_X_FERQ_PROD_CMD         DQ_XCM_AGG_VAL_SEL_WORD5
+#define DQ_XCM_ISCSI_SQ_CONS_CMD       DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_ISCSI_SQ_PROD_CMD       DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
+#define DQ_XCM_ISCSI_EXP_STAT_SN_CMD   DQ_XCM_AGG_VAL_SEL_REG6
+#define DQ_XCM_ROCE_SQ_PROD_CMD        DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_TOE_TX_BD_PROD_CMD      DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD        DQ_XCM_AGG_VAL_SEL_REG3
+#define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG4
 
 /* UCM agg val selection (HW) */
 #define        DQ_UCM_AGG_VAL_SEL_WORD0        0
 #define        DQ_XCM_AGG_FLG_SHIFT_CF23       7
 
 /* XCM agg counter flag selection */
-#define DQ_XCM_CORE_DQ_CF_CMD          (1 << DQ_XCM_AGG_FLG_SHIFT_CF18)
-#define DQ_XCM_CORE_TERMINATE_CMD      (1 << DQ_XCM_AGG_FLG_SHIFT_CF19)
-#define DQ_XCM_CORE_SLOW_PATH_CMD      (1 << DQ_XCM_AGG_FLG_SHIFT_CF22)
-#define DQ_XCM_ETH_DQ_CF_CMD           (1 << DQ_XCM_AGG_FLG_SHIFT_CF18)
-#define DQ_XCM_ETH_TERMINATE_CMD       (1 << DQ_XCM_AGG_FLG_SHIFT_CF19)
-#define DQ_XCM_ETH_SLOW_PATH_CMD       (1 << DQ_XCM_AGG_FLG_SHIFT_CF22)
-#define DQ_XCM_ETH_TPH_EN_CMD          (1 << DQ_XCM_AGG_FLG_SHIFT_CF23)
+#define DQ_XCM_CORE_DQ_CF_CMD          BIT(DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_CORE_TERMINATE_CMD      BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_CORE_SLOW_PATH_CMD      BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ETH_DQ_CF_CMD           BIT(DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_ETH_TERMINATE_CMD       BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_ETH_SLOW_PATH_CMD       BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ETH_TPH_EN_CMD          BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
+#define DQ_XCM_FCOE_SLOW_PATH_CMD           BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ISCSI_DQ_FLUSH_CMD      BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_ISCSI_SLOW_PATH_CMD     BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
+#define DQ_XCM_TOE_DQ_FLUSH_CMD                BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_TOE_SLOW_PATH_CMD       BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
 
 /* UCM agg counter flag selection (HW) */
 #define        DQ_UCM_AGG_FLG_SHIFT_CF0        0
 #define        DQ_UCM_AGG_FLG_SHIFT_RULE1EN    7
 
 /* UCM agg counter flag selection (FW) */
-#define DQ_UCM_ETH_PMD_TX_ARM_CMD      (1 << DQ_UCM_AGG_FLG_SHIFT_CF4)
-#define DQ_UCM_ETH_PMD_RX_ARM_CMD      (1 << DQ_UCM_AGG_FLG_SHIFT_CF5)
-
+#define DQ_UCM_ETH_PMD_TX_ARM_CMD      BIT(DQ_UCM_AGG_FLG_SHIFT_CF4)
+#define DQ_UCM_ETH_PMD_RX_ARM_CMD      BIT(DQ_UCM_AGG_FLG_SHIFT_CF5)
+#define DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD   BIT(DQ_UCM_AGG_FLG_SHIFT_CF4)
+#define DQ_UCM_ROCE_CQ_ARM_CF_CMD      BIT(DQ_UCM_AGG_FLG_SHIFT_CF5)
+#define DQ_UCM_TOE_TIMER_STOP_ALL_CMD  BIT(DQ_UCM_AGG_FLG_SHIFT_CF3)
+#define DQ_UCM_TOE_SLOW_PATH_CF_CMD    BIT(DQ_UCM_AGG_FLG_SHIFT_CF4)
+#define DQ_UCM_TOE_DQ_CF_CMD           BIT(DQ_UCM_AGG_FLG_SHIFT_CF5)
+
+/* TCM agg counter flag selection (HW) */
+#define DQ_TCM_AGG_FLG_SHIFT_CF0       0
+#define DQ_TCM_AGG_FLG_SHIFT_CF1       1
+#define DQ_TCM_AGG_FLG_SHIFT_CF2       2
+#define DQ_TCM_AGG_FLG_SHIFT_CF3       3
+#define DQ_TCM_AGG_FLG_SHIFT_CF4       4
+#define DQ_TCM_AGG_FLG_SHIFT_CF5       5
+#define DQ_TCM_AGG_FLG_SHIFT_CF6       6
+#define DQ_TCM_AGG_FLG_SHIFT_CF7       7
+/* TCM agg counter flag selection (FW) */
+#define DQ_TCM_FCOE_FLUSH_Q0_CMD            BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
+#define DQ_TCM_FCOE_DUMMY_TIMER_CMD         BIT(DQ_TCM_AGG_FLG_SHIFT_CF2)
+#define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD      BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
+#define DQ_TCM_ISCSI_FLUSH_Q0_CMD      BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
+#define DQ_TCM_ISCSI_TIMER_STOP_ALL_CMD        BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
+#define DQ_TCM_TOE_FLUSH_Q0_CMD                BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
+#define DQ_TCM_TOE_TIMER_STOP_ALL_CMD  BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
+#define DQ_TCM_IWARP_POST_RQ_CF_CMD    BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
+
+/* PWM address mapping */
+#define DQ_PWM_OFFSET_DPM_BASE 0x0
+#define DQ_PWM_OFFSET_DPM_END  0x27
+#define DQ_PWM_OFFSET_XCM16_BASE       0x40
+#define DQ_PWM_OFFSET_XCM32_BASE       0x44
+#define DQ_PWM_OFFSET_UCM16_BASE       0x48
+#define DQ_PWM_OFFSET_UCM32_BASE       0x4C
+#define DQ_PWM_OFFSET_UCM16_4  0x50
+#define DQ_PWM_OFFSET_TCM16_BASE       0x58
+#define DQ_PWM_OFFSET_TCM32_BASE       0x5C
+#define DQ_PWM_OFFSET_XCM_FLAGS        0x68
+#define DQ_PWM_OFFSET_UCM_FLAGS        0x69
+#define DQ_PWM_OFFSET_TCM_FLAGS        0x6B
+
+#define DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD         (DQ_PWM_OFFSET_XCM16_BASE + 2)
+#define DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT   (DQ_PWM_OFFSET_UCM32_BASE)
+#define DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_16BIT   (DQ_PWM_OFFSET_UCM16_4)
+#define DQ_PWM_OFFSET_UCM_RDMA_INT_TIMEOUT     (DQ_PWM_OFFSET_UCM16_BASE + 2)
+#define DQ_PWM_OFFSET_UCM_RDMA_ARM_FLAGS       (DQ_PWM_OFFSET_UCM_FLAGS)
+#define DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD         (DQ_PWM_OFFSET_TCM16_BASE + 1)
+#define DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD                (DQ_PWM_OFFSET_TCM16_BASE + 3)
 #define        DQ_REGION_SHIFT (12)
 
 /* DPM */
  */
 #define CM_TX_PQ_BASE  0x200
 
+/* number of global Vport/QCN rate limiters */
+#define MAX_QM_GLOBAL_RLS      256
 /* QM registers data */
 #define QM_LINE_CRD_REG_WIDTH          16
-#define QM_LINE_CRD_REG_SIGN_BIT       (1 << (QM_LINE_CRD_REG_WIDTH - 1))
+#define QM_LINE_CRD_REG_SIGN_BIT       BIT((QM_LINE_CRD_REG_WIDTH - 1))
 #define QM_BYTE_CRD_REG_WIDTH          24
-#define QM_BYTE_CRD_REG_SIGN_BIT       (1 << (QM_BYTE_CRD_REG_WIDTH - 1))
+#define QM_BYTE_CRD_REG_SIGN_BIT       BIT((QM_BYTE_CRD_REG_WIDTH - 1))
 #define QM_WFQ_CRD_REG_WIDTH           32
-#define QM_WFQ_CRD_REG_SIGN_BIT                (1 << (QM_WFQ_CRD_REG_WIDTH - 1))
+#define QM_WFQ_CRD_REG_SIGN_BIT                BIT((QM_WFQ_CRD_REG_WIDTH - 1))
 #define QM_RL_CRD_REG_WIDTH            32
-#define QM_RL_CRD_REG_SIGN_BIT         (1 << (QM_RL_CRD_REG_WIDTH - 1))
+#define QM_RL_CRD_REG_SIGN_BIT         BIT((QM_RL_CRD_REG_WIDTH - 1))
 
 /*****************/
 /* CAU CONSTANTS */
 /* PXP CONSTANTS */
 /*****************/
 
+/* Bars for Blocks */
+#define PXP_BAR_GRC    0
+#define PXP_BAR_TSDM   0
+#define PXP_BAR_USDM   0
+#define PXP_BAR_XSDM   0
+#define PXP_BAR_MSDM   0
+#define PXP_BAR_YSDM   0
+#define PXP_BAR_PSDM   0
+#define PXP_BAR_IGU    0
+#define PXP_BAR_DQ     1
+
 /* PTT and GTT */
 #define PXP_NUM_PF_WINDOWS             12
 #define PXP_PER_PF_ENTRY_SIZE          8
        (PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START + \
         PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1)
 
+/* PF BAR */
+#define PXP_BAR0_START_GRC     0x0000
+#define PXP_BAR0_GRC_LENGTH    0x1C00000
+#define PXP_BAR0_END_GRC       (PXP_BAR0_START_GRC + \
+                                PXP_BAR0_GRC_LENGTH - 1)
+
+#define PXP_BAR0_START_IGU     0x1C00000
+#define PXP_BAR0_IGU_LENGTH    0x10000
+#define PXP_BAR0_END_IGU       (PXP_BAR0_START_IGU + \
+                                PXP_BAR0_IGU_LENGTH - 1)
+
+#define PXP_BAR0_START_TSDM    0x1C80000
+#define PXP_BAR0_SDM_LENGTH    0x40000
+#define PXP_BAR0_SDM_RESERVED_LENGTH   0x40000
+#define PXP_BAR0_END_TSDM      (PXP_BAR0_START_TSDM + \
+                                PXP_BAR0_SDM_LENGTH - 1)
+
+#define PXP_BAR0_START_MSDM    0x1D00000
+#define PXP_BAR0_END_MSDM      (PXP_BAR0_START_MSDM + \
+                                PXP_BAR0_SDM_LENGTH - 1)
+
+#define PXP_BAR0_START_USDM    0x1D80000
+#define PXP_BAR0_END_USDM      (PXP_BAR0_START_USDM + \
+                                PXP_BAR0_SDM_LENGTH - 1)
+
+#define PXP_BAR0_START_XSDM    0x1E00000
+#define PXP_BAR0_END_XSDM      (PXP_BAR0_START_XSDM + \
+                                PXP_BAR0_SDM_LENGTH - 1)
+
+#define PXP_BAR0_START_YSDM    0x1E80000
+#define PXP_BAR0_END_YSDM      (PXP_BAR0_START_YSDM + \
+                                PXP_BAR0_SDM_LENGTH - 1)
+
+#define PXP_BAR0_START_PSDM    0x1F00000
+#define PXP_BAR0_END_PSDM      (PXP_BAR0_START_PSDM + \
+                                PXP_BAR0_SDM_LENGTH - 1)
+
+#define PXP_BAR0_FIRST_INVALID_ADDRESS (PXP_BAR0_END_PSDM + 1)
+
+/* VF BAR */
+#define PXP_VF_BAR0    0
+
+#define PXP_VF_BAR0_START_GRC  0x3E00
+#define PXP_VF_BAR0_GRC_LENGTH 0x200
+#define PXP_VF_BAR0_END_GRC    (PXP_VF_BAR0_START_GRC + \
+                                PXP_VF_BAR0_GRC_LENGTH - 1)
 
 #define PXP_VF_BAR0_START_IGU                   0
 #define PXP_VF_BAR0_IGU_LENGTH                  0x3000
 #define PXP_NUM_ILT_RECORDS_BB 7600
 #define PXP_NUM_ILT_RECORDS_K2 11000
 #define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2)
+#define PXP_QUEUES_ZONE_MAX_NUM 320
+/*****************/
+/* PRM CONSTANTS */
+/*****************/
+#define PRM_DMA_PAD_BYTES_NUM  2
+/******************/
+/* SDMs CONSTANTS */
+/******************/
+#define SDM_OP_GEN_TRIG_NONE   0
+#define SDM_OP_GEN_TRIG_WAKE_THREAD    1
+#define SDM_OP_GEN_TRIG_AGG_INT        2
+#define SDM_OP_GEN_TRIG_LOADER 4
+#define SDM_OP_GEN_TRIG_INDICATE_ERROR 6
+#define SDM_OP_GEN_TRIG_RELEASE_THREAD 7
 
 #define SDM_COMP_TYPE_NONE              0
 #define SDM_COMP_TYPE_WAKE_THREAD       1
 /* PRS CONSTANTS */
 /*****************/
 
+#define PRS_GFT_CAM_LINES_NO_MATCH     31
+
 /* Async data KCQ CQE */
 struct async_data {
        __le32  cid;
@@ -440,20 +671,6 @@ struct coalescing_timeset {
 #define        COALESCING_TIMESET_VALID_SHIFT          7
 };
 
-struct common_prs_pf_msg_info {
-       __le32 value;
-#define        COMMON_PRS_PF_MSG_INFO_NPAR_DEFAULT_PF_MASK     0x1
-#define        COMMON_PRS_PF_MSG_INFO_NPAR_DEFAULT_PF_SHIFT    0
-#define        COMMON_PRS_PF_MSG_INFO_FW_DEBUG_1_MASK          0x1
-#define        COMMON_PRS_PF_MSG_INFO_FW_DEBUG_1_SHIFT         1
-#define        COMMON_PRS_PF_MSG_INFO_FW_DEBUG_2_MASK          0x1
-#define        COMMON_PRS_PF_MSG_INFO_FW_DEBUG_2_SHIFT         2
-#define        COMMON_PRS_PF_MSG_INFO_FW_DEBUG_3_MASK          0x1
-#define        COMMON_PRS_PF_MSG_INFO_FW_DEBUG_3_SHIFT         3
-#define        COMMON_PRS_PF_MSG_INFO_RESERVED_MASK            0xFFFFFFF
-#define        COMMON_PRS_PF_MSG_INFO_RESERVED_SHIFT           4
-};
-
 struct common_queue_zone {
        __le16 ring_drv_data_consumer;
        __le16 reserved;
@@ -473,6 +690,29 @@ struct vf_pf_channel_eqe_data {
        struct regpair msg_addr;
 };
 
+struct iscsi_eqe_data {
+       __le32 cid;
+       __le16 conn_id;
+       u8 error_code;
+       u8 error_pdu_opcode_reserved;
+#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_MASK           0x3F
+#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_SHIFT          0
+#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_VALID_MASK     0x1
+#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_VALID_SHIFT     6
+#define ISCSI_EQE_DATA_RESERVED0_MASK                  0x1
+#define ISCSI_EQE_DATA_RESERVED0_SHIFT                 7
+};
+
+struct rdma_eqe_destroy_qp {
+       __le32 cid;
+       u8 reserved[4];
+};
+
+union rdma_eqe_data {
+       struct regpair async_handle;
+       struct rdma_eqe_destroy_qp rdma_destroy_qp_data;
+};
+
 struct malicious_vf_eqe_data {
        u8 vf_id;
        u8 err_id;
@@ -488,6 +728,8 @@ struct initial_cleanup_eqe_data {
 union event_ring_data {
        u8 bytes[8];
        struct vf_pf_channel_eqe_data vf_pf_channel;
+       struct iscsi_eqe_data iscsi_info;
+       union rdma_eqe_data rdma_data;
        struct malicious_vf_eqe_data malicious_vf;
        struct initial_cleanup_eqe_data vf_init_cleanup;
 };
@@ -518,7 +760,7 @@ enum mf_mode {
 /* Per-protocol connection types */
 enum protocol_type {
        PROTOCOLID_ISCSI,
-       PROTOCOLID_RESERVED2,
+       PROTOCOLID_FCOE,
        PROTOCOLID_ROCE,
        PROTOCOLID_CORE,
        PROTOCOLID_ETH,
@@ -616,6 +858,52 @@ enum db_dest {
        MAX_DB_DEST
 };
 
+/* Enum of doorbell DPM types */
+enum db_dpm_type {
+       DPM_LEGACY,
+       DPM_ROCE,
+       DPM_L2_INLINE,
+       DPM_L2_BD,
+       MAX_DB_DPM_TYPE
+};
+
+/* Structure for doorbell data, in L2 DPM mode, for 1st db in a DPM burst */
+struct db_l2_dpm_data {
+       __le16 icid;
+       __le16 bd_prod;
+       __le32 params;
+#define DB_L2_DPM_DATA_SIZE_MASK       0x3F
+#define DB_L2_DPM_DATA_SIZE_SHIFT      0
+#define DB_L2_DPM_DATA_DPM_TYPE_MASK   0x3
+#define DB_L2_DPM_DATA_DPM_TYPE_SHIFT  6
+#define DB_L2_DPM_DATA_NUM_BDS_MASK    0xFF
+#define DB_L2_DPM_DATA_NUM_BDS_SHIFT   8
+#define DB_L2_DPM_DATA_PKT_SIZE_MASK   0x7FF
+#define DB_L2_DPM_DATA_PKT_SIZE_SHIFT  16
+#define DB_L2_DPM_DATA_RESERVED0_MASK  0x1
+#define DB_L2_DPM_DATA_RESERVED0_SHIFT 27
+#define DB_L2_DPM_DATA_SGE_NUM_MASK    0x7
+#define DB_L2_DPM_DATA_SGE_NUM_SHIFT   28
+#define DB_L2_DPM_DATA_RESERVED1_MASK  0x1
+#define DB_L2_DPM_DATA_RESERVED1_SHIFT 31
+};
+
+/* Structure for SGE in a DPM doorbell of type DPM_L2_BD */
+struct db_l2_dpm_sge {
+       struct regpair addr;
+       __le16 nbytes;
+       __le16 bitfields;
+#define DB_L2_DPM_SGE_TPH_ST_INDEX_MASK        0x1FF
+#define DB_L2_DPM_SGE_TPH_ST_INDEX_SHIFT 0
+#define DB_L2_DPM_SGE_RESERVED0_MASK   0x3
+#define DB_L2_DPM_SGE_RESERVED0_SHIFT  9
+#define DB_L2_DPM_SGE_ST_VALID_MASK    0x1
+#define DB_L2_DPM_SGE_ST_VALID_SHIFT   11
+#define DB_L2_DPM_SGE_RESERVED1_MASK   0xF
+#define DB_L2_DPM_SGE_RESERVED1_SHIFT  12
+       __le32 reserved2;
+};
+
 /* Structure for doorbell address, in legacy mode */
 struct db_legacy_addr {
        __le32 addr;
@@ -627,6 +915,49 @@ struct db_legacy_addr {
 #define DB_LEGACY_ADDR_ICID_SHIFT      5
 };
 
+/* Structure for doorbell address, in PWM mode */
+struct db_pwm_addr {
+       __le32 addr;
+#define DB_PWM_ADDR_RESERVED0_MASK     0x7
+#define DB_PWM_ADDR_RESERVED0_SHIFT 0
+#define DB_PWM_ADDR_OFFSET_MASK        0x7F
+#define DB_PWM_ADDR_OFFSET_SHIFT       3
+#define DB_PWM_ADDR_WID_MASK   0x3
+#define DB_PWM_ADDR_WID_SHIFT  10
+#define DB_PWM_ADDR_DPI_MASK   0xFFFF
+#define DB_PWM_ADDR_DPI_SHIFT  12
+#define DB_PWM_ADDR_RESERVED1_MASK     0xF
+#define DB_PWM_ADDR_RESERVED1_SHIFT 28
+};
+
+/* Parameters to RoCE firmware, passed in EDPM doorbell */
+struct db_roce_dpm_params {
+       __le32 params;
+#define DB_ROCE_DPM_PARAMS_SIZE_MASK           0x3F
+#define DB_ROCE_DPM_PARAMS_SIZE_SHIFT          0
+#define DB_ROCE_DPM_PARAMS_DPM_TYPE_MASK       0x3
+#define DB_ROCE_DPM_PARAMS_DPM_TYPE_SHIFT      6
+#define DB_ROCE_DPM_PARAMS_OPCODE_MASK         0xFF
+#define DB_ROCE_DPM_PARAMS_OPCODE_SHIFT                8
+#define DB_ROCE_DPM_PARAMS_WQE_SIZE_MASK       0x7FF
+#define DB_ROCE_DPM_PARAMS_WQE_SIZE_SHIFT      16
+#define DB_ROCE_DPM_PARAMS_RESERVED0_MASK      0x1
+#define DB_ROCE_DPM_PARAMS_RESERVED0_SHIFT     27
+#define DB_ROCE_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
+#define DB_ROCE_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
+#define DB_ROCE_DPM_PARAMS_S_FLG_MASK          0x1
+#define DB_ROCE_DPM_PARAMS_S_FLG_SHIFT         29
+#define DB_ROCE_DPM_PARAMS_RESERVED1_MASK      0x3
+#define DB_ROCE_DPM_PARAMS_RESERVED1_SHIFT     30
+};
+
+/* Structure for doorbell data, in ROCE DPM mode, for 1st db in a DPM burst */
+struct db_roce_dpm_data {
+       __le16 icid;
+       __le16 prod_val;
+       struct db_roce_dpm_params params;
+};
+
 /* Igu interrupt command */
 enum igu_int_cmd {
        IGU_INT_ENABLE  = 0,
@@ -764,6 +1095,19 @@ struct pxp_ptt_entry {
        struct pxp_pretend_cmd  pretend;
 };
 
+/* VF Zone A Permission Register. */
+struct pxp_vf_zone_a_permission {
+       __le32 control;
+#define PXP_VF_ZONE_A_PERMISSION_VFID_MASK     0xFF
+#define PXP_VF_ZONE_A_PERMISSION_VFID_SHIFT    0
+#define PXP_VF_ZONE_A_PERMISSION_VALID_MASK    0x1
+#define PXP_VF_ZONE_A_PERMISSION_VALID_SHIFT   8
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_MASK        0x7F
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_SHIFT 9
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_MASK        0xFFFF
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_SHIFT 16
+};
+
 /* RSS hash type */
 struct rdif_task_context {
        __le32 initial_ref_tag;
@@ -831,6 +1175,7 @@ struct rdif_task_context {
        __le32 reserved2;
 };
 
+/* RSS hash type */
 enum rss_hash_type {
        RSS_HASH_TYPE_DEFAULT   = 0,
        RSS_HASH_TYPE_IPV4      = 1,
@@ -942,7 +1287,7 @@ struct tdif_task_context {
 };
 
 struct timers_context {
-       __le32 logical_client0;
+       __le32 logical_client_0;
 #define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK     0xFFFFFFF
 #define TIMERS_CONTEXT_EXPIRATIONTIMELC0_SHIFT    0
 #define TIMERS_CONTEXT_VALIDLC0_MASK              0x1
@@ -951,7 +1296,7 @@ struct timers_context {
 #define TIMERS_CONTEXT_ACTIVELC0_SHIFT            29
 #define TIMERS_CONTEXT_RESERVED0_MASK             0x3
 #define TIMERS_CONTEXT_RESERVED0_SHIFT            30
-       __le32 logical_client1;
+       __le32 logical_client_1;
 #define TIMERS_CONTEXT_EXPIRATIONTIMELC1_MASK     0xFFFFFFF
 #define TIMERS_CONTEXT_EXPIRATIONTIMELC1_SHIFT    0
 #define TIMERS_CONTEXT_VALIDLC1_MASK              0x1
@@ -960,7 +1305,7 @@ struct timers_context {
 #define TIMERS_CONTEXT_ACTIVELC1_SHIFT            29
 #define TIMERS_CONTEXT_RESERVED1_MASK             0x3
 #define TIMERS_CONTEXT_RESERVED1_SHIFT            30
-       __le32 logical_client2;
+       __le32 logical_client_2;
 #define TIMERS_CONTEXT_EXPIRATIONTIMELC2_MASK     0xFFFFFFF
 #define TIMERS_CONTEXT_EXPIRATIONTIMELC2_SHIFT    0
 #define TIMERS_CONTEXT_VALIDLC2_MASK              0x1
@@ -978,3 +1323,4 @@ struct timers_context {
 #define TIMERS_CONTEXT_RESERVED3_SHIFT            29
 };
 #endif /* __COMMON_HSI__ */
+#endif
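diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h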
index b5ebc697d05f28c7cca61335c3d5faec54189c45..34d93eb5bfba346019ba1d2c9014ab8a2fa5fd8f 100644
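--- a/include/linux/qed/eth_common.h
+++ b/include/linux/qed/eth_common.h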
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef __ETH_COMMON__
 /* ETH FW CONSTANTS */
 /********************/
 #define ETH_HSI_VER_MAJOR                   3
-#define ETH_HSI_VER_MINOR                   0
-#define ETH_CACHE_LINE_SIZE                 64
+#define ETH_HSI_VER_MINOR      10
+
+#define ETH_HSI_VER_NO_PKT_LEN_TUNN    5
 
+#define ETH_CACHE_LINE_SIZE                 64
+#define ETH_RX_CQE_GAP 32
 #define ETH_MAX_RAMROD_PER_CON                          8
 #define ETH_TX_BD_PAGE_SIZE_BYTES                       4096
 #define ETH_RX_BD_PAGE_SIZE_BYTES                       4096
 #define ETH_RX_CQE_PAGE_SIZE_BYTES                      4096
 #define ETH_RX_NUM_NEXT_PAGE_BDS                        2
 
+#define ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET          253
+#define ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET          251
+
 #define ETH_TX_MIN_BDS_PER_NON_LSO_PKT                          1
 #define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET                       18
+#define ETH_TX_MAX_BDS_PER_LSO_PACKET  255
 #define ETH_TX_MAX_LSO_HDR_NBD                                          4
 #define ETH_TX_MIN_BDS_PER_LSO_PKT                                      3
 #define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT       3
 #define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT            2
 #define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE          2
-#define ETH_TX_MAX_NON_LSO_PKT_LEN                  (9700 - (4 + 12 + 8))
+#define ETH_TX_MAX_NON_LSO_PKT_LEN     (9700 - (4 + 4 + 12 + 8))
 #define ETH_TX_MAX_LSO_HDR_BYTES                    510
+#define ETH_TX_LSO_WINDOW_BDS_NUM      (18 - 1)
+#define ETH_TX_LSO_WINDOW_MIN_LEN      9700
+#define ETH_TX_MAX_LSO_PAYLOAD_LEN     0xFE000
+#define ETH_TX_NUM_SAME_AS_LAST_ENTRIES        320
+#define ETH_TX_INACTIVE_SAME_AS_LAST   0xFFFF
 
 #define ETH_NUM_STATISTIC_COUNTERS                      MAX_NUM_VPORTS
+#define ETH_NUM_STATISTIC_COUNTERS_DOUBLE_VF_ZONE \
+       (ETH_NUM_STATISTIC_COUNTERS - MAX_NUM_VFS / 2)
+#define ETH_NUM_STATISTIC_COUNTERS_QUAD_VF_ZONE \
+       (ETH_NUM_STATISTIC_COUNTERS - 3 * MAX_NUM_VFS / 4)
 
 /* Maximum number of buffers, used for RX packet placement */
 #define ETH_RX_MAX_BUFF_PER_PKT             5
@@ -59,6 +99,8 @@
 #define ETH_TPA_CQE_CONT_LEN_LIST_SIZE    6
 #define ETH_TPA_CQE_END_LEN_LIST_SIZE     4
 
+/* Control frame check constants */
+#define ETH_CTL_FRAME_ETH_TYPE_NUM     4
 
 struct eth_tx_1st_bd_flags {
        u8 bitfields;
@@ -82,10 +124,10 @@ struct eth_tx_1st_bd_flags {
 
 /* The parsing information data for the first tx bd of a given packet. */
 struct eth_tx_data_1st_bd {
-       __le16                          vlan;
-       u8                              nbds;
-       struct eth_tx_1st_bd_flags      bd_flags;
-       __le16                          bitfields;
+       __le16 vlan;
+       u8 nbds;
+       struct eth_tx_1st_bd_flags bd_flags;
+       __le16 bitfields;
 #define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK  0x1
 #define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0
 #define ETH_TX_DATA_1ST_BD_RESERVED0_MASK          0x1
@@ -96,7 +138,7 @@ struct eth_tx_data_1st_bd {
 
 /* The parsing information data for the second tx bd of a given packet. */
 struct eth_tx_data_2nd_bd {
-       __le16  tunn_ip_size;
+       __le16 tunn_ip_size;
        __le16  bitfields1;
 #define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK  0xF
 #define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0
@@ -125,9 +167,14 @@ struct eth_tx_data_2nd_bd {
 #define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT                13
 };
 
+/* Firmware data for L2-EDPM packet. */
+struct eth_edpm_fw_data {
+       struct eth_tx_data_1st_bd data_1st_bd;
+       struct eth_tx_data_2nd_bd data_2nd_bd;
+       __le32 reserved;
+};
+
 struct eth_fast_path_cqe_fw_debug {
-       u8 reserved0;
-       u8 reserved1;
        __le16 reserved2;
 };
 
@@ -148,6 +195,17 @@ struct eth_tunnel_parsing_flags {
 #define        ETH_TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT     7
 };
 
+/* PMD flow control bits */
+struct eth_pmd_flow_flags {
+       u8 flags;
+#define ETH_PMD_FLOW_FLAGS_VALID_MASK  0x1
+#define ETH_PMD_FLOW_FLAGS_VALID_SHIFT 0
+#define ETH_PMD_FLOW_FLAGS_TOGGLE_MASK 0x1
+#define ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT        1
+#define ETH_PMD_FLOW_FLAGS_RESERVED_MASK 0x3F
+#define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT 2
+};
+
 /* Regular ETH Rx FP CQE. */
 struct eth_fast_path_rx_reg_cqe {
        u8 type;
@@ -166,64 +224,63 @@ struct eth_fast_path_rx_reg_cqe {
        u8 placement_offset;
        struct eth_tunnel_parsing_flags tunnel_pars_flags;
        u8 bd_num;
-       u8 reserved[7];
+       u8 reserved[9];
        struct eth_fast_path_cqe_fw_debug fw_debug;
        u8 reserved1[3];
-       u8 flags;
-#define ETH_FAST_PATH_RX_REG_CQE_VALID_MASK          0x1
-#define ETH_FAST_PATH_RX_REG_CQE_VALID_SHIFT         0
-#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_MASK   0x1
-#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_SHIFT  1
-#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_MASK      0x3F
-#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_SHIFT     2
+       struct eth_pmd_flow_flags pmd_flags;
 };
 
 /* TPA-continue ETH Rx FP CQE. */
 struct eth_fast_path_rx_tpa_cont_cqe {
-       u8      type;
-       u8      tpa_agg_index;
-       __le16  len_list[ETH_TPA_CQE_CONT_LEN_LIST_SIZE];
-       u8      reserved[5];
-       u8      reserved1;
-       __le16  reserved2[ETH_TPA_CQE_CONT_LEN_LIST_SIZE];
+       u8 type;
+       u8 tpa_agg_index;
+       __le16 len_list[ETH_TPA_CQE_CONT_LEN_LIST_SIZE];
+       u8 reserved;
+       u8 reserved1;
+       __le16 reserved2[ETH_TPA_CQE_CONT_LEN_LIST_SIZE];
+       u8 reserved3[3];
+       struct eth_pmd_flow_flags pmd_flags;
 };
 
 /* TPA-end ETH Rx FP CQE. */
 struct eth_fast_path_rx_tpa_end_cqe {
-       u8      type;
-       u8      tpa_agg_index;
-       __le16  total_packet_len;
-       u8      num_of_bds;
-       u8      end_reason;
-       __le16  num_of_coalesced_segs;
-       __le32  ts_delta;
-       __le16  len_list[ETH_TPA_CQE_END_LEN_LIST_SIZE];
-       u8      reserved1[3];
-       u8      reserved2;
-       __le16  reserved3[ETH_TPA_CQE_END_LEN_LIST_SIZE];
+       u8 type;
+       u8 tpa_agg_index;
+       __le16 total_packet_len;
+       u8 num_of_bds;
+       u8 end_reason;
+       __le16 num_of_coalesced_segs;
+       __le32 ts_delta;
+       __le16 len_list[ETH_TPA_CQE_END_LEN_LIST_SIZE];
+       __le16 reserved3[ETH_TPA_CQE_END_LEN_LIST_SIZE];
+       __le16 reserved1;
+       u8 reserved2;
+       struct eth_pmd_flow_flags pmd_flags;
 };
 
 /* TPA-start ETH Rx FP CQE. */
 struct eth_fast_path_rx_tpa_start_cqe {
-       u8      type;
-       u8      bitfields;
+       u8 type;
+       u8 bitfields;
 #define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK  0x7
 #define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0
 #define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK             0xF
 #define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT            3
 #define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK      0x1
 #define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT     7
-       __le16  seg_len;
+       __le16 seg_len;
        struct parsing_and_err_flags pars_flags;
-       __le16  vlan_tag;
-       __le32  rss_hash;
-       __le16  len_on_first_bd;
-       u8      placement_offset;
+       __le16 vlan_tag;
+       __le32 rss_hash;
+       __le16 len_on_first_bd;
+       u8 placement_offset;
        struct eth_tunnel_parsing_flags tunnel_pars_flags;
-       u8      tpa_agg_index;
-       u8      header_len;
-       __le16  ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE];
+       u8 tpa_agg_index;
+       u8 header_len;
+       __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE];
        struct eth_fast_path_cqe_fw_debug fw_debug;
+       u8 reserved;
+       struct eth_pmd_flow_flags pmd_flags;
 };
 
 /* The L4 pseudo checksum mode for Ethernet */
@@ -245,15 +302,7 @@ struct eth_slow_path_rx_cqe {
        u8      reserved[25];
        __le16  echo;
        u8      reserved1;
-       u8      flags;
-/* for PMD mode - valid indication */
-#define ETH_SLOW_PATH_RX_CQE_VALID_MASK         0x1
-#define ETH_SLOW_PATH_RX_CQE_VALID_SHIFT        0
-/* for PMD mode - valid toggle indication */
-#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_MASK  0x1
-#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_SHIFT 1
-#define ETH_SLOW_PATH_RX_CQE_RESERVED2_MASK     0x3F
-#define ETH_SLOW_PATH_RX_CQE_RESERVED2_SHIFT    2
+       struct eth_pmd_flow_flags pmd_flags;
 };
 
 /* union for all ETH Rx CQE types */
@@ -276,6 +325,11 @@ enum eth_rx_cqe_type {
        MAX_ETH_RX_CQE_TYPE
 };
 
+struct eth_rx_pmd_cqe {
+       union eth_rx_cqe cqe;
+       u8 reserved[ETH_RX_CQE_GAP];
+};
+
 enum eth_rx_tunn_type {
        ETH_RX_NO_TUNN,
        ETH_RX_TUNN_GENEVE,
@@ -313,8 +367,8 @@ struct eth_tx_2nd_bd {
 
 /* The parsing information data for the third tx bd of a given packet. */
 struct eth_tx_data_3rd_bd {
-       __le16  lso_mss;
-       __le16  bitfields;
+       __le16 lso_mss;
+       __le16 bitfields;
 #define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK  0xF
 #define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0
 #define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK         0xF
@@ -323,8 +377,8 @@ struct eth_tx_data_3rd_bd {
 #define ETH_TX_DATA_3RD_BD_START_BD_SHIFT       8
 #define ETH_TX_DATA_3RD_BD_RESERVED0_MASK       0x7F
 #define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT      9
-       u8      tunn_l4_hdr_start_offset_w;
-       u8      tunn_hdr_size_w;
+       u8 tunn_l4_hdr_start_offset_w;
+       u8 tunn_hdr_size_w;
 };
 
 /* The third tx bd of a given packet */
@@ -355,10 +409,10 @@ struct eth_tx_bd {
 };
 
 union eth_tx_bd_types {
-       struct eth_tx_1st_bd    first_bd;
-       struct eth_tx_2nd_bd    second_bd;
-       struct eth_tx_3rd_bd    third_bd;
-       struct eth_tx_bd        reg_bd;
+       struct eth_tx_1st_bd first_bd;
+       struct eth_tx_2nd_bd second_bd;
+       struct eth_tx_3rd_bd third_bd;
+       struct eth_tx_bd reg_bd;
 };
 
 /* Mstorm Queue Zone */
@@ -389,8 +443,8 @@ struct eth_db_data {
 #define ETH_DB_DATA_RESERVED_SHIFT    5
 #define ETH_DB_DATA_AGG_VAL_SEL_MASK  0x3
 #define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6
-       u8      agg_flags;
-       __le16  bd_prod;
+       u8 agg_flags;
+       __le16 bd_prod;
 };
 
 #endif /* __ETH_COMMON__ */
diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h
new file mode 100644
index 0000000..947a635
--- /dev/null
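+++ b/include/linux/qed/fcoe_common.h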
@@ -0,0 +1,669 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef __FCOE_COMMON__
+#define __FCOE_COMMON__
+/*********************/
+/* FCOE FW CONSTANTS */
+/*********************/
+
+#define FC_ABTS_REPLY_MAX_PAYLOAD_LEN  12
+#define FCOE_MAX_SIZE_FCP_DATA_SUPER   (8600)
+
+struct fcoe_abts_pkt {
+       __le32 abts_rsp_fc_payload_lo;
+       __le16 abts_rsp_rx_id;
+       u8 abts_rsp_rctl;
+       u8 reserved2;
+};
+
+/* FCoE additional WQE (Sq/XferQ) information */
+union fcoe_additional_info_union {
+       __le32 previous_tid;
+       __le32 parent_tid;
+       __le32 burst_length;
+       __le32 seq_rec_updated_offset;
+};
+
+struct fcoe_exp_ro {
+       __le32 data_offset;
+       __le32 reserved;
+};
+
+union fcoe_cleanup_addr_exp_ro_union {
+       struct regpair abts_rsp_fc_payload_hi;
+       struct fcoe_exp_ro exp_ro;
+};
+
+/* FCoE Ramrod Command IDs */
+enum fcoe_completion_status {
+       FCOE_COMPLETION_STATUS_SUCCESS,
+       FCOE_COMPLETION_STATUS_FCOE_VER_ERR,
+       FCOE_COMPLETION_STATUS_SRC_MAC_ADD_ARR_ERR,
+       MAX_FCOE_COMPLETION_STATUS
+};
+
+struct fc_addr_nw {
+       u8 addr_lo;
+       u8 addr_mid;
+       u8 addr_hi;
+};
+
+/* FCoE connection offload */
+struct fcoe_conn_offload_ramrod_data {
+       struct regpair sq_pbl_addr;
+       struct regpair sq_curr_page_addr;
+       struct regpair sq_next_page_addr;
+       struct regpair xferq_pbl_addr;
+       struct regpair xferq_curr_page_addr;
+       struct regpair xferq_next_page_addr;
+       struct regpair respq_pbl_addr;
+       struct regpair respq_curr_page_addr;
+       struct regpair respq_next_page_addr;
+       __le16 dst_mac_addr_lo;
+       __le16 dst_mac_addr_mid;
+       __le16 dst_mac_addr_hi;
+       __le16 src_mac_addr_lo;
+       __le16 src_mac_addr_mid;
+       __le16 src_mac_addr_hi;
+       __le16 tx_max_fc_pay_len;
+       __le16 e_d_tov_timer_val;
+       __le16 rx_max_fc_pay_len;
+       __le16 vlan_tag;
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_MASK              0xFFF
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT             0
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_MASK                  0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_SHIFT                 12
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_MASK             0x7
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT            13
+       __le16 physical_q0;
+       __le16 rec_rr_tov_timer_val;
+       struct fc_addr_nw s_id;
+       u8 max_conc_seqs_c3;
+       struct fc_addr_nw d_id;
+       u8 flags;
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_MASK  0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_SHIFT 0
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_MASK           0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT          1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_MASK          0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT         2
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK          0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT         3
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_MASK                 0x3
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT                4
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK            0x3
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT           6
+       __le16 conn_id;
+       u8 def_q_idx;
+       u8 reserved[5];
+};
+
+/* FCoE terminate connection request */
+struct fcoe_conn_terminate_ramrod_data {
+       struct regpair terminate_params_addr;
+};
+
+struct fcoe_slow_sgl_ctx {
+       struct regpair base_sgl_addr;
+       __le16 curr_sge_off;
+       __le16 remainder_num_sges;
+       __le16 curr_sgl_index;
+       __le16 reserved;
+};
+
+union fcoe_dix_desc_ctx {
+       struct fcoe_slow_sgl_ctx dix_sgl;
+       struct scsi_sge cached_dix_sge;
+};
+
+struct fcoe_fast_sgl_ctx {
+       struct regpair sgl_start_addr;
+       __le32 sgl_byte_offset;
+       __le16 task_reuse_cnt;
+       __le16 init_offset_in_first_sge;
+};
+
+struct fcoe_fcp_cmd_payload {
+       __le32 opaque[8];
+};
+
+struct fcoe_fcp_rsp_payload {
+       __le32 opaque[6];
+};
+
+struct fcoe_fcp_xfer_payload {
+       __le32 opaque[3];
+};
+
+/* FCoE firmware function init */
+struct fcoe_init_func_ramrod_data {
+       struct scsi_init_func_params func_params;
+       struct scsi_init_func_queues q_params;
+       __le16 mtu;
+       __le16 sq_num_pages_in_pbl;
+       __le32 reserved;
+};
+
+/* FCoE: Mode of the connection: Target or Initiator or both */
+enum fcoe_mode_type {
+       FCOE_INITIATOR_MODE = 0x0,
+       FCOE_TARGET_MODE = 0x1,
+       FCOE_BOTH_OR_NOT_CHOSEN = 0x3,
+       MAX_FCOE_MODE_TYPE
+};
+
+struct fcoe_rx_stat {
+       struct regpair fcoe_rx_byte_cnt;
+       struct regpair fcoe_rx_data_pkt_cnt;
+       struct regpair fcoe_rx_xfer_pkt_cnt;
+       struct regpair fcoe_rx_other_pkt_cnt;
+       __le32 fcoe_silent_drop_pkt_cmdq_full_cnt;
+       __le32 fcoe_silent_drop_pkt_rq_full_cnt;
+       __le32 fcoe_silent_drop_pkt_crc_error_cnt;
+       __le32 fcoe_silent_drop_pkt_task_invalid_cnt;
+       __le32 fcoe_silent_drop_total_pkt_cnt;
+       __le32 rsrv;
+};
+
+struct fcoe_stat_ramrod_data {
+       struct regpair stat_params_addr;
+};
+
+struct protection_info_ctx {
+       __le16 flags;
+#define PROTECTION_INFO_CTX_HOST_INTERFACE_MASK        0x3
+#define PROTECTION_INFO_CTX_HOST_INTERFACE_SHIFT       0
+#define PROTECTION_INFO_CTX_DIF_TO_PEER_MASK           0x1
+#define PROTECTION_INFO_CTX_DIF_TO_PEER_SHIFT          2
+#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_MASK  0x1
+#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_SHIFT 3
+#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_MASK     0xF
+#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_SHIFT    4
+#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_MASK  0x1
+#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_SHIFT 8
+#define PROTECTION_INFO_CTX_RESERVED0_MASK             0x7F
+#define PROTECTION_INFO_CTX_RESERVED0_SHIFT            9
+       u8 dix_block_size;
+       u8 dst_size;
+};
+
+union protection_info_union_ctx {
+       struct protection_info_ctx info;
+       __le32 value;
+};
+
+struct fcp_rsp_payload_padded {
+       struct fcoe_fcp_rsp_payload rsp_payload;
+       __le32 reserved[2];
+};
+
+struct fcp_xfer_payload_padded {
+       struct fcoe_fcp_xfer_payload xfer_payload;
+       __le32 reserved[5];
+};
+
+struct fcoe_tx_data_params {
+       __le32 data_offset;
+       __le32 offset_in_io;
+       u8 flags;
+#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_MASK  0x1
+#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_SHIFT 0
+#define FCOE_TX_DATA_PARAMS_DROP_DATA_MASK           0x1
+#define FCOE_TX_DATA_PARAMS_DROP_DATA_SHIFT          1
+#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_MASK       0x1
+#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_SHIFT      2
+#define FCOE_TX_DATA_PARAMS_RESERVED0_MASK           0x1F
+#define FCOE_TX_DATA_PARAMS_RESERVED0_SHIFT          3
+       u8 dif_residual;
+       __le16 seq_cnt;
+       __le16 single_sge_saved_offset;
+       __le16 next_dif_offset;
+       __le16 seq_id;
+       __le16 reserved3;
+};
+
+struct fcoe_tx_mid_path_params {
+       __le32 parameter;
+       u8 r_ctl;
+       u8 type;
+       u8 cs_ctl;
+       u8 df_ctl;
+       __le16 rx_id;
+       __le16 ox_id;
+};
+
+struct fcoe_tx_params {
+       struct fcoe_tx_data_params data;
+       struct fcoe_tx_mid_path_params mid_path;
+};
+
+union fcoe_tx_info_union_ctx {
+       struct fcoe_fcp_cmd_payload fcp_cmd_payload;
+       struct fcp_rsp_payload_padded fcp_rsp_payload;
+       struct fcp_xfer_payload_padded fcp_xfer_payload;
+       struct fcoe_tx_params tx_params;
+};
+
+struct ystorm_fcoe_task_st_ctx {
+       u8 task_type;
+       u8 sgl_mode;
+#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK  0x1
+#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 0
+#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK         0x7F
+#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT        1
+       u8 cached_dix_sge;
+       u8 expect_first_xfer;
+       __le32 num_pbf_zero_write;
+       union protection_info_union_ctx protection_info_union;
+       __le32 data_2_trns_rem;
+       struct scsi_sgl_params sgl_params;
+       u8 reserved1[12];
+       union fcoe_tx_info_union_ctx tx_info_union;
+       union fcoe_dix_desc_ctx dix_desc;
+       struct scsi_cached_sges data_desc;
+       __le16 ox_id;
+       __le16 rx_id;
+       __le32 task_rety_identifier;
+       u8 reserved2[8];
+};
+
+struct ystorm_fcoe_task_ag_ctx {
+       u8 byte0;
+       u8 byte1;
+       __le16 word0;
+       u8 flags0;
+#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK     0xF
+#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT    0
+#define YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK        0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT       4
+#define YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK        0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT       5
+#define YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK        0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT       6
+#define YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK        0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT       7
+       u8 flags1;
+#define YSTORM_FCOE_TASK_AG_CTX_CF0_MASK         0x3
+#define YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT        0
+#define YSTORM_FCOE_TASK_AG_CTX_CF1_MASK         0x3
+#define YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT        2
+#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK  0x3
+#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
+#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK       0x1
+#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT      6
+#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK       0x1
+#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT      7
+       u8 flags2;
+#define YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK        0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT       0
+#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK     0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT    1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK     0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT    2
+#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK     0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT    3
+#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK     0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT    4
+#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK     0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT    5
+#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK     0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT    6
+#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK     0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT    7
+       u8 byte2;
+       __le32 reg0;
+       u8 byte3;
+       u8 byte4;
+       __le16 rx_id;
+       __le16 word2;
+       __le16 word3;
+       __le16 word4;
+       __le16 word5;
+       __le32 reg1;
+       __le32 reg2;
+};
+
+struct tstorm_fcoe_task_ag_ctx {
+       u8 reserved;
+       u8 byte1;
+       __le16 icid;
+       u8 flags0;
+#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK     0xF
+#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT    0
+#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK        0x1
+#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT       4
+#define TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK                0x1
+#define TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT               5
+#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK     0x1
+#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT    6
+#define TSTORM_FCOE_TASK_AG_CTX_VALID_MASK               0x1
+#define TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT              7
+       u8 flags1;
+#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK        0x1
+#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT       0
+#define TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK                0x1
+#define TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT               1
+#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK       0x3
+#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT      2
+#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK           0x3
+#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT          4
+#define TSTORM_FCOE_TASK_AG_CTX_CF2_MASK                 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT                6
+       u8 flags2;
+#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK      0x3
+#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT     0
+#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK       0x3
+#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT      2
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK         0x3
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT        4
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK     0x3
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT    6
+       u8 flags3;
+#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK       0x3
+#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT      0
+#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK    0x1
+#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT   2
+#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK        0x1
+#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT       3
+#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK               0x1
+#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT              4
+#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK   0x1
+#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT  5
+#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK    0x1
+#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT   6
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK      0x1
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT     7
+       u8 flags4;
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK  0x1
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT 0
+#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK    0x1
+#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT   1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK             0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT            2
+#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK             0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT            3
+#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK             0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT            4
+#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK             0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT            5
+#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK             0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT            6
+#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK             0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT            7
+       u8 cleanup_state;
+       __le16 last_sent_tid;
+       __le32 rec_rr_tov_exp_timeout;
+       u8 byte3;
+       u8 byte4;
+       __le16 word2;
+       __le16 word3;
+       __le16 word4;
+       __le32 data_offset_end_of_seq;
+       __le32 data_offset_next;
+};
+
+struct fcoe_tstorm_fcoe_task_st_ctx_read_write {
+       union fcoe_cleanup_addr_exp_ro_union cleanup_addr_exp_ro_union;
+       __le16 flags;
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK       0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_SHIFT      0
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_MASK   0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT  1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_MASK        0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT       2
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_MASK       0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT      3
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_MASK  0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 4
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_MASK   0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT  5
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_MASK        0x3
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT       6
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK             0xFF
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT            8
+       __le16 seq_cnt;
+       u8 seq_id;
+       u8 ooo_rx_seq_id;
+       __le16 rx_id;
+       struct fcoe_abts_pkt abts_data;
+       __le32 e_d_tov_exp_timeout_val;
+       __le16 ooo_rx_seq_cnt;
+       __le16 reserved1;
+};
+
+struct fcoe_tstorm_fcoe_task_st_ctx_read_only {
+       u8 task_type;
+       u8 dev_type;
+       u8 conf_supported;
+       u8 glbl_q_num;
+       __le32 cid;
+       __le32 fcp_cmd_trns_size;
+       __le32 rsrv;
+};
+
+struct tstorm_fcoe_task_st_ctx {
+       struct fcoe_tstorm_fcoe_task_st_ctx_read_write read_write;
+       struct fcoe_tstorm_fcoe_task_st_ctx_read_only read_only;
+};
+
+struct mstorm_fcoe_task_ag_ctx {
+       u8 byte0;
+       u8 byte1;
+       __le16 icid;
+       u8 flags0;
+#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK    0xF
+#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT   0
+#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK       0x1
+#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT      4
+#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK         0x1
+#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT        5
+#define MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK               0x1
+#define MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT              6
+#define MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK               0x1
+#define MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT              7
+       u8 flags1;
+#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK      0x3
+#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT     0
+#define MSTORM_FCOE_TASK_AG_CTX_CF1_MASK                0x3
+#define MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT               2
+#define MSTORM_FCOE_TASK_AG_CTX_CF2_MASK                0x3
+#define MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT               4
+#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK   0x1
+#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT  6
+#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK              0x1
+#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT             7
+       u8 flags2;
+#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK              0x1
+#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT             0
+#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK            0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT           1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK            0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT           2
+#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK            0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT           3
+#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK            0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT           4
+#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK            0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT           5
+#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK  0x1
+#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6
+#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK            0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT           7
+       u8 cleanup_state;
+       __le32 received_bytes;
+       u8 byte3;
+       u8 glbl_q_num;
+       __le16 word1;
+       __le16 tid_to_xfer;
+       __le16 word3;
+       __le16 word4;
+       __le16 word5;
+       __le32 expected_bytes;
+       __le32 reg2;
+};
+
+struct mstorm_fcoe_task_st_ctx {
+       struct regpair rsp_buf_addr;
+       __le32 rsrv[2];
+       struct scsi_sgl_params sgl_params;
+       __le32 data_2_trns_rem;
+       __le32 data_buffer_offset;
+       __le16 parent_id;
+       __le16 flags;
+#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_MASK     0xF
+#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_SHIFT    0
+#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_MASK        0x3
+#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_SHIFT       4
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_MASK           0x1
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_SHIFT          6
+#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_MASK  0x1
+#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_SHIFT 7
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_MASK        0x3
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_SHIFT       8
+#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_MASK  0x1
+#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_SHIFT 10
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_MASK    0x1
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_SHIFT   11
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_MASK         0x1
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_SHIFT        12
+#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK           0x1
+#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT          13
+#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_MASK              0x3
+#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_SHIFT             14
+       struct scsi_cached_sges data_desc;
+};
+
+struct ustorm_fcoe_task_ag_ctx {
+       u8 reserved;
+       u8 byte1;
+       __le16 icid;
+       u8 flags0;
+#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK  0xF
+#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK     0x1
+#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT    4
+#define USTORM_FCOE_TASK_AG_CTX_BIT1_MASK             0x1
+#define USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT            5
+#define USTORM_FCOE_TASK_AG_CTX_CF0_MASK              0x3
+#define USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT             6
+       u8 flags1;
+#define USTORM_FCOE_TASK_AG_CTX_CF1_MASK              0x3
+#define USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT             0
+#define USTORM_FCOE_TASK_AG_CTX_CF2_MASK              0x3
+#define USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT             2
+#define USTORM_FCOE_TASK_AG_CTX_CF3_MASK              0x3
+#define USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT             4
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK     0x3
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT    6
+       u8 flags2;
+#define USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK            0x1
+#define USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT           0
+#define USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK            0x1
+#define USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT           1
+#define USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK            0x1
+#define USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT           2
+#define USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK            0x1
+#define USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT           3
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK  0x1
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
+#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK          0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT         5
+#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK          0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT         6
+#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK          0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT         7
+       u8 flags3;
+#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK          0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT         0
+#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK          0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT         1
+#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK          0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT         2
+#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK          0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT         3
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK   0xF
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT  4
+       __le32 dif_err_intervals;
+       __le32 dif_error_1st_interval;
+       __le32 global_cq_num;
+       __le32 reg3;
+       __le32 reg4;
+       __le32 reg5;
+};
+
+struct fcoe_task_context {
+       struct ystorm_fcoe_task_st_ctx ystorm_st_context;
+       struct regpair ystorm_st_padding[2];
+       struct tdif_task_context tdif_context;
+       struct ystorm_fcoe_task_ag_ctx ystorm_ag_context;
+       struct tstorm_fcoe_task_ag_ctx tstorm_ag_context;
+       struct timers_context timer_context;
+       struct tstorm_fcoe_task_st_ctx tstorm_st_context;
+       struct regpair tstorm_st_padding[2];
+       struct mstorm_fcoe_task_ag_ctx mstorm_ag_context;
+       struct mstorm_fcoe_task_st_ctx mstorm_st_context;
+       struct ustorm_fcoe_task_ag_ctx ustorm_ag_context;
+       struct rdif_task_context rdif_context;
+};
+
+struct fcoe_tx_stat {
+       struct regpair fcoe_tx_byte_cnt;
+       struct regpair fcoe_tx_data_pkt_cnt;
+       struct regpair fcoe_tx_xfer_pkt_cnt;
+       struct regpair fcoe_tx_other_pkt_cnt;
+};
+
+struct fcoe_wqe {
+       __le16 task_id;
+       __le16 flags;
+#define FCOE_WQE_REQ_TYPE_MASK       0xF
+#define FCOE_WQE_REQ_TYPE_SHIFT      0
+#define FCOE_WQE_SGL_MODE_MASK       0x1
+#define FCOE_WQE_SGL_MODE_SHIFT      4
+#define FCOE_WQE_CONTINUATION_MASK   0x1
+#define FCOE_WQE_CONTINUATION_SHIFT  5
+#define FCOE_WQE_SEND_AUTO_RSP_MASK  0x1
+#define FCOE_WQE_SEND_AUTO_RSP_SHIFT 6
+#define FCOE_WQE_RESERVED_MASK       0x1
+#define FCOE_WQE_RESERVED_SHIFT      7
+#define FCOE_WQE_NUM_SGES_MASK       0xF
+#define FCOE_WQE_NUM_SGES_SHIFT      8
+#define FCOE_WQE_RESERVED1_MASK      0xF
+#define FCOE_WQE_RESERVED1_SHIFT     12
+       union fcoe_additional_info_union additional_info_union;
+};
+
+struct xfrqe_prot_flags {
+       u8 flags;
+#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK  0xF
+#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0
+#define XFRQE_PROT_FLAGS_DIF_TO_PEER_MASK             0x1
+#define XFRQE_PROT_FLAGS_DIF_TO_PEER_SHIFT            4
+#define XFRQE_PROT_FLAGS_HOST_INTERFACE_MASK          0x3
+#define XFRQE_PROT_FLAGS_HOST_INTERFACE_SHIFT         5
+#define XFRQE_PROT_FLAGS_RESERVED_MASK                0x1
+#define XFRQE_PROT_FLAGS_RESERVED_SHIFT               7
+};
+
+struct fcoe_db_data {
+       u8 params;
+#define FCOE_DB_DATA_DEST_MASK         0x3
+#define FCOE_DB_DATA_DEST_SHIFT        0
+#define FCOE_DB_DATA_AGG_CMD_MASK      0x3
+#define FCOE_DB_DATA_AGG_CMD_SHIFT     2
+#define FCOE_DB_DATA_BYPASS_EN_MASK    0x1
+#define FCOE_DB_DATA_BYPASS_EN_SHIFT   4
+#define FCOE_DB_DATA_RESERVED_MASK     0x1
+#define FCOE_DB_DATA_RESERVED_SHIFT    5
+#define FCOE_DB_DATA_AGG_VAL_SEL_MASK  0x3
+#define FCOE_DB_DATA_AGG_VAL_SEL_SHIFT 6
+       u8 agg_flags;
+       __le16 sq_prod;
+};
+#endif /* __FCOE_COMMON__ */
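diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h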
index b3c0feb15ae918147190ace46924cbf76734bf6f..69949f8e354b0447c7950884bd205622a4653ab2 100644
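--- a/include/linux/qed/iscsi_common.h
+++ b/include/linux/qed/iscsi_common.h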
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef __ISCSI_COMMON__
 /* iSCSI HSI constants */
 #define ISCSI_DEFAULT_MTU       (1500)
 
-/* Current iSCSI HSI version number composed of two fields (16 bit) */
-#define ISCSI_HSI_MAJOR_VERSION (0)
-#define ISCSI_HSI_MINOR_VERSION (0)
-
 /* KWQ (kernel work queue) layer codes */
 #define ISCSI_SLOW_PATH_LAYER_CODE   (6)
 
-/* CQE completion status */
-#define ISCSI_EQE_COMPLETION_SUCCESS (0x0)
-#define ISCSI_EQE_RST_CONN_RCVD (0x1)
-
 /* iSCSI parameter defaults */
 #define ISCSI_DEFAULT_HEADER_DIGEST         (0)
 #define ISCSI_DEFAULT_DATA_DIGEST           (0)
 #define ISCSI_MIN_VAL_MAX_OUTSTANDING_R2T   (1)
 #define ISCSI_MAX_VAL_MAX_OUTSTANDING_R2T   (0xff)
 
+#define ISCSI_AHS_CNTL_SIZE 4
+
+#define ISCSI_WQE_NUM_SGES_SLOWIO           (0xf)
+
 /* iSCSI reserved params */
 #define ISCSI_ITT_ALL_ONES     (0xffffffff)
 #define ISCSI_TTT_ALL_ONES     (0xffffffff)
@@ -149,19 +169,6 @@ struct iscsi_async_msg_hdr {
        __le32 reserved7;
 };
 
-struct iscsi_sge {
-       struct regpair sge_addr;
-       __le16 sge_len;
-       __le16 reserved0;
-       __le32 reserved1;
-};
-
-struct iscsi_cached_sge_ctx {
-       struct iscsi_sge sge;
-       struct regpair reserved;
-       __le32 dsgl_curr_offset[2];
-};
-
 struct iscsi_cmd_hdr {
        __le16 reserved1;
        u8 flags_attr;
@@ -205,8 +212,13 @@ struct iscsi_common_hdr {
 #define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT  0
 #define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK  0xFF
 #define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT 24
-       __le32 lun_reserved[4];
-       __le32 data[6];
+       struct regpair lun_reserved;
+       __le32 itt;
+       __le32 ttt;
+       __le32 cmdstat_sn;
+       __le32 exp_statcmd_sn;
+       __le32 max_cmd_sn;
+       __le32 data[3];
 };
 
 struct iscsi_conn_offload_params {
@@ -222,8 +234,10 @@ struct iscsi_conn_offload_params {
 #define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0
 #define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK     0x1
 #define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT    1
-#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK       0x3F
-#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT      2
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK 0x1
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT        2
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK       0x1F
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT      3
        u8 pbl_page_size_log;
        u8 pbe_page_size_log;
        u8 default_cq;
@@ -254,8 +268,12 @@ struct iscsi_conn_update_ramrod_params {
 #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT    2
 #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK  0x1
 #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT 3
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK       0xF
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT      4
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_MASK  0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_SHIFT 4
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_MASK  0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_SHIFT 5
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK       0x3
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT      6
        u8 reserved0[3];
        __le32 max_seq_size;
        __le32 max_send_pdu_length;
@@ -288,7 +306,7 @@ struct iscsi_ext_cdb_cmd_hdr {
        __le32 expected_transfer_length;
        __le32 cmd_sn;
        __le32 exp_stat_sn;
-       struct iscsi_sge cdb_sge;
+       struct scsi_sge cdb_sge;
 };
 
 struct iscsi_login_req_hdr {
@@ -311,7 +329,7 @@ struct iscsi_login_req_hdr {
 #define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT  0
 #define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK  0xFF
 #define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT 24
-       __le32 isid_TABC;
+       __le32 isid_tabc;
        __le16 tsih;
        __le16 isid_d;
        __le32 itt;
@@ -464,7 +482,7 @@ struct iscsi_login_response_hdr {
 #define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT  0
 #define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK  0xFF
 #define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
-       __le32 isid_TABC;
+       __le32 isid_tabc;
        __le16 tsih;
        __le16 isid_d;
        __le32 itt;
@@ -495,8 +513,8 @@ struct iscsi_logout_response_hdr {
        __le32 exp_cmd_sn;
        __le32 max_cmd_sn;
        __le32 reserved4;
-       __le16 time2retain;
-       __le16 time2wait;
+       __le16 time_2_retain;
+       __le16 time_2_wait;
        __le32 reserved5[1];
 };
 
@@ -578,7 +596,7 @@ struct iscsi_tmf_response_hdr {
 #define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
        struct regpair reserved0;
        __le32 itt;
-       __le32 rtt;
+       __le32 reserved1;
        __le32 stat_sn;
        __le32 exp_cmd_sn;
        __le32 max_cmd_sn;
@@ -617,7 +635,7 @@ struct iscsi_reject_hdr {
 #define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_MASK  0xFF
 #define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_SHIFT 24
        struct regpair reserved0;
-       __le32 reserved1;
+       __le32 all_ones;
        __le32 reserved2;
        __le32 stat_sn;
        __le32 exp_cmd_sn;
@@ -664,7 +682,9 @@ struct iscsi_cqe_solicited {
        __le16 itid;
        u8 task_type;
        u8 fw_dbg_field;
-       __le32 reserved1[2];
+       u8 caused_conn_err;
+       u8 reserved0[3];
+       __le32 reserved1[1];
        union iscsi_task_hdr iscsi_hdr;
 };
 
@@ -688,8 +708,7 @@ union iscsi_cqe {
 enum iscsi_cqes_type {
        ISCSI_CQE_TYPE_SOLICITED = 1,
        ISCSI_CQE_TYPE_UNSOLICITED,
-       ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE
-          ,
+       ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE,
        ISCSI_CQE_TYPE_TASK_CLEANUP,
        ISCSI_CQE_TYPE_DUMMY,
        MAX_ISCSI_CQES_TYPE
@@ -704,35 +723,6 @@ enum iscsi_cqe_unsolicited_type {
        MAX_ISCSI_CQE_UNSOLICITED_TYPE
 };
 
-struct iscsi_virt_sgl_ctx {
-       struct regpair sgl_base;
-       struct regpair dsgl_base;
-       __le32 sgl_initial_offset;
-       __le32 dsgl_initial_offset;
-       __le32 dsgl_curr_offset[2];
-};
-
-struct iscsi_sgl_var_params {
-       u8 sgl_ptr;
-       u8 dsgl_ptr;
-       __le16 sge_offset;
-       __le16 dsge_offset;
-};
-
-struct iscsi_phys_sgl_ctx {
-       struct regpair sgl_base;
-       struct regpair dsgl_base;
-       u8 sgl_size;
-       u8 dsgl_size;
-       __le16 reserved;
-       struct iscsi_sgl_var_params var_params[2];
-};
-
-union iscsi_data_desc_ctx {
-       struct iscsi_virt_sgl_ctx virt_sgl;
-       struct iscsi_phys_sgl_ctx phys_sgl;
-       struct iscsi_cached_sge_ctx cached_sge;
-};
 
 struct iscsi_debug_modes {
        u8 flags;
@@ -748,8 +738,10 @@ struct iscsi_debug_modes {
 #define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4
 #define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK              0x1
 #define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT             5
-#define ISCSI_DEBUG_MODES_RESERVED0_MASK                       0x3
-#define ISCSI_DEBUG_MODES_RESERVED0_SHIFT                      6
+#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_MASK     0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_SHIFT    6
+#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_MASK             0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_SHIFT            7
 };
 
 struct iscsi_dif_flags {
@@ -769,9 +761,9 @@ enum iscsi_eqe_opcode {
        ISCSI_EVENT_TYPE_UPDATE_CONN,
        ISCSI_EVENT_TYPE_CLEAR_SQ,
        ISCSI_EVENT_TYPE_TERMINATE_CONN,
+       ISCSI_EVENT_TYPE_MAC_UPDATE_CONN,
        ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE,
        ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE,
-       RESERVED8,
        RESERVED9,
        ISCSI_EVENT_TYPE_START_OF_ERROR_TYPES = 10,
        ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD,
@@ -783,7 +775,6 @@ enum iscsi_eqe_opcode {
        ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2,
        ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR,
        ISCSI_EVENT_TYPE_TCP_CONN_ERROR,
-       ISCSI_EVENT_TYPE_ASYN_DELETE_OOO_ISLES,
        MAX_ISCSI_EQE_OPCODE
 };
 
@@ -833,31 +824,11 @@ enum iscsi_error_types {
        ISCSI_CONN_ERROR_PROTOCOL_ERR_DIF_TX,
        ISCSI_CONN_ERROR_SENSE_DATA_LENGTH,
        ISCSI_CONN_ERROR_DATA_PLACEMENT_ERROR,
+       ISCSI_CONN_ERROR_INVALID_ITT,
        ISCSI_ERROR_UNKNOWN,
        MAX_ISCSI_ERROR_TYPES
 };
 
-struct iscsi_mflags {
-       u8 mflags;
-#define ISCSI_MFLAGS_SLOW_IO_MASK     0x1
-#define ISCSI_MFLAGS_SLOW_IO_SHIFT    0
-#define ISCSI_MFLAGS_SINGLE_SGE_MASK  0x1
-#define ISCSI_MFLAGS_SINGLE_SGE_SHIFT 1
-#define ISCSI_MFLAGS_RESERVED_MASK    0x3F
-#define ISCSI_MFLAGS_RESERVED_SHIFT   2
-};
-
-struct iscsi_sgl {
-       struct regpair sgl_addr;
-       __le16 updated_sge_size;
-       __le16 updated_sge_offset;
-       __le32 byte_offset;
-};
-
-union iscsi_mstorm_sgl {
-       struct iscsi_sgl sgl_struct;
-       struct iscsi_sge single_sge;
-};
 
 enum iscsi_ramrod_cmd_id {
        ISCSI_RAMROD_CMD_ID_UNUSED = 0,
@@ -867,15 +838,16 @@ enum iscsi_ramrod_cmd_id {
        ISCSI_RAMROD_CMD_ID_UPDATE_CONN = 4,
        ISCSI_RAMROD_CMD_ID_TERMINATION_CONN = 5,
        ISCSI_RAMROD_CMD_ID_CLEAR_SQ = 6,
+       ISCSI_RAMROD_CMD_ID_MAC_UPDATE = 7,
        MAX_ISCSI_RAMROD_CMD_ID
 };
 
 struct iscsi_reg1 {
        __le32 reg1_map;
-#define ISCSI_REG1_NUM_FAST_SGES_MASK  0x7
-#define ISCSI_REG1_NUM_FAST_SGES_SHIFT 0
-#define ISCSI_REG1_RESERVED1_MASK      0x1FFFFFFF
-#define ISCSI_REG1_RESERVED1_SHIFT     3
+#define ISCSI_REG1_NUM_SGES_MASK   0xF
+#define ISCSI_REG1_NUM_SGES_SHIFT  0
+#define ISCSI_REG1_RESERVED1_MASK  0xFFFFFFF
+#define ISCSI_REG1_RESERVED1_SHIFT 4
 };
 
 union iscsi_seq_num {
@@ -883,6 +855,16 @@ union iscsi_seq_num {
        __le16 r2t_sn;
 };
 
+struct iscsi_spe_conn_mac_update {
+       struct iscsi_slow_path_hdr hdr;
+       __le16 conn_id;
+       __le32 fw_cid;
+       __le16 remote_mac_addr_lo;
+       __le16 remote_mac_addr_mid;
+       __le16 remote_mac_addr_hi;
+       u8 reserved0[2];
+};
+
 struct iscsi_spe_conn_offload {
        struct iscsi_slow_path_hdr hdr;
        __le16 conn_id;
@@ -933,22 +915,33 @@ struct iscsi_spe_func_init {
 };
 
 struct ystorm_iscsi_task_state {
-       union iscsi_data_desc_ctx sgl_ctx_union;
-       __le32 buffer_offset[2];
-       __le16 bytes_nxt_dif;
-       __le16 rxmit_bytes_nxt_dif;
-       union iscsi_seq_num seq_num_union;
-       u8 dif_bytes_leftover;
-       u8 rxmit_dif_bytes_leftover;
-       __le16 reuse_count;
-       struct iscsi_dif_flags dif_flags;
-       u8 local_comp;
+       struct scsi_cached_sges data_desc;
+       struct scsi_sgl_params sgl_params;
        __le32 exp_r2t_sn;
-       __le32 sgl_offset[2];
+       __le32 buffer_offset;
+       union iscsi_seq_num seq_num;
+       struct iscsi_dif_flags dif_flags;
+       u8 flags;
+#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_MASK  0x1
+#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_SHIFT 0
+#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_MASK     0x1
+#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_SHIFT    1
+#define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK   0x3F
+#define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT  2
+};
+
+struct ystorm_iscsi_task_rxmit_opt {
+       __le32 fast_rxmit_sge_offset;
+       __le32 scan_start_buffer_offset;
+       __le32 fast_rxmit_buffer_offset;
+       u8 scan_start_sgl_index;
+       u8 fast_rxmit_sgl_index;
+       __le16 reserved;
 };
 
 struct ystorm_iscsi_task_st_ctx {
        struct ystorm_iscsi_task_state state;
+       struct ystorm_iscsi_task_rxmit_opt rxmit_opt;
        union iscsi_task_hdr pdu_hdr;
 };
 
@@ -1118,25 +1111,16 @@ struct ustorm_iscsi_task_ag_ctx {
 };
 
 struct mstorm_iscsi_task_st_ctx {
-       union iscsi_mstorm_sgl sgl_union;
-       struct iscsi_dif_flags dif_flags;
-       struct iscsi_mflags flags;
-       u8 sgl_size;
-       u8 host_sge_index;
-       __le16 dix_cur_sge_offset;
-       __le16 dix_cur_sge_size;
-       __le32 data_offset_rtid;
-       u8 dif_offset;
-       u8 dix_sgl_size;
-       u8 dix_sge_index;
+       struct scsi_cached_sges data_desc;
+       struct scsi_sgl_params sgl_params;
+       __le32 rem_task_size;
+       __le32 data_buffer_offset;
        u8 task_type;
+       struct iscsi_dif_flags dif_flags;
+       u8 reserved0[2];
        struct regpair sense_db;
-       struct regpair dix_sgl_cur_sge;
-       __le32 rem_task_size;
-       __le16 reuse_count;
-       __le16 dif_data_residue;
-       u8 reserved0[4];
-       __le32 reserved1[1];
+       __le32 expected_itt;
+       __le32 reserved1;
 };
 
 struct ustorm_iscsi_task_st_ctx {
@@ -1150,7 +1134,7 @@ struct ustorm_iscsi_task_st_ctx {
 #define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_SHIFT            0
 #define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_MASK             0x7F
 #define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT            1
-       u8 reserved2;
+       struct iscsi_dif_flags dif_flags;
        __le16 reserved3;
        __le32 reserved4;
        __le32 reserved5;
@@ -1173,10 +1157,10 @@ struct ustorm_iscsi_task_st_ctx {
 #define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_SHIFT           2
 #define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK        0x1
 #define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT       3
-#define USTORM_ISCSI_TASK_ST_CTX_TOTALDATAACKED_DONE_MASK   0x1
-#define USTORM_ISCSI_TASK_ST_CTX_TOTALDATAACKED_DONE_SHIFT  4
-#define USTORM_ISCSI_TASK_ST_CTX_HQSCANNED_DONE_MASK        0x1
-#define USTORM_ISCSI_TASK_ST_CTX_HQSCANNED_DONE_SHIFT       5
+#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK  0x1
+#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT 4
+#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_MASK        0x1
+#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT       5
 #define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_MASK         0x1
 #define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_SHIFT        6
 #define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_MASK             0x1
@@ -1186,7 +1170,6 @@ struct ustorm_iscsi_task_st_ctx {
 
 struct iscsi_task_context {
        struct ystorm_iscsi_task_st_ctx ystorm_st_context;
-       struct regpair ystorm_st_padding[2];
        struct ystorm_iscsi_task_ag_ctx ystorm_ag_context;
        struct regpair ystorm_ag_padding[2];
        struct tdif_task_context tdif_context;
@@ -1238,32 +1221,22 @@ struct iscsi_uhqe {
 #define ISCSI_UHQE_TASK_ID_LO_SHIFT         24
 };
 
-struct iscsi_wqe_field {
-       __le32 contlen_cdbsize_field;
-#define ISCSI_WQE_FIELD_CONT_LEN_MASK  0xFFFFFF
-#define ISCSI_WQE_FIELD_CONT_LEN_SHIFT 0
-#define ISCSI_WQE_FIELD_CDB_SIZE_MASK  0xFF
-#define ISCSI_WQE_FIELD_CDB_SIZE_SHIFT 24
-};
-
-union iscsi_wqe_field_union {
-       struct iscsi_wqe_field cont_field;
-       __le32 prev_tid;
-};
 
 struct iscsi_wqe {
        __le16 task_id;
        u8 flags;
 #define ISCSI_WQE_WQE_TYPE_MASK        0x7
 #define ISCSI_WQE_WQE_TYPE_SHIFT       0
-#define ISCSI_WQE_NUM_FAST_SGES_MASK   0x7
-#define ISCSI_WQE_NUM_FAST_SGES_SHIFT  3
-#define ISCSI_WQE_PTU_INVALIDATE_MASK  0x1
-#define ISCSI_WQE_PTU_INVALIDATE_SHIFT 6
+#define ISCSI_WQE_NUM_SGES_MASK  0xF
+#define ISCSI_WQE_NUM_SGES_SHIFT 3
 #define ISCSI_WQE_RESPONSE_MASK        0x1
 #define ISCSI_WQE_RESPONSE_SHIFT       7
        struct iscsi_dif_flags prot_flags;
-       union iscsi_wqe_field_union cont_prevtid_union;
+       __le32 contlen_cdbsize;
+#define ISCSI_WQE_CONT_LEN_MASK  0xFFFFFF
+#define ISCSI_WQE_CONT_LEN_SHIFT 0
+#define ISCSI_WQE_CDB_SIZE_MASK  0xFF
+#define ISCSI_WQE_CDB_SIZE_SHIFT 24
 };
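
[Editor's note: throughout these HSI headers, sub-word fields are packed into a single word via paired _MASK/_SHIFT defines. A minimal, self-contained sketch of composing the new contlen_cdbsize word follows; the SET_FIELD macro is a local stand-in for the equivalent helper in qed's common_hsi.h, and the values are illustrative only.]

#include <stdint.h>
#include <stdio.h>

#define ISCSI_WQE_CONT_LEN_MASK  0xFFFFFF
#define ISCSI_WQE_CONT_LEN_SHIFT 0
#define ISCSI_WQE_CDB_SIZE_MASK  0xFF
#define ISCSI_WQE_CDB_SIZE_SHIFT 24

/* local stand-in for qed's SET_FIELD helper */
#define SET_FIELD(value, name, flag)                                       \
	do {                                                               \
		(value) &= ~((uint32_t)(name ## _MASK) << (name ## _SHIFT)); \
		(value) |= ((uint32_t)((flag) & (name ## _MASK)) << (name ## _SHIFT)); \
	} while (0)

int main(void)
{
	uint32_t contlen_cdbsize = 0;

	SET_FIELD(contlen_cdbsize, ISCSI_WQE_CONT_LEN, 0x1000); /* 4KB of data */
	SET_FIELD(contlen_cdbsize, ISCSI_WQE_CDB_SIZE, 16);     /* 16-byte CDB */
	printf("0x%08x\n", contlen_cdbsize);                    /* prints 0x10001000 */
	return 0;
}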
 
 enum iscsi_wqe_type {
@@ -1284,17 +1257,15 @@ struct iscsi_xhqe {
        u8 total_ahs_length;
        u8 opcode;
        u8 flags;
-#define ISCSI_XHQE_NUM_FAST_SGES_MASK  0x7
-#define ISCSI_XHQE_NUM_FAST_SGES_SHIFT 0
-#define ISCSI_XHQE_FINAL_MASK          0x1
-#define ISCSI_XHQE_FINAL_SHIFT         3
-#define ISCSI_XHQE_SUPER_IO_MASK       0x1
-#define ISCSI_XHQE_SUPER_IO_SHIFT      4
-#define ISCSI_XHQE_STATUS_BIT_MASK     0x1
-#define ISCSI_XHQE_STATUS_BIT_SHIFT    5
-#define ISCSI_XHQE_RESERVED_MASK       0x3
-#define ISCSI_XHQE_RESERVED_SHIFT      6
-       union iscsi_seq_num seq_num_union;
+#define ISCSI_XHQE_FINAL_MASK       0x1
+#define ISCSI_XHQE_FINAL_SHIFT      0
+#define ISCSI_XHQE_STATUS_BIT_MASK  0x1
+#define ISCSI_XHQE_STATUS_BIT_SHIFT 1
+#define ISCSI_XHQE_NUM_SGES_MASK    0xF
+#define ISCSI_XHQE_NUM_SGES_SHIFT   2
+#define ISCSI_XHQE_RESERVED0_MASK   0x3
+#define ISCSI_XHQE_RESERVED0_SHIFT  6
+       union iscsi_seq_num seq_num;
        __le16 reserved1;
 };
 
@@ -1302,14 +1273,6 @@ struct mstorm_iscsi_stats_drv {
        struct regpair iscsi_rx_dropped_pdus_task_not_valid;
 };
 
-struct ooo_opaque {
-       __le32 cid;
-       u8 drop_isle;
-       u8 drop_size;
-       u8 ooo_opcode;
-       u8 ooo_isle;
-};
-
 struct pstorm_iscsi_stats_drv {
        struct regpair iscsi_tx_bytes_cnt;
        struct regpair iscsi_tx_packet_cnt;
index 7e441bdeabdc9a583daec851b5781416cbdd1d57..5cd7a4608c9b7699ee649aeafda7f5953434c24d 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef _QED_CHAIN_H
 #include <linux/slab.h>
 #include <linux/qed/common_hsi.h>
 
-/* dma_addr_t manip */
-#define DMA_LO_LE(x)            cpu_to_le32(lower_32_bits(x))
-#define DMA_HI_LE(x)            cpu_to_le32(upper_32_bits(x))
-#define DMA_REGPAIR_LE(x, val)  do { \
-                                       (x).hi = DMA_HI_LE((val)); \
-                                       (x).lo = DMA_LO_LE((val)); \
-                               } while (0)
-
-#define HILO_GEN(hi, lo, type)  ((((type)(hi)) << 32) + (lo))
-#define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64)
-#define HILO_64_REGPAIR(regpair)        (HILO_64(regpair.hi, regpair.lo))
-#define HILO_DMA_REGPAIR(regpair)      ((dma_addr_t)HILO_64_REGPAIR(regpair))
-
 enum qed_chain_mode {
        /* Each Page contains a next pointer at its end */
        QED_CHAIN_MODE_NEXT_PTR,
@@ -69,23 +80,6 @@ struct qed_chain_pbl_u32 {
        u32 cons_page_idx;
 };
 
-struct qed_chain_pbl {
-       /* Base address of a pre-allocated buffer for pbl */
-       dma_addr_t      p_phys_table;
-       void            *p_virt_table;
-
-       /* Table for keeping the virtual addresses of the chain pages,
-        * respectively to the physical addresses in the pbl table.
-        */
-       void **pp_virt_addr_tbl;
-
-       /* Index to current used page by producer/consumer */
-       union {
-               struct qed_chain_pbl_u16 pbl16;
-               struct qed_chain_pbl_u32 pbl32;
-       } u;
-};
-
 struct qed_chain_u16 {
        /* Cyclic index of next element to produce/consume */
        u16 prod_idx;
@@ -99,46 +93,78 @@ struct qed_chain_u32 {
 };
 
 struct qed_chain {
-       void                    *p_virt_addr;
-       dma_addr_t              p_phys_addr;
-       void                    *p_prod_elem;
-       void                    *p_cons_elem;
+       /* fastpath portion of the chain - required for commands such
+        * as produce / consume.
+        */
+       /* Point to next element to produce/consume */
+       void *p_prod_elem;
+       void *p_cons_elem;
+
+       /* Fastpath portions of the PBL [if exists] */
+       struct {
+               /* Table for keeping the virtual addresses of the chain pages,
+                * corresponding to the physical addresses in the pbl table.
+                */
+               void **pp_virt_addr_tbl;
 
-       enum qed_chain_mode     mode;
-       enum qed_chain_use_mode intended_use; /* used to produce/consume */
-       enum qed_chain_cnt_type cnt_type;
+               union {
+                       struct qed_chain_pbl_u16 u16;
+                       struct qed_chain_pbl_u32 u32;
+               } c;
+       } pbl;
 
        union {
                struct qed_chain_u16 chain16;
                struct qed_chain_u32 chain32;
        } u;
 
+       /* Capacity counts only usable elements */
+       u32 capacity;
        u32 page_cnt;
 
-       /* Number of elements - capacity is for usable elements only,
-        * while size will contain total number of elements [for entire chain].
+       enum qed_chain_mode mode;
+
+       /* Elements information for fast calculations */
+       u16 elem_per_page;
+       u16 elem_per_page_mask;
+       u16 elem_size;
+       u16 next_page_mask;
+       u16 usable_per_page;
+       u8 elem_unusable;
+
+       u8 cnt_type;
+
+       /* Slowpath of the chain - required for initialization and destruction,
+        * but isn't involved in regular functionality.
         */
-       u32 capacity;
+
+       /* Base address of a pre-allocated buffer for pbl */
+       struct {
+               dma_addr_t p_phys_table;
+               void *p_virt_table;
+       } pbl_sp;
+
+       /* Address of first page of the chain - the address is required
+        * for fastpath operation [consume/produce] but only for the SINGLE
+        * flavour which isn't considered fastpath [== SPQ].
+        */
+       void *p_virt_addr;
+       dma_addr_t p_phys_addr;
+
+       /* Total number of elements [for entire chain] */
        u32 size;
 
-       /* Elements information for fast calculations */
-       u16                     elem_per_page;
-       u16                     elem_per_page_mask;
-       u16                     elem_unusable;
-       u16                     usable_per_page;
-       u16                     elem_size;
-       u16                     next_page_mask;
-       struct qed_chain_pbl    pbl;
+       u8 intended_use;
 };
 
 #define QED_CHAIN_PBL_ENTRY_SIZE        (8)
 #define QED_CHAIN_PAGE_SIZE             (0x1000)
 #define ELEMS_PER_PAGE(elem_size)       (QED_CHAIN_PAGE_SIZE / (elem_size))
 
-#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)     \
-       ((mode == QED_CHAIN_MODE_NEXT_PTR) ?         \
-        (1 + ((sizeof(struct qed_chain_next) - 1) / \
-              (elem_size))) : 0)
+#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)        \
+       (((mode) == QED_CHAIN_MODE_NEXT_PTR) ?           \
+        (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / \
+                  (elem_size))) : 0)
 
 #define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
        ((u32)(ELEMS_PER_PAGE(elem_size) -     \
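
[Editor's note: a worked example of the geometry these macros produce, assuming a 64-bit build where sizeof(struct qed_chain_next) is 16 bytes (a regpair plus a pointer) and 8-byte elements:]

	ELEMS_PER_PAGE(8)                    = 0x1000 / 8      = 512
	UNUSABLE_ELEMS_PER_PAGE(8, NEXT_PTR) = 1 + (16 - 1)/8  = 2
	USABLE_ELEMS_PER_PAGE(8, NEXT_PTR)   = 512 - 2         = 510

In PBL mode nothing is lost to chaining, so all 512 elements stay usable; note the unusable count now fits in the u8 the reworked structure reserves for it.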
@@ -199,7 +225,7 @@ static inline u16 qed_chain_get_usable_per_page(struct qed_chain *p_chain)
        return p_chain->usable_per_page;
 }
 
-static inline u16 qed_chain_get_unusable_per_page(struct qed_chain *p_chain)
+static inline u8 qed_chain_get_unusable_per_page(struct qed_chain *p_chain)
 {
        return p_chain->elem_unusable;
 }
@@ -211,7 +237,7 @@ static inline u32 qed_chain_get_page_cnt(struct qed_chain *p_chain)
 
 static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain)
 {
-       return p_chain->pbl.p_phys_table;
+       return p_chain->pbl_sp.p_phys_table;
 }
 
 /**
@@ -227,10 +253,10 @@ static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain)
 static inline void
 qed_chain_advance_page(struct qed_chain *p_chain,
                       void **p_next_elem, void *idx_to_inc, void *page_to_inc)
-
 {
        struct qed_chain_next *p_next = NULL;
        u32 page_index = 0;
+
        switch (p_chain->mode) {
        case QED_CHAIN_MODE_NEXT_PTR:
                p_next = *p_next_elem;
@@ -318,7 +344,7 @@ static inline void *qed_chain_produce(struct qed_chain *p_chain)
                if ((p_chain->u.chain16.prod_idx &
                     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
                        p_prod_idx = &p_chain->u.chain16.prod_idx;
-                       p_prod_page_idx = &p_chain->pbl.u.pbl16.prod_page_idx;
+                       p_prod_page_idx = &p_chain->pbl.c.u16.prod_page_idx;
                        qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
                                               p_prod_idx, p_prod_page_idx);
                }
@@ -327,7 +353,7 @@ static inline void *qed_chain_produce(struct qed_chain *p_chain)
                if ((p_chain->u.chain32.prod_idx &
                     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
                        p_prod_idx = &p_chain->u.chain32.prod_idx;
-                       p_prod_page_idx = &p_chain->pbl.u.pbl32.prod_page_idx;
+                       p_prod_page_idx = &p_chain->pbl.c.u32.prod_page_idx;
                        qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
                                               p_prod_idx, p_prod_page_idx);
                }
@@ -391,7 +417,7 @@ static inline void *qed_chain_consume(struct qed_chain *p_chain)
                if ((p_chain->u.chain16.cons_idx &
                     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
                        p_cons_idx = &p_chain->u.chain16.cons_idx;
-                       p_cons_page_idx = &p_chain->pbl.u.pbl16.cons_page_idx;
+                       p_cons_page_idx = &p_chain->pbl.c.u16.cons_page_idx;
                        qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
                                               p_cons_idx, p_cons_page_idx);
                }
@@ -400,8 +426,8 @@ static inline void *qed_chain_consume(struct qed_chain *p_chain)
                if ((p_chain->u.chain32.cons_idx &
                     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
                        p_cons_idx = &p_chain->u.chain32.cons_idx;
-                       p_cons_page_idx = &p_chain->pbl.u.pbl32.cons_page_idx;
-               qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
+                       p_cons_page_idx = &p_chain->pbl.c.u32.cons_page_idx;
+                       qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
                                               p_cons_idx, p_cons_page_idx);
                }
                p_chain->u.chain32.cons_idx++;
@@ -442,25 +468,26 @@ static inline void qed_chain_reset(struct qed_chain *p_chain)
                u32 reset_val = p_chain->page_cnt - 1;
 
                if (is_chain_u16(p_chain)) {
-                       p_chain->pbl.u.pbl16.prod_page_idx = (u16)reset_val;
-                       p_chain->pbl.u.pbl16.cons_page_idx = (u16)reset_val;
+                       p_chain->pbl.c.u16.prod_page_idx = (u16)reset_val;
+                       p_chain->pbl.c.u16.cons_page_idx = (u16)reset_val;
                } else {
-                       p_chain->pbl.u.pbl32.prod_page_idx = reset_val;
-                       p_chain->pbl.u.pbl32.cons_page_idx = reset_val;
+                       p_chain->pbl.c.u32.prod_page_idx = reset_val;
+                       p_chain->pbl.c.u32.cons_page_idx = reset_val;
                }
        }
 
        switch (p_chain->intended_use) {
-       case QED_CHAIN_USE_TO_CONSUME_PRODUCE:
-       case QED_CHAIN_USE_TO_PRODUCE:
-               /* Do nothing */
-               break;
-
        case QED_CHAIN_USE_TO_CONSUME:
                /* produce empty elements */
                for (i = 0; i < p_chain->capacity; i++)
                        qed_chain_recycle_consumed(p_chain);
                break;
+
+       case QED_CHAIN_USE_TO_CONSUME_PRODUCE:
+       case QED_CHAIN_USE_TO_PRODUCE:
+       default:
+               /* Do nothing */
+               break;
        }
 }
 
@@ -486,13 +513,13 @@ static inline void qed_chain_init_params(struct qed_chain *p_chain,
        p_chain->p_virt_addr = NULL;
        p_chain->p_phys_addr = 0;
        p_chain->elem_size      = elem_size;
-       p_chain->intended_use = intended_use;
+       p_chain->intended_use = (u8)intended_use;
        p_chain->mode           = mode;
-       p_chain->cnt_type = cnt_type;
+       p_chain->cnt_type = (u8)cnt_type;
 
-       p_chain->elem_per_page          = ELEMS_PER_PAGE(elem_size);
+       p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
        p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
-       p_chain->elem_per_page_mask     = p_chain->elem_per_page - 1;
+       p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
        p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
        p_chain->next_page_mask = (p_chain->usable_per_page &
                                   p_chain->elem_per_page_mask);
@@ -501,8 +528,8 @@ static inline void qed_chain_init_params(struct qed_chain *p_chain,
        p_chain->capacity = p_chain->usable_per_page * page_cnt;
        p_chain->size = p_chain->elem_per_page * page_cnt;
 
-       p_chain->pbl.p_phys_table = 0;
-       p_chain->pbl.p_virt_table = NULL;
+       p_chain->pbl_sp.p_phys_table = 0;
+       p_chain->pbl_sp.p_virt_table = NULL;
        p_chain->pbl.pp_virt_addr_tbl = NULL;
 }
 
@@ -543,8 +570,8 @@ static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain,
                                          dma_addr_t p_phys_pbl,
                                          void **pp_virt_addr_tbl)
 {
-       p_chain->pbl.p_phys_table = p_phys_pbl;
-       p_chain->pbl.p_virt_table = p_virt_pbl;
+       p_chain->pbl_sp.p_phys_table = p_phys_pbl;
+       p_chain->pbl_sp.p_virt_table = p_virt_pbl;
        p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl;
 }
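
[Editor's note: for orientation, a minimal produce/consume round-trip against an initialized chain might look as follows; "entry" and error handling are illustrative, not part of the patch.]

	void *elem;

	/* producer: claim the next slot, advancing prod_idx (and the
	 * PBL page index when crossing a page boundary)
	 */
	elem = qed_chain_produce(p_chain);
	if (elem)
		memcpy(elem, &entry, p_chain->elem_size);

	/* consumer: take the oldest element, advancing cons_idx */
	elem = qed_chain_consume(p_chain);

	/* on a consume-only chain, hand the slot back as an empty element */
	qed_chain_recycle_consumed(p_chain);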
 
index 4475a9d8ae15ca0e56b564a66fef0e4eceb624d7..1eba803cb7f10a0c96a6e8c1cc40cfddfb386b71 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef _QED_ETH_IF_H
 #include <linux/qed/qed_if.h>
 #include <linux/qed/qed_iov_if.h>
 
+struct qed_queue_start_common_params {
+       /* Should always be relative to the entity sending this. */
+       u8 vport_id;
+       u16 queue_id;
+
+       /* Relative, but relevant only for PFs */
+       u8 stats_id;
+
+       /* These are always absolute */
+       u16 sb;
+       u8 sb_idx;
+};
+
+struct qed_rxq_start_ret_params {
+       void __iomem *p_prod;
+       void *p_handle;
+};
+
+struct qed_txq_start_ret_params {
+       void __iomem *p_doorbell;
+       void *p_handle;
+};
+
 struct qed_dev_eth_info {
        struct qed_dev_info common;
 
@@ -22,11 +69,15 @@ struct qed_dev_eth_info {
        u8      num_tc;
 
        u8      port_mac[ETH_ALEN];
-       u8      num_vlan_filters;
+       u16     num_vlan_filters;
+       u16     num_mac_filters;
+
+       /* Legacy VF - this affects the datapath, so qede has to know */
+       bool is_legacy;
 };
 
 struct qed_update_vport_rss_params {
-       u16     rss_ind_table[128];
+       void    *rss_ind_table[128];
        u32     rss_key[10];
        u8      rss_caps;
 };
@@ -45,6 +96,7 @@ struct qed_update_vport_params {
 
 struct qed_start_vport_params {
        bool remove_inner_vlan;
+       bool handle_ptp_pkts;
        bool gro_enable;
        bool drop_ttl0;
        u8 vport_id;
@@ -52,18 +104,6 @@ struct qed_start_vport_params {
        bool clear_stats;
 };
 
-struct qed_stop_rxq_params {
-       u8 rss_id;
-       u8 rx_queue_id;
-       u8 vport_id;
-       bool eq_completion_only;
-};
-
-struct qed_stop_txq_params {
-       u8 rss_id;
-       u8 tx_queue_id;
-};
-
 enum qed_filter_rx_mode_type {
        QED_FILTER_RX_MODE_TYPE_REGULAR,
        QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC,
@@ -108,15 +148,6 @@ struct qed_filter_params {
        union qed_filter_type_params filter;
 };
 
-struct qed_queue_start_common_params {
-       u8 rss_id;
-       u8 queue_id;
-       u8 vport_id;
-       u16 sb;
-       u16 sb_idx;
-       u16 vf_qid;
-};
-
 struct qed_tunn_params {
        u16 vxlan_port;
        u8 update_vxlan_port;
@@ -126,7 +157,16 @@ struct qed_tunn_params {
 
 struct qed_eth_cb_ops {
        struct qed_common_cb_ops common;
-       void (*force_mac) (void *dev, u8 *mac);
+       void (*force_mac) (void *dev, u8 *mac, bool forced);
+};
+
+#define QED_MAX_PHC_DRIFT_PPB   291666666
+
+enum qed_ptp_filter_type {
+       QED_PTP_FILTER_L2,
+       QED_PTP_FILTER_IPV4,
+       QED_PTP_FILTER_IPV4_IPV6,
+       QED_PTP_FILTER_L2_IPV4_IPV6
 };
 
 #ifdef CONFIG_DCB
@@ -188,6 +228,17 @@ struct qed_eth_dcbnl_ops {
 };
 #endif
 
+struct qed_eth_ptp_ops {
+       int (*hwtstamp_tx_on)(struct qed_dev *);
+       int (*cfg_rx_filters)(struct qed_dev *, enum qed_ptp_filter_type);
+       int (*read_rx_ts)(struct qed_dev *, u64 *);
+       int (*read_tx_ts)(struct qed_dev *, u64 *);
+       int (*read_cc)(struct qed_dev *, u64 *);
+       int (*disable)(struct qed_dev *);
+       int (*adjfreq)(struct qed_dev *, s32);
+       int (*enable)(struct qed_dev *);
+};
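
[Editor's note: these ops back qede's PTP hardware-clock plumbing. A sketch of how a hwtstamp configuration path might drive them; the flow is assumed, not part of this header.]

	const struct qed_eth_ptp_ops *ptp = edev->ops->ptp;
	int rc;

	rc = ptp->enable(edev->cdev);           /* bring up the PHC */
	rc = ptp->hwtstamp_tx_on(edev->cdev);   /* enable Tx timestamps */
	rc = ptp->cfg_rx_filters(edev->cdev, QED_PTP_FILTER_IPV4_IPV6);

	/* from the PTP clock's frequency-adjust callback: */
	rc = ptp->adjfreq(edev->cdev, ppb);     /* |ppb| bounded by QED_MAX_PHC_DRIFT_PPB */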
+
 struct qed_eth_ops {
        const struct qed_common_ops *common;
 #ifdef CONFIG_QED_SRIOV
@@ -196,6 +247,7 @@ struct qed_eth_ops {
 #ifdef CONFIG_DCB
        const struct qed_eth_dcbnl_ops *dcb;
 #endif
+       const struct qed_eth_ptp_ops *ptp;
 
        int (*fill_dev_info)(struct qed_dev *cdev,
                             struct qed_dev_eth_info *info);
@@ -216,24 +268,24 @@ struct qed_eth_ops {
                            struct qed_update_vport_params *params);
 
        int (*q_rx_start)(struct qed_dev *cdev,
+                         u8 rss_num,
                          struct qed_queue_start_common_params *params,
                          u16 bd_max_bytes,
                          dma_addr_t bd_chain_phys_addr,
                          dma_addr_t cqe_pbl_addr,
                          u16 cqe_pbl_size,
-                         void __iomem **pp_prod);
+                         struct qed_rxq_start_ret_params *ret_params);
 
-       int (*q_rx_stop)(struct qed_dev *cdev,
-                        struct qed_stop_rxq_params *params);
+       int (*q_rx_stop)(struct qed_dev *cdev, u8 rss_id, void *handle);
 
        int (*q_tx_start)(struct qed_dev *cdev,
+                         u8 rss_num,
                          struct qed_queue_start_common_params *params,
                          dma_addr_t pbl_addr,
                          u16 pbl_size,
-                         void __iomem **pp_doorbell);
+                         struct qed_txq_start_ret_params *ret_params);
 
-       int (*q_tx_stop)(struct qed_dev *cdev,
-                        struct qed_stop_txq_params *params);
+       int (*q_tx_stop)(struct qed_dev *cdev, u8 rss_id, void *handle);
 
        int (*filter_config)(struct qed_dev *cdev,
                             struct qed_filter_params *params);
@@ -249,6 +301,14 @@ struct qed_eth_ops {
 
        int (*tunn_config)(struct qed_dev *cdev,
                           struct qed_tunn_params *params);
+
+       int (*ntuple_filter_config)(struct qed_dev *cdev, void *cookie,
+                                   dma_addr_t mapping, u16 length,
+                                   u16 vport_id, u16 rx_queue_id,
+                                   bool add_filter);
+
+       int (*configure_arfs_searcher)(struct qed_dev *cdev,
+                                      bool en_searcher);
 };
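
[Editor's note: the queue-start rework above replaces raw out-pointers with ret_params structures whose opaque p_handle is what the stop calls now take. A sketch of how an L2 driver would consume the new contract; variable names are illustrative.]

	struct qed_queue_start_common_params q_params = { /* vport/queue/sb ids */ };
	struct qed_rxq_start_ret_params ret_params;
	int rc;

	rc = edev->ops->q_rx_start(edev->cdev, rxq_index, &q_params,
				   rx_buf_size, bd_chain_phys_addr,
				   qed_chain_get_pbl_phys(&rxq->rx_comp_ring),
				   (u16)qed_chain_get_page_cnt(&rxq->rx_comp_ring),
				   &ret_params);
	if (rc)
		return rc;

	rxq->hw_rxq_prod_addr = ret_params.p_prod;  /* producer doorbell */
	rxq->handle = ret_params.p_handle;          /* opaque queue id */

	/* ...datapath runs... */

	rc = edev->ops->q_rx_stop(edev->cdev, rxq_index, rxq->handle);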
 
 const struct qed_eth_ops *qed_get_eth_ops(void);
diff --git a/include/linux/qed/qed_fcoe_if.h b/include/linux/qed/qed_fcoe_if.h
new file mode 100644 (file)
index 0000000..bd6bcb8
--- /dev/null
@@ -0,0 +1,145 @@
+#ifndef _QED_FCOE_IF_H
+#define _QED_FCOE_IF_H
+#include <linux/types.h>
+#include <linux/qed/qed_if.h>
+struct qed_fcoe_stats {
+       u64 fcoe_rx_byte_cnt;
+       u64 fcoe_rx_data_pkt_cnt;
+       u64 fcoe_rx_xfer_pkt_cnt;
+       u64 fcoe_rx_other_pkt_cnt;
+       u32 fcoe_silent_drop_pkt_cmdq_full_cnt;
+       u32 fcoe_silent_drop_pkt_rq_full_cnt;
+       u32 fcoe_silent_drop_pkt_crc_error_cnt;
+       u32 fcoe_silent_drop_pkt_task_invalid_cnt;
+       u32 fcoe_silent_drop_total_pkt_cnt;
+
+       u64 fcoe_tx_byte_cnt;
+       u64 fcoe_tx_data_pkt_cnt;
+       u64 fcoe_tx_xfer_pkt_cnt;
+       u64 fcoe_tx_other_pkt_cnt;
+};
+
+struct qed_dev_fcoe_info {
+       struct qed_dev_info common;
+
+       void __iomem *primary_dbq_rq_addr;
+       void __iomem *secondary_bdq_rq_addr;
+};
+
+struct qed_fcoe_params_offload {
+       dma_addr_t sq_pbl_addr;
+       dma_addr_t sq_curr_page_addr;
+       dma_addr_t sq_next_page_addr;
+
+       u8 src_mac[ETH_ALEN];
+       u8 dst_mac[ETH_ALEN];
+
+       u16 tx_max_fc_pay_len;
+       u16 e_d_tov_timer_val;
+       u16 rec_tov_timer_val;
+       u16 rx_max_fc_pay_len;
+       u16 vlan_tag;
+
+       struct fc_addr_nw s_id;
+       u8 max_conc_seqs_c3;
+       struct fc_addr_nw d_id;
+       u8 flags;
+       u8 def_q_idx;
+};
+
+#define MAX_TID_BLOCKS_FCOE (512)
+struct qed_fcoe_tid {
+       u32 size;               /* In bytes per task */
+       u32 num_tids_per_block;
+       u8 *blocks[MAX_TID_BLOCKS_FCOE];
+};
+
+struct qed_fcoe_cb_ops {
+       struct qed_common_cb_ops common;
+       u32 (*get_login_failures)(void *cookie);
+};
+
+void qed_fcoe_set_pf_params(struct qed_dev *cdev,
+                           struct qed_fcoe_pf_params *params);
+
+/**
+ * struct qed_fcoe_ops - qed FCoE operations.
+ * @common:            common operations pointer
+ * @fill_dev_info:     fills FCoE specific information
+ *                     @param cdev
+ *                     @param info
+ *                     @return 0 on success, otherwise error value.
+ * @register_ops:      register FCoE operations
+ *                     @param cdev
+ *                     @param ops - specified using qed_fcoe_cb_ops
+ *                     @param cookie - driver private
+ * @ll2:               light L2 operations pointer
+ * @start:             starts fcoe in FW
+ *                     @param cdev
+ *                     @param tasks - qed will fill information about tasks
+ *                     return 0 on success, otherwise error value.
+ * @stop:              stops fcoe in FW
+ *                     @param cdev
+ *                     return 0 on success, otherwise error value.
+ * @acquire_conn:      acquire a new fcoe connection
+ *                     @param cdev
+ *                     @param handle - qed will fill handle that should be
+ *                             used henceforth as identifier of the
+ *                             connection.
+ *                     @param p_doorbell - qed will fill the address of the
+ *                             doorbell.
+ *                     return 0 on success, otherwise error value.
+ * @release_conn:      release a previously acquired fcoe connection
+ *                     @param cdev
+ *                     @param handle - the connection handle.
+ *                     return 0 on success, otherwise error value.
+ * @offload_conn:      configures an offloaded connection
+ *                     @param cdev
+ *                     @param handle - the connection handle.
+ *                     @param conn_info - the configuration to use for the
+ *                             offload.
+ *                     return 0 on success, otherwise error value.
+ * @destroy_conn:      stops an offloaded connection
+ *                     @param cdev
+ *                     @param handle - the connection handle.
+ *                     @param terminate_params
+ *                     return 0 on success, otherwise error value.
+ * @get_stats:         gets FCoE related statistics
+ *                     @param cdev
+ *                     @param stats - pointer to struct that will be
+ *                             filled with stats
+ *                     return 0 on success, error otherwise.
+ */
+struct qed_fcoe_ops {
+       const struct qed_common_ops *common;
+
+       int (*fill_dev_info)(struct qed_dev *cdev,
+                            struct qed_dev_fcoe_info *info);
+
+       void (*register_ops)(struct qed_dev *cdev,
+                            struct qed_fcoe_cb_ops *ops, void *cookie);
+
+       const struct qed_ll2_ops *ll2;
+
+       int (*start)(struct qed_dev *cdev, struct qed_fcoe_tid *tasks);
+
+       int (*stop)(struct qed_dev *cdev);
+
+       int (*acquire_conn)(struct qed_dev *cdev,
+                           u32 *handle,
+                           u32 *fw_cid, void __iomem **p_doorbell);
+
+       int (*release_conn)(struct qed_dev *cdev, u32 handle);
+
+       int (*offload_conn)(struct qed_dev *cdev,
+                           u32 handle,
+                           struct qed_fcoe_params_offload *conn_info);
+       int (*destroy_conn)(struct qed_dev *cdev,
+                           u32 handle, dma_addr_t terminate_params);
+
+       int (*get_stats)(struct qed_dev *cdev, struct qed_fcoe_stats *stats);
+};
+
+const struct qed_fcoe_ops *qed_get_fcoe_ops(void);
+void qed_put_fcoe_ops(void);
+#endif
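
[Editor's note: an illustrative binding sequence for a storage driver against the ops above; the callback handlers, "ctx" cookie, and error paths are hypothetical.]

	static struct qed_fcoe_cb_ops cb_ops = {
		.common = { .link_update = my_link_update },    /* hypothetical */
		.get_login_failures = my_get_login_failures,    /* hypothetical */
	};
	const struct qed_fcoe_ops *qed_ops;
	struct qed_dev_fcoe_info dev_info;
	struct qed_fcoe_tid tasks;
	int rc;

	qed_ops = qed_get_fcoe_ops();
	if (!qed_ops)
		return -EINVAL;

	rc = qed_ops->fill_dev_info(cdev, &dev_info);
	qed_ops->register_ops(cdev, &cb_ops, ctx /* driver cookie */);
	rc = qed_ops->start(cdev, &tasks);      /* qed fills per-task TID info */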
index d6c4177df7cb690537384d8626fe41690579146a..8a708d3efe76197051938dc8ec2518c068155f84 100644 (file)
@@ -1,10 +1,33 @@
 /* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * Copyright (c) 2015 QLogic Corporation
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef _QED_IF_H
@@ -34,7 +57,8 @@ enum dcbx_protocol_type {
        DCBX_MAX_PROTOCOL_TYPE
 };
 
-#ifdef CONFIG_DCB
+#define QED_ROCE_PROTOCOL_INDEX (3)
+
 #define QED_LLDP_CHASSIS_ID_STAT_LEN 4
 #define QED_LLDP_PORT_ID_STAT_LEN 4
 #define QED_DCBX_MAX_APP_PROTOCOL 32
@@ -130,7 +154,6 @@ struct qed_dcbx_get {
        struct qed_dcbx_remote_params remote;
        struct qed_dcbx_admin_params local;
 };
-#endif
 
 enum qed_led_mode {
        QED_LED_MODE_OFF,
@@ -144,6 +167,7 @@ enum qed_led_mode {
 #define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))
 
 #define QED_COALESCE_MAX 0xFF
+#define QED_DEFAULT_RX_USECS 12
 
 /* forward */
 struct qed_dev;
@@ -154,6 +178,44 @@ struct qed_eth_pf_params {
         * to update_pf_params routine invoked before slowpath start
         */
        u16 num_cons;
+
+       /* To enable arfs, a positive number needs to be set prior to
+        * HW-init [as filters require allocated searcher ILT memory].
+        * This will set the maximal number of configured steering-filters.
+        */
+       u32 num_arfs_filters;
+};
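
[Editor's note: per the comment above, aRFS memory must be reserved before HW init via the common update_pf_params hook. A sketch; the counts are illustrative.]

	struct qed_pf_params pf_params;

	memset(&pf_params, 0, sizeof(pf_params));
	pf_params.eth_pf_params.num_cons = 64;          /* illustrative */
	pf_params.eth_pf_params.num_arfs_filters = 256; /* reserves searcher ILT */
	qed_ops->common->update_pf_params(cdev, &pf_params);
	/* slowpath/HW init follows; leaving this 0 keeps aRFS unavailable */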
+
+struct qed_fcoe_pf_params {
+       /* The following parameters are used during protocol-init */
+       u64 glbl_q_params_addr;
+       u64 bdq_pbl_base_addr[2];
+
+       /* The following parameters are used during HW-init
+        * and these parameters need to be passed as arguments
+        * to update_pf_params routine invoked before slowpath start
+        */
+       u16 num_cons;
+       u16 num_tasks;
+
+       /* The following parameters are used during protocol-init */
+       u16 sq_num_pbl_pages;
+
+       u16 cq_num_entries;
+       u16 cmdq_num_entries;
+       u16 rq_buffer_log_size;
+       u16 mtu;
+       u16 dummy_icid;
+       u16 bdq_xoff_threshold[2];
+       u16 bdq_xon_threshold[2];
+       u16 rq_buffer_size;
+       u8 num_cqs;             /* num of global CQs */
+       u8 log_page_size;
+       u8 gl_rq_pi;
+       u8 gl_cmd_pi;
+       u8 debug_mode;
+       u8 is_target;
+       u8 bdq_pbl_num_entries[2];
 };
 
 /* Most of the parameters below are described in the FW iSCSI / TCP HSI */
@@ -163,6 +225,7 @@ struct qed_iscsi_pf_params {
        u32 max_cwnd;
        u16 cq_num_entries;
        u16 cmdq_num_entries;
+       u32 two_msl_timer;
        u16 dup_ack_threshold;
        u16 tx_sws_timer;
        u16 min_rto;
@@ -206,7 +269,6 @@ struct qed_rdma_pf_params {
         * the doorbell BAR).
         */
        u32 min_dpis;           /* number of requested DPIs */
-       u32 num_mrs;            /* number of requested memory regions */
        u32 num_qps;            /* number of requested Queue Pairs */
        u32 num_srqs;           /* number of requested SRQ */
        u8 roce_edpm_mode;      /* see QED_ROCE_EDPM_MODE_ENABLE */
@@ -218,6 +280,7 @@ struct qed_rdma_pf_params {
 
 struct qed_pf_params {
        struct qed_eth_pf_params eth_pf_params;
+       struct qed_fcoe_pf_params fcoe_pf_params;
        struct qed_iscsi_pf_params iscsi_pf_params;
        struct qed_rdma_pf_params rdma_pf_params;
 };
@@ -242,6 +305,11 @@ struct qed_sb_info {
        struct qed_dev          *cdev;
 };
 
+enum qed_dev_type {
+       QED_DEV_TYPE_BB,
+       QED_DEV_TYPE_AH,
+};
+
 struct qed_dev_info {
        unsigned long   pci_mem_start;
        unsigned long   pci_mem_end;
@@ -260,20 +328,27 @@ struct qed_dev_info {
        /* MFW version */
        u32             mfw_rev;
 
-       bool rdma_supported;
-
        u32             flash_size;
        u8              mf_mode;
        bool            tx_switching;
+       bool            rdma_supported;
+       u16             mtu;
+
+       bool wol_support;
+
+       enum qed_dev_type dev_type;
 };
 
 enum qed_sb_type {
        QED_SB_TYPE_L2_QUEUE,
+       QED_SB_TYPE_CNQ,
+       QED_SB_TYPE_STORAGE,
 };
 
 enum qed_protocol {
        QED_PROTOCOL_ETH,
        QED_PROTOCOL_ISCSI,
+       QED_PROTOCOL_FCOE,
 };
 
 struct qed_link_params {
@@ -303,9 +378,11 @@ struct qed_link_params {
 struct qed_link_output {
        bool    link_up;
 
-       u32     supported_caps;         /* In SUPPORTED defs */
-       u32     advertised_caps;        /* In ADVERTISED defs */
-       u32     lp_caps;                /* In ADVERTISED defs */
+       /* In QED_LM_* defs */
+       u32     supported_caps;
+       u32     advertised_caps;
+       u32     lp_caps;
+
        u32     speed;                  /* In Mb/s */
        u8      duplex;                 /* In DUPLEX defs */
        u8      port;                   /* In PORT defs */
@@ -341,8 +418,10 @@ struct qed_int_info {
 };
 
 struct qed_common_cb_ops {
+       void (*arfs_filter_op)(void *dev, void *fltr, u8 fw_rc);
        void    (*link_update)(void                     *dev,
                               struct qed_link_output   *link);
+       void    (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type);
 };
 
 struct qed_selftest_ops {
@@ -381,6 +460,15 @@ struct qed_selftest_ops {
  * @return 0 on success, error otherwise.
  */
        int (*selftest_clock)(struct qed_dev *cdev);
+
+/**
+ * @brief selftest_nvram - Perform nvram test
+ *
+ * @param cdev
+ *
+ * @return 0 on success, error otherwise.
+ */
+       int (*selftest_nvram) (struct qed_dev *cdev);
 };
 
 struct qed_common_ops {
@@ -437,6 +525,14 @@ struct qed_common_ops {
 
        void            (*simd_handler_clean)(struct qed_dev *cdev,
                                              int index);
+       int (*dbg_grc)(struct qed_dev *cdev,
+                      void *buffer, u32 *num_dumped_bytes);
+
+       int (*dbg_grc_size)(struct qed_dev *cdev);
+
+       int (*dbg_all_data) (struct qed_dev *cdev, void *buffer);
+
+       int (*dbg_all_data_size) (struct qed_dev *cdev);
 
 /**
  * @brief can_link_change - can the instance change the link or not
@@ -530,6 +626,41 @@ struct qed_common_ops {
  */
        int (*set_led)(struct qed_dev *cdev,
                       enum qed_led_mode mode);
+
+/**
+ * @brief update_drv_state - API to inform the change in the driver state.
+ *
+ * @param cdev
+ * @param active
+ *
+ */
+       int (*update_drv_state)(struct qed_dev *cdev, bool active);
+
+/**
+ * @brief update_mac - API to inform the change in the mac address
+ *
+ * @param cdev
+ * @param mac
+ *
+ */
+       int (*update_mac)(struct qed_dev *cdev, u8 *mac);
+
+/**
+ * @brief update_mtu - API to inform the change in the mtu
+ *
+ * @param cdev
+ * @param mtu
+ *
+ */
+       int (*update_mtu)(struct qed_dev *cdev, u16 mtu);
+
+/**
+ * @brief update_wol - update of changes in the WoL configuration
+ *
+ * @param cdev
+ * @param enabled - true iff WoL should be enabled.
+ */
+       int (*update_wol) (struct qed_dev *cdev, bool enabled);
 };
 
 #define MASK_FIELD(_name, _value) \
@@ -606,8 +737,9 @@ enum DP_MODULE {
        QED_MSG_SP      = 0x100000,
        QED_MSG_STORAGE = 0x200000,
        QED_MSG_CXT     = 0x800000,
+       QED_MSG_LL2     = 0x1000000,
        QED_MSG_ILT     = 0x2000000,
-       QED_MSG_ROCE    = 0x4000000,
+       QED_MSG_RDMA    = 0x4000000,
        QED_MSG_DEBUG   = 0x8000000,
        /* to be added...up to 0x8000000 */
 };
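
[Editor's note: the DP_MODULE values are single-bit flags, so a verbosity mask is built by ORing them; an illustrative combination covering the renamed/added modules:]

	u32 dp_module = QED_MSG_SP | QED_MSG_LL2 | QED_MSG_RDMA;  /* 0x5100000 */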
@@ -618,7 +750,7 @@ enum qed_mf_mode {
        QED_MF_NPAR,
 };
 
-struct qed_eth_stats {
+struct qed_eth_stats_common {
        u64     no_buff_discards;
        u64     packet_too_big_discard;
        u64     ttl0_discard;
@@ -650,11 +782,6 @@ struct qed_eth_stats {
        u64     rx_256_to_511_byte_packets;
        u64     rx_512_to_1023_byte_packets;
        u64     rx_1024_to_1518_byte_packets;
-       u64     rx_1519_to_1522_byte_packets;
-       u64     rx_1519_to_2047_byte_packets;
-       u64     rx_2048_to_4095_byte_packets;
-       u64     rx_4096_to_9216_byte_packets;
-       u64     rx_9217_to_16383_byte_packets;
        u64     rx_crc_errors;
        u64     rx_mac_crtl_frames;
        u64     rx_pause_frames;
@@ -671,14 +798,8 @@ struct qed_eth_stats {
        u64     tx_256_to_511_byte_packets;
        u64     tx_512_to_1023_byte_packets;
        u64     tx_1024_to_1518_byte_packets;
-       u64     tx_1519_to_2047_byte_packets;
-       u64     tx_2048_to_4095_byte_packets;
-       u64     tx_4096_to_9216_byte_packets;
-       u64     tx_9217_to_16383_byte_packets;
        u64     tx_pause_frames;
        u64     tx_pfc_frames;
-       u64     tx_lpi_entry_count;
-       u64     tx_total_collisions;
        u64     brb_truncates;
        u64     brb_discards;
        u64     rx_mac_bytes;
@@ -693,6 +814,34 @@ struct qed_eth_stats {
        u64     tx_mac_ctrl_frames;
 };
 
+struct qed_eth_stats_bb {
+       u64 rx_1519_to_1522_byte_packets;
+       u64 rx_1519_to_2047_byte_packets;
+       u64 rx_2048_to_4095_byte_packets;
+       u64 rx_4096_to_9216_byte_packets;
+       u64 rx_9217_to_16383_byte_packets;
+       u64 tx_1519_to_2047_byte_packets;
+       u64 tx_2048_to_4095_byte_packets;
+       u64 tx_4096_to_9216_byte_packets;
+       u64 tx_9217_to_16383_byte_packets;
+       u64 tx_lpi_entry_count;
+       u64 tx_total_collisions;
+};
+
+struct qed_eth_stats_ah {
+       u64 rx_1519_to_max_byte_packets;
+       u64 tx_1519_to_max_byte_packets;
+};
+
+struct qed_eth_stats {
+       struct qed_eth_stats_common common;
+
+       union {
+               struct qed_eth_stats_bb bb;
+               struct qed_eth_stats_ah ah;
+       };
+};
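
[Editor's note: after the BB/AH split, the extended byte-range counters must be read through the union member matching the device family. A sketch, assuming the L2 get_vport_stats hook and qede-style dev_info plumbing:]

	struct qed_eth_stats stats;
	u64 rx_large;

	edev->ops->get_vport_stats(edev->cdev, &stats);

	rx_large = stats.common.rx_1024_to_1518_byte_packets;
	if (edev->dev_info.common.dev_type == QED_DEV_TYPE_BB)
		rx_large += stats.bb.rx_1519_to_2047_byte_packets +
			    stats.bb.rx_2048_to_4095_byte_packets;
	else
		rx_large += stats.ah.rx_1519_to_max_byte_packets;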
+
 #define QED_SB_IDX              0x0002
 
 #define RX_PI           0
index 5a4f8d0899e9d94f9cf87e527103b739e3bb7391..ac2e6a3199a36c88eca3e8126bab85500e37fd98 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef _QED_IOV_IF_H
@@ -29,6 +53,8 @@ struct qed_iov_hv_ops {
 
        int (*set_rate) (struct qed_dev *cdev, int vfid,
                         u32 min_rate, u32 max_rate);
+
+       int (*set_trust) (struct qed_dev *cdev, int vfid, bool trust);
 };
 
 #endif
diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h
new file mode 100644 (file)
index 0000000..3414649
--- /dev/null
@@ -0,0 +1,255 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _QED_ISCSI_IF_H
+#define _QED_ISCSI_IF_H
+#include <linux/types.h>
+#include <linux/qed/qed_if.h>
+
+typedef int (*iscsi_event_cb_t) (void *context,
+                                u8 fw_event_code, void *fw_handle);
+struct qed_iscsi_stats {
+       u64 iscsi_rx_bytes_cnt;
+       u64 iscsi_rx_packet_cnt;
+       u64 iscsi_rx_new_ooo_isle_events_cnt;
+       u32 iscsi_cmdq_threshold_cnt;
+       u32 iscsi_rq_threshold_cnt;
+       u32 iscsi_immq_threshold_cnt;
+
+       u64 iscsi_rx_dropped_pdus_task_not_valid;
+
+       u64 iscsi_rx_data_pdu_cnt;
+       u64 iscsi_rx_r2t_pdu_cnt;
+       u64 iscsi_rx_total_pdu_cnt;
+
+       u64 iscsi_tx_go_to_slow_start_event_cnt;
+       u64 iscsi_tx_fast_retransmit_event_cnt;
+
+       u64 iscsi_tx_data_pdu_cnt;
+       u64 iscsi_tx_r2t_pdu_cnt;
+       u64 iscsi_tx_total_pdu_cnt;
+
+       u64 iscsi_tx_bytes_cnt;
+       u64 iscsi_tx_packet_cnt;
+};
+
+struct qed_dev_iscsi_info {
+       struct qed_dev_info common;
+
+       void __iomem *primary_dbq_rq_addr;
+       void __iomem *secondary_bdq_rq_addr;
+
+       u8 num_cqs;
+};
+
+struct qed_iscsi_id_params {
+       u8 mac[ETH_ALEN];
+       u32 ip[4];
+       u16 port;
+};
+
+struct qed_iscsi_params_offload {
+       u8 layer_code;
+       dma_addr_t sq_pbl_addr;
+       u32 initial_ack;
+
+       struct qed_iscsi_id_params src;
+       struct qed_iscsi_id_params dst;
+       u16 vlan_id;
+       u8 tcp_flags;
+       u8 ip_version;
+       u8 default_cq;
+
+       u8 ka_max_probe_cnt;
+       u8 dup_ack_theshold;
+       u32 rcv_next;
+       u32 snd_una;
+       u32 snd_next;
+       u32 snd_max;
+       u32 snd_wnd;
+       u32 rcv_wnd;
+       u32 snd_wl1;
+       u32 cwnd;
+       u32 ss_thresh;
+       u16 srtt;
+       u16 rtt_var;
+       u32 ts_time;
+       u32 ts_recent;
+       u32 ts_recent_age;
+       u32 total_rt;
+       u32 ka_timeout_delta;
+       u32 rt_timeout_delta;
+       u8 dup_ack_cnt;
+       u8 snd_wnd_probe_cnt;
+       u8 ka_probe_cnt;
+       u8 rt_cnt;
+       u32 flow_label;
+       u32 ka_timeout;
+       u32 ka_interval;
+       u32 max_rt_time;
+       u32 initial_rcv_wnd;
+       u8 ttl;
+       u8 tos_or_tc;
+       u16 remote_port;
+       u16 local_port;
+       u16 mss;
+       u8 snd_wnd_scale;
+       u8 rcv_wnd_scale;
+       u32 ts_ticks_per_second;
+       u16 da_timeout_value;
+       u8 ack_frequency;
+};
+
+struct qed_iscsi_params_update {
+       u8 update_flag;
+#define QED_ISCSI_CONN_HD_EN            BIT(0)
+#define QED_ISCSI_CONN_DD_EN            BIT(1)
+#define QED_ISCSI_CONN_INITIAL_R2T      BIT(2)
+#define QED_ISCSI_CONN_IMMEDIATE_DATA   BIT(3)
+
+       u32 max_seq_size;
+       u32 max_recv_pdu_length;
+       u32 max_send_pdu_length;
+       u32 first_seq_length;
+       u32 exp_stat_sn;
+};
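
[Editor's note: a sketch of mapping negotiated login parameters onto update_flag for the update_conn op defined below; the values and "exp_statsn" are illustrative.]

	struct qed_iscsi_params_update update;

	memset(&update, 0, sizeof(update));
	update.update_flag = QED_ISCSI_CONN_HD_EN |     /* header digest on */
			     QED_ISCSI_CONN_INITIAL_R2T;
	update.max_recv_pdu_length = 8192;              /* illustrative */
	update.exp_stat_sn = exp_statsn;
	rc = iscsi_ops->update_conn(cdev, handle, &update);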
+
+#define MAX_TID_BLOCKS_ISCSI (512)
+struct qed_iscsi_tid {
+       u32 size;               /* In bytes per task */
+       u32 num_tids_per_block;
+       u8 *blocks[MAX_TID_BLOCKS_ISCSI];
+};
+
+struct qed_iscsi_cb_ops {
+       struct qed_common_cb_ops common;
+};
+
+/**
+ * struct qed_iscsi_ops - qed iSCSI operations.
+ * @common:            common operations pointer
+ * @ll2:               light L2 operations pointer
+ * @fill_dev_info:     fills iSCSI specific information
+ *                     @param cdev
+ *                     @param info
+ *                     @return 0 on success, otherwise error value.
+ * @register_ops:      register iscsi operations
+ *                     @param cdev
+ *                     @param ops - specified using qed_iscsi_cb_ops
+ *                     @param cookie - driver private
+ * @start:             starts iscsi in FW
+ *                     @param cdev
+ *                     @param tasks - qed will fill information about tasks
+ *                     return 0 on success, otherwise error value.
+ * @stop:              stops iscsi in FW
+ *                     @param cdev
+ *                     return 0 on success, otherwise error value.
+ * @acquire_conn:      acquire a new iscsi connection
+ *                     @param cdev
+ *                     @param handle - qed will fill handle that should be
+ *                             used henceforth as identifier of the
+ *                             connection.
+ *                     @param p_doorbell - qed will fill the address of the
+ *                             doorbell.
+ *                     @return 0 on success, otherwise error value.
+ * @release_conn:      release a previously acquired iscsi connection
+ *                     @param cdev
+ *                     @param handle - the connection handle.
+ *                     @return 0 on success, otherwise error value.
+ * @offload_conn:      configures an offloaded connection
+ *                     @param cdev
+ *                     @param handle - the connection handle.
+ *                     @param conn_info - the configuration to use for the
+ *                             offload.
+ *                     @return 0 on success, otherwise error value.
+ * @update_conn:       updates an offloaded connection
+ *                     @param cdev
+ *                     @param handle - the connection handle.
+ *                     @param conn_info - the configuration to use for the
+ *                             offload.
+ *                     @return 0 on success, otherwise error value.
+ * @destroy_conn:      stops an offloaded connection
+ *                     @param cdev
+ *                     @param handle - the connection handle.
+ *                     @return 0 on success, otherwise error value.
+ * @clear_sq:          clears all tasks in the sq
+ *                     @param cdev
+ *                     @param handle - the connection handle.
+ *                     @return 0 on success, otherwise error value.
+ * @get_stats:         gets iSCSI related statistics
+ *                     @param cdev
+ *                     @param stats - pointer to struct that will be
+ *                             filled with stats
+ *                     @return 0 on success, error otherwise.
+ */
+struct qed_iscsi_ops {
+       const struct qed_common_ops *common;
+
+       const struct qed_ll2_ops *ll2;
+
+       int (*fill_dev_info)(struct qed_dev *cdev,
+                            struct qed_dev_iscsi_info *info);
+
+       void (*register_ops)(struct qed_dev *cdev,
+                            struct qed_iscsi_cb_ops *ops, void *cookie);
+
+       int (*start)(struct qed_dev *cdev,
+                    struct qed_iscsi_tid *tasks,
+                    void *event_context, iscsi_event_cb_t async_event_cb);
+
+       int (*stop)(struct qed_dev *cdev);
+
+       int (*acquire_conn)(struct qed_dev *cdev,
+                           u32 *handle,
+                           u32 *fw_cid, void __iomem **p_doorbell);
+
+       int (*release_conn)(struct qed_dev *cdev, u32 handle);
+
+       int (*offload_conn)(struct qed_dev *cdev,
+                           u32 handle,
+                           struct qed_iscsi_params_offload *conn_info);
+
+       int (*update_conn)(struct qed_dev *cdev,
+                          u32 handle,
+                          struct qed_iscsi_params_update *conn_info);
+
+       int (*destroy_conn)(struct qed_dev *cdev, u32 handle, u8 abrt_conn);
+
+       int (*clear_sq)(struct qed_dev *cdev, u32 handle);
+
+       int (*get_stats)(struct qed_dev *cdev,
+                        struct qed_iscsi_stats *stats);
+};
+
+const struct qed_iscsi_ops *qed_get_iscsi_ops(void);
+void qed_put_iscsi_ops(void);
+#endif
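
[Editor's note: an illustrative connection lifecycle for the ops above; "conn_info" and error handling are elided/hypothetical.]

	u32 handle, fw_cid;
	void __iomem *p_doorbell;
	int rc;

	rc = iscsi_ops->acquire_conn(cdev, &handle, &fw_cid, &p_doorbell);
	rc = iscsi_ops->offload_conn(cdev, handle, &conn_info);
	/* login completes; runtime changes go through update_conn() */
	rc = iscsi_ops->clear_sq(cdev, handle); /* e.g. during recovery */
	rc = iscsi_ops->destroy_conn(cdev, handle, 1 /* abrt_conn */);
	rc = iscsi_ops->release_conn(cdev, handle);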
diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h
new file mode 100644 (file)
index 0000000..4fb4666
--- /dev/null
@@ -0,0 +1,162 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _QED_LL2_IF_H
+#define _QED_LL2_IF_H
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/skbuff.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/qed/qed_if.h>
+
+struct qed_ll2_stats {
+       u64 gsi_invalid_hdr;
+       u64 gsi_invalid_pkt_length;
+       u64 gsi_unsupported_pkt_typ;
+       u64 gsi_crcchksm_error;
+
+       u64 packet_too_big_discard;
+       u64 no_buff_discard;
+
+       u64 rcv_ucast_bytes;
+       u64 rcv_mcast_bytes;
+       u64 rcv_bcast_bytes;
+       u64 rcv_ucast_pkts;
+       u64 rcv_mcast_pkts;
+       u64 rcv_bcast_pkts;
+
+       u64 sent_ucast_bytes;
+       u64 sent_mcast_bytes;
+       u64 sent_bcast_bytes;
+       u64 sent_ucast_pkts;
+       u64 sent_mcast_pkts;
+       u64 sent_bcast_pkts;
+};
+
+#define QED_LL2_UNUSED_HANDLE   (0xff)
+
+struct qed_ll2_cb_ops {
+       int (*rx_cb)(void *, struct sk_buff *, u32, u32);
+       int (*tx_cb)(void *, struct sk_buff *, bool);
+};
+
+struct qed_ll2_params {
+       u16 mtu;
+       bool drop_ttl0_packets;
+       bool rx_vlan_stripping;
+       u8 tx_tc;
+       bool frags_mapped;
+       u8 ll2_mac_address[ETH_ALEN];
+};
+
+struct qed_ll2_ops {
+/**
+ * @brief start - initializes ll2
+ *
+ * @param cdev
+ * @param params - protocol driver configuration for the ll2.
+ *
+ * @return 0 on success, otherwise error value.
+ */
+       int (*start)(struct qed_dev *cdev, struct qed_ll2_params *params);
+
+/**
+ * @brief stop - stops the ll2
+ *
+ * @param cdev
+ *
+ * @return 0 on success, otherwise error value.
+ */
+       int (*stop)(struct qed_dev *cdev);
+
+/**
+ * @brief start_xmit - transmits an skb over the ll2 interface
+ *
+ * @param cdev
+ * @param skb
+ *
+ * @return 0 on success, otherwise error value.
+ */
+       int (*start_xmit)(struct qed_dev *cdev, struct sk_buff *skb);
+
+/**
+ * @brief register_cb_ops - protocol driver registers the callbacks for Rx/Tx
+ * packets. Should be called before `start'.
+ *
+ * @param cdev
+ * @param cookie - to be passed to the callback functions.
+ * @param ops - the callback functions to register for Rx / Tx.
+ */
+       void (*register_cb_ops)(struct qed_dev *cdev,
+                               const struct qed_ll2_cb_ops *ops,
+                               void *cookie);
+
+/**
+ * @brief get_stats - retrieves LL2 related statistics
+ *
+ * @param cdev
+ * @param stats - pointer to struct that would be filled with stats
+ *
+ * @return 0 on success, error otherwise.
+ */
+       int (*get_stats)(struct qed_dev *cdev, struct qed_ll2_stats *stats);
+};
+
+#ifdef CONFIG_QED_LL2
+int qed_ll2_alloc_if(struct qed_dev *);
+void qed_ll2_dealloc_if(struct qed_dev *);
+#else
+static const struct qed_ll2_ops qed_ll2_ops_pass = {
+       .start = NULL,
+       .stop = NULL,
+       .start_xmit = NULL,
+       .register_cb_ops = NULL,
+       .get_stats = NULL,
+};
+
+static inline int qed_ll2_alloc_if(struct qed_dev *cdev)
+{
+       return 0;
+}
+
+static inline void qed_ll2_dealloc_if(struct qed_dev *cdev)
+{
+}
+#endif
+#endif
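
A hedged usage sketch for the interface above: register the Rx/Tx callbacks
first, then start LL2. All example_* names and parameter values are
hypothetical, and the callback argument meanings beyond the skb are not
specified by this header, so the bodies are placeholders.

static int example_rx_cb(void *cookie, struct sk_buff *skb, u32 arg0, u32 arg1)
{
	/* hand the received skb to the protocol driver */
	return 0;
}

static int example_tx_cb(void *cookie, struct sk_buff *skb, bool flag)
{
	/* reclaim the transmitted skb */
	return 0;
}

static const struct qed_ll2_cb_ops example_cbs = {
	.rx_cb = example_rx_cb,
	.tx_cb = example_tx_cb,
};

static int example_ll2_up(const struct qed_ll2_ops *ll2_ops,
			  struct qed_dev *cdev, void *cookie)
{
	struct qed_ll2_params params = {
		.mtu = 1500,		/* arbitrary example value */
		.drop_ttl0_packets = true,
		.rx_vlan_stripping = true,
		.tx_tc = 0,
	};

	/* register_cb_ops must precede start() */
	ll2_ops->register_cb_ops(cdev, &example_cbs, cookie);
	return ll2_ops->start(cdev, &params);
}
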
diff --git a/include/linux/qed/qed_roce_if.h b/include/linux/qed/qed_roce_if.h
new file mode 100644 (file)
index 0000000..f742d43
--- /dev/null
+++ b/include/linux/qed/qed_roce_if.h
@@ -0,0 +1,604 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _QED_ROCE_IF_H
+#define _QED_ROCE_IF_H
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_ll2_if.h>
+#include <linux/qed/rdma_common.h>
+
+enum qed_roce_ll2_tx_dest {
+       /* Light L2 TX Destination to the Network */
+       QED_ROCE_LL2_TX_DEST_NW,
+
+       /* Light L2 TX Destination to the Loopback */
+       QED_ROCE_LL2_TX_DEST_LB,
+       QED_ROCE_LL2_TX_DEST_MAX
+};
+
+#define QED_RDMA_MAX_CNQ_SIZE               (0xFFFF)
+
+/* rdma interface */
+
+enum qed_roce_qp_state {
+       QED_ROCE_QP_STATE_RESET,
+       QED_ROCE_QP_STATE_INIT,
+       QED_ROCE_QP_STATE_RTR,
+       QED_ROCE_QP_STATE_RTS,
+       QED_ROCE_QP_STATE_SQD,
+       QED_ROCE_QP_STATE_ERR,
+       QED_ROCE_QP_STATE_SQE
+};
+
+enum qed_rdma_tid_type {
+       QED_RDMA_TID_REGISTERED_MR,
+       QED_RDMA_TID_FMR,
+       QED_RDMA_TID_MW_TYPE1,
+       QED_RDMA_TID_MW_TYPE2A
+};
+
+struct qed_rdma_events {
+       void *context;
+       void (*affiliated_event)(void *context, u8 fw_event_code,
+                                void *fw_handle);
+       void (*unaffiliated_event)(void *context, u8 event_code);
+};
+
+struct qed_rdma_device {
+       u32 vendor_id;
+       u32 vendor_part_id;
+       u32 hw_ver;
+       u64 fw_ver;
+
+       u64 node_guid;
+       u64 sys_image_guid;
+
+       u8 max_cnq;
+       u8 max_sge;
+       u8 max_srq_sge;
+       u16 max_inline;
+       u32 max_wqe;
+       u32 max_srq_wqe;
+       u8 max_qp_resp_rd_atomic_resc;
+       u8 max_qp_req_rd_atomic_resc;
+       u64 max_dev_resp_rd_atomic_resc;
+       u32 max_cq;
+       u32 max_qp;
+       u32 max_srq;
+       u32 max_mr;
+       u64 max_mr_size;
+       u32 max_cqe;
+       u32 max_mw;
+       u32 max_fmr;
+       u32 max_mr_mw_fmr_pbl;
+       u64 max_mr_mw_fmr_size;
+       u32 max_pd;
+       u32 max_ah;
+       u8 max_pkey;
+       u16 max_srq_wr;
+       u8 max_stats_queues;
+       u32 dev_caps;
+
+       /* Ability to support RNR-NAK generation */
+
+#define QED_RDMA_DEV_CAP_RNR_NAK_MASK                           0x1
+#define QED_RDMA_DEV_CAP_RNR_NAK_SHIFT                  0
+       /* Ability to support shutdown port */
+#define QED_RDMA_DEV_CAP_SHUTDOWN_PORT_MASK                     0x1
+#define QED_RDMA_DEV_CAP_SHUTDOWN_PORT_SHIFT                    1
+       /* Ability to support port active event */
+#define QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_MASK         0x1
+#define QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_SHIFT                2
+       /* Ability to support port change event */
+#define QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT_MASK         0x1
+#define QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT_SHIFT                3
+       /* Ability to support system image GUID */
+#define QED_RDMA_DEV_CAP_SYS_IMAGE_MASK                 0x1
+#define QED_RDMA_DEV_CAP_SYS_IMAGE_SHIFT                        4
+       /* Ability to support a bad P_Key counter */
+#define QED_RDMA_DEV_CAP_BAD_PKEY_CNT_MASK                      0x1
+#define QED_RDMA_DEV_CAP_BAD_PKEY_CNT_SHIFT                     5
+       /* Ability to support atomic operations */
+#define QED_RDMA_DEV_CAP_ATOMIC_OP_MASK                 0x1
+#define QED_RDMA_DEV_CAP_ATOMIC_OP_SHIFT                        6
+       /* Ability to support CQ resizing */
+#define QED_RDMA_DEV_CAP_RESIZE_CQ_MASK                 0x1
+#define QED_RDMA_DEV_CAP_RESIZE_CQ_SHIFT                        7
+       /* Ability to support modifying the maximum number of
+        * outstanding work requests per QP
+        */
+#define QED_RDMA_DEV_CAP_RESIZE_MAX_WR_MASK                     0x1
+#define QED_RDMA_DEV_CAP_RESIZE_MAX_WR_SHIFT                    8
+       /* Ability to support automatic path migration */
+#define QED_RDMA_DEV_CAP_AUTO_PATH_MIG_MASK                     0x1
+#define QED_RDMA_DEV_CAP_AUTO_PATH_MIG_SHIFT                    9
+       /* Ability to support the base memory management extensions */
+#define QED_RDMA_DEV_CAP_BASE_MEMORY_EXT_MASK                   0x1
+#define QED_RDMA_DEV_CAP_BASE_MEMORY_EXT_SHIFT          10
+#define QED_RDMA_DEV_CAP_BASE_QUEUE_EXT_MASK                    0x1
+#define QED_RDMA_DEV_CAP_BASE_QUEUE_EXT_SHIFT                   11
+       /* Ability to support multiple page sizes per memory region */
+#define QED_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_MASK             0x1
+#define QED_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_SHIFT            12
+       /* Ability to support block list physical buffer lists */
+#define QED_RDMA_DEV_CAP_BLOCK_MODE_MASK                        0x1
+#define QED_RDMA_DEV_CAP_BLOCK_MODE_SHIFT                       13
+       /* Ability to support zero-based virtual addresses */
+#define QED_RDMA_DEV_CAP_ZBVA_MASK                              0x1
+#define QED_RDMA_DEV_CAP_ZBVA_SHIFT                             14
+       /* Ability to support local invalidate fencing */
+#define QED_RDMA_DEV_CAP_LOCAL_INV_FENCE_MASK                   0x1
+#define QED_RDMA_DEV_CAP_LOCAL_INV_FENCE_SHIFT          15
+       /* Ability to support loopback on QP */
+#define QED_RDMA_DEV_CAP_LB_INDICATOR_MASK                      0x1
+#define QED_RDMA_DEV_CAP_LB_INDICATOR_SHIFT                     16
+       u64 page_size_caps;
+       u8 dev_ack_delay;
+       u32 reserved_lkey;
+       u32 bad_pkey_counter;
+       struct qed_rdma_events events;
+};
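
The dev_caps word packs one capability per bit using the MASK/SHIFT pairs
above. A small illustrative helper (not part of this header) for testing a
single bit:

/* e.g. QED_RDMA_DEV_CAP_GET(dev->dev_caps, ATOMIC_OP) */
#define QED_RDMA_DEV_CAP_GET(caps, cap)				\
	(((caps) >> QED_RDMA_DEV_CAP_##cap##_SHIFT) &		\
	 QED_RDMA_DEV_CAP_##cap##_MASK)
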
+
+enum qed_port_state {
+       QED_RDMA_PORT_UP,
+       QED_RDMA_PORT_DOWN,
+};
+
+enum qed_roce_capability {
+       QED_ROCE_V1 = 1 << 0,
+       QED_ROCE_V2 = 1 << 1,
+};
+
+struct qed_rdma_port {
+       enum qed_port_state port_state;
+       int link_speed;
+       u64 max_msg_size;
+       u8 source_gid_table_len;
+       void *source_gid_table_ptr;
+       u8 pkey_table_len;
+       void *pkey_table_ptr;
+       u32 pkey_bad_counter;
+       enum qed_roce_capability capability;
+};
+
+struct qed_rdma_cnq_params {
+       u8 num_pbl_pages;
+       u64 pbl_ptr;
+};
+
+/* The CQ Mode affects the CQ doorbell transaction size.
+ * 64/32-bit machines should configure to 32/16 bits, respectively.
+ */
+enum qed_rdma_cq_mode {
+       QED_RDMA_CQ_MODE_16_BITS,
+       QED_RDMA_CQ_MODE_32_BITS,
+};
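
Following the comment above, a consumer would pick the mode from the machine
word size; a one-line sketch (illustrative only, using the kernel's
BITS_PER_LONG):

static inline enum qed_rdma_cq_mode example_pick_cq_mode(void)
{
	return (BITS_PER_LONG == 64) ? QED_RDMA_CQ_MODE_32_BITS
				     : QED_RDMA_CQ_MODE_16_BITS;
}
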
+
+struct qed_roce_dcqcn_params {
+       u8 notification_point;
+       u8 reaction_point;
+
+       /* fields for notification point */
+       u32 cnp_send_timeout;
+
+       /* fields for reaction point */
+       u32 rl_bc_rate;
+       u16 rl_max_rate;
+       u16 rl_r_ai;
+       u16 rl_r_hai;
+       u16 dcqcn_g;
+       u32 dcqcn_k_us;
+       u32 dcqcn_timeout_us;
+};
+
+struct qed_rdma_start_in_params {
+       struct qed_rdma_events *events;
+       struct qed_rdma_cnq_params cnq_pbl_list[128];
+       u8 desired_cnq;
+       enum qed_rdma_cq_mode cq_mode;
+       struct qed_roce_dcqcn_params dcqcn_params;
+       u16 max_mtu;
+       u8 mac_addr[ETH_ALEN];
+       u8 iwarp_flags;
+};
+
+struct qed_rdma_add_user_out_params {
+       u16 dpi;
+       u64 dpi_addr;
+       u64 dpi_phys_addr;
+       u32 dpi_size;
+};
+
+enum roce_mode {
+       ROCE_V1,
+       ROCE_V2_IPV4,
+       ROCE_V2_IPV6,
+       MAX_ROCE_MODE
+};
+
+union qed_gid {
+       u8 bytes[16];
+       u16 words[8];
+       u32 dwords[4];
+       u64 qwords[2];
+       u32 ipv4_addr;
+};
+
+struct qed_rdma_register_tid_in_params {
+       u32 itid;
+       enum qed_rdma_tid_type tid_type;
+       u8 key;
+       u16 pd;
+       bool local_read;
+       bool local_write;
+       bool remote_read;
+       bool remote_write;
+       bool remote_atomic;
+       bool mw_bind;
+       u64 pbl_ptr;
+       bool pbl_two_level;
+       u8 pbl_page_size_log;
+       u8 page_size_log;
+       u32 fbo;
+       u64 length;
+       u64 vaddr;
+       bool zbva;
+       bool phy_mr;
+       bool dma_mr;
+
+       bool dif_enabled;
+       u64 dif_error_addr;
+       u64 dif_runt_addr;
+};
+
+struct qed_rdma_create_cq_in_params {
+       u32 cq_handle_lo;
+       u32 cq_handle_hi;
+       u32 cq_size;
+       u16 dpi;
+       bool pbl_two_level;
+       u64 pbl_ptr;
+       u16 pbl_num_pages;
+       u8 pbl_page_size_log;
+       u8 cnq_id;
+       u16 int_timeout;
+};
+
+struct qed_rdma_create_srq_in_params {
+       u64 pbl_base_addr;
+       u64 prod_pair_addr;
+       u16 num_pages;
+       u16 pd_id;
+       u16 page_size;
+};
+
+struct qed_rdma_destroy_cq_in_params {
+       u16 icid;
+};
+
+struct qed_rdma_destroy_cq_out_params {
+       u16 num_cq_notif;
+};
+
+struct qed_rdma_create_qp_in_params {
+       u32 qp_handle_lo;
+       u32 qp_handle_hi;
+       u32 qp_handle_async_lo;
+       u32 qp_handle_async_hi;
+       bool use_srq;
+       bool signal_all;
+       bool fmr_and_reserved_lkey;
+       u16 pd;
+       u16 dpi;
+       u16 sq_cq_id;
+       u16 sq_num_pages;
+       u64 sq_pbl_ptr;
+       u8 max_sq_sges;
+       u16 rq_cq_id;
+       u16 rq_num_pages;
+       u64 rq_pbl_ptr;
+       u16 srq_id;
+       u8 stats_queue;
+};
+
+struct qed_rdma_create_qp_out_params {
+       u32 qp_id;
+       u16 icid;
+       void *rq_pbl_virt;
+       dma_addr_t rq_pbl_phys;
+       void *sq_pbl_virt;
+       dma_addr_t sq_pbl_phys;
+};
+
+struct qed_rdma_modify_qp_in_params {
+       u32 modify_flags;
+#define QED_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK               0x1
+#define QED_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT              0
+#define QED_ROCE_MODIFY_QP_VALID_PKEY_MASK                    0x1
+#define QED_ROCE_MODIFY_QP_VALID_PKEY_SHIFT                   1
+#define QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_MASK             0x1
+#define QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_SHIFT            2
+#define QED_ROCE_MODIFY_QP_VALID_DEST_QP_MASK                 0x1
+#define QED_ROCE_MODIFY_QP_VALID_DEST_QP_SHIFT                3
+#define QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_MASK          0x1
+#define QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_SHIFT         4
+#define QED_ROCE_MODIFY_QP_VALID_RQ_PSN_MASK                  0x1
+#define QED_ROCE_MODIFY_QP_VALID_RQ_PSN_SHIFT                 5
+#define QED_ROCE_MODIFY_QP_VALID_SQ_PSN_MASK                  0x1
+#define QED_ROCE_MODIFY_QP_VALID_SQ_PSN_SHIFT                 6
+#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_MASK       0x1
+#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_SHIFT      7
+#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_MASK      0x1
+#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_SHIFT     8
+#define QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_MASK             0x1
+#define QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_SHIFT            9
+#define QED_ROCE_MODIFY_QP_VALID_RETRY_CNT_MASK               0x1
+#define QED_ROCE_MODIFY_QP_VALID_RETRY_CNT_SHIFT              10
+#define QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_MASK           0x1
+#define QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_SHIFT          11
+#define QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_MASK       0x1
+#define QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_SHIFT      12
+#define QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_MASK     0x1
+#define QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_SHIFT    13
+#define QED_ROCE_MODIFY_QP_VALID_ROCE_MODE_MASK               0x1
+#define QED_ROCE_MODIFY_QP_VALID_ROCE_MODE_SHIFT              14
+
+       enum qed_roce_qp_state new_state;
+       u16 pkey;
+       bool incoming_rdma_read_en;
+       bool incoming_rdma_write_en;
+       bool incoming_atomic_en;
+       bool e2e_flow_control_en;
+       u32 dest_qp;
+       bool lb_indication;
+       u16 mtu;
+       u8 traffic_class_tos;
+       u8 hop_limit_ttl;
+       u32 flow_label;
+       union qed_gid sgid;
+       union qed_gid dgid;
+       u16 udp_src_port;
+
+       u16 vlan_id;
+
+       u32 rq_psn;
+       u32 sq_psn;
+       u8 max_rd_atomic_resp;
+       u8 max_rd_atomic_req;
+       u32 ack_timeout;
+       u8 retry_cnt;
+       u8 rnr_retry_cnt;
+       u8 min_rnr_nak_timer;
+       bool sqd_async;
+       u8 remote_mac_addr[6];
+       u8 local_mac_addr[6];
+       bool use_local_mac;
+       enum roce_mode roce_mode;
+};
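
Each modifiable field above is paired with a VALID bit in modify_flags, so a
caller marks exactly the fields it is changing. An illustrative sketch for a
transition to RTS (the helper name is hypothetical):

static void example_set_rts(struct qed_rdma_modify_qp_in_params *p)
{
	memset(p, 0, sizeof(*p));
	p->new_state = QED_ROCE_QP_STATE_RTS;
	p->modify_flags |= QED_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK <<
			   QED_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT;
}
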
+
+struct qed_rdma_query_qp_out_params {
+       enum qed_roce_qp_state state;
+       u32 rq_psn;
+       u32 sq_psn;
+       bool draining;
+       u16 mtu;
+       u32 dest_qp;
+       bool incoming_rdma_read_en;
+       bool incoming_rdma_write_en;
+       bool incoming_atomic_en;
+       bool e2e_flow_control_en;
+       union qed_gid sgid;
+       union qed_gid dgid;
+       u32 flow_label;
+       u8 hop_limit_ttl;
+       u8 traffic_class_tos;
+       u32 timeout;
+       u8 rnr_retry;
+       u8 retry_cnt;
+       u8 min_rnr_nak_timer;
+       u16 pkey_index;
+       u8 max_rd_atomic;
+       u8 max_dest_rd_atomic;
+       bool sqd_async;
+};
+
+struct qed_rdma_create_srq_out_params {
+       u16 srq_id;
+};
+
+struct qed_rdma_destroy_srq_in_params {
+       u16 srq_id;
+};
+
+struct qed_rdma_modify_srq_in_params {
+       u32 wqe_limit;
+       u16 srq_id;
+};
+
+struct qed_rdma_stats_out_params {
+       u64 sent_bytes;
+       u64 sent_pkts;
+       u64 rcv_bytes;
+       u64 rcv_pkts;
+};
+
+struct qed_rdma_counters_out_params {
+       u64 pd_count;
+       u64 max_pd;
+       u64 dpi_count;
+       u64 max_dpi;
+       u64 cq_count;
+       u64 max_cq;
+       u64 qp_count;
+       u64 max_qp;
+       u64 tid_count;
+       u64 max_tid;
+};
+
+#define QED_ROCE_TX_HEAD_FAILURE        (1)
+#define QED_ROCE_TX_FRAG_FAILURE        (2)
+
+struct qed_roce_ll2_header {
+       void *vaddr;
+       dma_addr_t baddr;
+       size_t len;
+};
+
+struct qed_roce_ll2_buffer {
+       dma_addr_t baddr;
+       size_t len;
+};
+
+struct qed_roce_ll2_packet {
+       struct qed_roce_ll2_header header;
+       int n_seg;
+       struct qed_roce_ll2_buffer payload[RDMA_MAX_SGE_PER_SQ_WQE];
+       int roce_mode;
+       enum qed_roce_ll2_tx_dest tx_dest;
+};
+
+struct qed_roce_ll2_tx_params {
+       int reserved;
+};
+
+struct qed_roce_ll2_rx_params {
+       u16 vlan_id;
+       u8 smac[ETH_ALEN];
+       int rc;
+};
+
+struct qed_roce_ll2_cbs {
+       void (*tx_cb)(void *pdev, struct qed_roce_ll2_packet *pkt);
+
+       void (*rx_cb)(void *pdev, struct qed_roce_ll2_packet *pkt,
+                     struct qed_roce_ll2_rx_params *params);
+};
+
+struct qed_roce_ll2_params {
+       u16 max_rx_buffers;
+       u16 max_tx_buffers;
+       u16 mtu;
+       u8 mac_address[ETH_ALEN];
+       struct qed_roce_ll2_cbs cbs;
+       void *cb_cookie;
+};
+
+struct qed_roce_ll2_info {
+       u8 handle;
+       struct qed_roce_ll2_cbs cbs;
+       u8 mac_address[ETH_ALEN];
+       void *cb_cookie;
+
+       /* Lock to protect ll2 */
+       struct mutex lock;
+};
+
+enum qed_rdma_type {
+       QED_RDMA_TYPE_ROCE,
+};
+
+struct qed_dev_rdma_info {
+       struct qed_dev_info common;
+       enum qed_rdma_type rdma_type;
+};
+
+struct qed_rdma_ops {
+       const struct qed_common_ops *common;
+
+       int (*fill_dev_info)(struct qed_dev *cdev,
+                            struct qed_dev_rdma_info *info);
+       void *(*rdma_get_rdma_ctx)(struct qed_dev *cdev);
+
+       int (*rdma_init)(struct qed_dev *dev,
+                        struct qed_rdma_start_in_params *iparams);
+
+       int (*rdma_add_user)(void *rdma_cxt,
+                            struct qed_rdma_add_user_out_params *oparams);
+
+       void (*rdma_remove_user)(void *rdma_cxt, u16 dpi);
+       int (*rdma_stop)(void *rdma_cxt);
+       struct qed_rdma_device* (*rdma_query_device)(void *rdma_cxt);
+       struct qed_rdma_port* (*rdma_query_port)(void *rdma_cxt);
+       int (*rdma_get_start_sb)(struct qed_dev *cdev);
+       int (*rdma_get_min_cnq_msix)(struct qed_dev *cdev);
+       void (*rdma_cnq_prod_update)(void *rdma_cxt, u8 cnq_index, u16 prod);
+       int (*rdma_get_rdma_int)(struct qed_dev *cdev,
+                                struct qed_int_info *info);
+       int (*rdma_set_rdma_int)(struct qed_dev *cdev, u16 cnt);
+       int (*rdma_alloc_pd)(void *rdma_cxt, u16 *pd);
+       void (*rdma_dealloc_pd)(void *rdma_cxt, u16 pd);
+       int (*rdma_create_cq)(void *rdma_cxt,
+                             struct qed_rdma_create_cq_in_params *params,
+                             u16 *icid);
+       int (*rdma_destroy_cq)(void *rdma_cxt,
+                              struct qed_rdma_destroy_cq_in_params *iparams,
+                              struct qed_rdma_destroy_cq_out_params *oparams);
+       struct qed_rdma_qp *
+       (*rdma_create_qp)(void *rdma_cxt,
+                         struct qed_rdma_create_qp_in_params *iparams,
+                         struct qed_rdma_create_qp_out_params *oparams);
+
+       int (*rdma_modify_qp)(void *roce_cxt, struct qed_rdma_qp *qp,
+                             struct qed_rdma_modify_qp_in_params *iparams);
+
+       int (*rdma_query_qp)(void *rdma_cxt, struct qed_rdma_qp *qp,
+                            struct qed_rdma_query_qp_out_params *oparams);
+       int (*rdma_destroy_qp)(void *rdma_cxt, struct qed_rdma_qp *qp);
+       int
+       (*rdma_register_tid)(void *rdma_cxt,
+                            struct qed_rdma_register_tid_in_params *iparams);
+       int (*rdma_deregister_tid)(void *rdma_cxt, u32 itid);
+       int (*rdma_alloc_tid)(void *rdma_cxt, u32 *itid);
+       void (*rdma_free_tid)(void *rdma_cxt, u32 itid);
+       int (*roce_ll2_start)(struct qed_dev *cdev,
+                             struct qed_roce_ll2_params *params);
+       int (*roce_ll2_stop)(struct qed_dev *cdev);
+       int (*roce_ll2_tx)(struct qed_dev *cdev,
+                          struct qed_roce_ll2_packet *packet,
+                          struct qed_roce_ll2_tx_params *params);
+       int (*roce_ll2_post_rx_buffer)(struct qed_dev *cdev,
+                                      struct qed_roce_ll2_buffer *buf,
+                                      u64 cookie, u8 notify_fw);
+       int (*roce_ll2_set_mac_filter)(struct qed_dev *cdev,
+                                      u8 *old_mac_address,
+                                      u8 *new_mac_address);
+       int (*roce_ll2_stats)(struct qed_dev *cdev,
+                             struct qed_ll2_stats *stats);
+};
+
+const struct qed_rdma_ops *qed_get_rdma_ops(void);
+
+#endif
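
For orientation, a hedged sketch of the expected bring-up order through this
ops table: fill_dev_info, rdma_init, then per-function allocations against the
rdma context (example_* names are hypothetical and error unwinding is
abbreviated):

static int example_rdma_bringup(struct qed_dev *cdev,
				struct qed_rdma_start_in_params *sparams)
{
	const struct qed_rdma_ops *ops = qed_get_rdma_ops();
	struct qed_dev_rdma_info info;
	void *rdma_cxt;
	u16 pd;
	int rc;

	if (!ops)		/* assumed to be NULL when qed is absent */
		return -ENODEV;

	rc = ops->fill_dev_info(cdev, &info);
	if (rc)
		return rc;

	rc = ops->rdma_init(cdev, sparams);
	if (rc)
		return rc;

	rdma_cxt = ops->rdma_get_rdma_ctx(cdev);

	/* subsequent resources (PDs, CQs, QPs, TIDs) hang off the context */
	rc = ops->rdma_alloc_pd(rdma_cxt, &pd);
	if (rc)
		ops->rdma_stop(rdma_cxt);
	return rc;
}
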
diff --git a/include/linux/qed/qede_roce.h b/include/linux/qed/qede_roce.h
new file mode 100644 (file)
index 0000000..3b8dd55
--- /dev/null
+++ b/include/linux/qed/qede_roce.h
@@ -0,0 +1,88 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef QEDE_ROCE_H
+#define QEDE_ROCE_H
+
+struct qedr_dev;
+struct qed_dev;
+struct qede_dev;
+
+enum qede_roce_event {
+       QEDE_UP,
+       QEDE_DOWN,
+       QEDE_CHANGE_ADDR,
+       QEDE_CLOSE
+};
+
+struct qede_roce_event_work {
+       struct list_head list;
+       struct work_struct work;
+       void *ptr;
+       enum qede_roce_event event;
+};
+
+struct qedr_driver {
+       unsigned char name[32];
+
+       struct qedr_dev* (*add)(struct qed_dev *, struct pci_dev *,
+                               struct net_device *);
+
+       void (*remove)(struct qedr_dev *);
+       void (*notify)(struct qedr_dev *, enum qede_roce_event);
+};
+
+/* APIs for RoCE driver to register callback handlers,
+ * which will be invoked when device is added, removed, ifup, ifdown
+ */
+int qede_roce_register_driver(struct qedr_driver *drv);
+void qede_roce_unregister_driver(struct qedr_driver *drv);
+
+bool qede_roce_supported(struct qede_dev *dev);
+
+#if IS_ENABLED(CONFIG_QED_RDMA)
+int qede_roce_dev_add(struct qede_dev *dev);
+void qede_roce_dev_event_open(struct qede_dev *dev);
+void qede_roce_dev_event_close(struct qede_dev *dev);
+void qede_roce_dev_remove(struct qede_dev *dev);
+void qede_roce_event_changeaddr(struct qede_dev *qedr);
+#else
+static inline int qede_roce_dev_add(struct qede_dev *dev)
+{
+       return 0;
+}
+
+static inline void qede_roce_dev_event_open(struct qede_dev *dev) {}
+static inline void qede_roce_dev_event_close(struct qede_dev *dev) {}
+static inline void qede_roce_dev_remove(struct qede_dev *dev) {}
+static inline void qede_roce_event_changeaddr(struct qede_dev *qedr) {}
+#endif
+#endif
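
A minimal registration sketch against the interface above (all example_*
names are hypothetical; a real driver would allocate a qedr_dev in .add and
tear it down in .remove):

static struct qedr_dev *example_add(struct qed_dev *cdev,
				    struct pci_dev *pdev,
				    struct net_device *ndev)
{
	/* allocate and initialize the RoCE device here */
	return NULL;
}

static void example_remove(struct qedr_dev *dev)
{
}

static void example_notify(struct qedr_dev *dev, enum qede_roce_event event)
{
	/* react to QEDE_UP / QEDE_DOWN / QEDE_CHANGE_ADDR / QEDE_CLOSE */
}

static struct qedr_driver example_drv = {
	.name	= "example_qedr",
	.add	= example_add,
	.remove	= example_remove,
	.notify	= example_notify,
};

/* at module init/exit:
 *	qede_roce_register_driver(&example_drv);
 *	qede_roce_unregister_driver(&example_drv);
 */
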
index 187991c1f43977ffea54428e4d4922f882039dc3..72c770f9f6669a5169f1780f8cae524bc6c3e0b4 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef __RDMA_COMMON__
@@ -28,6 +52,8 @@
 #define RDMA_MAX_PDS                            (64 * 1024)
 
 #define RDMA_NUM_STATISTIC_COUNTERS                     MAX_NUM_VPORTS
+#define RDMA_NUM_STATISTIC_COUNTERS_K2                  MAX_NUM_VPORTS_K2
+#define RDMA_NUM_STATISTIC_COUNTERS_BB                  MAX_NUM_VPORTS_BB
 
 #define RDMA_TASK_TYPE (PROTOCOLID_ROCE)
 
index 2eeaf3dc66464838cef8ee376be4b557e5bc1e6e..866f063026dedc6540d87d595bcacf9071f14681 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef __ROCE_COMMON__
 
 #define ROCE_MAX_QPS   (32 * 1024)
 
+enum roce_async_events_type {
+       ROCE_ASYNC_EVENT_NONE = 0,
+       ROCE_ASYNC_EVENT_COMM_EST = 1,
+       ROCE_ASYNC_EVENT_SQ_DRAINED,
+       ROCE_ASYNC_EVENT_SRQ_LIMIT,
+       ROCE_ASYNC_EVENT_LAST_WQE_REACHED,
+       ROCE_ASYNC_EVENT_CQ_ERR,
+       ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR,
+       ROCE_ASYNC_EVENT_LOCAL_CATASTROPHIC_ERR,
+       ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR,
+       ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR,
+       ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR,
+       ROCE_ASYNC_EVENT_SRQ_EMPTY,
+       ROCE_ASYNC_EVENT_DESTROY_QP_DONE,
+       MAX_ROCE_ASYNC_EVENTS_TYPE
+};
+
 #endif /* __ROCE_COMMON__ */
index 3b8e1efd9bc2c47efb2158ebae5d21575b372b20..08df82a096b62de80e51f34122f4809f0eb6d27e 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef __STORAGE_COMMON__
@@ -16,6 +40,8 @@
 #define BDQ_ID_IMM_DATA          (1)
 #define BDQ_NUM_IDS          (2)
 
+#define SCSI_NUM_SGES_SLOW_SGL_THR      8
+
 #define BDQ_MAX_EXTERNAL_RING_SIZE (1 << 15)
 
 struct scsi_bd {
@@ -28,6 +54,16 @@ struct scsi_bdq_ram_drv_data {
        __le16 reserved0[3];
 };
 
+struct scsi_sge {
+       struct regpair sge_addr;
+       __le32 sge_len;
+       __le32 reserved;
+};
+
+struct scsi_cached_sges {
+       struct scsi_sge sge[4];
+};
+
 struct scsi_drv_cmdq {
        __le16 cmdq_cons;
        __le16 reserved0;
@@ -75,11 +111,19 @@ struct scsi_ram_per_bdq_resource_drv_data {
        struct scsi_bdq_ram_drv_data drv_data_per_bdq_id[BDQ_NUM_IDS];
 };
 
-struct scsi_sge {
-       struct regpair sge_addr;
-       __le16 sge_len;
-       __le16 reserved0;
-       __le32 reserved1;
+enum scsi_sgl_mode {
+       SCSI_TX_SLOW_SGL,
+       SCSI_FAST_SGL,
+       MAX_SCSI_SGL_MODE
+};
+
+struct scsi_sgl_params {
+       struct regpair sgl_addr;
+       __le32 sgl_total_length;
+       __le32 sge_offset;
+       __le16 sgl_num_sges;
+       u8 sgl_index;
+       u8 reserved;
 };
 
 struct scsi_terminate_extra_params {
index accba0e6b70481c47c400b233dcbe0a35c3c7fc6..a5e843268f0e9431eacd07ad5cc74e10be690b0d 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef __TCP_COMMON__
 
 #define TCP_INVALID_TIMEOUT_VAL -1
 
+struct ooo_opaque {
+       __le32 cid;
+       u8 drop_isle;
+       u8 drop_size;
+       u8 ooo_opcode;
+       u8 ooo_isle;
+};
+
 enum tcp_connect_mode {
        TCP_CONNECT_ACTIVE,
        TCP_CONNECT_PASSIVE,
@@ -18,14 +50,10 @@ enum tcp_connect_mode {
 };
 
 struct tcp_init_params {
-       __le32 max_cwnd;
-       __le16 dup_ack_threshold;
+       __le32 two_msl_timer;
        __le16 tx_sws_timer;
-       __le16 min_rto;
-       __le16 min_rto_rt;
-       __le16 max_rto;
        u8 maxfinrt;
-       u8 reserved[1];
+       u8 reserved[9];
 };
 
 enum tcp_ip_version {
@@ -145,6 +173,7 @@ enum tcp_seg_placement_event {
        TCP_EVENT_ADD_ISLE_RIGHT,
        TCP_EVENT_ADD_ISLE_LEFT,
        TCP_EVENT_JOIN,
+       TCP_EVENT_DELETE_ISLES,
        TCP_EVENT_NOP,
        MAX_TCP_SEG_PLACEMENT_EVENT
 };
index 4edb0f2b4f9f73fcab7c85081cee0729e6c231a3..5a5eef8e38042d38d4ff59b588358cfedc1cdb3c 100644 (file)
@@ -7,3 +7,4 @@ header-y += rdma_netlink.h
 header-y += rdma_user_cm.h
 header-y += hfi/
 header-y += rdma_user_rxe.h
+header-y += qedr-abi.h
diff --git a/include/uapi/rdma/qedr-abi.h b/include/uapi/rdma/qedr-abi.h
new file mode 100644 (file)
index 0000000..75c270d
--- /dev/null
+++ b/include/uapi/rdma/qedr-abi.h
@@ -0,0 +1,106 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __QEDR_USER_H__
+#define __QEDR_USER_H__
+
+#include <linux/types.h>
+
+#define QEDR_ABI_VERSION               (8)
+
+/* user/kernel communication data structures */
+
+struct qedr_alloc_ucontext_resp {
+       __u64 db_pa;
+       __u32 db_size;
+
+       __u32 max_send_wr;
+       __u32 max_recv_wr;
+       __u32 max_srq_wr;
+       __u32 sges_per_send_wr;
+       __u32 sges_per_recv_wr;
+       __u32 sges_per_srq_wr;
+       __u32 max_cqes;
+};
+
+struct qedr_alloc_pd_ureq {
+       __u64 rsvd1;
+};
+
+struct qedr_alloc_pd_uresp {
+       __u32 pd_id;
+};
+
+struct qedr_create_cq_ureq {
+       __u64 addr;
+       __u64 len;
+};
+
+struct qedr_create_cq_uresp {
+       __u32 db_offset;
+       __u16 icid;
+};
+
+struct qedr_create_qp_ureq {
+       __u32 qp_handle_hi;
+       __u32 qp_handle_lo;
+
+       /* SQ */
+       /* user space virtual address of SQ buffer */
+       __u64 sq_addr;
+
+       /* length of SQ buffer */
+       __u64 sq_len;
+
+       /* RQ */
+       /* user space virtual address of RQ buffer */
+       __u64 rq_addr;
+
+       /* length of RQ buffer */
+       __u64 rq_len;
+};
+
+struct qedr_create_qp_uresp {
+       __u32 qp_id;
+       __u32 atomic_supported;
+
+       /* SQ */
+       __u32 sq_db_offset;
+       __u16 sq_icid;
+
+       /* RQ */
+       __u32 rq_db_offset;
+       __u16 rq_icid;
+
+       __u32 rq_db2_offset;
+};
+
+#endif /* __QEDR_USER_H__ */
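
These uresp structures travel from kernel to user space through the verbs
udata channel. A hedged sketch of the kernel side for create_cq (the helper
name is hypothetical and include paths may differ):

#include <rdma/ib_verbs.h>

static int example_copy_cq_uresp(struct ib_udata *udata,
				 u32 db_offset, u16 icid)
{
	struct qedr_create_cq_uresp uresp = {
		.db_offset = db_offset,
		.icid = icid,
	};

	return ib_copy_to_udata(udata, &uresp, sizeof(uresp));
}
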